Linux Audio

Check our new training course

Loading...
v3.5.6
   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		Implementation of the Transmission Control Protocol(TCP).
   7 *
   8 * Authors:	Ross Biro
   9 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  10 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
  11 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
  12 *		Florian La Roche, <flla@stud.uni-sb.de>
  13 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
  14 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
  15 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  16 *		Matthew Dillon, <dillon@apollo.west.oic.com>
  17 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  18 *		Jorge Cwik, <jorge@laser.satlink.net>
  19 */
  20
  21/*
  22 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
  23 *				:	Fragmentation on mtu decrease
  24 *				:	Segment collapse on retransmit
  25 *				:	AF independence
  26 *
  27 *		Linus Torvalds	:	send_delayed_ack
  28 *		David S. Miller	:	Charge memory using the right skb
  29 *					during syn/ack processing.
  30 *		David S. Miller :	Output engine completely rewritten.
  31 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
  32 *		Cacophonix Gaul :	draft-minshall-nagle-01
  33 *		J Hadi Salim	:	ECN support
  34 *
  35 */
  36
  37#define pr_fmt(fmt) "TCP: " fmt
  38
  39#include <net/tcp.h>
  40
  41#include <linux/compiler.h>
  42#include <linux/gfp.h>
  43#include <linux/module.h>
  44
  45/* People can turn this off for buggy TCP's found in printers etc. */
  46int sysctl_tcp_retrans_collapse __read_mostly = 1;
  47
  48/* People can turn this on to work with those rare, broken TCPs that
  49 * interpret the window field as a signed quantity.
  50 */
  51int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
  52
 
 
 
  53/* This limits the percentage of the congestion window which we
  54 * will allow a single TSO frame to consume.  Building TSO frames
  55 * which are too large can cause TCP streams to be bursty.
  56 */
  57int sysctl_tcp_tso_win_divisor __read_mostly = 3;
  58
  59int sysctl_tcp_mtu_probing __read_mostly = 0;
  60int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;
  61
  62/* By default, RFC2861 behavior.  */
  63int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
  64
  65int sysctl_tcp_cookie_size __read_mostly = 0; /* TCP_COOKIE_MAX */
  66EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size);
  67
 
 
  68
  69/* Account for new data that has been sent to the network. */
  70static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
  71{
 
  72	struct tcp_sock *tp = tcp_sk(sk);
  73	unsigned int prior_packets = tp->packets_out;
  74
  75	tcp_advance_send_head(sk, skb);
  76	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
  77
  78	/* Don't override Nagle indefinitely with F-RTO */
  79	if (tp->frto_counter == 2)
  80		tp->frto_counter = 3;
  81
  82	tp->packets_out += tcp_skb_pcount(skb);
  83	if (!prior_packets || tp->early_retrans_delayed)
 
  84		tcp_rearm_rto(sk);
 
 
 
 
  85}
  86
  87/* SND.NXT, if window was not shrunk.
  88 * If window has been shrunk, what should we make? It is not clear at all.
  89 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
  90 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
  91 * invalid. OK, let's make this for now:
  92 */
  93static inline __u32 tcp_acceptable_seq(const struct sock *sk)
  94{
  95	const struct tcp_sock *tp = tcp_sk(sk);
  96
  97	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
  98		return tp->snd_nxt;
  99	else
 100		return tcp_wnd_end(tp);
 101}
 102
 103/* Calculate mss to advertise in SYN segment.
 104 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 105 *
 106 * 1. It is independent of path mtu.
 107 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 108 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 109 *    attached devices, because some buggy hosts are confused by
 110 *    large MSS.
 111 * 4. We do not make 3, we advertise MSS, calculated from first
 112 *    hop device mtu, but allow to raise it to ip_rt_min_advmss.
 113 *    This may be overridden via information stored in routing table.
 114 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 115 *    probably even Jumbo".
 116 */
 117static __u16 tcp_advertise_mss(struct sock *sk)
 118{
 119	struct tcp_sock *tp = tcp_sk(sk);
 120	const struct dst_entry *dst = __sk_dst_get(sk);
 121	int mss = tp->advmss;
 122
 123	if (dst) {
 124		unsigned int metric = dst_metric_advmss(dst);
 125
 126		if (metric < mss) {
 127			mss = metric;
 128			tp->advmss = mss;
 129		}
 130	}
 131
 132	return (__u16)mss;
 133}
 134
 135/* RFC2861. Reset CWND after idle period longer RTO to "restart window".
 136 * This is the first part of cwnd validation mechanism. */
 137static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
 138{
 139	struct tcp_sock *tp = tcp_sk(sk);
 140	s32 delta = tcp_time_stamp - tp->lsndtime;
 141	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
 142	u32 cwnd = tp->snd_cwnd;
 143
 144	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
 145
 146	tp->snd_ssthresh = tcp_current_ssthresh(sk);
 147	restart_cwnd = min(restart_cwnd, cwnd);
 148
 149	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
 150		cwnd >>= 1;
 151	tp->snd_cwnd = max(cwnd, restart_cwnd);
 152	tp->snd_cwnd_stamp = tcp_time_stamp;
 153	tp->snd_cwnd_used = 0;
 154}
 155
 156/* Congestion state accounting after a packet has been sent. */
 157static void tcp_event_data_sent(struct tcp_sock *tp,
 158				struct sock *sk)
 159{
 160	struct inet_connection_sock *icsk = inet_csk(sk);
 161	const u32 now = tcp_time_stamp;
 
 162
 163	if (sysctl_tcp_slow_start_after_idle &&
 164	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
 165		tcp_cwnd_restart(sk, __sk_dst_get(sk));
 166
 167	tp->lsndtime = now;
 168
 169	/* If it is a reply for ato after last received
 170	 * packet, enter pingpong mode.
 171	 */
 172	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
 173		icsk->icsk_ack.pingpong = 1;
 
 174}
 175
 176/* Account for an ACK we sent. */
 177static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
 178{
 179	tcp_dec_quickack_mode(sk, pkts);
 180	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 181}
 182
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 183/* Determine a window scaling and initial window to offer.
 184 * Based on the assumption that the given amount of space
 185 * will be offered. Store the results in the tp structure.
 186 * NOTE: for smooth operation initial space offering should
 187 * be a multiple of mss if possible. We assume here that mss >= 1.
 188 * This MUST be enforced by all callers.
 189 */
 190void tcp_select_initial_window(int __space, __u32 mss,
 191			       __u32 *rcv_wnd, __u32 *window_clamp,
 192			       int wscale_ok, __u8 *rcv_wscale,
 193			       __u32 init_rcv_wnd)
 194{
 195	unsigned int space = (__space < 0 ? 0 : __space);
 196
 197	/* If no clamp set the clamp to the max possible scaled window */
 198	if (*window_clamp == 0)
 199		(*window_clamp) = (65535 << 14);
 200	space = min(*window_clamp, space);
 201
 202	/* Quantize space offering to a multiple of mss if possible. */
 203	if (space > mss)
 204		space = (space / mss) * mss;
 205
 206	/* NOTE: offering an initial window larger than 32767
 207	 * will break some buggy TCP stacks. If the admin tells us
 208	 * it is likely we could be speaking with such a buggy stack
 209	 * we will truncate our initial window offering to 32K-1
 210	 * unless the remote has sent us a window scaling option,
 211	 * which we interpret as a sign the remote TCP is not
 212	 * misinterpreting the window field as a signed quantity.
 213	 */
 214	if (sysctl_tcp_workaround_signed_windows)
 215		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
 216	else
 217		(*rcv_wnd) = space;
 218
 219	(*rcv_wscale) = 0;
 220	if (wscale_ok) {
 221		/* Set window scaling on max possible window
 222		 * See RFC1323 for an explanation of the limit to 14
 223		 */
 224		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
 225		space = min_t(u32, space, *window_clamp);
 226		while (space > 65535 && (*rcv_wscale) < 14) {
 227			space >>= 1;
 228			(*rcv_wscale)++;
 229		}
 230	}
 231
 232	/* Set initial window to a value enough for senders starting with
 233	 * initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place
 234	 * a limit on the initial window when mss is larger than 1460.
 235	 */
 236	if (mss > (1 << *rcv_wscale)) {
 237		int init_cwnd = TCP_DEFAULT_INIT_RCVWND;
 238		if (mss > 1460)
 239			init_cwnd =
 240			max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
 241		/* when initializing use the value from init_rcv_wnd
 242		 * rather than the default from above
 243		 */
 244		if (init_rcv_wnd)
 245			*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
 246		else
 247			*rcv_wnd = min(*rcv_wnd, init_cwnd * mss);
 248	}
 249
 250	/* Set the clamp no higher than max representable value */
 251	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
 252}
 253EXPORT_SYMBOL(tcp_select_initial_window);
 254
 255/* Chose a new window to advertise, update state in tcp_sock for the
 256 * socket, and return result with RFC1323 scaling applied.  The return
 257 * value can be stuffed directly into th->window for an outgoing
 258 * frame.
 259 */
 260static u16 tcp_select_window(struct sock *sk)
 261{
 262	struct tcp_sock *tp = tcp_sk(sk);
 
 263	u32 cur_win = tcp_receive_window(tp);
 264	u32 new_win = __tcp_select_window(sk);
 265
 266	/* Never shrink the offered window */
 267	if (new_win < cur_win) {
 268		/* Danger Will Robinson!
 269		 * Don't update rcv_wup/rcv_wnd here or else
 270		 * we will not be able to advertise a zero
 271		 * window in time.  --DaveM
 272		 *
 273		 * Relax Will Robinson.
 274		 */
 
 
 
 275		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
 276	}
 277	tp->rcv_wnd = new_win;
 278	tp->rcv_wup = tp->rcv_nxt;
 279
 280	/* Make sure we do not exceed the maximum possible
 281	 * scaled window.
 282	 */
 283	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
 284		new_win = min(new_win, MAX_TCP_WINDOW);
 285	else
 286		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
 287
 288	/* RFC1323 scaling applied */
 289	new_win >>= tp->rx_opt.rcv_wscale;
 290
 291	/* If we advertise zero window, disable fast path. */
 292	if (new_win == 0)
 293		tp->pred_flags = 0;
 
 
 
 
 
 
 294
 295	return new_win;
 296}
 297
 298/* Packet ECN state for a SYN-ACK */
 299static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)
 300{
 301	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
 302	if (!(tp->ecn_flags & TCP_ECN_OK))
 303		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
 304}
 305
 306/* Packet ECN state for a SYN.  */
 307static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
 308{
 309	struct tcp_sock *tp = tcp_sk(sk);
 310
 311	tp->ecn_flags = 0;
 312	if (sysctl_tcp_ecn == 1) {
 313		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
 314		tp->ecn_flags = TCP_ECN_OK;
 315	}
 316}
 317
 318static __inline__ void
 319TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th)
 320{
 321	if (inet_rsk(req)->ecn_ok)
 322		th->ece = 1;
 323}
 324
 325/* Set up ECN state for a packet on a ESTABLISHED socket that is about to
 326 * be sent.
 327 */
 328static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
 329				int tcp_header_len)
 330{
 331	struct tcp_sock *tp = tcp_sk(sk);
 332
 333	if (tp->ecn_flags & TCP_ECN_OK) {
 334		/* Not-retransmitted data segment: set ECT and inject CWR. */
 335		if (skb->len != tcp_header_len &&
 336		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
 337			INET_ECN_xmit(sk);
 338			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
 339				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
 340				tcp_hdr(skb)->cwr = 1;
 341				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 342			}
 343		} else {
 344			/* ACK or retransmitted segment: clear ECT|CE */
 345			INET_ECN_dontxmit(sk);
 346		}
 347		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
 348			tcp_hdr(skb)->ece = 1;
 349	}
 350}
 351
 352/* Constructs common control bits of non-data skb. If SYN/FIN is present,
 353 * auto increment end seqno.
 354 */
 355static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
 356{
 
 
 357	skb->ip_summed = CHECKSUM_PARTIAL;
 358	skb->csum = 0;
 359
 360	TCP_SKB_CB(skb)->tcp_flags = flags;
 361	TCP_SKB_CB(skb)->sacked = 0;
 362
 363	skb_shinfo(skb)->gso_segs = 1;
 364	skb_shinfo(skb)->gso_size = 0;
 365	skb_shinfo(skb)->gso_type = 0;
 366
 367	TCP_SKB_CB(skb)->seq = seq;
 368	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
 369		seq++;
 370	TCP_SKB_CB(skb)->end_seq = seq;
 371}
 372
 373static inline bool tcp_urg_mode(const struct tcp_sock *tp)
 374{
 375	return tp->snd_una != tp->snd_up;
 376}
 377
 378#define OPTION_SACK_ADVERTISE	(1 << 0)
 379#define OPTION_TS		(1 << 1)
 380#define OPTION_MD5		(1 << 2)
 381#define OPTION_WSCALE		(1 << 3)
 382#define OPTION_COOKIE_EXTENSION	(1 << 4)
 383
 384struct tcp_out_options {
 385	u8 options;		/* bit field of OPTION_* */
 
 386	u8 ws;			/* window scale, 0 to disable */
 387	u8 num_sack_blocks;	/* number of SACK blocks to include */
 388	u8 hash_size;		/* bytes in hash_location */
 389	u16 mss;		/* 0 to disable */
 390	__u32 tsval, tsecr;	/* need to include OPTION_TS */
 391	__u8 *hash_location;	/* temporary pointer, overloaded */
 
 
 392};
 393
 394/* The sysctl int routines are generic, so check consistency here.
 395 */
 396static u8 tcp_cookie_size_check(u8 desired)
 397{
 398	int cookie_size;
 399
 400	if (desired > 0)
 401		/* previously specified */
 402		return desired;
 403
 404	cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size);
 405	if (cookie_size <= 0)
 406		/* no default specified */
 407		return 0;
 408
 409	if (cookie_size <= TCP_COOKIE_MIN)
 410		/* value too small, specify minimum */
 411		return TCP_COOKIE_MIN;
 412
 413	if (cookie_size >= TCP_COOKIE_MAX)
 414		/* value too large, specify maximum */
 415		return TCP_COOKIE_MAX;
 416
 417	if (cookie_size & 1)
 418		/* 8-bit multiple, illegal, fix it */
 419		cookie_size++;
 420
 421	return (u8)cookie_size;
 422}
 423
 424/* Write previously computed TCP options to the packet.
 425 *
 426 * Beware: Something in the Internet is very sensitive to the ordering of
 427 * TCP options, we learned this through the hard way, so be careful here.
 428 * Luckily we can at least blame others for their non-compliance but from
 429 * inter-operatibility perspective it seems that we're somewhat stuck with
 430 * the ordering which we have been using if we want to keep working with
 431 * those broken things (not that it currently hurts anybody as there isn't
 432 * particular reason why the ordering would need to be changed).
 433 *
 434 * At least SACK_PERM as the first option is known to lead to a disaster
 435 * (but it may well be that other scenarios fail similarly).
 436 */
 437static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
 438			      struct tcp_out_options *opts)
 439{
 440	u8 options = opts->options;	/* mungable copy */
 441
 442	/* Having both authentication and cookies for security is redundant,
 443	 * and there's certainly not enough room.  Instead, the cookie-less
 444	 * extension variant is proposed.
 445	 *
 446	 * Consider the pessimal case with authentication.  The options
 447	 * could look like:
 448	 *   COOKIE|MD5(20) + MSS(4) + SACK|TS(12) + WSCALE(4) == 40
 449	 */
 450	if (unlikely(OPTION_MD5 & options)) {
 451		if (unlikely(OPTION_COOKIE_EXTENSION & options)) {
 452			*ptr++ = htonl((TCPOPT_COOKIE << 24) |
 453				       (TCPOLEN_COOKIE_BASE << 16) |
 454				       (TCPOPT_MD5SIG << 8) |
 455				       TCPOLEN_MD5SIG);
 456		} else {
 457			*ptr++ = htonl((TCPOPT_NOP << 24) |
 458				       (TCPOPT_NOP << 16) |
 459				       (TCPOPT_MD5SIG << 8) |
 460				       TCPOLEN_MD5SIG);
 461		}
 462		options &= ~OPTION_COOKIE_EXTENSION;
 463		/* overload cookie hash location */
 464		opts->hash_location = (__u8 *)ptr;
 465		ptr += 4;
 466	}
 467
 468	if (unlikely(opts->mss)) {
 469		*ptr++ = htonl((TCPOPT_MSS << 24) |
 470			       (TCPOLEN_MSS << 16) |
 471			       opts->mss);
 472	}
 473
 474	if (likely(OPTION_TS & options)) {
 475		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
 476			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
 477				       (TCPOLEN_SACK_PERM << 16) |
 478				       (TCPOPT_TIMESTAMP << 8) |
 479				       TCPOLEN_TIMESTAMP);
 480			options &= ~OPTION_SACK_ADVERTISE;
 481		} else {
 482			*ptr++ = htonl((TCPOPT_NOP << 24) |
 483				       (TCPOPT_NOP << 16) |
 484				       (TCPOPT_TIMESTAMP << 8) |
 485				       TCPOLEN_TIMESTAMP);
 486		}
 487		*ptr++ = htonl(opts->tsval);
 488		*ptr++ = htonl(opts->tsecr);
 489	}
 490
 491	/* Specification requires after timestamp, so do it now.
 492	 *
 493	 * Consider the pessimal case without authentication.  The options
 494	 * could look like:
 495	 *   MSS(4) + SACK|TS(12) + COOKIE(20) + WSCALE(4) == 40
 496	 */
 497	if (unlikely(OPTION_COOKIE_EXTENSION & options)) {
 498		__u8 *cookie_copy = opts->hash_location;
 499		u8 cookie_size = opts->hash_size;
 500
 501		/* 8-bit multiple handled in tcp_cookie_size_check() above,
 502		 * and elsewhere.
 503		 */
 504		if (0x2 & cookie_size) {
 505			__u8 *p = (__u8 *)ptr;
 506
 507			/* 16-bit multiple */
 508			*p++ = TCPOPT_COOKIE;
 509			*p++ = TCPOLEN_COOKIE_BASE + cookie_size;
 510			*p++ = *cookie_copy++;
 511			*p++ = *cookie_copy++;
 512			ptr++;
 513			cookie_size -= 2;
 514		} else {
 515			/* 32-bit multiple */
 516			*ptr++ = htonl(((TCPOPT_NOP << 24) |
 517					(TCPOPT_NOP << 16) |
 518					(TCPOPT_COOKIE << 8) |
 519					TCPOLEN_COOKIE_BASE) +
 520				       cookie_size);
 521		}
 522
 523		if (cookie_size > 0) {
 524			memcpy(ptr, cookie_copy, cookie_size);
 525			ptr += (cookie_size / 4);
 526		}
 527	}
 528
 529	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
 530		*ptr++ = htonl((TCPOPT_NOP << 24) |
 531			       (TCPOPT_NOP << 16) |
 532			       (TCPOPT_SACK_PERM << 8) |
 533			       TCPOLEN_SACK_PERM);
 534	}
 535
 536	if (unlikely(OPTION_WSCALE & options)) {
 537		*ptr++ = htonl((TCPOPT_NOP << 24) |
 538			       (TCPOPT_WINDOW << 16) |
 539			       (TCPOLEN_WINDOW << 8) |
 540			       opts->ws);
 541	}
 542
 543	if (unlikely(opts->num_sack_blocks)) {
 544		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
 545			tp->duplicate_sack : tp->selective_acks;
 546		int this_sack;
 547
 548		*ptr++ = htonl((TCPOPT_NOP  << 24) |
 549			       (TCPOPT_NOP  << 16) |
 550			       (TCPOPT_SACK <<  8) |
 551			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
 552						     TCPOLEN_SACK_PERBLOCK)));
 553
 554		for (this_sack = 0; this_sack < opts->num_sack_blocks;
 555		     ++this_sack) {
 556			*ptr++ = htonl(sp[this_sack].start_seq);
 557			*ptr++ = htonl(sp[this_sack].end_seq);
 558		}
 559
 560		tp->rx_opt.dsack = 0;
 561	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 562}
 563
 564/* Compute TCP options for SYN packets. This is not the final
 565 * network wire format yet.
 566 */
 567static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 568				struct tcp_out_options *opts,
 569				struct tcp_md5sig_key **md5)
 570{
 571	struct tcp_sock *tp = tcp_sk(sk);
 572	struct tcp_cookie_values *cvp = tp->cookie_values;
 573	unsigned int remaining = MAX_TCP_OPTION_SPACE;
 574	u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
 575			 tcp_cookie_size_check(cvp->cookie_desired) :
 576			 0;
 577
 578#ifdef CONFIG_TCP_MD5SIG
 579	*md5 = tp->af_specific->md5_lookup(sk, sk);
 580	if (*md5) {
 581		opts->options |= OPTION_MD5;
 582		remaining -= TCPOLEN_MD5SIG_ALIGNED;
 583	}
 584#else
 585	*md5 = NULL;
 586#endif
 587
 588	/* We always get an MSS option.  The option bytes which will be seen in
 589	 * normal data packets should timestamps be used, must be in the MSS
 590	 * advertised.  But we subtract them from tp->mss_cache so that
 591	 * calculations in tcp_sendmsg are simpler etc.  So account for this
 592	 * fact here if necessary.  If we don't do this correctly, as a
 593	 * receiver we won't recognize data packets as being full sized when we
 594	 * should, and thus we won't abide by the delayed ACK rules correctly.
 595	 * SACKs don't matter, we never delay an ACK when we have any of those
 596	 * going out.  */
 597	opts->mss = tcp_advertise_mss(sk);
 598	remaining -= TCPOLEN_MSS_ALIGNED;
 599
 600	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
 601		opts->options |= OPTION_TS;
 602		opts->tsval = TCP_SKB_CB(skb)->when;
 603		opts->tsecr = tp->rx_opt.ts_recent;
 604		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 605	}
 606	if (likely(sysctl_tcp_window_scaling)) {
 607		opts->ws = tp->rx_opt.rcv_wscale;
 608		opts->options |= OPTION_WSCALE;
 609		remaining -= TCPOLEN_WSCALE_ALIGNED;
 610	}
 611	if (likely(sysctl_tcp_sack)) {
 612		opts->options |= OPTION_SACK_ADVERTISE;
 613		if (unlikely(!(OPTION_TS & opts->options)))
 614			remaining -= TCPOLEN_SACKPERM_ALIGNED;
 615	}
 616
 617	/* Note that timestamps are required by the specification.
 618	 *
 619	 * Odd numbers of bytes are prohibited by the specification, ensuring
 620	 * that the cookie is 16-bit aligned, and the resulting cookie pair is
 621	 * 32-bit aligned.
 622	 */
 623	if (*md5 == NULL &&
 624	    (OPTION_TS & opts->options) &&
 625	    cookie_size > 0) {
 626		int need = TCPOLEN_COOKIE_BASE + cookie_size;
 627
 628		if (0x2 & need) {
 629			/* 32-bit multiple */
 630			need += 2; /* NOPs */
 631
 632			if (need > remaining) {
 633				/* try shrinking cookie to fit */
 634				cookie_size -= 2;
 635				need -= 4;
 636			}
 637		}
 638		while (need > remaining && TCP_COOKIE_MIN <= cookie_size) {
 639			cookie_size -= 4;
 640			need -= 4;
 641		}
 642		if (TCP_COOKIE_MIN <= cookie_size) {
 643			opts->options |= OPTION_COOKIE_EXTENSION;
 644			opts->hash_location = (__u8 *)&cvp->cookie_pair[0];
 645			opts->hash_size = cookie_size;
 646
 647			/* Remember for future incarnations. */
 648			cvp->cookie_desired = cookie_size;
 649
 650			if (cvp->cookie_desired != cvp->cookie_pair_size) {
 651				/* Currently use random bytes as a nonce,
 652				 * assuming these are completely unpredictable
 653				 * by hostile users of the same system.
 654				 */
 655				get_random_bytes(&cvp->cookie_pair[0],
 656						 cookie_size);
 657				cvp->cookie_pair_size = cookie_size;
 658			}
 659
 660			remaining -= need;
 
 661		}
 662	}
 
 663	return MAX_TCP_OPTION_SPACE - remaining;
 664}
 665
 666/* Set up TCP options for SYN-ACKs. */
 667static unsigned int tcp_synack_options(struct sock *sk,
 668				   struct request_sock *req,
 669				   unsigned int mss, struct sk_buff *skb,
 670				   struct tcp_out_options *opts,
 671				   struct tcp_md5sig_key **md5,
 672				   struct tcp_extend_values *xvp)
 673{
 674	struct inet_request_sock *ireq = inet_rsk(req);
 675	unsigned int remaining = MAX_TCP_OPTION_SPACE;
 676	u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ?
 677			 xvp->cookie_plus :
 678			 0;
 679
 680#ifdef CONFIG_TCP_MD5SIG
 681	*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
 682	if (*md5) {
 683		opts->options |= OPTION_MD5;
 684		remaining -= TCPOLEN_MD5SIG_ALIGNED;
 685
 686		/* We can't fit any SACK blocks in a packet with MD5 + TS
 687		 * options. There was discussion about disabling SACK
 688		 * rather than TS in order to fit in better with old,
 689		 * buggy kernels, but that was deemed to be unnecessary.
 690		 */
 691		ireq->tstamp_ok &= !ireq->sack_ok;
 692	}
 693#else
 694	*md5 = NULL;
 695#endif
 696
 697	/* We always send an MSS option. */
 698	opts->mss = mss;
 699	remaining -= TCPOLEN_MSS_ALIGNED;
 700
 701	if (likely(ireq->wscale_ok)) {
 702		opts->ws = ireq->rcv_wscale;
 703		opts->options |= OPTION_WSCALE;
 704		remaining -= TCPOLEN_WSCALE_ALIGNED;
 705	}
 706	if (likely(ireq->tstamp_ok)) {
 707		opts->options |= OPTION_TS;
 708		opts->tsval = TCP_SKB_CB(skb)->when;
 709		opts->tsecr = req->ts_recent;
 710		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 711	}
 712	if (likely(ireq->sack_ok)) {
 713		opts->options |= OPTION_SACK_ADVERTISE;
 714		if (unlikely(!ireq->tstamp_ok))
 715			remaining -= TCPOLEN_SACKPERM_ALIGNED;
 716	}
 717
 718	/* Similar rationale to tcp_syn_options() applies here, too.
 719	 * If the <SYN> options fit, the same options should fit now!
 720	 */
 721	if (*md5 == NULL &&
 722	    ireq->tstamp_ok &&
 723	    cookie_plus > TCPOLEN_COOKIE_BASE) {
 724		int need = cookie_plus; /* has TCPOLEN_COOKIE_BASE */
 725
 726		if (0x2 & need) {
 727			/* 32-bit multiple */
 728			need += 2; /* NOPs */
 729		}
 730		if (need <= remaining) {
 731			opts->options |= OPTION_COOKIE_EXTENSION;
 732			opts->hash_size = cookie_plus - TCPOLEN_COOKIE_BASE;
 733			remaining -= need;
 734		} else {
 735			/* There's no error return, so flag it. */
 736			xvp->cookie_out_never = 1; /* true */
 737			opts->hash_size = 0;
 738		}
 739	}
 
 740	return MAX_TCP_OPTION_SPACE - remaining;
 741}
 742
 743/* Compute TCP options for ESTABLISHED sockets. This is not the
 744 * final wire format yet.
 745 */
 746static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
 747					struct tcp_out_options *opts,
 748					struct tcp_md5sig_key **md5)
 749{
 750	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
 751	struct tcp_sock *tp = tcp_sk(sk);
 752	unsigned int size = 0;
 753	unsigned int eff_sacks;
 754
 
 
 755#ifdef CONFIG_TCP_MD5SIG
 756	*md5 = tp->af_specific->md5_lookup(sk, sk);
 757	if (unlikely(*md5)) {
 758		opts->options |= OPTION_MD5;
 759		size += TCPOLEN_MD5SIG_ALIGNED;
 760	}
 761#else
 762	*md5 = NULL;
 763#endif
 764
 765	if (likely(tp->rx_opt.tstamp_ok)) {
 766		opts->options |= OPTION_TS;
 767		opts->tsval = tcb ? tcb->when : 0;
 768		opts->tsecr = tp->rx_opt.ts_recent;
 769		size += TCPOLEN_TSTAMP_ALIGNED;
 770	}
 771
 772	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
 773	if (unlikely(eff_sacks)) {
 774		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
 775		opts->num_sack_blocks =
 776			min_t(unsigned int, eff_sacks,
 777			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
 778			      TCPOLEN_SACK_PERBLOCK);
 779		size += TCPOLEN_SACK_BASE_ALIGNED +
 780			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
 781	}
 782
 783	return size;
 784}
 785
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 786/* This routine actually transmits TCP packets queued in by
 787 * tcp_do_sendmsg().  This is used by both the initial
 788 * transmission and possible later retransmissions.
 789 * All SKB's seen here are completely headerless.  It is our
 790 * job to build the TCP header, and pass the packet down to
 791 * IP so it can do the same plus pass the packet off to the
 792 * device.
 793 *
 794 * We are working here with either a clone of the original
 795 * SKB, or a fresh unique copy made by the retransmit engine.
 796 */
 797static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 798			    gfp_t gfp_mask)
 799{
 800	const struct inet_connection_sock *icsk = inet_csk(sk);
 801	struct inet_sock *inet;
 802	struct tcp_sock *tp;
 803	struct tcp_skb_cb *tcb;
 804	struct tcp_out_options opts;
 805	unsigned int tcp_options_size, tcp_header_size;
 806	struct tcp_md5sig_key *md5;
 807	struct tcphdr *th;
 808	int err;
 809
 810	BUG_ON(!skb || !tcp_skb_pcount(skb));
 811
 812	/* If congestion control is doing timestamping, we must
 813	 * take such a timestamp before we potentially clone/copy.
 814	 */
 815	if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
 816		__net_timestamp(skb);
 
 
 
 
 817
 818	if (likely(clone_it)) {
 819		if (unlikely(skb_cloned(skb)))
 820			skb = pskb_copy(skb, gfp_mask);
 821		else
 822			skb = skb_clone(skb, gfp_mask);
 823		if (unlikely(!skb))
 824			return -ENOBUFS;
 
 
 825	}
 826
 827	inet = inet_sk(sk);
 828	tp = tcp_sk(sk);
 829	tcb = TCP_SKB_CB(skb);
 830	memset(&opts, 0, sizeof(opts));
 831
 832	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
 833		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
 834	else
 835		tcp_options_size = tcp_established_options(sk, skb, &opts,
 836							   &md5);
 837	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
 838
 839	if (tcp_packets_in_flight(tp) == 0) {
 840		tcp_ca_event(sk, CA_EVENT_TX_START);
 841		skb->ooo_okay = 1;
 842	} else
 843		skb->ooo_okay = 0;
 
 
 844
 845	skb_push(skb, tcp_header_size);
 846	skb_reset_transport_header(skb);
 847	skb_set_owner_w(skb, sk);
 
 
 
 
 848
 849	/* Build TCP header and checksum it. */
 850	th = tcp_hdr(skb);
 851	th->source		= inet->inet_sport;
 852	th->dest		= inet->inet_dport;
 853	th->seq			= htonl(tcb->seq);
 854	th->ack_seq		= htonl(tp->rcv_nxt);
 855	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
 856					tcb->tcp_flags);
 857
 858	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
 859		/* RFC1323: The window in SYN & SYN/ACK segments
 860		 * is never scaled.
 861		 */
 862		th->window	= htons(min(tp->rcv_wnd, 65535U));
 863	} else {
 864		th->window	= htons(tcp_select_window(sk));
 865	}
 866	th->check		= 0;
 867	th->urg_ptr		= 0;
 868
 869	/* The urg_mode check is necessary during a below snd_una win probe */
 870	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
 871		if (before(tp->snd_up, tcb->seq + 0x10000)) {
 872			th->urg_ptr = htons(tp->snd_up - tcb->seq);
 873			th->urg = 1;
 874		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
 875			th->urg_ptr = htons(0xFFFF);
 876			th->urg = 1;
 877		}
 878	}
 879
 880	tcp_options_write((__be32 *)(th + 1), tp, &opts);
 881	if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
 882		TCP_ECN_send(sk, skb, tcp_header_size);
 883
 884#ifdef CONFIG_TCP_MD5SIG
 885	/* Calculate the MD5 hash, as we have all we need now */
 886	if (md5) {
 887		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
 888		tp->af_specific->calc_md5_hash(opts.hash_location,
 889					       md5, sk, NULL, skb);
 890	}
 891#endif
 892
 893	icsk->icsk_af_ops->send_check(sk, skb);
 894
 895	if (likely(tcb->tcp_flags & TCPHDR_ACK))
 896		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
 897
 898	if (skb->len != tcp_header_size)
 899		tcp_event_data_sent(tp, sk);
 900
 901	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
 902		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
 903			      tcp_skb_pcount(skb));
 904
 905	err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
 906	if (likely(err <= 0))
 907		return err;
 908
 909	tcp_enter_cwr(sk, 1);
 910
 911	return net_xmit_eval(err);
 912}
 913
 914/* This routine just queues the buffer for sending.
 915 *
 916 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
 917 * otherwise socket can stall.
 918 */
 919static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 920{
 921	struct tcp_sock *tp = tcp_sk(sk);
 922
 923	/* Advance write_seq and place onto the write_queue. */
 924	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
 925	skb_header_release(skb);
 926	tcp_add_write_queue_tail(sk, skb);
 927	sk->sk_wmem_queued += skb->truesize;
 928	sk_mem_charge(sk, skb->truesize);
 929}
 930
 931/* Initialize TSO segments for a packet. */
 932static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
 933				 unsigned int mss_now)
 934{
 935	if (skb->len <= mss_now || !sk_can_gso(sk) ||
 936	    skb->ip_summed == CHECKSUM_NONE) {
 
 
 
 
 937		/* Avoid the costly divide in the normal
 938		 * non-TSO case.
 939		 */
 940		skb_shinfo(skb)->gso_segs = 1;
 941		skb_shinfo(skb)->gso_size = 0;
 942		skb_shinfo(skb)->gso_type = 0;
 943	} else {
 944		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
 945		skb_shinfo(skb)->gso_size = mss_now;
 946		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
 947	}
 948}
 949
 950/* When a modification to fackets out becomes necessary, we need to check
 951 * skb is counted to fackets_out or not.
 952 */
 953static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
 954				   int decr)
 955{
 956	struct tcp_sock *tp = tcp_sk(sk);
 957
 958	if (!tp->sacked_out || tcp_is_reno(tp))
 959		return;
 960
 961	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
 962		tp->fackets_out -= decr;
 963}
 964
 965/* Pcount in the middle of the write queue got changed, we need to do various
 966 * tweaks to fix counters
 967 */
 968static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
 969{
 970	struct tcp_sock *tp = tcp_sk(sk);
 971
 972	tp->packets_out -= decr;
 973
 974	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
 975		tp->sacked_out -= decr;
 976	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
 977		tp->retrans_out -= decr;
 978	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
 979		tp->lost_out -= decr;
 980
 981	/* Reno case is special. Sigh... */
 982	if (tcp_is_reno(tp) && decr > 0)
 983		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
 984
 985	tcp_adjust_fackets_out(sk, skb, decr);
 986
 987	if (tp->lost_skb_hint &&
 988	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
 989	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
 990		tp->lost_cnt_hint -= decr;
 991
 992	tcp_verify_left_out(tp);
 993}
 994
 995/* Function to create two new TCP segments.  Shrinks the given segment
 996 * to the specified size and appends a new segment with the rest of the
 997 * packet to the list.  This won't be called frequently, I hope.
 998 * Remember, these are still headerless SKBs at this point.
 999 */
1000int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1001		 unsigned int mss_now)
1002{
1003	struct tcp_sock *tp = tcp_sk(sk);
1004	struct sk_buff *buff;
1005	int nsize, old_factor;
1006	int nlen;
1007	u8 flags;
1008
1009	if (WARN_ON(len > skb->len))
1010		return -EINVAL;
1011
1012	nsize = skb_headlen(skb) - len;
1013	if (nsize < 0)
1014		nsize = 0;
1015
1016	if (skb_cloned(skb) &&
1017	    skb_is_nonlinear(skb) &&
1018	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
1019		return -ENOMEM;
1020
1021	/* Get a new skb... force flag on. */
1022	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
1023	if (buff == NULL)
1024		return -ENOMEM; /* We'll just try again later. */
1025
1026	sk->sk_wmem_queued += buff->truesize;
1027	sk_mem_charge(sk, buff->truesize);
1028	nlen = skb->len - len - nsize;
1029	buff->truesize += nlen;
1030	skb->truesize -= nlen;
1031
1032	/* Correct the sequence numbers. */
1033	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1034	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1035	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1036
1037	/* PSH and FIN should only be set in the second packet. */
1038	flags = TCP_SKB_CB(skb)->tcp_flags;
1039	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
1040	TCP_SKB_CB(buff)->tcp_flags = flags;
1041	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1042
1043	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
1044		/* Copy and checksum data tail into the new buffer. */
1045		buff->csum = csum_partial_copy_nocheck(skb->data + len,
1046						       skb_put(buff, nsize),
1047						       nsize, 0);
1048
1049		skb_trim(skb, len);
1050
1051		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
1052	} else {
1053		skb->ip_summed = CHECKSUM_PARTIAL;
1054		skb_split(skb, buff, len);
1055	}
1056
1057	buff->ip_summed = skb->ip_summed;
1058
1059	/* Looks stupid, but our code really uses when of
1060	 * skbs, which it never sent before. --ANK
1061	 */
1062	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
1063	buff->tstamp = skb->tstamp;
1064
1065	old_factor = tcp_skb_pcount(skb);
1066
1067	/* Fix up tso_factor for both original and new SKB.  */
1068	tcp_set_skb_tso_segs(sk, skb, mss_now);
1069	tcp_set_skb_tso_segs(sk, buff, mss_now);
1070
1071	/* If this packet has been sent out already, we must
1072	 * adjust the various packet counters.
1073	 */
1074	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
1075		int diff = old_factor - tcp_skb_pcount(skb) -
1076			tcp_skb_pcount(buff);
1077
1078		if (diff)
1079			tcp_adjust_pcount(sk, skb, diff);
1080	}
1081
1082	/* Link BUFF into the send queue. */
1083	skb_header_release(buff);
1084	tcp_insert_write_queue_after(skb, buff, sk);
1085
1086	return 0;
1087}
1088
1089/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
1090 * eventually). The difference is that pulled data not copied, but
1091 * immediately discarded.
1092 */
1093static void __pskb_trim_head(struct sk_buff *skb, int len)
1094{
 
1095	int i, k, eat;
1096
1097	eat = min_t(int, len, skb_headlen(skb));
1098	if (eat) {
1099		__skb_pull(skb, eat);
1100		skb->avail_size -= eat;
1101		len -= eat;
1102		if (!len)
1103			return;
1104	}
1105	eat = len;
1106	k = 0;
1107	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1108		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
 
1109
1110		if (size <= eat) {
1111			skb_frag_unref(skb, i);
1112			eat -= size;
1113		} else {
1114			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1115			if (eat) {
1116				skb_shinfo(skb)->frags[k].page_offset += eat;
1117				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
1118				eat = 0;
1119			}
1120			k++;
1121		}
1122	}
1123	skb_shinfo(skb)->nr_frags = k;
1124
1125	skb_reset_tail_pointer(skb);
1126	skb->data_len -= len;
1127	skb->len = skb->data_len;
1128}
1129
1130/* Remove acked data from a packet in the transmit queue. */
1131int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
1132{
1133	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
1134		return -ENOMEM;
1135
1136	__pskb_trim_head(skb, len);
1137
1138	TCP_SKB_CB(skb)->seq += len;
1139	skb->ip_summed = CHECKSUM_PARTIAL;
1140
1141	skb->truesize	     -= len;
1142	sk->sk_wmem_queued   -= len;
1143	sk_mem_uncharge(sk, len);
1144	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
1145
1146	/* Any change of skb->len requires recalculation of tso factor. */
1147	if (tcp_skb_pcount(skb) > 1)
1148		tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
1149
1150	return 0;
1151}
1152
1153/* Calculate MSS. Not accounting for SACKs here.  */
1154int tcp_mtu_to_mss(struct sock *sk, int pmtu)
1155{
1156	const struct tcp_sock *tp = tcp_sk(sk);
1157	const struct inet_connection_sock *icsk = inet_csk(sk);
1158	int mss_now;
1159
1160	/* Calculate base mss without TCP options:
1161	   It is MMS_S - sizeof(tcphdr) of rfc1122
1162	 */
1163	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
1164
1165	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
1166	if (icsk->icsk_af_ops->net_frag_header_len) {
1167		const struct dst_entry *dst = __sk_dst_get(sk);
1168
1169		if (dst && dst_allfrag(dst))
1170			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
1171	}
1172
1173	/* Clamp it (mss_clamp does not include tcp options) */
1174	if (mss_now > tp->rx_opt.mss_clamp)
1175		mss_now = tp->rx_opt.mss_clamp;
1176
1177	/* Now subtract optional transport overhead */
1178	mss_now -= icsk->icsk_ext_hdr_len;
1179
1180	/* Then reserve room for full set of TCP options and 8 bytes of data */
1181	if (mss_now < 48)
1182		mss_now = 48;
1183
1184	/* Now subtract TCP options size, not including SACKs */
1185	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);
1186
1187	return mss_now;
1188}
1189
 
 
 
 
 
 
 
 
1190/* Inverse of above */
1191int tcp_mss_to_mtu(struct sock *sk, int mss)
1192{
1193	const struct tcp_sock *tp = tcp_sk(sk);
1194	const struct inet_connection_sock *icsk = inet_csk(sk);
1195	int mtu;
1196
1197	mtu = mss +
1198	      tp->tcp_header_len +
1199	      icsk->icsk_ext_hdr_len +
1200	      icsk->icsk_af_ops->net_header_len;
1201
1202	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
1203	if (icsk->icsk_af_ops->net_frag_header_len) {
1204		const struct dst_entry *dst = __sk_dst_get(sk);
1205
1206		if (dst && dst_allfrag(dst))
1207			mtu += icsk->icsk_af_ops->net_frag_header_len;
1208	}
1209	return mtu;
1210}
1211
1212/* MTU probing init per socket */
1213void tcp_mtup_init(struct sock *sk)
1214{
1215	struct tcp_sock *tp = tcp_sk(sk);
1216	struct inet_connection_sock *icsk = inet_csk(sk);
1217
1218	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
1219	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
1220			       icsk->icsk_af_ops->net_header_len;
1221	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
1222	icsk->icsk_mtup.probe_size = 0;
1223}
1224EXPORT_SYMBOL(tcp_mtup_init);
1225
1226/* This function synchronize snd mss to current pmtu/exthdr set.
1227
1228   tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT counts
1229   for TCP options, but includes only bare TCP header.
1230
1231   tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1232   It is minimum of user_mss and mss received with SYN.
1233   It also does not include TCP options.
1234
1235   inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
1236
1237   tp->mss_cache is current effective sending mss, including
1238   all tcp options except for SACKs. It is evaluated,
1239   taking into account current pmtu, but never exceeds
1240   tp->rx_opt.mss_clamp.
1241
1242   NOTE1. rfc1122 clearly states that advertised MSS
1243   DOES NOT include either tcp or ip options.
1244
1245   NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1246   are READ ONLY outside this function.		--ANK (980731)
1247 */
1248unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
1249{
1250	struct tcp_sock *tp = tcp_sk(sk);
1251	struct inet_connection_sock *icsk = inet_csk(sk);
1252	int mss_now;
1253
1254	if (icsk->icsk_mtup.search_high > pmtu)
1255		icsk->icsk_mtup.search_high = pmtu;
1256
1257	mss_now = tcp_mtu_to_mss(sk, pmtu);
1258	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
1259
1260	/* And store cached results */
1261	icsk->icsk_pmtu_cookie = pmtu;
1262	if (icsk->icsk_mtup.enabled)
1263		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1264	tp->mss_cache = mss_now;
1265
1266	return mss_now;
1267}
1268EXPORT_SYMBOL(tcp_sync_mss);
1269
1270/* Compute the current effective MSS, taking SACKs and IP options,
1271 * and even PMTU discovery events into account.
1272 */
1273unsigned int tcp_current_mss(struct sock *sk)
1274{
1275	const struct tcp_sock *tp = tcp_sk(sk);
1276	const struct dst_entry *dst = __sk_dst_get(sk);
1277	u32 mss_now;
1278	unsigned int header_len;
1279	struct tcp_out_options opts;
1280	struct tcp_md5sig_key *md5;
1281
1282	mss_now = tp->mss_cache;
1283
1284	if (dst) {
1285		u32 mtu = dst_mtu(dst);
1286		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
1287			mss_now = tcp_sync_mss(sk, mtu);
1288	}
1289
1290	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
1291		     sizeof(struct tcphdr);
1292	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
1293	 * some common options. If this is an odd packet (because we have SACK
1294	 * blocks etc) then our calculated header_len will be different, and
1295	 * we have to adjust mss_now correspondingly */
1296	if (header_len != tp->tcp_header_len) {
1297		int delta = (int) header_len - tp->tcp_header_len;
1298		mss_now -= delta;
1299	}
1300
1301	return mss_now;
1302}
1303
1304/* Congestion window validation. (RFC2861) */
1305static void tcp_cwnd_validate(struct sock *sk)
1306{
1307	struct tcp_sock *tp = tcp_sk(sk);
1308
1309	if (tp->packets_out >= tp->snd_cwnd) {
1310		/* Network is feed fully. */
1311		tp->snd_cwnd_used = 0;
1312		tp->snd_cwnd_stamp = tcp_time_stamp;
1313	} else {
1314		/* Network starves. */
1315		if (tp->packets_out > tp->snd_cwnd_used)
1316			tp->snd_cwnd_used = tp->packets_out;
1317
1318		if (sysctl_tcp_slow_start_after_idle &&
1319		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
1320			tcp_cwnd_application_limited(sk);
1321	}
1322}
1323
1324/* Returns the portion of skb which can be sent right away without
1325 * introducing MSS oddities to segment boundaries. In rare cases where
1326 * mss_now != mss_cache, we will request caller to create a small skb
1327 * per input skb which could be mostly avoided here (if desired).
1328 *
1329 * We explicitly want to create a request for splitting write queue tail
1330 * to a small skb for Nagle purposes while avoiding unnecessary modulos,
1331 * thus all the complexity (cwnd_len is always MSS multiple which we
1332 * return whenever allowed by the other factors). Basically we need the
1333 * modulo only when the receiver window alone is the limiting factor or
1334 * when we would be allowed to send the split-due-to-Nagle skb fully.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1335 */
1336static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
1337					unsigned int mss_now, unsigned int max_segs)
 
 
 
 
 
 
 
 
 
 
 
1338{
1339	const struct tcp_sock *tp = tcp_sk(sk);
1340	u32 needed, window, max_len;
1341
1342	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1343	max_len = mss_now * max_segs;
1344
1345	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
1346		return max_len;
1347
1348	needed = min(skb->len, window);
1349
1350	if (max_len <= needed)
1351		return max_len;
1352
1353	return needed - needed % mss_now;
 
 
 
 
 
 
 
 
1354}
1355
1356/* Can at least one segment of SKB be sent right now, according to the
1357 * congestion window rules?  If so, return how many segments are allowed.
1358 */
1359static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
1360					 const struct sk_buff *skb)
1361{
1362	u32 in_flight, cwnd;
1363
1364	/* Don't be strict about the congestion window for the final FIN.  */
1365	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
1366	    tcp_skb_pcount(skb) == 1)
1367		return 1;
1368
1369	in_flight = tcp_packets_in_flight(tp);
1370	cwnd = tp->snd_cwnd;
1371	if (in_flight < cwnd)
1372		return (cwnd - in_flight);
1373
1374	return 0;
1375}
1376
1377/* Initialize TSO state of a skb.
1378 * This must be invoked the first time we consider transmitting
1379 * SKB onto the wire.
1380 */
1381static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
1382			     unsigned int mss_now)
1383{
1384	int tso_segs = tcp_skb_pcount(skb);
1385
1386	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
1387		tcp_set_skb_tso_segs(sk, skb, mss_now);
1388		tso_segs = tcp_skb_pcount(skb);
1389	}
1390	return tso_segs;
1391}
1392
1393/* Minshall's variant of the Nagle send check. */
1394static inline bool tcp_minshall_check(const struct tcp_sock *tp)
1395{
1396	return after(tp->snd_sml, tp->snd_una) &&
1397		!after(tp->snd_sml, tp->snd_nxt);
1398}
1399
1400/* Return false, if packet can be sent now without violation Nagle's rules:
1401 * 1. It is full sized.
1402 * 2. Or it contains FIN. (already checked by caller)
1403 * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1404 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1405 *    With Minshall's modification: all sent small packets are ACKed.
1406 */
1407static inline bool tcp_nagle_check(const struct tcp_sock *tp,
1408				  const struct sk_buff *skb,
1409				  unsigned int mss_now, int nonagle)
1410{
1411	return skb->len < mss_now &&
1412		((nonagle & TCP_NAGLE_CORK) ||
1413		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1414}
1415
1416/* Return true if the Nagle test allows this packet to be
1417 * sent now.
1418 */
1419static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
1420				  unsigned int cur_mss, int nonagle)
1421{
1422	/* Nagle rule does not apply to frames, which sit in the middle of the
1423	 * write_queue (they have no chances to get new data).
1424	 *
1425	 * This is implemented in the callers, where they modify the 'nonagle'
1426	 * argument based upon the location of SKB in the send queue.
1427	 */
1428	if (nonagle & TCP_NAGLE_PUSH)
1429		return true;
1430
1431	/* Don't use the nagle rule for urgent data (or for the final FIN).
1432	 * Nagle can be ignored during F-RTO too (see RFC4138).
1433	 */
1434	if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
1435	    (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1436		return true;
1437
1438	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
1439		return true;
1440
1441	return false;
1442}
1443
1444/* Does at least the first segment of SKB fit into the send window? */
1445static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
1446			     const struct sk_buff *skb,
1447			     unsigned int cur_mss)
1448{
1449	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1450
1451	if (skb->len > cur_mss)
1452		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1453
1454	return !after(end_seq, tcp_wnd_end(tp));
1455}
1456
1457/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
1458 * should be put on the wire right now.  If so, it returns the number of
1459 * packets allowed by the congestion window.
1460 */
1461static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
1462				 unsigned int cur_mss, int nonagle)
1463{
1464	const struct tcp_sock *tp = tcp_sk(sk);
1465	unsigned int cwnd_quota;
1466
1467	tcp_init_tso_segs(sk, skb, cur_mss);
1468
1469	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
1470		return 0;
1471
1472	cwnd_quota = tcp_cwnd_test(tp, skb);
1473	if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
1474		cwnd_quota = 0;
1475
1476	return cwnd_quota;
1477}
1478
1479/* Test if sending is allowed right now. */
1480bool tcp_may_send_now(struct sock *sk)
1481{
1482	const struct tcp_sock *tp = tcp_sk(sk);
1483	struct sk_buff *skb = tcp_send_head(sk);
1484
1485	return skb &&
1486		tcp_snd_test(sk, skb, tcp_current_mss(sk),
1487			     (tcp_skb_is_last(sk, skb) ?
1488			      tp->nonagle : TCP_NAGLE_PUSH));
1489}
1490
1491/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1492 * which is put after SKB on the list.  It is very much like
1493 * tcp_fragment() except that it may make several kinds of assumptions
1494 * in order to speed up the splitting operation.  In particular, we
1495 * know that all the data is in scatter-gather pages, and that the
1496 * packet has never been sent out before (and thus is not cloned).
1497 */
1498static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1499			unsigned int mss_now, gfp_t gfp)
1500{
1501	struct sk_buff *buff;
1502	int nlen = skb->len - len;
1503	u8 flags;
1504
1505	/* All of a TSO frame must be composed of paged data.  */
1506	if (skb->len != skb->data_len)
1507		return tcp_fragment(sk, skb, len, mss_now);
1508
1509	buff = sk_stream_alloc_skb(sk, 0, gfp);
1510	if (unlikely(buff == NULL))
1511		return -ENOMEM;
1512
1513	sk->sk_wmem_queued += buff->truesize;
1514	sk_mem_charge(sk, buff->truesize);
1515	buff->truesize += nlen;
1516	skb->truesize -= nlen;
1517
1518	/* Correct the sequence numbers. */
1519	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1520	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1521	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1522
1523	/* PSH and FIN should only be set in the second packet. */
1524	flags = TCP_SKB_CB(skb)->tcp_flags;
1525	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
1526	TCP_SKB_CB(buff)->tcp_flags = flags;
1527
1528	/* This packet was never sent out yet, so no SACK bits. */
1529	TCP_SKB_CB(buff)->sacked = 0;
1530
1531	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
1532	skb_split(skb, buff, len);
1533
1534	/* Fix up tso_factor for both original and new SKB.  */
1535	tcp_set_skb_tso_segs(sk, skb, mss_now);
1536	tcp_set_skb_tso_segs(sk, buff, mss_now);
1537
1538	/* Link BUFF into the send queue. */
1539	skb_header_release(buff);
1540	tcp_insert_write_queue_after(skb, buff, sk);
1541
1542	return 0;
1543}
1544
1545/* Try to defer sending, if possible, in order to minimize the amount
1546 * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1547 *
1548 * This algorithm is from John Heffner.
1549 */
1550static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1551{
1552	struct tcp_sock *tp = tcp_sk(sk);
1553	const struct inet_connection_sock *icsk = inet_csk(sk);
1554	u32 send_win, cong_win, limit, in_flight;
1555	int win_divisor;
1556
1557	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1558		goto send_now;
1559
1560	if (icsk->icsk_ca_state != TCP_CA_Open)
1561		goto send_now;
1562
1563	/* Defer for less than two clock ticks. */
1564	if (tp->tso_deferred &&
1565	    (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
1566		goto send_now;
1567
1568	in_flight = tcp_packets_in_flight(tp);
1569
1570	BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));
1571
1572	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1573
1574	/* From in_flight test above, we know that cwnd > in_flight.  */
1575	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1576
1577	limit = min(send_win, cong_win);
1578
1579	/* If a full-sized TSO skb can be sent, do it. */
1580	if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
1581			   sk->sk_gso_max_segs * tp->mss_cache))
1582		goto send_now;
1583
1584	/* Middle in queue won't get any more data, full sendable already? */
1585	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
1586		goto send_now;
1587
1588	win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
1589	if (win_divisor) {
1590		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1591
1592		/* If at least some fraction of a window is available,
1593		 * just use it.
1594		 */
1595		chunk /= win_divisor;
1596		if (limit >= chunk)
1597			goto send_now;
1598	} else {
1599		/* Different approach, try not to defer past a single
1600		 * ACK.  Receiver should ACK every other full sized
1601		 * frame, so if we have space for more than 3 frames
1602		 * then send now.
1603		 */
1604		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
1605			goto send_now;
1606	}
1607
1608	/* Ok, it looks like it is advisable to defer.  */
1609	tp->tso_deferred = 1 | (jiffies << 1);
 
 
 
1610
1611	return true;
1612
1613send_now:
1614	tp->tso_deferred = 0;
1615	return false;
1616}
1617
1618/* Create a new MTU probe if we are ready.
1619 * MTU probe is regularly attempting to increase the path MTU by
1620 * deliberately sending larger packets.  This discovers routing
1621 * changes resulting in larger path MTUs.
1622 *
1623 * Returns 0 if we should wait to probe (no cwnd available),
1624 *         1 if a probe was sent,
1625 *         -1 otherwise
1626 */
1627static int tcp_mtu_probe(struct sock *sk)
1628{
1629	struct tcp_sock *tp = tcp_sk(sk);
1630	struct inet_connection_sock *icsk = inet_csk(sk);
1631	struct sk_buff *skb, *nskb, *next;
1632	int len;
1633	int probe_size;
1634	int size_needed;
1635	int copy;
1636	int mss_now;
1637
1638	/* Not currently probing/verifying,
1639	 * not in recovery,
1640	 * have enough cwnd, and
1641	 * not SACKing (the variable headers throw things off) */
1642	if (!icsk->icsk_mtup.enabled ||
1643	    icsk->icsk_mtup.probe_size ||
1644	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
1645	    tp->snd_cwnd < 11 ||
1646	    tp->rx_opt.num_sacks || tp->rx_opt.dsack)
1647		return -1;
1648
1649	/* Very simple search strategy: just double the MSS. */
1650	mss_now = tcp_current_mss(sk);
1651	probe_size = 2 * tp->mss_cache;
1652	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
1653	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
1654		/* TODO: set timer for probe_converge_event */
1655		return -1;
1656	}
1657
1658	/* Have enough data in the send queue to probe? */
1659	if (tp->write_seq - tp->snd_nxt < size_needed)
1660		return -1;
1661
1662	if (tp->snd_wnd < size_needed)
1663		return -1;
1664	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
1665		return 0;
1666
1667	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
1668	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
1669		if (!tcp_packets_in_flight(tp))
1670			return -1;
1671		else
1672			return 0;
1673	}
1674
1675	/* We're allowed to probe.  Build it now. */
1676	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
1677		return -1;
1678	sk->sk_wmem_queued += nskb->truesize;
1679	sk_mem_charge(sk, nskb->truesize);
1680
1681	skb = tcp_send_head(sk);
1682
1683	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
1684	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
1685	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
1686	TCP_SKB_CB(nskb)->sacked = 0;
1687	nskb->csum = 0;
1688	nskb->ip_summed = skb->ip_summed;
1689
1690	tcp_insert_write_queue_before(nskb, skb, sk);
1691
1692	len = 0;
1693	tcp_for_write_queue_from_safe(skb, next, sk) {
1694		copy = min_t(int, skb->len, probe_size - len);
1695		if (nskb->ip_summed)
1696			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
1697		else
1698			nskb->csum = skb_copy_and_csum_bits(skb, 0,
1699							    skb_put(nskb, copy),
1700							    copy, nskb->csum);
1701
1702		if (skb->len <= copy) {
1703			/* We've eaten all the data from this skb.
1704			 * Throw it away. */
1705			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1706			tcp_unlink_write_queue(skb, sk);
1707			sk_wmem_free_skb(sk, skb);
1708		} else {
1709			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
1710						   ~(TCPHDR_FIN|TCPHDR_PSH);
1711			if (!skb_shinfo(skb)->nr_frags) {
1712				skb_pull(skb, copy);
1713				if (skb->ip_summed != CHECKSUM_PARTIAL)
1714					skb->csum = csum_partial(skb->data,
1715								 skb->len, 0);
1716			} else {
1717				__pskb_trim_head(skb, copy);
1718				tcp_set_skb_tso_segs(sk, skb, mss_now);
1719			}
1720			TCP_SKB_CB(skb)->seq += copy;
1721		}
1722
1723		len += copy;
1724
1725		if (len >= probe_size)
1726			break;
1727	}
1728	tcp_init_tso_segs(sk, nskb, nskb->len);
1729
1730	/* We're ready to send.  If this fails, the probe will
1731	 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
1732	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
1733	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
1734		/* Decrement cwnd here because we are sending
1735		 * effectively two packets. */
1736		tp->snd_cwnd--;
1737		tcp_event_new_data_sent(sk, nskb);
1738
1739		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
1740		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
1741		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
1742
1743		return 1;
1744	}
1745
1746	return -1;
1747}
1748
1749/* This routine writes packets to the network.  It advances the
1750 * send_head.  This happens as incoming acks open up the remote
1751 * window for us.
1752 *
1753 * LARGESEND note: !tcp_urg_mode is overkill, only frames between
1754 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
1755 * account rare use of URG, this is not a big flaw.
1756 *
 
 
 
1757 * Returns true, if no segments are in flight and we have queued segments,
1758 * but cannot send anything now because of SWS or another problem.
1759 */
1760static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1761			   int push_one, gfp_t gfp)
1762{
1763	struct tcp_sock *tp = tcp_sk(sk);
1764	struct sk_buff *skb;
1765	unsigned int tso_segs, sent_pkts;
1766	int cwnd_quota;
1767	int result;
1768
1769	sent_pkts = 0;
1770
1771	if (!push_one) {
1772		/* Do MTU probing. */
1773		result = tcp_mtu_probe(sk);
1774		if (!result) {
1775			return false;
1776		} else if (result > 0) {
1777			sent_pkts = 1;
1778		}
1779	}
1780
1781	while ((skb = tcp_send_head(sk))) {
1782		unsigned int limit;
1783
1784		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1785		BUG_ON(!tso_segs);
1786
 
 
 
1787		cwnd_quota = tcp_cwnd_test(tp, skb);
1788		if (!cwnd_quota)
1789			break;
 
 
 
 
 
1790
1791		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1792			break;
1793
1794		if (tso_segs == 1) {
1795			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1796						     (tcp_skb_is_last(sk, skb) ?
1797						      nonagle : TCP_NAGLE_PUSH))))
1798				break;
1799		} else {
1800			if (!push_one && tcp_tso_should_defer(sk, skb))
1801				break;
1802		}
1803
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1804		limit = mss_now;
1805		if (tso_segs > 1 && !tcp_urg_mode(tp))
1806			limit = tcp_mss_split_point(sk, skb, mss_now,
1807						    min_t(unsigned int,
1808							  cwnd_quota,
1809							  sk->sk_gso_max_segs));
 
1810
1811		if (skb->len > limit &&
1812		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
1813			break;
1814
1815		TCP_SKB_CB(skb)->when = tcp_time_stamp;
1816
1817		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
1818			break;
1819
 
1820		/* Advance the send_head.  This one is sent out.
1821		 * This call will increment packets_out.
1822		 */
1823		tcp_event_new_data_sent(sk, skb);
1824
1825		tcp_minshall_update(tp, mss_now, skb);
1826		sent_pkts += tcp_skb_pcount(skb);
1827
1828		if (push_one)
1829			break;
1830	}
1831	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
1832		tp->prr_out += sent_pkts;
1833
1834	if (likely(sent_pkts)) {
 
 
 
 
 
 
1835		tcp_cwnd_validate(sk);
1836		return false;
1837	}
1838	return !tp->packets_out && tcp_send_head(sk);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1839}
1840
1841/* Push out any pending frames which were held back due to
1842 * TCP_CORK or attempt at coalescing tiny packets.
1843 * The socket must be locked by the caller.
1844 */
1845void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
1846			       int nonagle)
1847{
1848	/* If we are closed, the bytes will have to remain here.
1849	 * In time closedown will finish, we empty the write queue and
1850	 * all will be happy.
1851	 */
1852	if (unlikely(sk->sk_state == TCP_CLOSE))
1853		return;
1854
1855	if (tcp_write_xmit(sk, cur_mss, nonagle, 0, GFP_ATOMIC))
 
1856		tcp_check_probe_timer(sk);
1857}
1858
1859/* Send _single_ skb sitting at the send head. This function requires
1860 * true push pending frames to setup probe timer etc.
1861 */
1862void tcp_push_one(struct sock *sk, unsigned int mss_now)
1863{
1864	struct sk_buff *skb = tcp_send_head(sk);
1865
1866	BUG_ON(!skb || skb->len < mss_now);
1867
1868	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
1869}
1870
1871/* This function returns the amount that we can raise the
1872 * usable window based on the following constraints
1873 *
1874 * 1. The window can never be shrunk once it is offered (RFC 793)
1875 * 2. We limit memory per socket
1876 *
1877 * RFC 1122:
1878 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
1879 *  RECV.NEXT + RCV.WIN fixed until:
1880 *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
1881 *
1882 * i.e. don't raise the right edge of the window until you can raise
1883 * it at least MSS bytes.
1884 *
1885 * Unfortunately, the recommended algorithm breaks header prediction,
1886 * since header prediction assumes th->window stays fixed.
1887 *
1888 * Strictly speaking, keeping th->window fixed violates the receiver
1889 * side SWS prevention criteria. The problem is that under this rule
1890 * a stream of single byte packets will cause the right side of the
1891 * window to always advance by a single byte.
1892 *
1893 * Of course, if the sender implements sender side SWS prevention
1894 * then this will not be a problem.
1895 *
1896 * BSD seems to make the following compromise:
1897 *
1898 *	If the free space is less than the 1/4 of the maximum
1899 *	space available and the free space is less than 1/2 mss,
1900 *	then set the window to 0.
1901 *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
1902 *	Otherwise, just prevent the window from shrinking
1903 *	and from being larger than the largest representable value.
1904 *
1905 * This prevents incremental opening of the window in the regime
1906 * where TCP is limited by the speed of the reader side taking
1907 * data out of the TCP receive queue. It does nothing about
1908 * those cases where the window is constrained on the sender side
1909 * because the pipeline is full.
1910 *
1911 * BSD also seems to "accidentally" limit itself to windows that are a
1912 * multiple of MSS, at least until the free space gets quite small.
1913 * This would appear to be a side effect of the mbuf implementation.
1914 * Combining these two algorithms results in the observed behavior
1915 * of having a fixed window size at almost all times.
1916 *
1917 * Below we obtain similar behavior by forcing the offered window to
1918 * a multiple of the mss when it is feasible to do so.
1919 *
1920 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
1921 * Regular options like TIMESTAMP are taken into account.
1922 */
1923u32 __tcp_select_window(struct sock *sk)
1924{
1925	struct inet_connection_sock *icsk = inet_csk(sk);
1926	struct tcp_sock *tp = tcp_sk(sk);
1927	/* MSS for the peer's data.  Previous versions used mss_clamp
1928	 * here.  I don't know if the value based on our guesses
1929	 * of peer's MSS is better for the performance.  It's more correct
1930	 * but may be worse for the performance because of rcv_mss
1931	 * fluctuations.  --SAW  1998/11/1
1932	 */
1933	int mss = icsk->icsk_ack.rcv_mss;
1934	int free_space = tcp_space(sk);
1935	int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
 
1936	int window;
1937
1938	if (mss > full_space)
1939		mss = full_space;
1940
1941	if (free_space < (full_space >> 1)) {
1942		icsk->icsk_ack.quick = 0;
1943
1944		if (sk_under_memory_pressure(sk))
1945			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
1946					       4U * tp->advmss);
1947
1948		if (free_space < mss)
 
 
 
 
 
 
 
 
 
 
 
 
1949			return 0;
1950	}
1951
1952	if (free_space > tp->rcv_ssthresh)
1953		free_space = tp->rcv_ssthresh;
1954
1955	/* Don't do rounding if we are using window scaling, since the
1956	 * scaled window will not line up with the MSS boundary anyway.
1957	 */
1958	window = tp->rcv_wnd;
1959	if (tp->rx_opt.rcv_wscale) {
1960		window = free_space;
1961
1962		/* Advertise enough space so that it won't get scaled away.
1963		 * Import case: prevent zero window announcement if
1964		 * 1<<rcv_wscale > mss.
1965		 */
1966		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
1967			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
1968				  << tp->rx_opt.rcv_wscale);
1969	} else {
1970		/* Get the largest window that is a nice multiple of mss.
1971		 * Window clamp already applied above.
1972		 * If our current window offering is within 1 mss of the
1973		 * free space we just keep it. This prevents the divide
1974		 * and multiply from happening most of the time.
1975		 * We also don't do any window rounding when the free space
1976		 * is too small.
1977		 */
1978		if (window <= free_space - mss || window > free_space)
1979			window = (free_space / mss) * mss;
1980		else if (mss == full_space &&
1981			 free_space > window + (full_space >> 1))
1982			window = free_space;
1983	}
1984
1985	return window;
1986}
1987
1988/* Collapses two adjacent SKB's during retransmission. */
1989static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
1990{
1991	struct tcp_sock *tp = tcp_sk(sk);
1992	struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
1993	int skb_size, next_skb_size;
1994
1995	skb_size = skb->len;
1996	next_skb_size = next_skb->len;
1997
1998	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
1999
2000	tcp_highest_sack_combine(sk, next_skb, skb);
2001
2002	tcp_unlink_write_queue(next_skb, sk);
2003
2004	skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size),
2005				  next_skb_size);
2006
2007	if (next_skb->ip_summed == CHECKSUM_PARTIAL)
2008		skb->ip_summed = CHECKSUM_PARTIAL;
2009
2010	if (skb->ip_summed != CHECKSUM_PARTIAL)
2011		skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
2012
2013	/* Update sequence range on original skb. */
2014	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
2015
2016	/* Merge over control information. This moves PSH/FIN etc. over */
2017	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
2018
2019	/* All done, get rid of second SKB and account for it so
2020	 * packet counting does not break.
2021	 */
2022	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
2023
2024	/* changed transmit queue under us so clear hints */
2025	tcp_clear_retrans_hints_partial(tp);
2026	if (next_skb == tp->retransmit_skb_hint)
2027		tp->retransmit_skb_hint = skb;
2028
2029	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2030
2031	sk_wmem_free_skb(sk, next_skb);
2032}
2033
2034/* Check if coalescing SKBs is legal. */
2035static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
2036{
2037	if (tcp_skb_pcount(skb) > 1)
2038		return false;
2039	/* TODO: SACK collapsing could be used to remove this condition */
2040	if (skb_shinfo(skb)->nr_frags != 0)
2041		return false;
2042	if (skb_cloned(skb))
2043		return false;
2044	if (skb == tcp_send_head(sk))
2045		return false;
2046	/* Some heurestics for collapsing over SACK'd could be invented */
2047	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2048		return false;
2049
2050	return true;
2051}
2052
2053/* Collapse packets in the retransmit queue to make to create
2054 * less packets on the wire. This is only done on retransmission.
2055 */
2056static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
2057				     int space)
2058{
2059	struct tcp_sock *tp = tcp_sk(sk);
2060	struct sk_buff *skb = to, *tmp;
2061	bool first = true;
2062
2063	if (!sysctl_tcp_retrans_collapse)
2064		return;
2065	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
2066		return;
2067
2068	tcp_for_write_queue_from_safe(skb, tmp, sk) {
2069		if (!tcp_can_collapse(sk, skb))
2070			break;
2071
2072		space -= skb->len;
2073
2074		if (first) {
2075			first = false;
2076			continue;
2077		}
2078
2079		if (space < 0)
2080			break;
2081		/* Punt if not enough space exists in the first SKB for
2082		 * the data in the second
2083		 */
2084		if (skb->len > skb_availroom(to))
2085			break;
2086
2087		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
2088			break;
2089
2090		tcp_collapse_retrans(sk, to);
2091	}
2092}
2093
2094/* This retransmits one SKB.  Policy decisions and retransmit queue
2095 * state updates are done by the caller.  Returns non-zero if an
2096 * error occurred which prevented the send.
2097 */
2098int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2099{
2100	struct tcp_sock *tp = tcp_sk(sk);
2101	struct inet_connection_sock *icsk = inet_csk(sk);
2102	unsigned int cur_mss;
2103	int err;
2104
2105	/* Inconslusive MTU probe */
2106	if (icsk->icsk_mtup.probe_size) {
2107		icsk->icsk_mtup.probe_size = 0;
2108	}
2109
2110	/* Do not sent more than we queued. 1/4 is reserved for possible
2111	 * copying overhead: fragmentation, tunneling, mangling etc.
2112	 */
2113	if (atomic_read(&sk->sk_wmem_alloc) >
2114	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
2115		return -EAGAIN;
2116
2117	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
2118		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
2119			BUG();
2120		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
2121			return -ENOMEM;
2122	}
2123
2124	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
2125		return -EHOSTUNREACH; /* Routing failure or similar. */
2126
2127	cur_mss = tcp_current_mss(sk);
2128
2129	/* If receiver has shrunk his window, and skb is out of
2130	 * new window, do not retransmit it. The exception is the
2131	 * case, when window is shrunk to zero. In this case
2132	 * our retransmit serves as a zero window probe.
2133	 */
2134	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
2135	    TCP_SKB_CB(skb)->seq != tp->snd_una)
2136		return -EAGAIN;
2137
2138	if (skb->len > cur_mss) {
2139		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
2140			return -ENOMEM; /* We'll try again later. */
2141	} else {
2142		int oldpcount = tcp_skb_pcount(skb);
2143
2144		if (unlikely(oldpcount > 1)) {
 
 
2145			tcp_init_tso_segs(sk, skb, cur_mss);
2146			tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
2147		}
2148	}
2149
2150	tcp_retrans_try_collapse(sk, skb, cur_mss);
2151
2152	/* Some Solaris stacks overoptimize and ignore the FIN on a
2153	 * retransmit when old data is attached.  So strip it off
2154	 * since it is cheap to do so and saves bytes on the network.
2155	 */
2156	if (skb->len > 0 &&
2157	    (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
2158	    tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
2159		if (!pskb_trim(skb, 0)) {
2160			/* Reuse, even though it does some unnecessary work */
2161			tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,
2162					     TCP_SKB_CB(skb)->tcp_flags);
2163			skb->ip_summed = CHECKSUM_NONE;
2164		}
2165	}
2166
2167	/* Make a copy, if the first transmission SKB clone we made
2168	 * is still in somebody's hands, else make a clone.
2169	 */
2170	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2171
2172	/* make sure skb->data is aligned on arches that require it */
2173	if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
 
 
 
 
2174		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
2175						   GFP_ATOMIC);
2176		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2177			     -ENOBUFS;
2178	} else {
2179		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2180	}
2181
2182	if (err == 0) {
 
2183		/* Update global TCP statistics. */
2184		TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
2185
 
2186		tp->total_retrans++;
 
 
 
2187
 
 
 
 
 
 
2188#if FASTRETRANS_DEBUG > 0
2189		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2190			net_dbg_ratelimited("retrans_out leaked\n");
2191		}
2192#endif
2193		if (!tp->retrans_out)
2194			tp->lost_retrans_low = tp->snd_nxt;
2195		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
2196		tp->retrans_out += tcp_skb_pcount(skb);
2197
2198		/* Save stamp of the first retransmit. */
2199		if (!tp->retrans_stamp)
2200			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
2201
2202		tp->undo_retrans += tcp_skb_pcount(skb);
2203
2204		/* snd_nxt is stored to detect loss of retransmitted segment,
2205		 * see tcp_input.c tcp_sacktag_write_queue().
2206		 */
2207		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
 
 
2208	}
2209	return err;
2210}
2211
2212/* Check if we forward retransmits are possible in the current
2213 * window/congestion state.
2214 */
2215static bool tcp_can_forward_retransmit(struct sock *sk)
2216{
2217	const struct inet_connection_sock *icsk = inet_csk(sk);
2218	const struct tcp_sock *tp = tcp_sk(sk);
2219
2220	/* Forward retransmissions are possible only during Recovery. */
2221	if (icsk->icsk_ca_state != TCP_CA_Recovery)
2222		return false;
2223
2224	/* No forward retransmissions in Reno are possible. */
2225	if (tcp_is_reno(tp))
2226		return false;
2227
2228	/* Yeah, we have to make difficult choice between forward transmission
2229	 * and retransmission... Both ways have their merits...
2230	 *
2231	 * For now we do not retransmit anything, while we have some new
2232	 * segments to send. In the other cases, follow rule 3 for
2233	 * NextSeg() specified in RFC3517.
2234	 */
2235
2236	if (tcp_may_send_now(sk))
2237		return false;
2238
2239	return true;
2240}
2241
2242/* This gets called after a retransmit timeout, and the initially
2243 * retransmitted data is acknowledged.  It tries to continue
2244 * resending the rest of the retransmit queue, until either
2245 * we've sent it all or the congestion window limit is reached.
2246 * If doing SACK, the first ACK which comes back for a timeout
2247 * based retransmit packet might feed us FACK information again.
2248 * If so, we use it to avoid unnecessarily retransmissions.
2249 */
2250void tcp_xmit_retransmit_queue(struct sock *sk)
2251{
2252	const struct inet_connection_sock *icsk = inet_csk(sk);
2253	struct tcp_sock *tp = tcp_sk(sk);
2254	struct sk_buff *skb;
2255	struct sk_buff *hole = NULL;
2256	u32 last_lost;
2257	int mib_idx;
2258	int fwd_rexmitting = 0;
2259
2260	if (!tp->packets_out)
2261		return;
2262
2263	if (!tp->lost_out)
2264		tp->retransmit_high = tp->snd_una;
2265
2266	if (tp->retransmit_skb_hint) {
2267		skb = tp->retransmit_skb_hint;
2268		last_lost = TCP_SKB_CB(skb)->end_seq;
2269		if (after(last_lost, tp->retransmit_high))
2270			last_lost = tp->retransmit_high;
2271	} else {
2272		skb = tcp_write_queue_head(sk);
2273		last_lost = tp->snd_una;
2274	}
2275
2276	tcp_for_write_queue_from(skb, sk) {
2277		__u8 sacked = TCP_SKB_CB(skb)->sacked;
2278
2279		if (skb == tcp_send_head(sk))
2280			break;
2281		/* we could do better than to assign each time */
2282		if (hole == NULL)
2283			tp->retransmit_skb_hint = skb;
2284
2285		/* Assume this retransmit will generate
2286		 * only one packet for congestion window
2287		 * calculation purposes.  This works because
2288		 * tcp_retransmit_skb() will chop up the
2289		 * packet to be MSS sized and all the
2290		 * packet counting works out.
2291		 */
2292		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
2293			return;
2294
2295		if (fwd_rexmitting) {
2296begin_fwd:
2297			if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2298				break;
2299			mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
2300
2301		} else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
2302			tp->retransmit_high = last_lost;
2303			if (!tcp_can_forward_retransmit(sk))
2304				break;
2305			/* Backtrack if necessary to non-L'ed skb */
2306			if (hole != NULL) {
2307				skb = hole;
2308				hole = NULL;
2309			}
2310			fwd_rexmitting = 1;
2311			goto begin_fwd;
2312
2313		} else if (!(sacked & TCPCB_LOST)) {
2314			if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
2315				hole = skb;
2316			continue;
2317
2318		} else {
2319			last_lost = TCP_SKB_CB(skb)->end_seq;
2320			if (icsk->icsk_ca_state != TCP_CA_Loss)
2321				mib_idx = LINUX_MIB_TCPFASTRETRANS;
2322			else
2323				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
2324		}
2325
2326		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
2327			continue;
2328
2329		if (tcp_retransmit_skb(sk, skb)) {
2330			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
2331			return;
2332		}
2333		NET_INC_STATS_BH(sock_net(sk), mib_idx);
2334
2335		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
2336			tp->prr_out += tcp_skb_pcount(skb);
2337
2338		if (skb == tcp_write_queue_head(sk))
2339			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2340						  inet_csk(sk)->icsk_rto,
2341						  TCP_RTO_MAX);
2342	}
2343}
2344
2345/* Send a fin.  The caller locks the socket for us.  This cannot be
2346 * allowed to fail queueing a FIN frame under any circumstances.
2347 */
2348void tcp_send_fin(struct sock *sk)
2349{
2350	struct tcp_sock *tp = tcp_sk(sk);
2351	struct sk_buff *skb = tcp_write_queue_tail(sk);
2352	int mss_now;
2353
2354	/* Optimization, tack on the FIN if we have a queue of
2355	 * unsent frames.  But be careful about outgoing SACKS
2356	 * and IP options.
2357	 */
2358	mss_now = tcp_current_mss(sk);
2359
2360	if (tcp_send_head(sk) != NULL) {
2361		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
2362		TCP_SKB_CB(skb)->end_seq++;
2363		tp->write_seq++;
2364	} else {
2365		/* Socket is locked, keep trying until memory is available. */
2366		for (;;) {
2367			skb = alloc_skb_fclone(MAX_TCP_HEADER,
2368					       sk->sk_allocation);
2369			if (skb)
2370				break;
2371			yield();
2372		}
2373
2374		/* Reserve space for headers and prepare control bits. */
2375		skb_reserve(skb, MAX_TCP_HEADER);
2376		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
2377		tcp_init_nondata_skb(skb, tp->write_seq,
2378				     TCPHDR_ACK | TCPHDR_FIN);
2379		tcp_queue_skb(sk, skb);
2380	}
2381	__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
2382}
2383
2384/* We get here when a process closes a file descriptor (either due to
2385 * an explicit close() or as a byproduct of exit()'ing) and there
2386 * was unread data in the receive queue.  This behavior is recommended
2387 * by RFC 2525, section 2.17.  -DaveM
2388 */
2389void tcp_send_active_reset(struct sock *sk, gfp_t priority)
2390{
2391	struct sk_buff *skb;
2392
2393	/* NOTE: No TCP options attached and we never retransmit this. */
2394	skb = alloc_skb(MAX_TCP_HEADER, priority);
2395	if (!skb) {
2396		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
2397		return;
2398	}
2399
2400	/* Reserve space for headers and prepare control bits. */
2401	skb_reserve(skb, MAX_TCP_HEADER);
2402	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
2403			     TCPHDR_ACK | TCPHDR_RST);
2404	/* Send it off. */
2405	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2406	if (tcp_transmit_skb(sk, skb, 0, priority))
2407		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
2408
2409	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
2410}
2411
2412/* Send a crossed SYN-ACK during socket establishment.
2413 * WARNING: This routine must only be called when we have already sent
2414 * a SYN packet that crossed the incoming SYN that caused this routine
2415 * to get called. If this assumption fails then the initial rcv_wnd
2416 * and rcv_wscale values will not be correct.
2417 */
2418int tcp_send_synack(struct sock *sk)
2419{
2420	struct sk_buff *skb;
2421
2422	skb = tcp_write_queue_head(sk);
2423	if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
2424		pr_debug("%s: wrong queue state\n", __func__);
2425		return -EFAULT;
2426	}
2427	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
2428		if (skb_cloned(skb)) {
2429			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
2430			if (nskb == NULL)
2431				return -ENOMEM;
2432			tcp_unlink_write_queue(skb, sk);
2433			skb_header_release(nskb);
2434			__tcp_add_write_queue_head(sk, nskb);
2435			sk_wmem_free_skb(sk, skb);
2436			sk->sk_wmem_queued += nskb->truesize;
2437			sk_mem_charge(sk, nskb->truesize);
2438			skb = nskb;
2439		}
2440
2441		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
2442		TCP_ECN_send_synack(tcp_sk(sk), skb);
2443	}
2444	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2445	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2446}
2447
2448/* Prepare a SYN-ACK. */
 
 
 
 
 
 
 
 
2449struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2450				struct request_sock *req,
2451				struct request_values *rvp)
2452{
2453	struct tcp_out_options opts;
2454	struct tcp_extend_values *xvp = tcp_xv(rvp);
2455	struct inet_request_sock *ireq = inet_rsk(req);
2456	struct tcp_sock *tp = tcp_sk(sk);
2457	const struct tcp_cookie_values *cvp = tp->cookie_values;
2458	struct tcphdr *th;
2459	struct sk_buff *skb;
2460	struct tcp_md5sig_key *md5;
2461	int tcp_header_size;
2462	int mss;
2463	int s_data_desired = 0;
2464
2465	if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
2466		s_data_desired = cvp->s_data_desired;
2467	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
2468	if (skb == NULL)
2469		return NULL;
2470
2471	/* Reserve space for headers. */
2472	skb_reserve(skb, MAX_TCP_HEADER);
2473
2474	skb_dst_set(skb, dst_clone(dst));
 
2475
2476	mss = dst_metric_advmss(dst);
2477	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
2478		mss = tp->rx_opt.user_mss;
2479
2480	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
2481		__u8 rcv_wscale;
2482		/* Set this up on the first call only */
2483		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2484
2485		/* limit the window selection if the user enforce a smaller rx buffer */
2486		if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2487		    (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
2488			req->window_clamp = tcp_full_space(sk);
2489
2490		/* tcp_full_space because it is guaranteed to be the first packet */
2491		tcp_select_initial_window(tcp_full_space(sk),
2492			mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
2493			&req->rcv_wnd,
2494			&req->window_clamp,
2495			ireq->wscale_ok,
2496			&rcv_wscale,
2497			dst_metric(dst, RTAX_INITRWND));
2498		ireq->rcv_wscale = rcv_wscale;
2499	}
2500
2501	memset(&opts, 0, sizeof(opts));
2502#ifdef CONFIG_SYN_COOKIES
2503	if (unlikely(req->cookie_ts))
2504		TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
2505	else
2506#endif
2507	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2508	tcp_header_size = tcp_synack_options(sk, req, mss,
2509					     skb, &opts, &md5, xvp)
2510			+ sizeof(*th);
2511
2512	skb_push(skb, tcp_header_size);
2513	skb_reset_transport_header(skb);
2514
2515	th = tcp_hdr(skb);
2516	memset(th, 0, sizeof(struct tcphdr));
2517	th->syn = 1;
2518	th->ack = 1;
2519	TCP_ECN_make_synack(req, th);
2520	th->source = ireq->loc_port;
2521	th->dest = ireq->rmt_port;
2522	/* Setting of flags are superfluous here for callers (and ECE is
2523	 * not even correctly set)
2524	 */
2525	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
2526			     TCPHDR_SYN | TCPHDR_ACK);
2527
2528	if (OPTION_COOKIE_EXTENSION & opts.options) {
2529		if (s_data_desired) {
2530			u8 *buf = skb_put(skb, s_data_desired);
2531
2532			/* copy data directly from the listening socket. */
2533			memcpy(buf, cvp->s_data_payload, s_data_desired);
2534			TCP_SKB_CB(skb)->end_seq += s_data_desired;
2535		}
2536
2537		if (opts.hash_size > 0) {
2538			__u32 workspace[SHA_WORKSPACE_WORDS];
2539			u32 *mess = &xvp->cookie_bakery[COOKIE_DIGEST_WORDS];
2540			u32 *tail = &mess[COOKIE_MESSAGE_WORDS-1];
2541
2542			/* Secret recipe depends on the Timestamp, (future)
2543			 * Sequence and Acknowledgment Numbers, Initiator
2544			 * Cookie, and others handled by IP variant caller.
2545			 */
2546			*tail-- ^= opts.tsval;
2547			*tail-- ^= tcp_rsk(req)->rcv_isn + 1;
2548			*tail-- ^= TCP_SKB_CB(skb)->seq + 1;
2549
2550			/* recommended */
2551			*tail-- ^= (((__force u32)th->dest << 16) | (__force u32)th->source);
2552			*tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */
2553
2554			sha_transform((__u32 *)&xvp->cookie_bakery[0],
2555				      (char *)mess,
2556				      &workspace[0]);
2557			opts.hash_location =
2558				(__u8 *)&xvp->cookie_bakery[0];
2559		}
2560	}
2561
2562	th->seq = htonl(TCP_SKB_CB(skb)->seq);
2563	th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
 
2564
2565	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2566	th->window = htons(min(req->rcv_wnd, 65535U));
2567	tcp_options_write((__be32 *)(th + 1), tp, &opts);
2568	th->doff = (tcp_header_size >> 2);
2569	TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb));
2570
2571#ifdef CONFIG_TCP_MD5SIG
2572	/* Okay, we have all we need - do the md5 hash if needed */
2573	if (md5) {
2574		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
2575					       md5, NULL, req, skb);
2576	}
2577#endif
2578
2579	return skb;
2580}
2581EXPORT_SYMBOL(tcp_make_synack);
2582
2583/* Do all connect socket setups that can be done AF independent. */
2584void tcp_connect_init(struct sock *sk)
2585{
2586	const struct dst_entry *dst = __sk_dst_get(sk);
2587	struct tcp_sock *tp = tcp_sk(sk);
2588	__u8 rcv_wscale;
2589
2590	/* We'll fix this up when we get a response from the other end.
2591	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
2592	 */
2593	tp->tcp_header_len = sizeof(struct tcphdr) +
2594		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
2595
2596#ifdef CONFIG_TCP_MD5SIG
2597	if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2598		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2599#endif
2600
2601	/* If user gave his TCP_MAXSEG, record it to clamp */
2602	if (tp->rx_opt.user_mss)
2603		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
2604	tp->max_window = 0;
2605	tcp_mtup_init(sk);
2606	tcp_sync_mss(sk, dst_mtu(dst));
2607
2608	if (!tp->window_clamp)
2609		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
2610	tp->advmss = dst_metric_advmss(dst);
2611	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
2612		tp->advmss = tp->rx_opt.user_mss;
2613
2614	tcp_initialize_rcv_mss(sk);
2615
2616	/* limit the window selection if the user enforce a smaller rx buffer */
2617	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2618	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
2619		tp->window_clamp = tcp_full_space(sk);
2620
2621	tcp_select_initial_window(tcp_full_space(sk),
2622				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
2623				  &tp->rcv_wnd,
2624				  &tp->window_clamp,
2625				  sysctl_tcp_window_scaling,
2626				  &rcv_wscale,
2627				  dst_metric(dst, RTAX_INITRWND));
2628
2629	tp->rx_opt.rcv_wscale = rcv_wscale;
2630	tp->rcv_ssthresh = tp->rcv_wnd;
2631
2632	sk->sk_err = 0;
2633	sock_reset_flag(sk, SOCK_DONE);
2634	tp->snd_wnd = 0;
2635	tcp_init_wl(tp, 0);
2636	tp->snd_una = tp->write_seq;
2637	tp->snd_sml = tp->write_seq;
2638	tp->snd_up = tp->write_seq;
2639	tp->snd_nxt = tp->write_seq;
2640
2641	if (likely(!tp->repair))
2642		tp->rcv_nxt = 0;
 
 
2643	tp->rcv_wup = tp->rcv_nxt;
2644	tp->copied_seq = tp->rcv_nxt;
2645
2646	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2647	inet_csk(sk)->icsk_retransmits = 0;
2648	tcp_clear_retrans(tp);
2649}
2650
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2651/* Build a SYN and send it off. */
2652int tcp_connect(struct sock *sk)
2653{
2654	struct tcp_sock *tp = tcp_sk(sk);
2655	struct sk_buff *buff;
2656	int err;
2657
2658	tcp_connect_init(sk);
2659
 
 
 
 
 
2660	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
2661	if (unlikely(buff == NULL))
2662		return -ENOBUFS;
2663
2664	/* Reserve space for headers. */
2665	skb_reserve(buff, MAX_TCP_HEADER);
2666
2667	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
 
 
2668	TCP_ECN_send_syn(sk, buff);
2669
2670	/* Send it off. */
2671	TCP_SKB_CB(buff)->when = tcp_time_stamp;
2672	tp->retrans_stamp = TCP_SKB_CB(buff)->when;
2673	skb_header_release(buff);
2674	__tcp_add_write_queue_tail(sk, buff);
2675	sk->sk_wmem_queued += buff->truesize;
2676	sk_mem_charge(sk, buff->truesize);
2677	tp->packets_out += tcp_skb_pcount(buff);
2678	err = tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
2679	if (err == -ECONNREFUSED)
2680		return err;
2681
2682	/* We change tp->snd_nxt after the tcp_transmit_skb() call
2683	 * in order to make this packet get counted in tcpOutSegs.
2684	 */
2685	tp->snd_nxt = tp->write_seq;
2686	tp->pushed_seq = tp->write_seq;
2687	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
2688
2689	/* Timer for repeating the SYN until an answer. */
2690	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2691				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
2692	return 0;
2693}
2694EXPORT_SYMBOL(tcp_connect);
2695
2696/* Send out a delayed ack, the caller does the policy checking
2697 * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
2698 * for details.
2699 */
2700void tcp_send_delayed_ack(struct sock *sk)
2701{
2702	struct inet_connection_sock *icsk = inet_csk(sk);
2703	int ato = icsk->icsk_ack.ato;
2704	unsigned long timeout;
2705
2706	if (ato > TCP_DELACK_MIN) {
2707		const struct tcp_sock *tp = tcp_sk(sk);
2708		int max_ato = HZ / 2;
2709
2710		if (icsk->icsk_ack.pingpong ||
2711		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
2712			max_ato = TCP_DELACK_MAX;
2713
2714		/* Slow path, intersegment interval is "high". */
2715
2716		/* If some rtt estimate is known, use it to bound delayed ack.
2717		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
2718		 * directly.
2719		 */
2720		if (tp->srtt) {
2721			int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN);
 
2722
2723			if (rtt < max_ato)
2724				max_ato = rtt;
2725		}
2726
2727		ato = min(ato, max_ato);
2728	}
2729
2730	/* Stay within the limit we were given */
2731	timeout = jiffies + ato;
2732
2733	/* Use new timeout only if there wasn't a older one earlier. */
2734	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
2735		/* If delack timer was blocked or is about to expire,
2736		 * send ACK now.
2737		 */
2738		if (icsk->icsk_ack.blocked ||
2739		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
2740			tcp_send_ack(sk);
2741			return;
2742		}
2743
2744		if (!time_before(timeout, icsk->icsk_ack.timeout))
2745			timeout = icsk->icsk_ack.timeout;
2746	}
2747	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
2748	icsk->icsk_ack.timeout = timeout;
2749	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
2750}
2751
2752/* This routine sends an ack and also updates the window. */
2753void tcp_send_ack(struct sock *sk)
2754{
2755	struct sk_buff *buff;
2756
2757	/* If we have been reset, we may not send again. */
2758	if (sk->sk_state == TCP_CLOSE)
2759		return;
2760
2761	/* We are not putting this on the write queue, so
2762	 * tcp_transmit_skb() will set the ownership to this
2763	 * sock.
2764	 */
2765	buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
2766	if (buff == NULL) {
2767		inet_csk_schedule_ack(sk);
2768		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
2769		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
2770					  TCP_DELACK_MAX, TCP_RTO_MAX);
2771		return;
2772	}
2773
2774	/* Reserve space for headers and prepare control bits. */
2775	skb_reserve(buff, MAX_TCP_HEADER);
2776	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
2777
2778	/* Send it off, this clears delayed acks for us. */
2779	TCP_SKB_CB(buff)->when = tcp_time_stamp;
2780	tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
2781}
2782
2783/* This routine sends a packet with an out of date sequence
2784 * number. It assumes the other end will try to ack it.
2785 *
2786 * Question: what should we make while urgent mode?
2787 * 4.4BSD forces sending single byte of data. We cannot send
2788 * out of window data, because we have SND.NXT==SND.MAX...
2789 *
2790 * Current solution: to send TWO zero-length segments in urgent mode:
2791 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
2792 * out-of-date with SND.UNA-1 to probe window.
2793 */
2794static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
2795{
2796	struct tcp_sock *tp = tcp_sk(sk);
2797	struct sk_buff *skb;
2798
2799	/* We don't queue it, tcp_transmit_skb() sets ownership. */
2800	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
2801	if (skb == NULL)
2802		return -1;
2803
2804	/* Reserve space for headers and set control bits. */
2805	skb_reserve(skb, MAX_TCP_HEADER);
2806	/* Use a previous sequence.  This should cause the other
2807	 * end to send an ack.  Don't queue or clone SKB, just
2808	 * send it.
2809	 */
2810	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
2811	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2812	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
2813}
2814
2815void tcp_send_window_probe(struct sock *sk)
2816{
2817	if (sk->sk_state == TCP_ESTABLISHED) {
2818		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
2819		tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq;
2820		tcp_xmit_probe_skb(sk, 0);
2821	}
2822}
2823
2824/* Initiate keepalive or window probe from timer. */
2825int tcp_write_wakeup(struct sock *sk)
2826{
2827	struct tcp_sock *tp = tcp_sk(sk);
2828	struct sk_buff *skb;
2829
2830	if (sk->sk_state == TCP_CLOSE)
2831		return -1;
2832
2833	if ((skb = tcp_send_head(sk)) != NULL &&
2834	    before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
2835		int err;
2836		unsigned int mss = tcp_current_mss(sk);
2837		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
2838
2839		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
2840			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
2841
2842		/* We are probing the opening of a window
2843		 * but the window size is != 0
2844		 * must have been a result SWS avoidance ( sender )
2845		 */
2846		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
2847		    skb->len > mss) {
2848			seg_size = min(seg_size, mss);
2849			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
2850			if (tcp_fragment(sk, skb, seg_size, mss))
2851				return -1;
2852		} else if (!tcp_skb_pcount(skb))
2853			tcp_set_skb_tso_segs(sk, skb, mss);
2854
2855		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
2856		TCP_SKB_CB(skb)->when = tcp_time_stamp;
2857		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2858		if (!err)
2859			tcp_event_new_data_sent(sk, skb);
2860		return err;
2861	} else {
2862		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
2863			tcp_xmit_probe_skb(sk, 1);
2864		return tcp_xmit_probe_skb(sk, 0);
2865	}
2866}
2867
2868/* A window probe timeout has occurred.  If window is not closed send
2869 * a partial packet else a zero probe.
2870 */
2871void tcp_send_probe0(struct sock *sk)
2872{
2873	struct inet_connection_sock *icsk = inet_csk(sk);
2874	struct tcp_sock *tp = tcp_sk(sk);
2875	int err;
2876
2877	err = tcp_write_wakeup(sk);
2878
2879	if (tp->packets_out || !tcp_send_head(sk)) {
2880		/* Cancel probe timer, if it is not required. */
2881		icsk->icsk_probes_out = 0;
2882		icsk->icsk_backoff = 0;
2883		return;
2884	}
2885
2886	if (err <= 0) {
2887		if (icsk->icsk_backoff < sysctl_tcp_retries2)
2888			icsk->icsk_backoff++;
2889		icsk->icsk_probes_out++;
2890		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2891					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
2892					  TCP_RTO_MAX);
2893	} else {
2894		/* If packet was not sent due to local congestion,
2895		 * do not backoff and do not remember icsk_probes_out.
2896		 * Let local senders to fight for local resources.
2897		 *
2898		 * Use accumulated backoff yet.
2899		 */
2900		if (!icsk->icsk_probes_out)
2901			icsk->icsk_probes_out = 1;
2902		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2903					  min(icsk->icsk_rto << icsk->icsk_backoff,
2904					      TCP_RESOURCE_PROBE_INTERVAL),
2905					  TCP_RTO_MAX);
2906	}
2907}
v3.15
   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		Implementation of the Transmission Control Protocol(TCP).
   7 *
   8 * Authors:	Ross Biro
   9 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  10 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
  11 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
  12 *		Florian La Roche, <flla@stud.uni-sb.de>
  13 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
  14 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
  15 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  16 *		Matthew Dillon, <dillon@apollo.west.oic.com>
  17 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  18 *		Jorge Cwik, <jorge@laser.satlink.net>
  19 */
  20
  21/*
  22 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
  23 *				:	Fragmentation on mtu decrease
  24 *				:	Segment collapse on retransmit
  25 *				:	AF independence
  26 *
  27 *		Linus Torvalds	:	send_delayed_ack
  28 *		David S. Miller	:	Charge memory using the right skb
  29 *					during syn/ack processing.
  30 *		David S. Miller :	Output engine completely rewritten.
  31 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
  32 *		Cacophonix Gaul :	draft-minshall-nagle-01
  33 *		J Hadi Salim	:	ECN support
  34 *
  35 */
  36
  37#define pr_fmt(fmt) "TCP: " fmt
  38
  39#include <net/tcp.h>
  40
  41#include <linux/compiler.h>
  42#include <linux/gfp.h>
  43#include <linux/module.h>
  44
  45/* People can turn this off for buggy TCP's found in printers etc. */
  46int sysctl_tcp_retrans_collapse __read_mostly = 1;
  47
  48/* People can turn this on to work with those rare, broken TCPs that
  49 * interpret the window field as a signed quantity.
  50 */
  51int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
  52
  53/* Default TSQ limit of two TSO segments */
  54int sysctl_tcp_limit_output_bytes __read_mostly = 131072;
  55
  56/* This limits the percentage of the congestion window which we
  57 * will allow a single TSO frame to consume.  Building TSO frames
  58 * which are too large can cause TCP streams to be bursty.
  59 */
  60int sysctl_tcp_tso_win_divisor __read_mostly = 3;
  61
  62int sysctl_tcp_mtu_probing __read_mostly = 0;
  63int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;
  64
  65/* By default, RFC2861 behavior.  */
  66int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
  67
  68unsigned int sysctl_tcp_notsent_lowat __read_mostly = UINT_MAX;
  69EXPORT_SYMBOL(sysctl_tcp_notsent_lowat);
  70
  71static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
  72			   int push_one, gfp_t gfp);
  73
  74/* Account for new data that has been sent to the network. */
  75static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
  76{
  77	struct inet_connection_sock *icsk = inet_csk(sk);
  78	struct tcp_sock *tp = tcp_sk(sk);
  79	unsigned int prior_packets = tp->packets_out;
  80
  81	tcp_advance_send_head(sk, skb);
  82	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
  83
 
 
 
 
  84	tp->packets_out += tcp_skb_pcount(skb);
  85	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
  86	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
  87		tcp_rearm_rto(sk);
  88	}
  89
  90	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
  91		      tcp_skb_pcount(skb));
  92}
  93
  94/* SND.NXT, if window was not shrunk.
  95 * If window has been shrunk, what should we make? It is not clear at all.
  96 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
  97 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
  98 * invalid. OK, let's make this for now:
  99 */
 100static inline __u32 tcp_acceptable_seq(const struct sock *sk)
 101{
 102	const struct tcp_sock *tp = tcp_sk(sk);
 103
 104	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
 105		return tp->snd_nxt;
 106	else
 107		return tcp_wnd_end(tp);
 108}
 109
 110/* Calculate mss to advertise in SYN segment.
 111 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 112 *
 113 * 1. It is independent of path mtu.
 114 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 115 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 116 *    attached devices, because some buggy hosts are confused by
 117 *    large MSS.
 118 * 4. We do not make 3, we advertise MSS, calculated from first
 119 *    hop device mtu, but allow to raise it to ip_rt_min_advmss.
 120 *    This may be overridden via information stored in routing table.
 121 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 122 *    probably even Jumbo".
 123 */
 124static __u16 tcp_advertise_mss(struct sock *sk)
 125{
 126	struct tcp_sock *tp = tcp_sk(sk);
 127	const struct dst_entry *dst = __sk_dst_get(sk);
 128	int mss = tp->advmss;
 129
 130	if (dst) {
 131		unsigned int metric = dst_metric_advmss(dst);
 132
 133		if (metric < mss) {
 134			mss = metric;
 135			tp->advmss = mss;
 136		}
 137	}
 138
 139	return (__u16)mss;
 140}
 141
 142/* RFC2861. Reset CWND after idle period longer RTO to "restart window".
 143 * This is the first part of cwnd validation mechanism. */
 144static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
 145{
 146	struct tcp_sock *tp = tcp_sk(sk);
 147	s32 delta = tcp_time_stamp - tp->lsndtime;
 148	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
 149	u32 cwnd = tp->snd_cwnd;
 150
 151	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
 152
 153	tp->snd_ssthresh = tcp_current_ssthresh(sk);
 154	restart_cwnd = min(restart_cwnd, cwnd);
 155
 156	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
 157		cwnd >>= 1;
 158	tp->snd_cwnd = max(cwnd, restart_cwnd);
 159	tp->snd_cwnd_stamp = tcp_time_stamp;
 160	tp->snd_cwnd_used = 0;
 161}
 162
 163/* Congestion state accounting after a packet has been sent. */
 164static void tcp_event_data_sent(struct tcp_sock *tp,
 165				struct sock *sk)
 166{
 167	struct inet_connection_sock *icsk = inet_csk(sk);
 168	const u32 now = tcp_time_stamp;
 169	const struct dst_entry *dst = __sk_dst_get(sk);
 170
 171	if (sysctl_tcp_slow_start_after_idle &&
 172	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
 173		tcp_cwnd_restart(sk, __sk_dst_get(sk));
 174
 175	tp->lsndtime = now;
 176
 177	/* If it is a reply for ato after last received
 178	 * packet, enter pingpong mode.
 179	 */
 180	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato &&
 181	    (!dst || !dst_metric(dst, RTAX_QUICKACK)))
 182			icsk->icsk_ack.pingpong = 1;
 183}
 184
 185/* Account for an ACK we sent. */
 186static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
 187{
 188	tcp_dec_quickack_mode(sk, pkts);
 189	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 190}
 191
 192
 193u32 tcp_default_init_rwnd(u32 mss)
 194{
 195	/* Initial receive window should be twice of TCP_INIT_CWND to
 196	 * enable proper sending of new unsent data during fast recovery
 197	 * (RFC 3517, Section 4, NextSeg() rule (2)). Further place a
 198	 * limit when mss is larger than 1460.
 199	 */
 200	u32 init_rwnd = TCP_INIT_CWND * 2;
 201
 202	if (mss > 1460)
 203		init_rwnd = max((1460 * init_rwnd) / mss, 2U);
 204	return init_rwnd;
 205}
 206
 207/* Determine a window scaling and initial window to offer.
 208 * Based on the assumption that the given amount of space
 209 * will be offered. Store the results in the tp structure.
 210 * NOTE: for smooth operation initial space offering should
 211 * be a multiple of mss if possible. We assume here that mss >= 1.
 212 * This MUST be enforced by all callers.
 213 */
 214void tcp_select_initial_window(int __space, __u32 mss,
 215			       __u32 *rcv_wnd, __u32 *window_clamp,
 216			       int wscale_ok, __u8 *rcv_wscale,
 217			       __u32 init_rcv_wnd)
 218{
 219	unsigned int space = (__space < 0 ? 0 : __space);
 220
 221	/* If no clamp set the clamp to the max possible scaled window */
 222	if (*window_clamp == 0)
 223		(*window_clamp) = (65535 << 14);
 224	space = min(*window_clamp, space);
 225
 226	/* Quantize space offering to a multiple of mss if possible. */
 227	if (space > mss)
 228		space = (space / mss) * mss;
 229
 230	/* NOTE: offering an initial window larger than 32767
 231	 * will break some buggy TCP stacks. If the admin tells us
 232	 * it is likely we could be speaking with such a buggy stack
 233	 * we will truncate our initial window offering to 32K-1
 234	 * unless the remote has sent us a window scaling option,
 235	 * which we interpret as a sign the remote TCP is not
 236	 * misinterpreting the window field as a signed quantity.
 237	 */
 238	if (sysctl_tcp_workaround_signed_windows)
 239		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
 240	else
 241		(*rcv_wnd) = space;
 242
 243	(*rcv_wscale) = 0;
 244	if (wscale_ok) {
 245		/* Set window scaling on max possible window
 246		 * See RFC1323 for an explanation of the limit to 14
 247		 */
 248		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
 249		space = min_t(u32, space, *window_clamp);
 250		while (space > 65535 && (*rcv_wscale) < 14) {
 251			space >>= 1;
 252			(*rcv_wscale)++;
 253		}
 254	}
 255
 
 
 
 
 256	if (mss > (1 << *rcv_wscale)) {
 257		if (!init_rcv_wnd) /* Use default unless specified otherwise */
 258			init_rcv_wnd = tcp_default_init_rwnd(mss);
 259		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
 
 
 
 
 
 
 
 
 260	}
 261
 262	/* Set the clamp no higher than max representable value */
 263	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
 264}
 265EXPORT_SYMBOL(tcp_select_initial_window);
 266
 267/* Chose a new window to advertise, update state in tcp_sock for the
 268 * socket, and return result with RFC1323 scaling applied.  The return
 269 * value can be stuffed directly into th->window for an outgoing
 270 * frame.
 271 */
 272static u16 tcp_select_window(struct sock *sk)
 273{
 274	struct tcp_sock *tp = tcp_sk(sk);
 275	u32 old_win = tp->rcv_wnd;
 276	u32 cur_win = tcp_receive_window(tp);
 277	u32 new_win = __tcp_select_window(sk);
 278
 279	/* Never shrink the offered window */
 280	if (new_win < cur_win) {
 281		/* Danger Will Robinson!
 282		 * Don't update rcv_wup/rcv_wnd here or else
 283		 * we will not be able to advertise a zero
 284		 * window in time.  --DaveM
 285		 *
 286		 * Relax Will Robinson.
 287		 */
 288		if (new_win == 0)
 289			NET_INC_STATS(sock_net(sk),
 290				      LINUX_MIB_TCPWANTZEROWINDOWADV);
 291		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
 292	}
 293	tp->rcv_wnd = new_win;
 294	tp->rcv_wup = tp->rcv_nxt;
 295
 296	/* Make sure we do not exceed the maximum possible
 297	 * scaled window.
 298	 */
 299	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
 300		new_win = min(new_win, MAX_TCP_WINDOW);
 301	else
 302		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
 303
 304	/* RFC1323 scaling applied */
 305	new_win >>= tp->rx_opt.rcv_wscale;
 306
 307	/* If we advertise zero window, disable fast path. */
 308	if (new_win == 0) {
 309		tp->pred_flags = 0;
 310		if (old_win)
 311			NET_INC_STATS(sock_net(sk),
 312				      LINUX_MIB_TCPTOZEROWINDOWADV);
 313	} else if (old_win == 0) {
 314		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
 315	}
 316
 317	return new_win;
 318}
 319
 320/* Packet ECN state for a SYN-ACK */
 321static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)
 322{
 323	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
 324	if (!(tp->ecn_flags & TCP_ECN_OK))
 325		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
 326}
 327
 328/* Packet ECN state for a SYN.  */
 329static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
 330{
 331	struct tcp_sock *tp = tcp_sk(sk);
 332
 333	tp->ecn_flags = 0;
 334	if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1) {
 335		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
 336		tp->ecn_flags = TCP_ECN_OK;
 337	}
 338}
 339
 340static __inline__ void
 341TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th)
 342{
 343	if (inet_rsk(req)->ecn_ok)
 344		th->ece = 1;
 345}
 346
 347/* Set up ECN state for a packet on a ESTABLISHED socket that is about to
 348 * be sent.
 349 */
 350static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
 351				int tcp_header_len)
 352{
 353	struct tcp_sock *tp = tcp_sk(sk);
 354
 355	if (tp->ecn_flags & TCP_ECN_OK) {
 356		/* Not-retransmitted data segment: set ECT and inject CWR. */
 357		if (skb->len != tcp_header_len &&
 358		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
 359			INET_ECN_xmit(sk);
 360			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
 361				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
 362				tcp_hdr(skb)->cwr = 1;
 363				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 364			}
 365		} else {
 366			/* ACK or retransmitted segment: clear ECT|CE */
 367			INET_ECN_dontxmit(sk);
 368		}
 369		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
 370			tcp_hdr(skb)->ece = 1;
 371	}
 372}
 373
 374/* Constructs common control bits of non-data skb. If SYN/FIN is present,
 375 * auto increment end seqno.
 376 */
 377static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
 378{
 379	struct skb_shared_info *shinfo = skb_shinfo(skb);
 380
 381	skb->ip_summed = CHECKSUM_PARTIAL;
 382	skb->csum = 0;
 383
 384	TCP_SKB_CB(skb)->tcp_flags = flags;
 385	TCP_SKB_CB(skb)->sacked = 0;
 386
 387	shinfo->gso_segs = 1;
 388	shinfo->gso_size = 0;
 389	shinfo->gso_type = 0;
 390
 391	TCP_SKB_CB(skb)->seq = seq;
 392	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
 393		seq++;
 394	TCP_SKB_CB(skb)->end_seq = seq;
 395}
 396
 397static inline bool tcp_urg_mode(const struct tcp_sock *tp)
 398{
 399	return tp->snd_una != tp->snd_up;
 400}
 401
 402#define OPTION_SACK_ADVERTISE	(1 << 0)
 403#define OPTION_TS		(1 << 1)
 404#define OPTION_MD5		(1 << 2)
 405#define OPTION_WSCALE		(1 << 3)
 406#define OPTION_FAST_OPEN_COOKIE	(1 << 8)
 407
 408struct tcp_out_options {
 409	u16 options;		/* bit field of OPTION_* */
 410	u16 mss;		/* 0 to disable */
 411	u8 ws;			/* window scale, 0 to disable */
 412	u8 num_sack_blocks;	/* number of SACK blocks to include */
 413	u8 hash_size;		/* bytes in hash_location */
 
 
 414	__u8 *hash_location;	/* temporary pointer, overloaded */
 415	__u32 tsval, tsecr;	/* need to include OPTION_TS */
 416	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
 417};
 418
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 419/* Write previously computed TCP options to the packet.
 420 *
 421 * Beware: Something in the Internet is very sensitive to the ordering of
 422 * TCP options, we learned this through the hard way, so be careful here.
 423 * Luckily we can at least blame others for their non-compliance but from
 424 * inter-operability perspective it seems that we're somewhat stuck with
 425 * the ordering which we have been using if we want to keep working with
 426 * those broken things (not that it currently hurts anybody as there isn't
 427 * particular reason why the ordering would need to be changed).
 428 *
 429 * At least SACK_PERM as the first option is known to lead to a disaster
 430 * (but it may well be that other scenarios fail similarly).
 431 */
 432static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
 433			      struct tcp_out_options *opts)
 434{
 435	u16 options = opts->options;	/* mungable copy */
 436
 
 
 
 
 
 
 
 
 437	if (unlikely(OPTION_MD5 & options)) {
 438		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 439			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
 
 
 
 
 
 
 
 
 
 
 440		/* overload cookie hash location */
 441		opts->hash_location = (__u8 *)ptr;
 442		ptr += 4;
 443	}
 444
 445	if (unlikely(opts->mss)) {
 446		*ptr++ = htonl((TCPOPT_MSS << 24) |
 447			       (TCPOLEN_MSS << 16) |
 448			       opts->mss);
 449	}
 450
 451	if (likely(OPTION_TS & options)) {
 452		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
 453			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
 454				       (TCPOLEN_SACK_PERM << 16) |
 455				       (TCPOPT_TIMESTAMP << 8) |
 456				       TCPOLEN_TIMESTAMP);
 457			options &= ~OPTION_SACK_ADVERTISE;
 458		} else {
 459			*ptr++ = htonl((TCPOPT_NOP << 24) |
 460				       (TCPOPT_NOP << 16) |
 461				       (TCPOPT_TIMESTAMP << 8) |
 462				       TCPOLEN_TIMESTAMP);
 463		}
 464		*ptr++ = htonl(opts->tsval);
 465		*ptr++ = htonl(opts->tsecr);
 466	}
 467
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 468	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
 469		*ptr++ = htonl((TCPOPT_NOP << 24) |
 470			       (TCPOPT_NOP << 16) |
 471			       (TCPOPT_SACK_PERM << 8) |
 472			       TCPOLEN_SACK_PERM);
 473	}
 474
 475	if (unlikely(OPTION_WSCALE & options)) {
 476		*ptr++ = htonl((TCPOPT_NOP << 24) |
 477			       (TCPOPT_WINDOW << 16) |
 478			       (TCPOLEN_WINDOW << 8) |
 479			       opts->ws);
 480	}
 481
 482	if (unlikely(opts->num_sack_blocks)) {
 483		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
 484			tp->duplicate_sack : tp->selective_acks;
 485		int this_sack;
 486
 487		*ptr++ = htonl((TCPOPT_NOP  << 24) |
 488			       (TCPOPT_NOP  << 16) |
 489			       (TCPOPT_SACK <<  8) |
 490			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
 491						     TCPOLEN_SACK_PERBLOCK)));
 492
 493		for (this_sack = 0; this_sack < opts->num_sack_blocks;
 494		     ++this_sack) {
 495			*ptr++ = htonl(sp[this_sack].start_seq);
 496			*ptr++ = htonl(sp[this_sack].end_seq);
 497		}
 498
 499		tp->rx_opt.dsack = 0;
 500	}
 501
 502	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
 503		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
 504
 505		*ptr++ = htonl((TCPOPT_EXP << 24) |
 506			       ((TCPOLEN_EXP_FASTOPEN_BASE + foc->len) << 16) |
 507			       TCPOPT_FASTOPEN_MAGIC);
 508
 509		memcpy(ptr, foc->val, foc->len);
 510		if ((foc->len & 3) == 2) {
 511			u8 *align = ((u8 *)ptr) + foc->len;
 512			align[0] = align[1] = TCPOPT_NOP;
 513		}
 514		ptr += (foc->len + 3) >> 2;
 515	}
 516}
 517
 518/* Compute TCP options for SYN packets. This is not the final
 519 * network wire format yet.
 520 */
 521static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 522				struct tcp_out_options *opts,
 523				struct tcp_md5sig_key **md5)
 524{
 525	struct tcp_sock *tp = tcp_sk(sk);
 
 526	unsigned int remaining = MAX_TCP_OPTION_SPACE;
 527	struct tcp_fastopen_request *fastopen = tp->fastopen_req;
 
 
 528
 529#ifdef CONFIG_TCP_MD5SIG
 530	*md5 = tp->af_specific->md5_lookup(sk, sk);
 531	if (*md5) {
 532		opts->options |= OPTION_MD5;
 533		remaining -= TCPOLEN_MD5SIG_ALIGNED;
 534	}
 535#else
 536	*md5 = NULL;
 537#endif
 538
 539	/* We always get an MSS option.  The option bytes which will be seen in
 540	 * normal data packets should timestamps be used, must be in the MSS
 541	 * advertised.  But we subtract them from tp->mss_cache so that
 542	 * calculations in tcp_sendmsg are simpler etc.  So account for this
 543	 * fact here if necessary.  If we don't do this correctly, as a
 544	 * receiver we won't recognize data packets as being full sized when we
 545	 * should, and thus we won't abide by the delayed ACK rules correctly.
 546	 * SACKs don't matter, we never delay an ACK when we have any of those
 547	 * going out.  */
 548	opts->mss = tcp_advertise_mss(sk);
 549	remaining -= TCPOLEN_MSS_ALIGNED;
 550
 551	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
 552		opts->options |= OPTION_TS;
 553		opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset;
 554		opts->tsecr = tp->rx_opt.ts_recent;
 555		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 556	}
 557	if (likely(sysctl_tcp_window_scaling)) {
 558		opts->ws = tp->rx_opt.rcv_wscale;
 559		opts->options |= OPTION_WSCALE;
 560		remaining -= TCPOLEN_WSCALE_ALIGNED;
 561	}
 562	if (likely(sysctl_tcp_sack)) {
 563		opts->options |= OPTION_SACK_ADVERTISE;
 564		if (unlikely(!(OPTION_TS & opts->options)))
 565			remaining -= TCPOLEN_SACKPERM_ALIGNED;
 566	}
 567
 568	if (fastopen && fastopen->cookie.len >= 0) {
 569		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len;
 570		need = (need + 3) & ~3U;  /* Align to 32 bits */
 571		if (remaining >= need) {
 572			opts->options |= OPTION_FAST_OPEN_COOKIE;
 573			opts->fastopen_cookie = &fastopen->cookie;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 574			remaining -= need;
 575			tp->syn_fastopen = 1;
 576		}
 577	}
 578
 579	return MAX_TCP_OPTION_SPACE - remaining;
 580}
 581
 582/* Set up TCP options for SYN-ACKs. */
 583static unsigned int tcp_synack_options(struct sock *sk,
 584				   struct request_sock *req,
 585				   unsigned int mss, struct sk_buff *skb,
 586				   struct tcp_out_options *opts,
 587				   struct tcp_md5sig_key **md5,
 588				   struct tcp_fastopen_cookie *foc)
 589{
 590	struct inet_request_sock *ireq = inet_rsk(req);
 591	unsigned int remaining = MAX_TCP_OPTION_SPACE;
 
 
 
 592
 593#ifdef CONFIG_TCP_MD5SIG
 594	*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
 595	if (*md5) {
 596		opts->options |= OPTION_MD5;
 597		remaining -= TCPOLEN_MD5SIG_ALIGNED;
 598
 599		/* We can't fit any SACK blocks in a packet with MD5 + TS
 600		 * options. There was discussion about disabling SACK
 601		 * rather than TS in order to fit in better with old,
 602		 * buggy kernels, but that was deemed to be unnecessary.
 603		 */
 604		ireq->tstamp_ok &= !ireq->sack_ok;
 605	}
 606#else
 607	*md5 = NULL;
 608#endif
 609
 610	/* We always send an MSS option. */
 611	opts->mss = mss;
 612	remaining -= TCPOLEN_MSS_ALIGNED;
 613
 614	if (likely(ireq->wscale_ok)) {
 615		opts->ws = ireq->rcv_wscale;
 616		opts->options |= OPTION_WSCALE;
 617		remaining -= TCPOLEN_WSCALE_ALIGNED;
 618	}
 619	if (likely(ireq->tstamp_ok)) {
 620		opts->options |= OPTION_TS;
 621		opts->tsval = TCP_SKB_CB(skb)->when;
 622		opts->tsecr = req->ts_recent;
 623		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 624	}
 625	if (likely(ireq->sack_ok)) {
 626		opts->options |= OPTION_SACK_ADVERTISE;
 627		if (unlikely(!ireq->tstamp_ok))
 628			remaining -= TCPOLEN_SACKPERM_ALIGNED;
 629	}
 630	if (foc != NULL) {
 631		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
 632		need = (need + 3) & ~3U;  /* Align to 32 bits */
 633		if (remaining >= need) {
 634			opts->options |= OPTION_FAST_OPEN_COOKIE;
 635			opts->fastopen_cookie = foc;
 
 
 
 
 
 
 
 
 
 
 636			remaining -= need;
 
 
 
 
 637		}
 638	}
 639
 640	return MAX_TCP_OPTION_SPACE - remaining;
 641}
 642
 643/* Compute TCP options for ESTABLISHED sockets. This is not the
 644 * final wire format yet.
 645 */
 646static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
 647					struct tcp_out_options *opts,
 648					struct tcp_md5sig_key **md5)
 649{
 650	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
 651	struct tcp_sock *tp = tcp_sk(sk);
 652	unsigned int size = 0;
 653	unsigned int eff_sacks;
 654
 655	opts->options = 0;
 656
 657#ifdef CONFIG_TCP_MD5SIG
 658	*md5 = tp->af_specific->md5_lookup(sk, sk);
 659	if (unlikely(*md5)) {
 660		opts->options |= OPTION_MD5;
 661		size += TCPOLEN_MD5SIG_ALIGNED;
 662	}
 663#else
 664	*md5 = NULL;
 665#endif
 666
 667	if (likely(tp->rx_opt.tstamp_ok)) {
 668		opts->options |= OPTION_TS;
 669		opts->tsval = tcb ? tcb->when + tp->tsoffset : 0;
 670		opts->tsecr = tp->rx_opt.ts_recent;
 671		size += TCPOLEN_TSTAMP_ALIGNED;
 672	}
 673
 674	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
 675	if (unlikely(eff_sacks)) {
 676		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
 677		opts->num_sack_blocks =
 678			min_t(unsigned int, eff_sacks,
 679			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
 680			      TCPOLEN_SACK_PERBLOCK);
 681		size += TCPOLEN_SACK_BASE_ALIGNED +
 682			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
 683	}
 684
 685	return size;
 686}
 687
 688
 689/* TCP SMALL QUEUES (TSQ)
 690 *
 691 * TSQ goal is to keep small amount of skbs per tcp flow in tx queues (qdisc+dev)
 692 * to reduce RTT and bufferbloat.
 693 * We do this using a special skb destructor (tcp_wfree).
 694 *
 695 * Its important tcp_wfree() can be replaced by sock_wfree() in the event skb
 696 * needs to be reallocated in a driver.
 697 * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc
 698 *
 699 * Since transmit from skb destructor is forbidden, we use a tasklet
 700 * to process all sockets that eventually need to send more skbs.
 701 * We use one tasklet per cpu, with its own queue of sockets.
 702 */
 703struct tsq_tasklet {
 704	struct tasklet_struct	tasklet;
 705	struct list_head	head; /* queue of tcp sockets */
 706};
 707static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
 708
 709static void tcp_tsq_handler(struct sock *sk)
 710{
 711	if ((1 << sk->sk_state) &
 712	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
 713	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK))
 714		tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
 715			       0, GFP_ATOMIC);
 716}
 717/*
 718 * One tasklet per cpu tries to send more skbs.
 719 * We run in tasklet context but need to disable irqs when
 720 * transferring tsq->head because tcp_wfree() might
 721 * interrupt us (non NAPI drivers)
 722 */
 723static void tcp_tasklet_func(unsigned long data)
 724{
 725	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
 726	LIST_HEAD(list);
 727	unsigned long flags;
 728	struct list_head *q, *n;
 729	struct tcp_sock *tp;
 730	struct sock *sk;
 731
 732	local_irq_save(flags);
 733	list_splice_init(&tsq->head, &list);
 734	local_irq_restore(flags);
 735
 736	list_for_each_safe(q, n, &list) {
 737		tp = list_entry(q, struct tcp_sock, tsq_node);
 738		list_del(&tp->tsq_node);
 739
 740		sk = (struct sock *)tp;
 741		bh_lock_sock(sk);
 742
 743		if (!sock_owned_by_user(sk)) {
 744			tcp_tsq_handler(sk);
 745		} else {
 746			/* defer the work to tcp_release_cb() */
 747			set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
 748		}
 749		bh_unlock_sock(sk);
 750
 751		clear_bit(TSQ_QUEUED, &tp->tsq_flags);
 752		sk_free(sk);
 753	}
 754}
 755
 756#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) |		\
 757			  (1UL << TCP_WRITE_TIMER_DEFERRED) |	\
 758			  (1UL << TCP_DELACK_TIMER_DEFERRED) |	\
 759			  (1UL << TCP_MTU_REDUCED_DEFERRED))
 760/**
 761 * tcp_release_cb - tcp release_sock() callback
 762 * @sk: socket
 763 *
 764 * called from release_sock() to perform protocol dependent
 765 * actions before socket release.
 766 */
 767void tcp_release_cb(struct sock *sk)
 768{
 769	struct tcp_sock *tp = tcp_sk(sk);
 770	unsigned long flags, nflags;
 771
 772	/* perform an atomic operation only if at least one flag is set */
 773	do {
 774		flags = tp->tsq_flags;
 775		if (!(flags & TCP_DEFERRED_ALL))
 776			return;
 777		nflags = flags & ~TCP_DEFERRED_ALL;
 778	} while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);
 779
 780	if (flags & (1UL << TCP_TSQ_DEFERRED))
 781		tcp_tsq_handler(sk);
 782
 783	/* Here begins the tricky part :
 784	 * We are called from release_sock() with :
 785	 * 1) BH disabled
 786	 * 2) sk_lock.slock spinlock held
 787	 * 3) socket owned by us (sk->sk_lock.owned == 1)
 788	 *
 789	 * But following code is meant to be called from BH handlers,
 790	 * so we should keep BH disabled, but early release socket ownership
 791	 */
 792	sock_release_ownership(sk);
 793
 794	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
 795		tcp_write_timer_handler(sk);
 796		__sock_put(sk);
 797	}
 798	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {
 799		tcp_delack_timer_handler(sk);
 800		__sock_put(sk);
 801	}
 802	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
 803		sk->sk_prot->mtu_reduced(sk);
 804		__sock_put(sk);
 805	}
 806}
 807EXPORT_SYMBOL(tcp_release_cb);
 808
 809void __init tcp_tasklet_init(void)
 810{
 811	int i;
 812
 813	for_each_possible_cpu(i) {
 814		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);
 815
 816		INIT_LIST_HEAD(&tsq->head);
 817		tasklet_init(&tsq->tasklet,
 818			     tcp_tasklet_func,
 819			     (unsigned long)tsq);
 820	}
 821}
 822
 823/*
 824 * Write buffer destructor automatically called from kfree_skb.
 825 * We can't xmit new skbs from this context, as we might already
 826 * hold qdisc lock.
 827 */
 828void tcp_wfree(struct sk_buff *skb)
 829{
 830	struct sock *sk = skb->sk;
 831	struct tcp_sock *tp = tcp_sk(sk);
 832
 833	if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
 834	    !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
 835		unsigned long flags;
 836		struct tsq_tasklet *tsq;
 837
 838		/* Keep a ref on socket.
 839		 * This last ref will be released in tcp_tasklet_func()
 840		 */
 841		atomic_sub(skb->truesize - 1, &sk->sk_wmem_alloc);
 842
 843		/* queue this socket to tasklet queue */
 844		local_irq_save(flags);
 845		tsq = &__get_cpu_var(tsq_tasklet);
 846		list_add(&tp->tsq_node, &tsq->head);
 847		tasklet_schedule(&tsq->tasklet);
 848		local_irq_restore(flags);
 849	} else {
 850		sock_wfree(skb);
 851	}
 852}
 853
 854/* This routine actually transmits TCP packets queued in by
 855 * tcp_do_sendmsg().  This is used by both the initial
 856 * transmission and possible later retransmissions.
 857 * All SKB's seen here are completely headerless.  It is our
 858 * job to build the TCP header, and pass the packet down to
 859 * IP so it can do the same plus pass the packet off to the
 860 * device.
 861 *
 862 * We are working here with either a clone of the original
 863 * SKB, or a fresh unique copy made by the retransmit engine.
 864 */
 865static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 866			    gfp_t gfp_mask)
 867{
 868	const struct inet_connection_sock *icsk = inet_csk(sk);
 869	struct inet_sock *inet;
 870	struct tcp_sock *tp;
 871	struct tcp_skb_cb *tcb;
 872	struct tcp_out_options opts;
 873	unsigned int tcp_options_size, tcp_header_size;
 874	struct tcp_md5sig_key *md5;
 875	struct tcphdr *th;
 876	int err;
 877
 878	BUG_ON(!skb || !tcp_skb_pcount(skb));
 879
 880	if (clone_it) {
 881		const struct sk_buff *fclone = skb + 1;
 882
 883		skb_mstamp_get(&skb->skb_mstamp);
 884
 885		if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
 886			     fclone->fclone == SKB_FCLONE_CLONE))
 887			NET_INC_STATS(sock_net(sk),
 888				      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
 889
 
 890		if (unlikely(skb_cloned(skb)))
 891			skb = pskb_copy(skb, gfp_mask);
 892		else
 893			skb = skb_clone(skb, gfp_mask);
 894		if (unlikely(!skb))
 895			return -ENOBUFS;
 896		/* Our usage of tstamp should remain private */
 897		skb->tstamp.tv64 = 0;
 898	}
 899
 900	inet = inet_sk(sk);
 901	tp = tcp_sk(sk);
 902	tcb = TCP_SKB_CB(skb);
 903	memset(&opts, 0, sizeof(opts));
 904
 905	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
 906		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
 907	else
 908		tcp_options_size = tcp_established_options(sk, skb, &opts,
 909							   &md5);
 910	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
 911
 912	if (tcp_packets_in_flight(tp) == 0)
 913		tcp_ca_event(sk, CA_EVENT_TX_START);
 914
 915	/* if no packet is in qdisc/device queue, then allow XPS to select
 916	 * another queue.
 917	 */
 918	skb->ooo_okay = sk_wmem_alloc_get(sk) == 0;
 919
 920	skb_push(skb, tcp_header_size);
 921	skb_reset_transport_header(skb);
 922
 923	skb_orphan(skb);
 924	skb->sk = sk;
 925	skb->destructor = tcp_wfree;
 926	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
 927
 928	/* Build TCP header and checksum it. */
 929	th = tcp_hdr(skb);
 930	th->source		= inet->inet_sport;
 931	th->dest		= inet->inet_dport;
 932	th->seq			= htonl(tcb->seq);
 933	th->ack_seq		= htonl(tp->rcv_nxt);
 934	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
 935					tcb->tcp_flags);
 936
 937	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
 938		/* RFC1323: The window in SYN & SYN/ACK segments
 939		 * is never scaled.
 940		 */
 941		th->window	= htons(min(tp->rcv_wnd, 65535U));
 942	} else {
 943		th->window	= htons(tcp_select_window(sk));
 944	}
 945	th->check		= 0;
 946	th->urg_ptr		= 0;
 947
 948	/* The urg_mode check is necessary during a below snd_una win probe */
 949	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
 950		if (before(tp->snd_up, tcb->seq + 0x10000)) {
 951			th->urg_ptr = htons(tp->snd_up - tcb->seq);
 952			th->urg = 1;
 953		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
 954			th->urg_ptr = htons(0xFFFF);
 955			th->urg = 1;
 956		}
 957	}
 958
 959	tcp_options_write((__be32 *)(th + 1), tp, &opts);
 960	if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
 961		TCP_ECN_send(sk, skb, tcp_header_size);
 962
 963#ifdef CONFIG_TCP_MD5SIG
 964	/* Calculate the MD5 hash, as we have all we need now */
 965	if (md5) {
 966		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
 967		tp->af_specific->calc_md5_hash(opts.hash_location,
 968					       md5, sk, NULL, skb);
 969	}
 970#endif
 971
 972	icsk->icsk_af_ops->send_check(sk, skb);
 973
 974	if (likely(tcb->tcp_flags & TCPHDR_ACK))
 975		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
 976
 977	if (skb->len != tcp_header_size)
 978		tcp_event_data_sent(tp, sk);
 979
 980	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
 981		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
 982			      tcp_skb_pcount(skb));
 983
 984	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
 985	if (likely(err <= 0))
 986		return err;
 987
 988	tcp_enter_cwr(sk, 1);
 989
 990	return net_xmit_eval(err);
 991}
 992
 993/* This routine just queues the buffer for sending.
 994 *
 995 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
 996 * otherwise socket can stall.
 997 */
 998static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 999{
1000	struct tcp_sock *tp = tcp_sk(sk);
1001
1002	/* Advance write_seq and place onto the write_queue. */
1003	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
1004	skb_header_release(skb);
1005	tcp_add_write_queue_tail(sk, skb);
1006	sk->sk_wmem_queued += skb->truesize;
1007	sk_mem_charge(sk, skb->truesize);
1008}
1009
1010/* Initialize TSO segments for a packet. */
1011static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
1012				 unsigned int mss_now)
1013{
1014	struct skb_shared_info *shinfo = skb_shinfo(skb);
1015
1016	/* Make sure we own this skb before messing gso_size/gso_segs */
1017	WARN_ON_ONCE(skb_cloned(skb));
1018
1019	if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
1020		/* Avoid the costly divide in the normal
1021		 * non-TSO case.
1022		 */
1023		shinfo->gso_segs = 1;
1024		shinfo->gso_size = 0;
1025		shinfo->gso_type = 0;
1026	} else {
1027		shinfo->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
1028		shinfo->gso_size = mss_now;
1029		shinfo->gso_type = sk->sk_gso_type;
1030	}
1031}
1032
1033/* When a modification to fackets out becomes necessary, we need to check
1034 * skb is counted to fackets_out or not.
1035 */
1036static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
1037				   int decr)
1038{
1039	struct tcp_sock *tp = tcp_sk(sk);
1040
1041	if (!tp->sacked_out || tcp_is_reno(tp))
1042		return;
1043
1044	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
1045		tp->fackets_out -= decr;
1046}
1047
1048/* Pcount in the middle of the write queue got changed, we need to do various
1049 * tweaks to fix counters
1050 */
1051static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1052{
1053	struct tcp_sock *tp = tcp_sk(sk);
1054
1055	tp->packets_out -= decr;
1056
1057	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1058		tp->sacked_out -= decr;
1059	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1060		tp->retrans_out -= decr;
1061	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1062		tp->lost_out -= decr;
1063
1064	/* Reno case is special. Sigh... */
1065	if (tcp_is_reno(tp) && decr > 0)
1066		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1067
1068	tcp_adjust_fackets_out(sk, skb, decr);
1069
1070	if (tp->lost_skb_hint &&
1071	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
1072	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
1073		tp->lost_cnt_hint -= decr;
1074
1075	tcp_verify_left_out(tp);
1076}
1077
1078/* Function to create two new TCP segments.  Shrinks the given segment
1079 * to the specified size and appends a new segment with the rest of the
1080 * packet to the list.  This won't be called frequently, I hope.
1081 * Remember, these are still headerless SKBs at this point.
1082 */
1083int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1084		 unsigned int mss_now)
1085{
1086	struct tcp_sock *tp = tcp_sk(sk);
1087	struct sk_buff *buff;
1088	int nsize, old_factor;
1089	int nlen;
1090	u8 flags;
1091
1092	if (WARN_ON(len > skb->len))
1093		return -EINVAL;
1094
1095	nsize = skb_headlen(skb) - len;
1096	if (nsize < 0)
1097		nsize = 0;
1098
1099	if (skb_unclone(skb, GFP_ATOMIC))
 
 
1100		return -ENOMEM;
1101
1102	/* Get a new skb... force flag on. */
1103	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
1104	if (buff == NULL)
1105		return -ENOMEM; /* We'll just try again later. */
1106
1107	sk->sk_wmem_queued += buff->truesize;
1108	sk_mem_charge(sk, buff->truesize);
1109	nlen = skb->len - len - nsize;
1110	buff->truesize += nlen;
1111	skb->truesize -= nlen;
1112
1113	/* Correct the sequence numbers. */
1114	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1115	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1116	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1117
1118	/* PSH and FIN should only be set in the second packet. */
1119	flags = TCP_SKB_CB(skb)->tcp_flags;
1120	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
1121	TCP_SKB_CB(buff)->tcp_flags = flags;
1122	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1123
1124	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
1125		/* Copy and checksum data tail into the new buffer. */
1126		buff->csum = csum_partial_copy_nocheck(skb->data + len,
1127						       skb_put(buff, nsize),
1128						       nsize, 0);
1129
1130		skb_trim(skb, len);
1131
1132		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
1133	} else {
1134		skb->ip_summed = CHECKSUM_PARTIAL;
1135		skb_split(skb, buff, len);
1136	}
1137
1138	buff->ip_summed = skb->ip_summed;
1139
1140	/* Looks stupid, but our code really uses when of
1141	 * skbs, which it never sent before. --ANK
1142	 */
1143	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
1144	buff->tstamp = skb->tstamp;
1145
1146	old_factor = tcp_skb_pcount(skb);
1147
1148	/* Fix up tso_factor for both original and new SKB.  */
1149	tcp_set_skb_tso_segs(sk, skb, mss_now);
1150	tcp_set_skb_tso_segs(sk, buff, mss_now);
1151
1152	/* If this packet has been sent out already, we must
1153	 * adjust the various packet counters.
1154	 */
1155	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
1156		int diff = old_factor - tcp_skb_pcount(skb) -
1157			tcp_skb_pcount(buff);
1158
1159		if (diff)
1160			tcp_adjust_pcount(sk, skb, diff);
1161	}
1162
1163	/* Link BUFF into the send queue. */
1164	skb_header_release(buff);
1165	tcp_insert_write_queue_after(skb, buff, sk);
1166
1167	return 0;
1168}
1169
1170/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
1171 * eventually). The difference is that pulled data not copied, but
1172 * immediately discarded.
1173 */
1174static void __pskb_trim_head(struct sk_buff *skb, int len)
1175{
1176	struct skb_shared_info *shinfo;
1177	int i, k, eat;
1178
1179	eat = min_t(int, len, skb_headlen(skb));
1180	if (eat) {
1181		__skb_pull(skb, eat);
 
1182		len -= eat;
1183		if (!len)
1184			return;
1185	}
1186	eat = len;
1187	k = 0;
1188	shinfo = skb_shinfo(skb);
1189	for (i = 0; i < shinfo->nr_frags; i++) {
1190		int size = skb_frag_size(&shinfo->frags[i]);
1191
1192		if (size <= eat) {
1193			skb_frag_unref(skb, i);
1194			eat -= size;
1195		} else {
1196			shinfo->frags[k] = shinfo->frags[i];
1197			if (eat) {
1198				shinfo->frags[k].page_offset += eat;
1199				skb_frag_size_sub(&shinfo->frags[k], eat);
1200				eat = 0;
1201			}
1202			k++;
1203		}
1204	}
1205	shinfo->nr_frags = k;
1206
1207	skb_reset_tail_pointer(skb);
1208	skb->data_len -= len;
1209	skb->len = skb->data_len;
1210}
1211
1212/* Remove acked data from a packet in the transmit queue. */
1213int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
1214{
1215	if (skb_unclone(skb, GFP_ATOMIC))
1216		return -ENOMEM;
1217
1218	__pskb_trim_head(skb, len);
1219
1220	TCP_SKB_CB(skb)->seq += len;
1221	skb->ip_summed = CHECKSUM_PARTIAL;
1222
1223	skb->truesize	     -= len;
1224	sk->sk_wmem_queued   -= len;
1225	sk_mem_uncharge(sk, len);
1226	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
1227
1228	/* Any change of skb->len requires recalculation of tso factor. */
1229	if (tcp_skb_pcount(skb) > 1)
1230		tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
1231
1232	return 0;
1233}
1234
1235/* Calculate MSS not accounting any TCP options.  */
1236static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
1237{
1238	const struct tcp_sock *tp = tcp_sk(sk);
1239	const struct inet_connection_sock *icsk = inet_csk(sk);
1240	int mss_now;
1241
1242	/* Calculate base mss without TCP options:
1243	   It is MMS_S - sizeof(tcphdr) of rfc1122
1244	 */
1245	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
1246
1247	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
1248	if (icsk->icsk_af_ops->net_frag_header_len) {
1249		const struct dst_entry *dst = __sk_dst_get(sk);
1250
1251		if (dst && dst_allfrag(dst))
1252			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
1253	}
1254
1255	/* Clamp it (mss_clamp does not include tcp options) */
1256	if (mss_now > tp->rx_opt.mss_clamp)
1257		mss_now = tp->rx_opt.mss_clamp;
1258
1259	/* Now subtract optional transport overhead */
1260	mss_now -= icsk->icsk_ext_hdr_len;
1261
1262	/* Then reserve room for full set of TCP options and 8 bytes of data */
1263	if (mss_now < 48)
1264		mss_now = 48;
 
 
 
 
1265	return mss_now;
1266}
1267
1268/* Calculate MSS. Not accounting for SACKs here.  */
1269int tcp_mtu_to_mss(struct sock *sk, int pmtu)
1270{
1271	/* Subtract TCP options size, not including SACKs */
1272	return __tcp_mtu_to_mss(sk, pmtu) -
1273	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
1274}
1275
1276/* Inverse of above */
1277int tcp_mss_to_mtu(struct sock *sk, int mss)
1278{
1279	const struct tcp_sock *tp = tcp_sk(sk);
1280	const struct inet_connection_sock *icsk = inet_csk(sk);
1281	int mtu;
1282
1283	mtu = mss +
1284	      tp->tcp_header_len +
1285	      icsk->icsk_ext_hdr_len +
1286	      icsk->icsk_af_ops->net_header_len;
1287
1288	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
1289	if (icsk->icsk_af_ops->net_frag_header_len) {
1290		const struct dst_entry *dst = __sk_dst_get(sk);
1291
1292		if (dst && dst_allfrag(dst))
1293			mtu += icsk->icsk_af_ops->net_frag_header_len;
1294	}
1295	return mtu;
1296}
1297
1298/* MTU probing init per socket */
1299void tcp_mtup_init(struct sock *sk)
1300{
1301	struct tcp_sock *tp = tcp_sk(sk);
1302	struct inet_connection_sock *icsk = inet_csk(sk);
1303
1304	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
1305	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
1306			       icsk->icsk_af_ops->net_header_len;
1307	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
1308	icsk->icsk_mtup.probe_size = 0;
1309}
1310EXPORT_SYMBOL(tcp_mtup_init);
1311
1312/* This function synchronize snd mss to current pmtu/exthdr set.
1313
1314   tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT counts
1315   for TCP options, but includes only bare TCP header.
1316
1317   tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1318   It is minimum of user_mss and mss received with SYN.
1319   It also does not include TCP options.
1320
1321   inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
1322
1323   tp->mss_cache is current effective sending mss, including
1324   all tcp options except for SACKs. It is evaluated,
1325   taking into account current pmtu, but never exceeds
1326   tp->rx_opt.mss_clamp.
1327
1328   NOTE1. rfc1122 clearly states that advertised MSS
1329   DOES NOT include either tcp or ip options.
1330
1331   NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1332   are READ ONLY outside this function.		--ANK (980731)
1333 */
1334unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
1335{
1336	struct tcp_sock *tp = tcp_sk(sk);
1337	struct inet_connection_sock *icsk = inet_csk(sk);
1338	int mss_now;
1339
1340	if (icsk->icsk_mtup.search_high > pmtu)
1341		icsk->icsk_mtup.search_high = pmtu;
1342
1343	mss_now = tcp_mtu_to_mss(sk, pmtu);
1344	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
1345
1346	/* And store cached results */
1347	icsk->icsk_pmtu_cookie = pmtu;
1348	if (icsk->icsk_mtup.enabled)
1349		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1350	tp->mss_cache = mss_now;
1351
1352	return mss_now;
1353}
1354EXPORT_SYMBOL(tcp_sync_mss);
1355
1356/* Compute the current effective MSS, taking SACKs and IP options,
1357 * and even PMTU discovery events into account.
1358 */
1359unsigned int tcp_current_mss(struct sock *sk)
1360{
1361	const struct tcp_sock *tp = tcp_sk(sk);
1362	const struct dst_entry *dst = __sk_dst_get(sk);
1363	u32 mss_now;
1364	unsigned int header_len;
1365	struct tcp_out_options opts;
1366	struct tcp_md5sig_key *md5;
1367
1368	mss_now = tp->mss_cache;
1369
1370	if (dst) {
1371		u32 mtu = dst_mtu(dst);
1372		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
1373			mss_now = tcp_sync_mss(sk, mtu);
1374	}
1375
1376	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
1377		     sizeof(struct tcphdr);
1378	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
1379	 * some common options. If this is an odd packet (because we have SACK
1380	 * blocks etc) then our calculated header_len will be different, and
1381	 * we have to adjust mss_now correspondingly */
1382	if (header_len != tp->tcp_header_len) {
1383		int delta = (int) header_len - tp->tcp_header_len;
1384		mss_now -= delta;
1385	}
1386
1387	return mss_now;
1388}
1389
1390/* Congestion window validation. (RFC2861) */
1391static void tcp_cwnd_validate(struct sock *sk)
1392{
1393	struct tcp_sock *tp = tcp_sk(sk);
1394
1395	if (tp->packets_out >= tp->snd_cwnd) {
1396		/* Network is feed fully. */
1397		tp->snd_cwnd_used = 0;
1398		tp->snd_cwnd_stamp = tcp_time_stamp;
1399	} else {
1400		/* Network starves. */
1401		if (tp->packets_out > tp->snd_cwnd_used)
1402			tp->snd_cwnd_used = tp->packets_out;
1403
1404		if (sysctl_tcp_slow_start_after_idle &&
1405		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
1406			tcp_cwnd_application_limited(sk);
1407	}
1408}
1409
1410/* Minshall's variant of the Nagle send check. */
1411static bool tcp_minshall_check(const struct tcp_sock *tp)
1412{
1413	return after(tp->snd_sml, tp->snd_una) &&
1414		!after(tp->snd_sml, tp->snd_nxt);
1415}
1416
1417/* Update snd_sml if this skb is under mss
1418 * Note that a TSO packet might end with a sub-mss segment
1419 * The test is really :
1420 * if ((skb->len % mss) != 0)
1421 *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1422 * But we can avoid doing the divide again given we already have
1423 *  skb_pcount = skb->len / mss_now
1424 */
1425static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1426				const struct sk_buff *skb)
1427{
1428	if (skb->len < tcp_skb_pcount(skb) * mss_now)
1429		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1430}
1431
1432/* Return false, if packet can be sent now without violation Nagle's rules:
1433 * 1. It is full sized. (provided by caller in %partial bool)
1434 * 2. Or it contains FIN. (already checked by caller)
1435 * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1436 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1437 *    With Minshall's modification: all sent small packets are ACKed.
1438 */
1439static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
1440			    int nonagle)
1441{
1442	return partial &&
1443		((nonagle & TCP_NAGLE_CORK) ||
1444		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1445}
1446/* Returns the portion of skb which can be sent right away */
1447static unsigned int tcp_mss_split_point(const struct sock *sk,
1448					const struct sk_buff *skb,
1449					unsigned int mss_now,
1450					unsigned int max_segs,
1451					int nonagle)
1452{
1453	const struct tcp_sock *tp = tcp_sk(sk);
1454	u32 partial, needed, window, max_len;
1455
1456	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1457	max_len = mss_now * max_segs;
1458
1459	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
1460		return max_len;
1461
1462	needed = min(skb->len, window);
1463
1464	if (max_len <= needed)
1465		return max_len;
1466
1467	partial = needed % mss_now;
1468	/* If last segment is not a full MSS, check if Nagle rules allow us
1469	 * to include this last segment in this skb.
1470	 * Otherwise, we'll split the skb at last MSS boundary
1471	 */
1472	if (tcp_nagle_check(partial != 0, tp, nonagle))
1473		return needed - partial;
1474
1475	return needed;
1476}
1477
1478/* Can at least one segment of SKB be sent right now, according to the
1479 * congestion window rules?  If so, return how many segments are allowed.
1480 */
1481static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
1482					 const struct sk_buff *skb)
1483{
1484	u32 in_flight, cwnd;
1485
1486	/* Don't be strict about the congestion window for the final FIN.  */
1487	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
1488	    tcp_skb_pcount(skb) == 1)
1489		return 1;
1490
1491	in_flight = tcp_packets_in_flight(tp);
1492	cwnd = tp->snd_cwnd;
1493	if (in_flight < cwnd)
1494		return (cwnd - in_flight);
1495
1496	return 0;
1497}
1498
1499/* Initialize TSO state of a skb.
1500 * This must be invoked the first time we consider transmitting
1501 * SKB onto the wire.
1502 */
1503static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
1504			     unsigned int mss_now)
1505{
1506	int tso_segs = tcp_skb_pcount(skb);
1507
1508	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
1509		tcp_set_skb_tso_segs(sk, skb, mss_now);
1510		tso_segs = tcp_skb_pcount(skb);
1511	}
1512	return tso_segs;
1513}
1514
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1515
1516/* Return true if the Nagle test allows this packet to be
1517 * sent now.
1518 */
1519static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
1520				  unsigned int cur_mss, int nonagle)
1521{
1522	/* Nagle rule does not apply to frames, which sit in the middle of the
1523	 * write_queue (they have no chances to get new data).
1524	 *
1525	 * This is implemented in the callers, where they modify the 'nonagle'
1526	 * argument based upon the location of SKB in the send queue.
1527	 */
1528	if (nonagle & TCP_NAGLE_PUSH)
1529		return true;
1530
1531	/* Don't use the nagle rule for urgent data (or for the final FIN). */
1532	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
 
 
 
1533		return true;
1534
1535	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
1536		return true;
1537
1538	return false;
1539}
1540
1541/* Does at least the first segment of SKB fit into the send window? */
1542static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
1543			     const struct sk_buff *skb,
1544			     unsigned int cur_mss)
1545{
1546	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1547
1548	if (skb->len > cur_mss)
1549		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1550
1551	return !after(end_seq, tcp_wnd_end(tp));
1552}
1553
1554/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
1555 * should be put on the wire right now.  If so, it returns the number of
1556 * packets allowed by the congestion window.
1557 */
1558static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
1559				 unsigned int cur_mss, int nonagle)
1560{
1561	const struct tcp_sock *tp = tcp_sk(sk);
1562	unsigned int cwnd_quota;
1563
1564	tcp_init_tso_segs(sk, skb, cur_mss);
1565
1566	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
1567		return 0;
1568
1569	cwnd_quota = tcp_cwnd_test(tp, skb);
1570	if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
1571		cwnd_quota = 0;
1572
1573	return cwnd_quota;
1574}
1575
1576/* Test if sending is allowed right now. */
1577bool tcp_may_send_now(struct sock *sk)
1578{
1579	const struct tcp_sock *tp = tcp_sk(sk);
1580	struct sk_buff *skb = tcp_send_head(sk);
1581
1582	return skb &&
1583		tcp_snd_test(sk, skb, tcp_current_mss(sk),
1584			     (tcp_skb_is_last(sk, skb) ?
1585			      tp->nonagle : TCP_NAGLE_PUSH));
1586}
1587
1588/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1589 * which is put after SKB on the list.  It is very much like
1590 * tcp_fragment() except that it may make several kinds of assumptions
1591 * in order to speed up the splitting operation.  In particular, we
1592 * know that all the data is in scatter-gather pages, and that the
1593 * packet has never been sent out before (and thus is not cloned).
1594 */
1595static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1596			unsigned int mss_now, gfp_t gfp)
1597{
1598	struct sk_buff *buff;
1599	int nlen = skb->len - len;
1600	u8 flags;
1601
1602	/* All of a TSO frame must be composed of paged data.  */
1603	if (skb->len != skb->data_len)
1604		return tcp_fragment(sk, skb, len, mss_now);
1605
1606	buff = sk_stream_alloc_skb(sk, 0, gfp);
1607	if (unlikely(buff == NULL))
1608		return -ENOMEM;
1609
1610	sk->sk_wmem_queued += buff->truesize;
1611	sk_mem_charge(sk, buff->truesize);
1612	buff->truesize += nlen;
1613	skb->truesize -= nlen;
1614
1615	/* Correct the sequence numbers. */
1616	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1617	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1618	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1619
1620	/* PSH and FIN should only be set in the second packet. */
1621	flags = TCP_SKB_CB(skb)->tcp_flags;
1622	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
1623	TCP_SKB_CB(buff)->tcp_flags = flags;
1624
1625	/* This packet was never sent out yet, so no SACK bits. */
1626	TCP_SKB_CB(buff)->sacked = 0;
1627
1628	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
1629	skb_split(skb, buff, len);
1630
1631	/* Fix up tso_factor for both original and new SKB.  */
1632	tcp_set_skb_tso_segs(sk, skb, mss_now);
1633	tcp_set_skb_tso_segs(sk, buff, mss_now);
1634
1635	/* Link BUFF into the send queue. */
1636	skb_header_release(buff);
1637	tcp_insert_write_queue_after(skb, buff, sk);
1638
1639	return 0;
1640}
1641
1642/* Try to defer sending, if possible, in order to minimize the amount
1643 * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1644 *
1645 * This algorithm is from John Heffner.
1646 */
1647static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1648{
1649	struct tcp_sock *tp = tcp_sk(sk);
1650	const struct inet_connection_sock *icsk = inet_csk(sk);
1651	u32 send_win, cong_win, limit, in_flight;
1652	int win_divisor;
1653
1654	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1655		goto send_now;
1656
1657	if (icsk->icsk_ca_state != TCP_CA_Open)
1658		goto send_now;
1659
1660	/* Defer for less than two clock ticks. */
1661	if (tp->tso_deferred &&
1662	    (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
1663		goto send_now;
1664
1665	in_flight = tcp_packets_in_flight(tp);
1666
1667	BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));
1668
1669	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1670
1671	/* From in_flight test above, we know that cwnd > in_flight.  */
1672	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1673
1674	limit = min(send_win, cong_win);
1675
1676	/* If a full-sized TSO skb can be sent, do it. */
1677	if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
1678			   tp->xmit_size_goal_segs * tp->mss_cache))
1679		goto send_now;
1680
1681	/* Middle in queue won't get any more data, full sendable already? */
1682	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
1683		goto send_now;
1684
1685	win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
1686	if (win_divisor) {
1687		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1688
1689		/* If at least some fraction of a window is available,
1690		 * just use it.
1691		 */
1692		chunk /= win_divisor;
1693		if (limit >= chunk)
1694			goto send_now;
1695	} else {
1696		/* Different approach, try not to defer past a single
1697		 * ACK.  Receiver should ACK every other full sized
1698		 * frame, so if we have space for more than 3 frames
1699		 * then send now.
1700		 */
1701		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
1702			goto send_now;
1703	}
1704
1705	/* Ok, it looks like it is advisable to defer.
1706	 * Do not rearm the timer if already set to not break TCP ACK clocking.
1707	 */
1708	if (!tp->tso_deferred)
1709		tp->tso_deferred = 1 | (jiffies << 1);
1710
1711	return true;
1712
1713send_now:
1714	tp->tso_deferred = 0;
1715	return false;
1716}
1717
1718/* Create a new MTU probe if we are ready.
1719 * MTU probe is regularly attempting to increase the path MTU by
1720 * deliberately sending larger packets.  This discovers routing
1721 * changes resulting in larger path MTUs.
1722 *
1723 * Returns 0 if we should wait to probe (no cwnd available),
1724 *         1 if a probe was sent,
1725 *         -1 otherwise
1726 */
1727static int tcp_mtu_probe(struct sock *sk)
1728{
1729	struct tcp_sock *tp = tcp_sk(sk);
1730	struct inet_connection_sock *icsk = inet_csk(sk);
1731	struct sk_buff *skb, *nskb, *next;
1732	int len;
1733	int probe_size;
1734	int size_needed;
1735	int copy;
1736	int mss_now;
1737
1738	/* Not currently probing/verifying,
1739	 * not in recovery,
1740	 * have enough cwnd, and
1741	 * not SACKing (the variable headers throw things off) */
1742	if (!icsk->icsk_mtup.enabled ||
1743	    icsk->icsk_mtup.probe_size ||
1744	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
1745	    tp->snd_cwnd < 11 ||
1746	    tp->rx_opt.num_sacks || tp->rx_opt.dsack)
1747		return -1;
1748
1749	/* Very simple search strategy: just double the MSS. */
1750	mss_now = tcp_current_mss(sk);
1751	probe_size = 2 * tp->mss_cache;
1752	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
1753	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
1754		/* TODO: set timer for probe_converge_event */
1755		return -1;
1756	}
1757
1758	/* Have enough data in the send queue to probe? */
1759	if (tp->write_seq - tp->snd_nxt < size_needed)
1760		return -1;
1761
1762	if (tp->snd_wnd < size_needed)
1763		return -1;
1764	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
1765		return 0;
1766
1767	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
1768	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
1769		if (!tcp_packets_in_flight(tp))
1770			return -1;
1771		else
1772			return 0;
1773	}
1774
1775	/* We're allowed to probe.  Build it now. */
1776	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
1777		return -1;
1778	sk->sk_wmem_queued += nskb->truesize;
1779	sk_mem_charge(sk, nskb->truesize);
1780
1781	skb = tcp_send_head(sk);
1782
1783	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
1784	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
1785	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
1786	TCP_SKB_CB(nskb)->sacked = 0;
1787	nskb->csum = 0;
1788	nskb->ip_summed = skb->ip_summed;
1789
1790	tcp_insert_write_queue_before(nskb, skb, sk);
1791
1792	len = 0;
1793	tcp_for_write_queue_from_safe(skb, next, sk) {
1794		copy = min_t(int, skb->len, probe_size - len);
1795		if (nskb->ip_summed)
1796			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
1797		else
1798			nskb->csum = skb_copy_and_csum_bits(skb, 0,
1799							    skb_put(nskb, copy),
1800							    copy, nskb->csum);
1801
1802		if (skb->len <= copy) {
1803			/* We've eaten all the data from this skb.
1804			 * Throw it away. */
1805			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1806			tcp_unlink_write_queue(skb, sk);
1807			sk_wmem_free_skb(sk, skb);
1808		} else {
1809			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
1810						   ~(TCPHDR_FIN|TCPHDR_PSH);
1811			if (!skb_shinfo(skb)->nr_frags) {
1812				skb_pull(skb, copy);
1813				if (skb->ip_summed != CHECKSUM_PARTIAL)
1814					skb->csum = csum_partial(skb->data,
1815								 skb->len, 0);
1816			} else {
1817				__pskb_trim_head(skb, copy);
1818				tcp_set_skb_tso_segs(sk, skb, mss_now);
1819			}
1820			TCP_SKB_CB(skb)->seq += copy;
1821		}
1822
1823		len += copy;
1824
1825		if (len >= probe_size)
1826			break;
1827	}
1828	tcp_init_tso_segs(sk, nskb, nskb->len);
1829
1830	/* We're ready to send.  If this fails, the probe will
1831	 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
1832	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
1833	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
1834		/* Decrement cwnd here because we are sending
1835		 * effectively two packets. */
1836		tp->snd_cwnd--;
1837		tcp_event_new_data_sent(sk, nskb);
1838
1839		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
1840		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
1841		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
1842
1843		return 1;
1844	}
1845
1846	return -1;
1847}
1848
1849/* This routine writes packets to the network.  It advances the
1850 * send_head.  This happens as incoming acks open up the remote
1851 * window for us.
1852 *
1853 * LARGESEND note: !tcp_urg_mode is overkill, only frames between
1854 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
1855 * account rare use of URG, this is not a big flaw.
1856 *
1857 * Send at most one packet when push_one > 0. Temporarily ignore
1858 * cwnd limit to force at most one packet out when push_one == 2.
1859
1860 * Returns true, if no segments are in flight and we have queued segments,
1861 * but cannot send anything now because of SWS or another problem.
1862 */
1863static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1864			   int push_one, gfp_t gfp)
1865{
1866	struct tcp_sock *tp = tcp_sk(sk);
1867	struct sk_buff *skb;
1868	unsigned int tso_segs, sent_pkts;
1869	int cwnd_quota;
1870	int result;
1871
1872	sent_pkts = 0;
1873
1874	if (!push_one) {
1875		/* Do MTU probing. */
1876		result = tcp_mtu_probe(sk);
1877		if (!result) {
1878			return false;
1879		} else if (result > 0) {
1880			sent_pkts = 1;
1881		}
1882	}
1883
1884	while ((skb = tcp_send_head(sk))) {
1885		unsigned int limit;
1886
1887		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1888		BUG_ON(!tso_segs);
1889
1890		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE)
1891			goto repair; /* Skip network transmission */
1892
1893		cwnd_quota = tcp_cwnd_test(tp, skb);
1894		if (!cwnd_quota) {
1895			if (push_one == 2)
1896				/* Force out a loss probe pkt. */
1897				cwnd_quota = 1;
1898			else
1899				break;
1900		}
1901
1902		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1903			break;
1904
1905		if (tso_segs == 1) {
1906			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1907						     (tcp_skb_is_last(sk, skb) ?
1908						      nonagle : TCP_NAGLE_PUSH))))
1909				break;
1910		} else {
1911			if (!push_one && tcp_tso_should_defer(sk, skb))
1912				break;
1913		}
1914
1915		/* TCP Small Queues :
1916		 * Control number of packets in qdisc/devices to two packets / or ~1 ms.
1917		 * This allows for :
1918		 *  - better RTT estimation and ACK scheduling
1919		 *  - faster recovery
1920		 *  - high rates
1921		 * Alas, some drivers / subsystems require a fair amount
1922		 * of queued bytes to ensure line rate.
1923		 * One example is wifi aggregation (802.11 AMPDU)
1924		 */
1925		limit = max_t(unsigned int, sysctl_tcp_limit_output_bytes,
1926			      sk->sk_pacing_rate >> 10);
1927
1928		if (atomic_read(&sk->sk_wmem_alloc) > limit) {
1929			set_bit(TSQ_THROTTLED, &tp->tsq_flags);
1930			/* It is possible TX completion already happened
1931			 * before we set TSQ_THROTTLED, so we must
1932			 * test again the condition.
1933			 * We abuse smp_mb__after_clear_bit() because
1934			 * there is no smp_mb__after_set_bit() yet
1935			 */
1936			smp_mb__after_clear_bit();
1937			if (atomic_read(&sk->sk_wmem_alloc) > limit)
1938				break;
1939		}
1940
1941		limit = mss_now;
1942		if (tso_segs > 1 && !tcp_urg_mode(tp))
1943			limit = tcp_mss_split_point(sk, skb, mss_now,
1944						    min_t(unsigned int,
1945							  cwnd_quota,
1946							  sk->sk_gso_max_segs),
1947						    nonagle);
1948
1949		if (skb->len > limit &&
1950		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
1951			break;
1952
1953		TCP_SKB_CB(skb)->when = tcp_time_stamp;
1954
1955		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
1956			break;
1957
1958repair:
1959		/* Advance the send_head.  This one is sent out.
1960		 * This call will increment packets_out.
1961		 */
1962		tcp_event_new_data_sent(sk, skb);
1963
1964		tcp_minshall_update(tp, mss_now, skb);
1965		sent_pkts += tcp_skb_pcount(skb);
1966
1967		if (push_one)
1968			break;
1969	}
 
 
1970
1971	if (likely(sent_pkts)) {
1972		if (tcp_in_cwnd_reduction(sk))
1973			tp->prr_out += sent_pkts;
1974
1975		/* Send one loss probe per tail loss episode. */
1976		if (push_one != 2)
1977			tcp_schedule_loss_probe(sk);
1978		tcp_cwnd_validate(sk);
1979		return false;
1980	}
1981	return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
1982}
1983
1984bool tcp_schedule_loss_probe(struct sock *sk)
1985{
1986	struct inet_connection_sock *icsk = inet_csk(sk);
1987	struct tcp_sock *tp = tcp_sk(sk);
1988	u32 timeout, tlp_time_stamp, rto_time_stamp;
1989	u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3);
1990
1991	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS))
1992		return false;
1993	/* No consecutive loss probes. */
1994	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
1995		tcp_rearm_rto(sk);
1996		return false;
1997	}
1998	/* Don't do any loss probe on a Fast Open connection before 3WHS
1999	 * finishes.
2000	 */
2001	if (sk->sk_state == TCP_SYN_RECV)
2002		return false;
2003
2004	/* TLP is only scheduled when next timer event is RTO. */
2005	if (icsk->icsk_pending != ICSK_TIME_RETRANS)
2006		return false;
2007
2008	/* Schedule a loss probe in 2*RTT for SACK capable connections
2009	 * in Open state, that are either limited by cwnd or application.
2010	 */
2011	if (sysctl_tcp_early_retrans < 3 || !tp->srtt_us || !tp->packets_out ||
2012	    !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
2013		return false;
2014
2015	if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) &&
2016	     tcp_send_head(sk))
2017		return false;
2018
2019	/* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account
2020	 * for delayed ack when there's one outstanding packet.
2021	 */
2022	timeout = rtt << 1;
2023	if (tp->packets_out == 1)
2024		timeout = max_t(u32, timeout,
2025				(rtt + (rtt >> 1) + TCP_DELACK_MAX));
2026	timeout = max_t(u32, timeout, msecs_to_jiffies(10));
2027
2028	/* If RTO is shorter, just schedule TLP in its place. */
2029	tlp_time_stamp = tcp_time_stamp + timeout;
2030	rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
2031	if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
2032		s32 delta = rto_time_stamp - tcp_time_stamp;
2033		if (delta > 0)
2034			timeout = delta;
2035	}
2036
2037	inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
2038				  TCP_RTO_MAX);
2039	return true;
2040}
2041
2042/* When probe timeout (PTO) fires, send a new segment if one exists, else
2043 * retransmit the last segment.
2044 */
2045void tcp_send_loss_probe(struct sock *sk)
2046{
2047	struct tcp_sock *tp = tcp_sk(sk);
2048	struct sk_buff *skb;
2049	int pcount;
2050	int mss = tcp_current_mss(sk);
2051	int err = -1;
2052
2053	if (tcp_send_head(sk) != NULL) {
2054		err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2055		goto rearm_timer;
2056	}
2057
2058	/* At most one outstanding TLP retransmission. */
2059	if (tp->tlp_high_seq)
2060		goto rearm_timer;
2061
2062	/* Retransmit last segment. */
2063	skb = tcp_write_queue_tail(sk);
2064	if (WARN_ON(!skb))
2065		goto rearm_timer;
2066
2067	pcount = tcp_skb_pcount(skb);
2068	if (WARN_ON(!pcount))
2069		goto rearm_timer;
2070
2071	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
2072		if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss)))
2073			goto rearm_timer;
2074		skb = tcp_write_queue_tail(sk);
2075	}
2076
2077	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
2078		goto rearm_timer;
2079
2080	/* Probe with zero data doesn't trigger fast recovery. */
2081	if (skb->len > 0)
2082		err = __tcp_retransmit_skb(sk, skb);
2083
2084	/* Record snd_nxt for loss detection. */
2085	if (likely(!err))
2086		tp->tlp_high_seq = tp->snd_nxt;
2087
2088rearm_timer:
2089	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2090				  inet_csk(sk)->icsk_rto,
2091				  TCP_RTO_MAX);
2092
2093	if (likely(!err))
2094		NET_INC_STATS_BH(sock_net(sk),
2095				 LINUX_MIB_TCPLOSSPROBES);
2096}
2097
2098/* Push out any pending frames which were held back due to
2099 * TCP_CORK or attempt at coalescing tiny packets.
2100 * The socket must be locked by the caller.
2101 */
2102void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
2103			       int nonagle)
2104{
2105	/* If we are closed, the bytes will have to remain here.
2106	 * In time closedown will finish, we empty the write queue and
2107	 * all will be happy.
2108	 */
2109	if (unlikely(sk->sk_state == TCP_CLOSE))
2110		return;
2111
2112	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
2113			   sk_gfp_atomic(sk, GFP_ATOMIC)))
2114		tcp_check_probe_timer(sk);
2115}
2116
2117/* Send _single_ skb sitting at the send head. This function requires
2118 * true push pending frames to setup probe timer etc.
2119 */
2120void tcp_push_one(struct sock *sk, unsigned int mss_now)
2121{
2122	struct sk_buff *skb = tcp_send_head(sk);
2123
2124	BUG_ON(!skb || skb->len < mss_now);
2125
2126	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2127}
2128
2129/* This function returns the amount that we can raise the
2130 * usable window based on the following constraints
2131 *
2132 * 1. The window can never be shrunk once it is offered (RFC 793)
2133 * 2. We limit memory per socket
2134 *
2135 * RFC 1122:
2136 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
2137 *  RECV.NEXT + RCV.WIN fixed until:
2138 *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
2139 *
2140 * i.e. don't raise the right edge of the window until you can raise
2141 * it at least MSS bytes.
2142 *
2143 * Unfortunately, the recommended algorithm breaks header prediction,
2144 * since header prediction assumes th->window stays fixed.
2145 *
2146 * Strictly speaking, keeping th->window fixed violates the receiver
2147 * side SWS prevention criteria. The problem is that under this rule
2148 * a stream of single byte packets will cause the right side of the
2149 * window to always advance by a single byte.
2150 *
2151 * Of course, if the sender implements sender side SWS prevention
2152 * then this will not be a problem.
2153 *
2154 * BSD seems to make the following compromise:
2155 *
2156 *	If the free space is less than the 1/4 of the maximum
2157 *	space available and the free space is less than 1/2 mss,
2158 *	then set the window to 0.
2159 *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
2160 *	Otherwise, just prevent the window from shrinking
2161 *	and from being larger than the largest representable value.
2162 *
2163 * This prevents incremental opening of the window in the regime
2164 * where TCP is limited by the speed of the reader side taking
2165 * data out of the TCP receive queue. It does nothing about
2166 * those cases where the window is constrained on the sender side
2167 * because the pipeline is full.
2168 *
2169 * BSD also seems to "accidentally" limit itself to windows that are a
2170 * multiple of MSS, at least until the free space gets quite small.
2171 * This would appear to be a side effect of the mbuf implementation.
2172 * Combining these two algorithms results in the observed behavior
2173 * of having a fixed window size at almost all times.
2174 *
2175 * Below we obtain similar behavior by forcing the offered window to
2176 * a multiple of the mss when it is feasible to do so.
2177 *
2178 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
2179 * Regular options like TIMESTAMP are taken into account.
2180 */
2181u32 __tcp_select_window(struct sock *sk)
2182{
2183	struct inet_connection_sock *icsk = inet_csk(sk);
2184	struct tcp_sock *tp = tcp_sk(sk);
2185	/* MSS for the peer's data.  Previous versions used mss_clamp
2186	 * here.  I don't know if the value based on our guesses
2187	 * of peer's MSS is better for the performance.  It's more correct
2188	 * but may be worse for the performance because of rcv_mss
2189	 * fluctuations.  --SAW  1998/11/1
2190	 */
2191	int mss = icsk->icsk_ack.rcv_mss;
2192	int free_space = tcp_space(sk);
2193	int allowed_space = tcp_full_space(sk);
2194	int full_space = min_t(int, tp->window_clamp, allowed_space);
2195	int window;
2196
2197	if (mss > full_space)
2198		mss = full_space;
2199
2200	if (free_space < (full_space >> 1)) {
2201		icsk->icsk_ack.quick = 0;
2202
2203		if (sk_under_memory_pressure(sk))
2204			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2205					       4U * tp->advmss);
2206
2207		/* free_space might become our new window, make sure we don't
2208		 * increase it due to wscale.
2209		 */
2210		free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
2211
2212		/* if free space is less than mss estimate, or is below 1/16th
2213		 * of the maximum allowed, try to move to zero-window, else
2214		 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
2215		 * new incoming data is dropped due to memory limits.
2216		 * With large window, mss test triggers way too late in order
2217		 * to announce zero window in time before rmem limit kicks in.
2218		 */
2219		if (free_space < (allowed_space >> 4) || free_space < mss)
2220			return 0;
2221	}
2222
2223	if (free_space > tp->rcv_ssthresh)
2224		free_space = tp->rcv_ssthresh;
2225
2226	/* Don't do rounding if we are using window scaling, since the
2227	 * scaled window will not line up with the MSS boundary anyway.
2228	 */
2229	window = tp->rcv_wnd;
2230	if (tp->rx_opt.rcv_wscale) {
2231		window = free_space;
2232
2233		/* Advertise enough space so that it won't get scaled away.
2234		 * Import case: prevent zero window announcement if
2235		 * 1<<rcv_wscale > mss.
2236		 */
2237		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
2238			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
2239				  << tp->rx_opt.rcv_wscale);
2240	} else {
2241		/* Get the largest window that is a nice multiple of mss.
2242		 * Window clamp already applied above.
2243		 * If our current window offering is within 1 mss of the
2244		 * free space we just keep it. This prevents the divide
2245		 * and multiply from happening most of the time.
2246		 * We also don't do any window rounding when the free space
2247		 * is too small.
2248		 */
2249		if (window <= free_space - mss || window > free_space)
2250			window = (free_space / mss) * mss;
2251		else if (mss == full_space &&
2252			 free_space > window + (full_space >> 1))
2253			window = free_space;
2254	}
2255
2256	return window;
2257}
2258
2259/* Collapses two adjacent SKB's during retransmission. */
2260static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
2261{
2262	struct tcp_sock *tp = tcp_sk(sk);
2263	struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
2264	int skb_size, next_skb_size;
2265
2266	skb_size = skb->len;
2267	next_skb_size = next_skb->len;
2268
2269	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
2270
2271	tcp_highest_sack_combine(sk, next_skb, skb);
2272
2273	tcp_unlink_write_queue(next_skb, sk);
2274
2275	skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size),
2276				  next_skb_size);
2277
2278	if (next_skb->ip_summed == CHECKSUM_PARTIAL)
2279		skb->ip_summed = CHECKSUM_PARTIAL;
2280
2281	if (skb->ip_summed != CHECKSUM_PARTIAL)
2282		skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
2283
2284	/* Update sequence range on original skb. */
2285	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
2286
2287	/* Merge over control information. This moves PSH/FIN etc. over */
2288	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
2289
2290	/* All done, get rid of second SKB and account for it so
2291	 * packet counting does not break.
2292	 */
2293	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
2294
2295	/* changed transmit queue under us so clear hints */
2296	tcp_clear_retrans_hints_partial(tp);
2297	if (next_skb == tp->retransmit_skb_hint)
2298		tp->retransmit_skb_hint = skb;
2299
2300	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2301
2302	sk_wmem_free_skb(sk, next_skb);
2303}
2304
2305/* Check if coalescing SKBs is legal. */
2306static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
2307{
2308	if (tcp_skb_pcount(skb) > 1)
2309		return false;
2310	/* TODO: SACK collapsing could be used to remove this condition */
2311	if (skb_shinfo(skb)->nr_frags != 0)
2312		return false;
2313	if (skb_cloned(skb))
2314		return false;
2315	if (skb == tcp_send_head(sk))
2316		return false;
2317	/* Some heurestics for collapsing over SACK'd could be invented */
2318	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2319		return false;
2320
2321	return true;
2322}
2323
2324/* Collapse packets in the retransmit queue to make to create
2325 * less packets on the wire. This is only done on retransmission.
2326 */
2327static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
2328				     int space)
2329{
2330	struct tcp_sock *tp = tcp_sk(sk);
2331	struct sk_buff *skb = to, *tmp;
2332	bool first = true;
2333
2334	if (!sysctl_tcp_retrans_collapse)
2335		return;
2336	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
2337		return;
2338
2339	tcp_for_write_queue_from_safe(skb, tmp, sk) {
2340		if (!tcp_can_collapse(sk, skb))
2341			break;
2342
2343		space -= skb->len;
2344
2345		if (first) {
2346			first = false;
2347			continue;
2348		}
2349
2350		if (space < 0)
2351			break;
2352		/* Punt if not enough space exists in the first SKB for
2353		 * the data in the second
2354		 */
2355		if (skb->len > skb_availroom(to))
2356			break;
2357
2358		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
2359			break;
2360
2361		tcp_collapse_retrans(sk, to);
2362	}
2363}
2364
2365/* This retransmits one SKB.  Policy decisions and retransmit queue
2366 * state updates are done by the caller.  Returns non-zero if an
2367 * error occurred which prevented the send.
2368 */
2369int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2370{
2371	struct tcp_sock *tp = tcp_sk(sk);
2372	struct inet_connection_sock *icsk = inet_csk(sk);
2373	unsigned int cur_mss;
2374	int err;
2375
2376	/* Inconslusive MTU probe */
2377	if (icsk->icsk_mtup.probe_size) {
2378		icsk->icsk_mtup.probe_size = 0;
2379	}
2380
2381	/* Do not sent more than we queued. 1/4 is reserved for possible
2382	 * copying overhead: fragmentation, tunneling, mangling etc.
2383	 */
2384	if (atomic_read(&sk->sk_wmem_alloc) >
2385	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
2386		return -EAGAIN;
2387
2388	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
2389		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
2390			BUG();
2391		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
2392			return -ENOMEM;
2393	}
2394
2395	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
2396		return -EHOSTUNREACH; /* Routing failure or similar. */
2397
2398	cur_mss = tcp_current_mss(sk);
2399
2400	/* If receiver has shrunk his window, and skb is out of
2401	 * new window, do not retransmit it. The exception is the
2402	 * case, when window is shrunk to zero. In this case
2403	 * our retransmit serves as a zero window probe.
2404	 */
2405	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
2406	    TCP_SKB_CB(skb)->seq != tp->snd_una)
2407		return -EAGAIN;
2408
2409	if (skb->len > cur_mss) {
2410		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
2411			return -ENOMEM; /* We'll try again later. */
2412	} else {
2413		int oldpcount = tcp_skb_pcount(skb);
2414
2415		if (unlikely(oldpcount > 1)) {
2416			if (skb_unclone(skb, GFP_ATOMIC))
2417				return -ENOMEM;
2418			tcp_init_tso_segs(sk, skb, cur_mss);
2419			tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
2420		}
2421	}
2422
2423	tcp_retrans_try_collapse(sk, skb, cur_mss);
2424
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2425	/* Make a copy, if the first transmission SKB clone we made
2426	 * is still in somebody's hands, else make a clone.
2427	 */
2428	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2429
2430	/* make sure skb->data is aligned on arches that require it
2431	 * and check if ack-trimming & collapsing extended the headroom
2432	 * beyond what csum_start can cover.
2433	 */
2434	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
2435		     skb_headroom(skb) >= 0xFFFF)) {
2436		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
2437						   GFP_ATOMIC);
2438		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2439			     -ENOBUFS;
2440	} else {
2441		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2442	}
2443
2444	if (likely(!err)) {
2445		TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
2446		/* Update global TCP statistics. */
2447		TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
2448		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
2449			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
2450		tp->total_retrans++;
2451	}
2452	return err;
2453}
2454
2455int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2456{
2457	struct tcp_sock *tp = tcp_sk(sk);
2458	int err = __tcp_retransmit_skb(sk, skb);
2459
2460	if (err == 0) {
2461#if FASTRETRANS_DEBUG > 0
2462		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2463			net_dbg_ratelimited("retrans_out leaked\n");
2464		}
2465#endif
2466		if (!tp->retrans_out)
2467			tp->lost_retrans_low = tp->snd_nxt;
2468		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
2469		tp->retrans_out += tcp_skb_pcount(skb);
2470
2471		/* Save stamp of the first retransmit. */
2472		if (!tp->retrans_stamp)
2473			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
2474
2475		tp->undo_retrans += tcp_skb_pcount(skb);
2476
2477		/* snd_nxt is stored to detect loss of retransmitted segment,
2478		 * see tcp_input.c tcp_sacktag_write_queue().
2479		 */
2480		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
2481	} else {
2482		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
2483	}
2484	return err;
2485}
2486
2487/* Check if we forward retransmits are possible in the current
2488 * window/congestion state.
2489 */
2490static bool tcp_can_forward_retransmit(struct sock *sk)
2491{
2492	const struct inet_connection_sock *icsk = inet_csk(sk);
2493	const struct tcp_sock *tp = tcp_sk(sk);
2494
2495	/* Forward retransmissions are possible only during Recovery. */
2496	if (icsk->icsk_ca_state != TCP_CA_Recovery)
2497		return false;
2498
2499	/* No forward retransmissions in Reno are possible. */
2500	if (tcp_is_reno(tp))
2501		return false;
2502
2503	/* Yeah, we have to make difficult choice between forward transmission
2504	 * and retransmission... Both ways have their merits...
2505	 *
2506	 * For now we do not retransmit anything, while we have some new
2507	 * segments to send. In the other cases, follow rule 3 for
2508	 * NextSeg() specified in RFC3517.
2509	 */
2510
2511	if (tcp_may_send_now(sk))
2512		return false;
2513
2514	return true;
2515}
2516
2517/* This gets called after a retransmit timeout, and the initially
2518 * retransmitted data is acknowledged.  It tries to continue
2519 * resending the rest of the retransmit queue, until either
2520 * we've sent it all or the congestion window limit is reached.
2521 * If doing SACK, the first ACK which comes back for a timeout
2522 * based retransmit packet might feed us FACK information again.
2523 * If so, we use it to avoid unnecessarily retransmissions.
2524 */
2525void tcp_xmit_retransmit_queue(struct sock *sk)
2526{
2527	const struct inet_connection_sock *icsk = inet_csk(sk);
2528	struct tcp_sock *tp = tcp_sk(sk);
2529	struct sk_buff *skb;
2530	struct sk_buff *hole = NULL;
2531	u32 last_lost;
2532	int mib_idx;
2533	int fwd_rexmitting = 0;
2534
2535	if (!tp->packets_out)
2536		return;
2537
2538	if (!tp->lost_out)
2539		tp->retransmit_high = tp->snd_una;
2540
2541	if (tp->retransmit_skb_hint) {
2542		skb = tp->retransmit_skb_hint;
2543		last_lost = TCP_SKB_CB(skb)->end_seq;
2544		if (after(last_lost, tp->retransmit_high))
2545			last_lost = tp->retransmit_high;
2546	} else {
2547		skb = tcp_write_queue_head(sk);
2548		last_lost = tp->snd_una;
2549	}
2550
2551	tcp_for_write_queue_from(skb, sk) {
2552		__u8 sacked = TCP_SKB_CB(skb)->sacked;
2553
2554		if (skb == tcp_send_head(sk))
2555			break;
2556		/* we could do better than to assign each time */
2557		if (hole == NULL)
2558			tp->retransmit_skb_hint = skb;
2559
2560		/* Assume this retransmit will generate
2561		 * only one packet for congestion window
2562		 * calculation purposes.  This works because
2563		 * tcp_retransmit_skb() will chop up the
2564		 * packet to be MSS sized and all the
2565		 * packet counting works out.
2566		 */
2567		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
2568			return;
2569
2570		if (fwd_rexmitting) {
2571begin_fwd:
2572			if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2573				break;
2574			mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
2575
2576		} else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
2577			tp->retransmit_high = last_lost;
2578			if (!tcp_can_forward_retransmit(sk))
2579				break;
2580			/* Backtrack if necessary to non-L'ed skb */
2581			if (hole != NULL) {
2582				skb = hole;
2583				hole = NULL;
2584			}
2585			fwd_rexmitting = 1;
2586			goto begin_fwd;
2587
2588		} else if (!(sacked & TCPCB_LOST)) {
2589			if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
2590				hole = skb;
2591			continue;
2592
2593		} else {
2594			last_lost = TCP_SKB_CB(skb)->end_seq;
2595			if (icsk->icsk_ca_state != TCP_CA_Loss)
2596				mib_idx = LINUX_MIB_TCPFASTRETRANS;
2597			else
2598				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
2599		}
2600
2601		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
2602			continue;
2603
2604		if (tcp_retransmit_skb(sk, skb))
 
2605			return;
2606
2607		NET_INC_STATS_BH(sock_net(sk), mib_idx);
2608
2609		if (tcp_in_cwnd_reduction(sk))
2610			tp->prr_out += tcp_skb_pcount(skb);
2611
2612		if (skb == tcp_write_queue_head(sk))
2613			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2614						  inet_csk(sk)->icsk_rto,
2615						  TCP_RTO_MAX);
2616	}
2617}
2618
2619/* Send a fin.  The caller locks the socket for us.  This cannot be
2620 * allowed to fail queueing a FIN frame under any circumstances.
2621 */
2622void tcp_send_fin(struct sock *sk)
2623{
2624	struct tcp_sock *tp = tcp_sk(sk);
2625	struct sk_buff *skb = tcp_write_queue_tail(sk);
2626	int mss_now;
2627
2628	/* Optimization, tack on the FIN if we have a queue of
2629	 * unsent frames.  But be careful about outgoing SACKS
2630	 * and IP options.
2631	 */
2632	mss_now = tcp_current_mss(sk);
2633
2634	if (tcp_send_head(sk) != NULL) {
2635		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
2636		TCP_SKB_CB(skb)->end_seq++;
2637		tp->write_seq++;
2638	} else {
2639		/* Socket is locked, keep trying until memory is available. */
2640		for (;;) {
2641			skb = alloc_skb_fclone(MAX_TCP_HEADER,
2642					       sk->sk_allocation);
2643			if (skb)
2644				break;
2645			yield();
2646		}
2647
2648		/* Reserve space for headers and prepare control bits. */
2649		skb_reserve(skb, MAX_TCP_HEADER);
2650		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
2651		tcp_init_nondata_skb(skb, tp->write_seq,
2652				     TCPHDR_ACK | TCPHDR_FIN);
2653		tcp_queue_skb(sk, skb);
2654	}
2655	__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
2656}
2657
2658/* We get here when a process closes a file descriptor (either due to
2659 * an explicit close() or as a byproduct of exit()'ing) and there
2660 * was unread data in the receive queue.  This behavior is recommended
2661 * by RFC 2525, section 2.17.  -DaveM
2662 */
2663void tcp_send_active_reset(struct sock *sk, gfp_t priority)
2664{
2665	struct sk_buff *skb;
2666
2667	/* NOTE: No TCP options attached and we never retransmit this. */
2668	skb = alloc_skb(MAX_TCP_HEADER, priority);
2669	if (!skb) {
2670		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
2671		return;
2672	}
2673
2674	/* Reserve space for headers and prepare control bits. */
2675	skb_reserve(skb, MAX_TCP_HEADER);
2676	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
2677			     TCPHDR_ACK | TCPHDR_RST);
2678	/* Send it off. */
2679	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2680	if (tcp_transmit_skb(sk, skb, 0, priority))
2681		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
2682
2683	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
2684}
2685
2686/* Send a crossed SYN-ACK during socket establishment.
2687 * WARNING: This routine must only be called when we have already sent
2688 * a SYN packet that crossed the incoming SYN that caused this routine
2689 * to get called. If this assumption fails then the initial rcv_wnd
2690 * and rcv_wscale values will not be correct.
2691 */
2692int tcp_send_synack(struct sock *sk)
2693{
2694	struct sk_buff *skb;
2695
2696	skb = tcp_write_queue_head(sk);
2697	if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
2698		pr_debug("%s: wrong queue state\n", __func__);
2699		return -EFAULT;
2700	}
2701	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
2702		if (skb_cloned(skb)) {
2703			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
2704			if (nskb == NULL)
2705				return -ENOMEM;
2706			tcp_unlink_write_queue(skb, sk);
2707			skb_header_release(nskb);
2708			__tcp_add_write_queue_head(sk, nskb);
2709			sk_wmem_free_skb(sk, skb);
2710			sk->sk_wmem_queued += nskb->truesize;
2711			sk_mem_charge(sk, nskb->truesize);
2712			skb = nskb;
2713		}
2714
2715		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
2716		TCP_ECN_send_synack(tcp_sk(sk), skb);
2717	}
2718	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2719	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2720}
2721
2722/**
2723 * tcp_make_synack - Prepare a SYN-ACK.
2724 * sk: listener socket
2725 * dst: dst entry attached to the SYNACK
2726 * req: request_sock pointer
2727 *
2728 * Allocate one skb and build a SYNACK packet.
2729 * @dst is consumed : Caller should not use it again.
2730 */
2731struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2732				struct request_sock *req,
2733				struct tcp_fastopen_cookie *foc)
2734{
2735	struct tcp_out_options opts;
 
2736	struct inet_request_sock *ireq = inet_rsk(req);
2737	struct tcp_sock *tp = tcp_sk(sk);
 
2738	struct tcphdr *th;
2739	struct sk_buff *skb;
2740	struct tcp_md5sig_key *md5;
2741	int tcp_header_size;
2742	int mss;
 
2743
2744	skb = sock_wmalloc(sk, MAX_TCP_HEADER, 1, GFP_ATOMIC);
2745	if (unlikely(!skb)) {
2746		dst_release(dst);
 
2747		return NULL;
2748	}
2749	/* Reserve space for headers. */
2750	skb_reserve(skb, MAX_TCP_HEADER);
2751
2752	skb_dst_set(skb, dst);
2753	security_skb_owned_by(skb, sk);
2754
2755	mss = dst_metric_advmss(dst);
2756	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
2757		mss = tp->rx_opt.user_mss;
2758
2759	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
2760		__u8 rcv_wscale;
2761		/* Set this up on the first call only */
2762		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2763
2764		/* limit the window selection if the user enforce a smaller rx buffer */
2765		if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2766		    (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
2767			req->window_clamp = tcp_full_space(sk);
2768
2769		/* tcp_full_space because it is guaranteed to be the first packet */
2770		tcp_select_initial_window(tcp_full_space(sk),
2771			mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
2772			&req->rcv_wnd,
2773			&req->window_clamp,
2774			ireq->wscale_ok,
2775			&rcv_wscale,
2776			dst_metric(dst, RTAX_INITRWND));
2777		ireq->rcv_wscale = rcv_wscale;
2778	}
2779
2780	memset(&opts, 0, sizeof(opts));
2781#ifdef CONFIG_SYN_COOKIES
2782	if (unlikely(req->cookie_ts))
2783		TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
2784	else
2785#endif
2786	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2787	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5,
2788					     foc) + sizeof(*th);
 
2789
2790	skb_push(skb, tcp_header_size);
2791	skb_reset_transport_header(skb);
2792
2793	th = tcp_hdr(skb);
2794	memset(th, 0, sizeof(struct tcphdr));
2795	th->syn = 1;
2796	th->ack = 1;
2797	TCP_ECN_make_synack(req, th);
2798	th->source = htons(ireq->ir_num);
2799	th->dest = ireq->ir_rmt_port;
2800	/* Setting of flags are superfluous here for callers (and ECE is
2801	 * not even correctly set)
2802	 */
2803	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
2804			     TCPHDR_SYN | TCPHDR_ACK);
2805
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2806	th->seq = htonl(TCP_SKB_CB(skb)->seq);
2807	/* XXX data is queued and acked as is. No buffer/window check */
2808	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
2809
2810	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2811	th->window = htons(min(req->rcv_wnd, 65535U));
2812	tcp_options_write((__be32 *)(th + 1), tp, &opts);
2813	th->doff = (tcp_header_size >> 2);
2814	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_OUTSEGS);
2815
2816#ifdef CONFIG_TCP_MD5SIG
2817	/* Okay, we have all we need - do the md5 hash if needed */
2818	if (md5) {
2819		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
2820					       md5, NULL, req, skb);
2821	}
2822#endif
2823
2824	return skb;
2825}
2826EXPORT_SYMBOL(tcp_make_synack);
2827
2828/* Do all connect socket setups that can be done AF independent. */
2829static void tcp_connect_init(struct sock *sk)
2830{
2831	const struct dst_entry *dst = __sk_dst_get(sk);
2832	struct tcp_sock *tp = tcp_sk(sk);
2833	__u8 rcv_wscale;
2834
2835	/* We'll fix this up when we get a response from the other end.
2836	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
2837	 */
2838	tp->tcp_header_len = sizeof(struct tcphdr) +
2839		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
2840
2841#ifdef CONFIG_TCP_MD5SIG
2842	if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2843		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2844#endif
2845
2846	/* If user gave his TCP_MAXSEG, record it to clamp */
2847	if (tp->rx_opt.user_mss)
2848		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
2849	tp->max_window = 0;
2850	tcp_mtup_init(sk);
2851	tcp_sync_mss(sk, dst_mtu(dst));
2852
2853	if (!tp->window_clamp)
2854		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
2855	tp->advmss = dst_metric_advmss(dst);
2856	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
2857		tp->advmss = tp->rx_opt.user_mss;
2858
2859	tcp_initialize_rcv_mss(sk);
2860
2861	/* limit the window selection if the user enforce a smaller rx buffer */
2862	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2863	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
2864		tp->window_clamp = tcp_full_space(sk);
2865
2866	tcp_select_initial_window(tcp_full_space(sk),
2867				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
2868				  &tp->rcv_wnd,
2869				  &tp->window_clamp,
2870				  sysctl_tcp_window_scaling,
2871				  &rcv_wscale,
2872				  dst_metric(dst, RTAX_INITRWND));
2873
2874	tp->rx_opt.rcv_wscale = rcv_wscale;
2875	tp->rcv_ssthresh = tp->rcv_wnd;
2876
2877	sk->sk_err = 0;
2878	sock_reset_flag(sk, SOCK_DONE);
2879	tp->snd_wnd = 0;
2880	tcp_init_wl(tp, 0);
2881	tp->snd_una = tp->write_seq;
2882	tp->snd_sml = tp->write_seq;
2883	tp->snd_up = tp->write_seq;
2884	tp->snd_nxt = tp->write_seq;
2885
2886	if (likely(!tp->repair))
2887		tp->rcv_nxt = 0;
2888	else
2889		tp->rcv_tstamp = tcp_time_stamp;
2890	tp->rcv_wup = tp->rcv_nxt;
2891	tp->copied_seq = tp->rcv_nxt;
2892
2893	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2894	inet_csk(sk)->icsk_retransmits = 0;
2895	tcp_clear_retrans(tp);
2896}
2897
2898static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
2899{
2900	struct tcp_sock *tp = tcp_sk(sk);
2901	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
2902
2903	tcb->end_seq += skb->len;
2904	skb_header_release(skb);
2905	__tcp_add_write_queue_tail(sk, skb);
2906	sk->sk_wmem_queued += skb->truesize;
2907	sk_mem_charge(sk, skb->truesize);
2908	tp->write_seq = tcb->end_seq;
2909	tp->packets_out += tcp_skb_pcount(skb);
2910}
2911
2912/* Build and send a SYN with data and (cached) Fast Open cookie. However,
2913 * queue a data-only packet after the regular SYN, such that regular SYNs
2914 * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
2915 * only the SYN sequence, the data are retransmitted in the first ACK.
2916 * If cookie is not cached or other error occurs, falls back to send a
2917 * regular SYN with Fast Open cookie request option.
2918 */
2919static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
2920{
2921	struct tcp_sock *tp = tcp_sk(sk);
2922	struct tcp_fastopen_request *fo = tp->fastopen_req;
2923	int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen;
2924	struct sk_buff *syn_data = NULL, *data;
2925	unsigned long last_syn_loss = 0;
2926
2927	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
2928	tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
2929			       &syn_loss, &last_syn_loss);
2930	/* Recurring FO SYN losses: revert to regular handshake temporarily */
2931	if (syn_loss > 1 &&
2932	    time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
2933		fo->cookie.len = -1;
2934		goto fallback;
2935	}
2936
2937	if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE)
2938		fo->cookie.len = -1;
2939	else if (fo->cookie.len <= 0)
2940		goto fallback;
2941
2942	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
2943	 * user-MSS. Reserve maximum option space for middleboxes that add
2944	 * private TCP options. The cost is reduced data space in SYN :(
2945	 */
2946	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
2947		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
2948	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
2949		MAX_TCP_OPTION_SPACE;
2950
2951	space = min_t(size_t, space, fo->size);
2952
2953	/* limit to order-0 allocations */
2954	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
2955
2956	syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space,
2957				   sk->sk_allocation);
2958	if (syn_data == NULL)
2959		goto fallback;
2960
2961	for (i = 0; i < iovlen && syn_data->len < space; ++i) {
2962		struct iovec *iov = &fo->data->msg_iov[i];
2963		unsigned char __user *from = iov->iov_base;
2964		int len = iov->iov_len;
2965
2966		if (syn_data->len + len > space)
2967			len = space - syn_data->len;
2968		else if (i + 1 == iovlen)
2969			/* No more data pending in inet_wait_for_connect() */
2970			fo->data = NULL;
2971
2972		if (skb_add_data(syn_data, from, len))
2973			goto fallback;
2974	}
2975
2976	/* Queue a data-only packet after the regular SYN for retransmission */
2977	data = pskb_copy(syn_data, sk->sk_allocation);
2978	if (data == NULL)
2979		goto fallback;
2980	TCP_SKB_CB(data)->seq++;
2981	TCP_SKB_CB(data)->tcp_flags &= ~TCPHDR_SYN;
2982	TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH);
2983	tcp_connect_queue_skb(sk, data);
2984	fo->copied = data->len;
2985
2986	/* syn_data is about to be sent, we need to take current time stamps
2987	 * for the packets that are in write queue : SYN packet and DATA
2988	 */
2989	skb_mstamp_get(&syn->skb_mstamp);
2990	data->skb_mstamp = syn->skb_mstamp;
2991
2992	if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) {
2993		tp->syn_data = (fo->copied > 0);
2994		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
2995		goto done;
2996	}
2997	syn_data = NULL;
2998
2999fallback:
3000	/* Send a regular SYN with Fast Open cookie request option */
3001	if (fo->cookie.len > 0)
3002		fo->cookie.len = 0;
3003	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
3004	if (err)
3005		tp->syn_fastopen = 0;
3006	kfree_skb(syn_data);
3007done:
3008	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
3009	return err;
3010}
3011
3012/* Build a SYN and send it off. */
3013int tcp_connect(struct sock *sk)
3014{
3015	struct tcp_sock *tp = tcp_sk(sk);
3016	struct sk_buff *buff;
3017	int err;
3018
3019	tcp_connect_init(sk);
3020
3021	if (unlikely(tp->repair)) {
3022		tcp_finish_connect(sk, NULL);
3023		return 0;
3024	}
3025
3026	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
3027	if (unlikely(buff == NULL))
3028		return -ENOBUFS;
3029
3030	/* Reserve space for headers. */
3031	skb_reserve(buff, MAX_TCP_HEADER);
3032
3033	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
3034	tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
3035	tcp_connect_queue_skb(sk, buff);
3036	TCP_ECN_send_syn(sk, buff);
3037
3038	/* Send off SYN; include data in Fast Open. */
3039	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
3040	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
 
 
 
 
 
 
3041	if (err == -ECONNREFUSED)
3042		return err;
3043
3044	/* We change tp->snd_nxt after the tcp_transmit_skb() call
3045	 * in order to make this packet get counted in tcpOutSegs.
3046	 */
3047	tp->snd_nxt = tp->write_seq;
3048	tp->pushed_seq = tp->write_seq;
3049	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
3050
3051	/* Timer for repeating the SYN until an answer. */
3052	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
3053				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
3054	return 0;
3055}
3056EXPORT_SYMBOL(tcp_connect);
3057
3058/* Send out a delayed ack, the caller does the policy checking
3059 * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
3060 * for details.
3061 */
3062void tcp_send_delayed_ack(struct sock *sk)
3063{
3064	struct inet_connection_sock *icsk = inet_csk(sk);
3065	int ato = icsk->icsk_ack.ato;
3066	unsigned long timeout;
3067
3068	if (ato > TCP_DELACK_MIN) {
3069		const struct tcp_sock *tp = tcp_sk(sk);
3070		int max_ato = HZ / 2;
3071
3072		if (icsk->icsk_ack.pingpong ||
3073		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
3074			max_ato = TCP_DELACK_MAX;
3075
3076		/* Slow path, intersegment interval is "high". */
3077
3078		/* If some rtt estimate is known, use it to bound delayed ack.
3079		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
3080		 * directly.
3081		 */
3082		if (tp->srtt_us) {
3083			int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
3084					TCP_DELACK_MIN);
3085
3086			if (rtt < max_ato)
3087				max_ato = rtt;
3088		}
3089
3090		ato = min(ato, max_ato);
3091	}
3092
3093	/* Stay within the limit we were given */
3094	timeout = jiffies + ato;
3095
3096	/* Use new timeout only if there wasn't a older one earlier. */
3097	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
3098		/* If delack timer was blocked or is about to expire,
3099		 * send ACK now.
3100		 */
3101		if (icsk->icsk_ack.blocked ||
3102		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
3103			tcp_send_ack(sk);
3104			return;
3105		}
3106
3107		if (!time_before(timeout, icsk->icsk_ack.timeout))
3108			timeout = icsk->icsk_ack.timeout;
3109	}
3110	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3111	icsk->icsk_ack.timeout = timeout;
3112	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
3113}
3114
3115/* This routine sends an ack and also updates the window. */
3116void tcp_send_ack(struct sock *sk)
3117{
3118	struct sk_buff *buff;
3119
3120	/* If we have been reset, we may not send again. */
3121	if (sk->sk_state == TCP_CLOSE)
3122		return;
3123
3124	/* We are not putting this on the write queue, so
3125	 * tcp_transmit_skb() will set the ownership to this
3126	 * sock.
3127	 */
3128	buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
3129	if (buff == NULL) {
3130		inet_csk_schedule_ack(sk);
3131		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
3132		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
3133					  TCP_DELACK_MAX, TCP_RTO_MAX);
3134		return;
3135	}
3136
3137	/* Reserve space for headers and prepare control bits. */
3138	skb_reserve(buff, MAX_TCP_HEADER);
3139	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
3140
3141	/* Send it off, this clears delayed acks for us. */
3142	TCP_SKB_CB(buff)->when = tcp_time_stamp;
3143	tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
3144}
3145
3146/* This routine sends a packet with an out of date sequence
3147 * number. It assumes the other end will try to ack it.
3148 *
3149 * Question: what should we make while urgent mode?
3150 * 4.4BSD forces sending single byte of data. We cannot send
3151 * out of window data, because we have SND.NXT==SND.MAX...
3152 *
3153 * Current solution: to send TWO zero-length segments in urgent mode:
3154 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
3155 * out-of-date with SND.UNA-1 to probe window.
3156 */
3157static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
3158{
3159	struct tcp_sock *tp = tcp_sk(sk);
3160	struct sk_buff *skb;
3161
3162	/* We don't queue it, tcp_transmit_skb() sets ownership. */
3163	skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
3164	if (skb == NULL)
3165		return -1;
3166
3167	/* Reserve space for headers and set control bits. */
3168	skb_reserve(skb, MAX_TCP_HEADER);
3169	/* Use a previous sequence.  This should cause the other
3170	 * end to send an ack.  Don't queue or clone SKB, just
3171	 * send it.
3172	 */
3173	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
3174	TCP_SKB_CB(skb)->when = tcp_time_stamp;
3175	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
3176}
3177
3178void tcp_send_window_probe(struct sock *sk)
3179{
3180	if (sk->sk_state == TCP_ESTABLISHED) {
3181		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
 
3182		tcp_xmit_probe_skb(sk, 0);
3183	}
3184}
3185
3186/* Initiate keepalive or window probe from timer. */
3187int tcp_write_wakeup(struct sock *sk)
3188{
3189	struct tcp_sock *tp = tcp_sk(sk);
3190	struct sk_buff *skb;
3191
3192	if (sk->sk_state == TCP_CLOSE)
3193		return -1;
3194
3195	if ((skb = tcp_send_head(sk)) != NULL &&
3196	    before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
3197		int err;
3198		unsigned int mss = tcp_current_mss(sk);
3199		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
3200
3201		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
3202			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
3203
3204		/* We are probing the opening of a window
3205		 * but the window size is != 0
3206		 * must have been a result SWS avoidance ( sender )
3207		 */
3208		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
3209		    skb->len > mss) {
3210			seg_size = min(seg_size, mss);
3211			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3212			if (tcp_fragment(sk, skb, seg_size, mss))
3213				return -1;
3214		} else if (!tcp_skb_pcount(skb))
3215			tcp_set_skb_tso_segs(sk, skb, mss);
3216
3217		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3218		TCP_SKB_CB(skb)->when = tcp_time_stamp;
3219		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
3220		if (!err)
3221			tcp_event_new_data_sent(sk, skb);
3222		return err;
3223	} else {
3224		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
3225			tcp_xmit_probe_skb(sk, 1);
3226		return tcp_xmit_probe_skb(sk, 0);
3227	}
3228}
3229
3230/* A window probe timeout has occurred.  If window is not closed send
3231 * a partial packet else a zero probe.
3232 */
3233void tcp_send_probe0(struct sock *sk)
3234{
3235	struct inet_connection_sock *icsk = inet_csk(sk);
3236	struct tcp_sock *tp = tcp_sk(sk);
3237	int err;
3238
3239	err = tcp_write_wakeup(sk);
3240
3241	if (tp->packets_out || !tcp_send_head(sk)) {
3242		/* Cancel probe timer, if it is not required. */
3243		icsk->icsk_probes_out = 0;
3244		icsk->icsk_backoff = 0;
3245		return;
3246	}
3247
3248	if (err <= 0) {
3249		if (icsk->icsk_backoff < sysctl_tcp_retries2)
3250			icsk->icsk_backoff++;
3251		icsk->icsk_probes_out++;
3252		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
3253					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
3254					  TCP_RTO_MAX);
3255	} else {
3256		/* If packet was not sent due to local congestion,
3257		 * do not backoff and do not remember icsk_probes_out.
3258		 * Let local senders to fight for local resources.
3259		 *
3260		 * Use accumulated backoff yet.
3261		 */
3262		if (!icsk->icsk_probes_out)
3263			icsk->icsk_probes_out = 1;
3264		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
3265					  min(icsk->icsk_rto << icsk->icsk_backoff,
3266					      TCP_RESOURCE_PROBE_INTERVAL),
3267					  TCP_RTO_MAX);
3268	}
3269}