v3.15 (include/net/tcp.h)
   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		Definitions for the TCP module.
   7 *
   8 * Version:	@(#)tcp.h	1.0.5	05/23/93
   9 *
  10 * Authors:	Ross Biro
  11 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12 *
  13 *		This program is free software; you can redistribute it and/or
  14 *		modify it under the terms of the GNU General Public License
  15 *		as published by the Free Software Foundation; either version
  16 *		2 of the License, or (at your option) any later version.
  17 */
  18#ifndef _TCP_H
  19#define _TCP_H
  20
  21#define FASTRETRANS_DEBUG 1
  22
  23#include <linux/list.h>
  24#include <linux/tcp.h>
  25#include <linux/bug.h>
  26#include <linux/slab.h>
  27#include <linux/cache.h>
  28#include <linux/percpu.h>
  29#include <linux/skbuff.h>
  30#include <linux/dmaengine.h>
  31#include <linux/crypto.h>
  32#include <linux/cryptohash.h>
  33#include <linux/kref.h>
  34#include <linux/ktime.h>
  35
  36#include <net/inet_connection_sock.h>
  37#include <net/inet_timewait_sock.h>
  38#include <net/inet_hashtables.h>
  39#include <net/checksum.h>
  40#include <net/request_sock.h>
  41#include <net/sock.h>
  42#include <net/snmp.h>
  43#include <net/ip.h>
  44#include <net/tcp_states.h>
  45#include <net/inet_ecn.h>
  46#include <net/dst.h>
  47
  48#include <linux/seq_file.h>
  49#include <linux/memcontrol.h>
  50
  51extern struct inet_hashinfo tcp_hashinfo;
  52
  53extern struct percpu_counter tcp_orphan_count;
  54void tcp_time_wait(struct sock *sk, int state, int timeo);
  55
  56#define MAX_TCP_HEADER	(128 + MAX_HEADER)
  57#define MAX_TCP_OPTION_SPACE 40
  58
  59/* 
  60 * Never offer a window over 32767 without using window scaling. Some
  61 * poor stacks do signed 16bit maths! 
  62 */
  63#define MAX_TCP_WINDOW		32767U
  64
  65/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
  66#define TCP_MIN_MSS		88U
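/* The arithmetic above, worked out (illustrative note, not in the original
 * source): 60 is the largest possible IPv4 header, 60 the largest possible
 * TCP header and 8 a minimal payload, i.e. a 128-byte packet; MSS is quoted
 * relative to the bare 20+20 byte headers, so 128 - 40 = 88.
 */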
  67
  68/* The least MTU to use for probing */
  69#define TCP_BASE_MSS		512
  70
  71/* After receiving this amount of duplicate ACKs fast retransmit starts. */
  72#define TCP_FASTRETRANS_THRESH 3
  73
  74/* Maximal reordering. */
  75#define TCP_MAX_REORDERING	127
  76
  77/* Maximal number of ACKs sent quickly to accelerate slow-start. */
  78#define TCP_MAX_QUICKACKS	16U
  79
  80/* urg_data states */
  81#define TCP_URG_VALID	0x0100
  82#define TCP_URG_NOTYET	0x0200
  83#define TCP_URG_READ	0x0400
  84
  85#define TCP_RETR1	3	/*
  86				 * This is how many retries it does before it
  87				 * tries to figure out if the gateway is
  88				 * down. Minimal RFC value is 3; it corresponds
  89				 * to ~3sec-8min depending on RTO.
  90				 */
  91
  92#define TCP_RETR2	15	/*
  93				 * This should take at least
  94				 * 90 minutes to time out.
  95				 * RFC1122 says that the limit is 100 sec.
  96				 * 15 is ~13-30min depending on RTO.
  97				 */
  98
  99#define TCP_SYN_RETRIES	 6	/* This is how many retries are done
 100				 * when active opening a connection.
 101				 * RFC1122 says the minimum retry MUST
 102				 * be at least 180secs.  Nevertheless
  103				 * this value corresponds to
 104				 * 63secs of retransmission with the
 105				 * current initial RTO.
 106				 */
 107
  108#define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
  109				 * when passive opening a connection.
  110				 * This corresponds to 31secs of
 111				 * retransmission with the current
 112				 * initial RTO.
 113				 */
 114
 115#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
 116				  * state, about 60 seconds	*/
 117#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
 118                                 /* BSD style FIN_WAIT2 deadlock breaker.
 119				  * It used to be 3min, new value is 60sec,
 120				  * to combine FIN-WAIT-2 timeout with
 121				  * TIME-WAIT timer.
 122				  */
 123
 124#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
 125#if HZ >= 100
 126#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
 127#define TCP_ATO_MIN	((unsigned)(HZ/25))
 128#else
 129#define TCP_DELACK_MIN	4U
 130#define TCP_ATO_MIN	4U
 131#endif
 132#define TCP_RTO_MAX	((unsigned)(120*HZ))
 133#define TCP_RTO_MIN	((unsigned)(HZ/5))
 134#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
 135#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
 136						 * used as a fallback RTO for the
 137						 * initial data transmission if no
 138						 * valid RTT sample has been acquired,
 139						 * most likely due to retrans in 3WHS.
 140						 */
 141
 142#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
 143					                 * for local resources.
 144					                 */
 145
 146#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
 147#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
 148#define TCP_KEEPALIVE_INTVL	(75*HZ)
 149
 150#define MAX_TCP_KEEPIDLE	32767
 151#define MAX_TCP_KEEPINTVL	32767
 152#define MAX_TCP_KEEPCNT		127
 153#define MAX_TCP_SYNCNT		127
 154
 155#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
 156
 157#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
 158#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
  159					 * after this time. It should be equal
  160					 * to (or greater than) TCP_TIMEWAIT_LEN
  161					 * to provide reliability equal to that
  162					 * provided by the TIME-WAIT state.
 163					 */
 164#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
 165					 * timestamps. It must be less than
 166					 * minimal timewait lifetime.
 167					 */
 168/*
 169 *	TCP option
 170 */
 171 
 172#define TCPOPT_NOP		1	/* Padding */
 173#define TCPOPT_EOL		0	/* End of options */
 174#define TCPOPT_MSS		2	/* Segment size negotiating */
 175#define TCPOPT_WINDOW		3	/* Window scaling */
 176#define TCPOPT_SACK_PERM        4       /* SACK Permitted */
 177#define TCPOPT_SACK             5       /* SACK Block */
 178#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
 179#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
 180#define TCPOPT_EXP		254	/* Experimental */
 181/* Magic number to be after the option value for sharing TCP
 182 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 183 */
 184#define TCPOPT_FASTOPEN_MAGIC	0xF989
 185
 186/*
 187 *     TCP option lengths
 188 */
 189
 190#define TCPOLEN_MSS            4
 191#define TCPOLEN_WINDOW         3
 192#define TCPOLEN_SACK_PERM      2
 193#define TCPOLEN_TIMESTAMP      10
 194#define TCPOLEN_MD5SIG         18
 195#define TCPOLEN_EXP_FASTOPEN_BASE  4
 196
 197/* But this is what stacks really send out. */
 198#define TCPOLEN_TSTAMP_ALIGNED		12
 199#define TCPOLEN_WSCALE_ALIGNED		4
 200#define TCPOLEN_SACKPERM_ALIGNED	4
 201#define TCPOLEN_SACK_BASE		2
 202#define TCPOLEN_SACK_BASE_ALIGNED	4
 203#define TCPOLEN_SACK_PERBLOCK		8
 204#define TCPOLEN_MD5SIG_ALIGNED		20
 205#define TCPOLEN_MSS_ALIGNED		4
 206
 207/* Flags in tp->nonagle */
 208#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
 209#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
 210#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */
 211
 212/* TCP thin-stream limits */
 213#define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */
 214
 215/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
 216#define TCP_INIT_CWND		10
 217
 218/* Bit Flags for sysctl_tcp_fastopen */
 219#define	TFO_CLIENT_ENABLE	1
 220#define	TFO_SERVER_ENABLE	2
 221#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */
 222
 223/* Process SYN data but skip cookie validation */
 224#define	TFO_SERVER_COOKIE_NOT_CHKED	0x100
 225/* Accept SYN data w/o any cookie option */
 226#define	TFO_SERVER_COOKIE_NOT_REQD	0x200
 227
 228/* Force enable TFO on all listeners, i.e., not requiring the
 229 * TCP_FASTOPEN socket option. SOCKOPT1/2 determine how to set max_qlen.
 230 */
 231#define	TFO_SERVER_WO_SOCKOPT1	0x400
 232#define	TFO_SERVER_WO_SOCKOPT2	0x800
 233/* Always create TFO child sockets on a TFO listener even when
  234 * cookie/data are not present. (For testing purposes!)
 235 */
 236#define	TFO_SERVER_ALWAYS	0x1000
 237
 238extern struct inet_timewait_death_row tcp_death_row;
 239
 240/* sysctl variables for tcp */
 241extern int sysctl_tcp_timestamps;
 242extern int sysctl_tcp_window_scaling;
 243extern int sysctl_tcp_sack;
 244extern int sysctl_tcp_fin_timeout;
 245extern int sysctl_tcp_keepalive_time;
 246extern int sysctl_tcp_keepalive_probes;
 247extern int sysctl_tcp_keepalive_intvl;
 248extern int sysctl_tcp_syn_retries;
 249extern int sysctl_tcp_synack_retries;
 250extern int sysctl_tcp_retries1;
 251extern int sysctl_tcp_retries2;
 252extern int sysctl_tcp_orphan_retries;
 253extern int sysctl_tcp_syncookies;
 254extern int sysctl_tcp_fastopen;
 255extern int sysctl_tcp_retrans_collapse;
 256extern int sysctl_tcp_stdurg;
 257extern int sysctl_tcp_rfc1337;
 258extern int sysctl_tcp_abort_on_overflow;
 259extern int sysctl_tcp_max_orphans;
 260extern int sysctl_tcp_fack;
 261extern int sysctl_tcp_reordering;
 262extern int sysctl_tcp_dsack;
 263extern long sysctl_tcp_mem[3];
 264extern int sysctl_tcp_wmem[3];
 265extern int sysctl_tcp_rmem[3];
 266extern int sysctl_tcp_app_win;
 267extern int sysctl_tcp_adv_win_scale;
 268extern int sysctl_tcp_tw_reuse;
 269extern int sysctl_tcp_frto;
 270extern int sysctl_tcp_low_latency;
 271extern int sysctl_tcp_dma_copybreak;
 272extern int sysctl_tcp_nometrics_save;
 273extern int sysctl_tcp_moderate_rcvbuf;
 274extern int sysctl_tcp_tso_win_divisor;
 275extern int sysctl_tcp_mtu_probing;
 276extern int sysctl_tcp_base_mss;
 277extern int sysctl_tcp_workaround_signed_windows;
 278extern int sysctl_tcp_slow_start_after_idle;
 279extern int sysctl_tcp_thin_linear_timeouts;
 280extern int sysctl_tcp_thin_dupack;
 281extern int sysctl_tcp_early_retrans;
 282extern int sysctl_tcp_limit_output_bytes;
 283extern int sysctl_tcp_challenge_ack_limit;
 284extern unsigned int sysctl_tcp_notsent_lowat;
 285extern int sysctl_tcp_min_tso_segs;
 286extern int sysctl_tcp_autocorking;
 287
 288extern atomic_long_t tcp_memory_allocated;
 289extern struct percpu_counter tcp_sockets_allocated;
 290extern int tcp_memory_pressure;
 291
 292/*
 293 * The next routines deal with comparing 32 bit unsigned ints
 294 * and worry about wraparound (automatic with unsigned arithmetic).
 295 */
 296
 297static inline bool before(__u32 seq1, __u32 seq2)
 298{
 299        return (__s32)(seq1-seq2) < 0;
 300}
 301#define after(seq2, seq1) 	before(seq1, seq2)
 302
 303/* is s2<=s1<=s3 ? */
 304static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
 305{
 306	return seq3 - seq2 >= seq1 - seq2;
 307}
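/* Example (illustrative sketch, not part of this header): these helpers
 * stay correct across 32-bit wraparound because the subtraction is modular
 * and only the sign is tested:
 *
 *	before(0xfffffff0, 0x10)  is true, since
 *		(__s32)(0xfffffff0 - 0x10) == (__s32)0xffffffe0 < 0;
 *	after(0x10, 0xfffffff0)   is therefore also true;
 *	between(5, 3, 10)         is true (3 <= 5 <= 10).
 */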
 308
 309static inline bool tcp_out_of_memory(struct sock *sk)
 310{
 311	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
 312	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
 313		return true;
 314	return false;
 315}
 316
 317static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
 318{
 319	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
 320	int orphans = percpu_counter_read_positive(ocp);
 321
 322	if (orphans << shift > sysctl_tcp_max_orphans) {
 323		orphans = percpu_counter_sum_positive(ocp);
 324		if (orphans << shift > sysctl_tcp_max_orphans)
 325			return true;
 326	}
 327	return false;
 328}
 329
 330bool tcp_check_oom(struct sock *sk, int shift);
 331
 332/* syncookies: remember time of last synqueue overflow */
 333static inline void tcp_synq_overflow(struct sock *sk)
 334{
 335	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
 336}
 337
 338/* syncookies: no recent synqueue overflow on this listening socket? */
 339static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
 340{
 341	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
 342	return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
 343}
 344
 345extern struct proto tcp_prot;
 346
 347#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
 348#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
 349#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
 350#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
 351#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
 352
 353void tcp_tasklet_init(void);
 354
 355void tcp_v4_err(struct sk_buff *skb, u32);
 356
 357void tcp_shutdown(struct sock *sk, int how);
 358
 359void tcp_v4_early_demux(struct sk_buff *skb);
 360int tcp_v4_rcv(struct sk_buff *skb);
 361
 362int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
 363int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 364		size_t size);
 365int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
 366		 int flags);
 367void tcp_release_cb(struct sock *sk);
 368void tcp_wfree(struct sk_buff *skb);
 369void tcp_write_timer_handler(struct sock *sk);
 370void tcp_delack_timer_handler(struct sock *sk);
 371int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 372int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 373			  const struct tcphdr *th, unsigned int len);
 374void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 375			 const struct tcphdr *th, unsigned int len);
 376void tcp_rcv_space_adjust(struct sock *sk);
 377void tcp_cleanup_rbuf(struct sock *sk, int copied);
 378int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
 379void tcp_twsk_destructor(struct sock *sk);
 380ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
 381			struct pipe_inode_info *pipe, size_t len,
 382			unsigned int flags);
 383
 384static inline void tcp_dec_quickack_mode(struct sock *sk,
 385					 const unsigned int pkts)
 386{
 387	struct inet_connection_sock *icsk = inet_csk(sk);
 388
 389	if (icsk->icsk_ack.quick) {
 390		if (pkts >= icsk->icsk_ack.quick) {
 391			icsk->icsk_ack.quick = 0;
  392			/* When leaving quickack mode, we deflate ATO. */
 393			icsk->icsk_ack.ato   = TCP_ATO_MIN;
 394		} else
 395			icsk->icsk_ack.quick -= pkts;
 396	}
 397}
 398
 399#define	TCP_ECN_OK		1
 400#define	TCP_ECN_QUEUE_CWR	2
 401#define	TCP_ECN_DEMAND_CWR	4
 402#define	TCP_ECN_SEEN		8
 403
 404enum tcp_tw_status {
 405	TCP_TW_SUCCESS = 0,
 406	TCP_TW_RST = 1,
 407	TCP_TW_ACK = 2,
 408	TCP_TW_SYN = 3
 409};
 410
 411
 412enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
 413					      struct sk_buff *skb,
 414					      const struct tcphdr *th);
 415struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 416			   struct request_sock *req, struct request_sock **prev,
 417			   bool fastopen);
 418int tcp_child_process(struct sock *parent, struct sock *child,
 419		      struct sk_buff *skb);
 420void tcp_enter_loss(struct sock *sk, int how);
 421void tcp_clear_retrans(struct tcp_sock *tp);
 422void tcp_update_metrics(struct sock *sk);
 423void tcp_init_metrics(struct sock *sk);
 424void tcp_metrics_init(void);
 425bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
 426			bool paws_check);
 427bool tcp_remember_stamp(struct sock *sk);
 428bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
 429void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
 430void tcp_disable_fack(struct tcp_sock *tp);
 431void tcp_close(struct sock *sk, long timeout);
 432void tcp_init_sock(struct sock *sk);
 433unsigned int tcp_poll(struct file *file, struct socket *sock,
 434		      struct poll_table_struct *wait);
 435int tcp_getsockopt(struct sock *sk, int level, int optname,
 436		   char __user *optval, int __user *optlen);
 437int tcp_setsockopt(struct sock *sk, int level, int optname,
 438		   char __user *optval, unsigned int optlen);
 439int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
 440			  char __user *optval, int __user *optlen);
 441int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
 442			  char __user *optval, unsigned int optlen);
 443void tcp_set_keepalive(struct sock *sk, int val);
 444void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
 445int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 446		size_t len, int nonblock, int flags, int *addr_len);
 447void tcp_parse_options(const struct sk_buff *skb,
 448		       struct tcp_options_received *opt_rx,
 449		       int estab, struct tcp_fastopen_cookie *foc);
 450const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 451
 452/*
 453 *	TCP v4 functions exported for the inet6 API
 454 */
 455
 456void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 457int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 458struct sock *tcp_create_openreq_child(struct sock *sk,
 459				      struct request_sock *req,
 460				      struct sk_buff *skb);
 461struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 462				  struct request_sock *req,
 463				  struct dst_entry *dst);
 464int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 465int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 466int tcp_connect(struct sock *sk);
 467struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 468				struct request_sock *req,
 469				struct tcp_fastopen_cookie *foc);
 470int tcp_disconnect(struct sock *sk, int flags);
 471
 472void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
 473int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
 474void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
 475
 476/* From syncookies.c */
 477int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
 478		      u32 cookie);
 479struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 480			     struct ip_options *opt);
 481#ifdef CONFIG_SYN_COOKIES
 482
 483/* Syncookies use a monotonic timer which increments every 60 seconds.
 484 * This counter is used both as a hash input and partially encoded into
 485 * the cookie value.  A cookie is only validated further if the delta
 486 * between the current counter value and the encoded one is less than this,
 487 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 488 * the counter advances immediately after a cookie is generated).
 489 */
 490#define MAX_SYNCOOKIE_AGE 2
 491
 492static inline u32 tcp_cookie_time(void)
 493{
 494	u64 val = get_jiffies_64();
 495
 496	do_div(val, 60 * HZ);
 497	return val;
 498}
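/* Example (illustrative, not in the original source): the counter above
 * advances once every 60 seconds of jiffies time, whatever HZ is.  With
 * MAX_SYNCOOKIE_AGE == 2, a cookie that encodes counter value N is
 * accepted while the counter reads N or N+1, i.e. for at most ~120 seconds
 * and possibly as little as ~60, matching the comment above.
 */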
 499
 500u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
 501			      u16 *mssp);
 502__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mss);
 503#else
 504static inline __u32 cookie_v4_init_sequence(struct sock *sk,
 505					    struct sk_buff *skb,
 506					    __u16 *mss)
 507{
 508	return 0;
 509}
 510#endif
 511
 512__u32 cookie_init_timestamp(struct request_sock *req);
 513bool cookie_check_timestamp(struct tcp_options_received *opt, struct net *net,
 514			    bool *ecn_ok);
 515
 516/* From net/ipv6/syncookies.c */
 517int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
 518		      u32 cookie);
 519struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
 520#ifdef CONFIG_SYN_COOKIES
 521u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
 522			      const struct tcphdr *th, u16 *mssp);
 523__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
 524			      __u16 *mss);
 525#else
 526static inline __u32 cookie_v6_init_sequence(struct sock *sk,
 527					    struct sk_buff *skb,
 528					    __u16 *mss)
 529{
 530	return 0;
 531}
 532#endif
 533/* tcp_output.c */
 534
 535void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
 536			       int nonagle);
 537bool tcp_may_send_now(struct sock *sk);
 538int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
 539int tcp_retransmit_skb(struct sock *, struct sk_buff *);
 540void tcp_retransmit_timer(struct sock *sk);
 541void tcp_xmit_retransmit_queue(struct sock *);
 542void tcp_simple_retransmit(struct sock *);
 543int tcp_trim_head(struct sock *, struct sk_buff *, u32);
 544int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
 545
 546void tcp_send_probe0(struct sock *);
 547void tcp_send_partial(struct sock *);
 548int tcp_write_wakeup(struct sock *);
 549void tcp_send_fin(struct sock *sk);
 550void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 551int tcp_send_synack(struct sock *);
 552bool tcp_syn_flood_action(struct sock *sk, const struct sk_buff *skb,
 553			  const char *proto);
 554void tcp_push_one(struct sock *, unsigned int mss_now);
 555void tcp_send_ack(struct sock *sk);
 556void tcp_send_delayed_ack(struct sock *sk);
 557void tcp_send_loss_probe(struct sock *sk);
 558bool tcp_schedule_loss_probe(struct sock *sk);
 559
 560/* tcp_input.c */
 561void tcp_cwnd_application_limited(struct sock *sk);
 562void tcp_resume_early_retransmit(struct sock *sk);
 563void tcp_rearm_rto(struct sock *sk);
 564void tcp_reset(struct sock *sk);
 565
 566/* tcp_timer.c */
 567void tcp_init_xmit_timers(struct sock *);
 568static inline void tcp_clear_xmit_timers(struct sock *sk)
 569{
 570	inet_csk_clear_xmit_timers(sk);
 571}
 572
 573unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
 574unsigned int tcp_current_mss(struct sock *sk);
 575
  576/* Bound MSS / TSO packet size to half of the window */
 577static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
 578{
 579	int cutoff;
 580
 581	/* When peer uses tiny windows, there is no use in packetizing
 582	 * to sub-MSS pieces for the sake of SWS or making sure there
 583	 * are enough packets in the pipe for fast recovery.
 584	 *
 585	 * On the other hand, for extremely large MSS devices, handling
 586	 * smaller than MSS windows in this way does make sense.
 587	 */
 588	if (tp->max_window >= 512)
 589		cutoff = (tp->max_window >> 1);
 590	else
 591		cutoff = tp->max_window;
 592
 593	if (cutoff && pktsize > cutoff)
 594		return max_t(int, cutoff, 68U - tp->tcp_header_len);
 595	else
 596		return pktsize;
 597}
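/* Example (illustrative): with tp->max_window == 1000, cutoff is 500, so a
 * 1460-byte payload is bounded to max_t(int, 500, 68 - tcp_header_len),
 * i.e. 500.  For a tiny window such as max_window == 256 (< 512), cutoff
 * is the whole window rather than half of it, per the rationale above.
 */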
 598
 599/* tcp.c */
 600void tcp_get_info(const struct sock *, struct tcp_info *);
 601
 602/* Read 'sendfile()'-style from a TCP socket */
 603typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
 604				unsigned int, size_t);
 605int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 606		  sk_read_actor_t recv_actor);
 607
 608void tcp_initialize_rcv_mss(struct sock *sk);
 609
 610int tcp_mtu_to_mss(struct sock *sk, int pmtu);
 611int tcp_mss_to_mtu(struct sock *sk, int mss);
 612void tcp_mtup_init(struct sock *sk);
 613void tcp_init_buffer_space(struct sock *sk);
 614
 615static inline void tcp_bound_rto(const struct sock *sk)
 616{
 617	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
 618		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
 619}
 620
 621static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
 622{
 623	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
 624}
 625
 626static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
 627{
 628	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
 629			       ntohl(TCP_FLAG_ACK) |
 630			       snd_wnd);
 631}
 632
 633static inline void tcp_fast_path_on(struct tcp_sock *tp)
 634{
 635	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
 636}
 637
 638static inline void tcp_fast_path_check(struct sock *sk)
 639{
 640	struct tcp_sock *tp = tcp_sk(sk);
 641
 642	if (skb_queue_empty(&tp->out_of_order_queue) &&
 643	    tp->rcv_wnd &&
 644	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
 645	    !tp->urg_data)
 646		tcp_fast_path_on(tp);
 647}
 648
 649/* Compute the actual rto_min value */
 650static inline u32 tcp_rto_min(struct sock *sk)
 651{
 652	const struct dst_entry *dst = __sk_dst_get(sk);
 653	u32 rto_min = TCP_RTO_MIN;
 654
 655	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
 656		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
 657	return rto_min;
 658}
 659
 660static inline u32 tcp_rto_min_us(struct sock *sk)
 661{
 662	return jiffies_to_usecs(tcp_rto_min(sk));
 663}
 664
 665/* Compute the actual receive window we are currently advertising.
  666 * Rcv_nxt can be after the window if our peer pushes more data
 667 * than the offered window.
 668 */
 669static inline u32 tcp_receive_window(const struct tcp_sock *tp)
 670{
 671	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
 672
 673	if (win < 0)
 674		win = 0;
 675	return (u32) win;
 676}
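/* Example (illustrative): rcv_wup == 1000 (rcv_nxt when we last advertised
 * a window), rcv_wnd == 500 and rcv_nxt == 1200 leave 300 bytes of the
 * offered window; had the peer pushed rcv_nxt to 1600, win would be
 * negative and the result clamps to 0.
 */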
 677
 678/* Choose a new window, without checks for shrinking, and without
 679 * scaling applied to the result.  The caller does these things
 680 * if necessary.  This is a "raw" window selection.
 681 */
 682u32 __tcp_select_window(struct sock *sk);
 683
 684void tcp_send_window_probe(struct sock *sk);
 685
 686/* TCP timestamps are only 32-bits, this causes a slight
 687 * complication on 64-bit systems since we store a snapshot
 688 * of jiffies in the buffer control blocks below.  We decided
 689 * to use only the low 32-bits of jiffies and hide the ugly
 690 * casts with the following macro.
 691 */
 692#define tcp_time_stamp		((__u32)(jiffies))
 693
 694#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
 695
 696#define TCPHDR_FIN 0x01
 697#define TCPHDR_SYN 0x02
 698#define TCPHDR_RST 0x04
 699#define TCPHDR_PSH 0x08
 700#define TCPHDR_ACK 0x10
 701#define TCPHDR_URG 0x20
 702#define TCPHDR_ECE 0x40
 703#define TCPHDR_CWR 0x80
 704
 705/* This is what the send packet queuing engine uses to pass
 706 * TCP per-packet control information to the transmission code.
  707 * We also store the host-order sequence numbers in here.
 708 * This is 44 bytes if IPV6 is enabled.
 709 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 710 */
 711struct tcp_skb_cb {
 712	union {
 713		struct inet_skb_parm	h4;
 714#if IS_ENABLED(CONFIG_IPV6)
 715		struct inet6_skb_parm	h6;
 716#endif
 717	} header;	/* For incoming frames		*/
 718	__u32		seq;		/* Starting sequence number	*/
 719	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
 720	__u32		when;		/* used to compute rtt's	*/
 721	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/
 722
 723	__u8		sacked;		/* State flags for SACK/FACK.	*/
 724#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
 725#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
 726#define TCPCB_LOST		0x04	/* SKB is lost			*/
 727#define TCPCB_TAGBITS		0x07	/* All tag bits			*/
 728#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
 729#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
 730
 731	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
 732	/* 1 byte hole */
 733	__u32		ack_seq;	/* Sequence number ACK'd	*/
 734};
 735
 736#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
 737
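/* Example (illustrative sketch): TCP code reads the per-packet state
 * through this cast; a typical transmit-side window check looks like
 *
 *	if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
 *		...;	(segment ends beyond the advertised window)
 */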
 738/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set
 739 *
 740 * If we receive a SYN packet with these bits set, it means a network is
 741 * playing bad games with TOS bits. In order to avoid possible false congestion
  742 * notifications, we disable TCP ECN negotiation.
 743 */
 744static inline void
 745TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb,
 746		struct net *net)
 747{
 748	const struct tcphdr *th = tcp_hdr(skb);
 749
 750	if (net->ipv4.sysctl_tcp_ecn && th->ece && th->cwr &&
 751	    INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield))
 752		inet_rsk(req)->ecn_ok = 1;
 753}
 754
 755/* Due to TSO, an SKB can be composed of multiple actual
 756 * packets.  To keep these tracked properly, we use this.
 757 */
 758static inline int tcp_skb_pcount(const struct sk_buff *skb)
 759{
 760	return skb_shinfo(skb)->gso_segs;
 761}
 762
 763/* This is valid iff tcp_skb_pcount() > 1. */
 764static inline int tcp_skb_mss(const struct sk_buff *skb)
 765{
 766	return skb_shinfo(skb)->gso_size;
 767}
 768
 769/* Events passed to congestion control interface */
 770enum tcp_ca_event {
 771	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
 772	CA_EVENT_CWND_RESTART,	/* congestion window restart */
 773	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
 774	CA_EVENT_LOSS,		/* loss timeout */
 775	CA_EVENT_FAST_ACK,	/* in sequence ack */
 776	CA_EVENT_SLOW_ACK,	/* other ack */
 777};
 778
 779/*
 780 * Interface for adding new TCP congestion control handlers
 781 */
 782#define TCP_CA_NAME_MAX	16
 783#define TCP_CA_MAX	128
 784#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)
 785
 786#define TCP_CONG_NON_RESTRICTED 0x1
 787
 788struct tcp_congestion_ops {
 789	struct list_head	list;
 790	unsigned long flags;
 791
 792	/* initialize private data (optional) */
 793	void (*init)(struct sock *sk);
 794	/* cleanup private data  (optional) */
 795	void (*release)(struct sock *sk);
 796
 797	/* return slow start threshold (required) */
 798	u32 (*ssthresh)(struct sock *sk);
 799	/* do new cwnd calculation (required) */
 800	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
 801	/* call before changing ca_state (optional) */
 802	void (*set_state)(struct sock *sk, u8 new_state);
 803	/* call when cwnd event occurs (optional) */
 804	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
 805	/* new value of cwnd after loss (optional) */
 806	u32  (*undo_cwnd)(struct sock *sk);
 807	/* hook for packet ack accounting (optional) */
 808	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
 809	/* get info for inet_diag (optional) */
 810	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);
 811
 812	char 		name[TCP_CA_NAME_MAX];
 813	struct module 	*owner;
 814};
 815
 816int tcp_register_congestion_control(struct tcp_congestion_ops *type);
 817void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
 818
 819void tcp_init_congestion_control(struct sock *sk);
 820void tcp_cleanup_congestion_control(struct sock *sk);
 821int tcp_set_default_congestion_control(const char *name);
 822void tcp_get_default_congestion_control(char *name);
 823void tcp_get_available_congestion_control(char *buf, size_t len);
 824void tcp_get_allowed_congestion_control(char *buf, size_t len);
 825int tcp_set_allowed_congestion_control(char *allowed);
 826int tcp_set_congestion_control(struct sock *sk, const char *name);
 827int tcp_slow_start(struct tcp_sock *tp, u32 acked);
 828void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
 829
 830extern struct tcp_congestion_ops tcp_init_congestion_ops;
 831u32 tcp_reno_ssthresh(struct sock *sk);
 832void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
 833extern struct tcp_congestion_ops tcp_reno;
 834
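/* A minimal sketch (illustrative only, built on the Reno helpers above) of
 * how a congestion control module fills in tcp_congestion_ops and registers
 * itself; "tcp_example" is a hypothetical name, see net/ipv4/tcp_cong.c for
 * the real built-in Reno definition:
 *
 *	static struct tcp_congestion_ops tcp_example __read_mostly = {
 *		.flags		= TCP_CONG_NON_RESTRICTED,
 *		.ssthresh	= tcp_reno_ssthresh,	(required)
 *		.cong_avoid	= tcp_reno_cong_avoid,	(required)
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init tcp_example_register(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 *	module_init(tcp_example_register);
 */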
 835static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
 836{
 837	struct inet_connection_sock *icsk = inet_csk(sk);
 838
 839	if (icsk->icsk_ca_ops->set_state)
 840		icsk->icsk_ca_ops->set_state(sk, ca_state);
 841	icsk->icsk_ca_state = ca_state;
 842}
 843
 844static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
 845{
 846	const struct inet_connection_sock *icsk = inet_csk(sk);
 847
 848	if (icsk->icsk_ca_ops->cwnd_event)
 849		icsk->icsk_ca_ops->cwnd_event(sk, event);
 850}
 851
 852/* These functions determine how the current flow behaves in respect of SACK
 853 * handling. SACK is negotiated with the peer, and therefore it can vary
 854 * between different flows.
 855 *
 856 * tcp_is_sack - SACK enabled
 857 * tcp_is_reno - No SACK
 858 * tcp_is_fack - FACK enabled, implies SACK enabled
 859 */
 860static inline int tcp_is_sack(const struct tcp_sock *tp)
 861{
 862	return tp->rx_opt.sack_ok;
 863}
 864
 865static inline bool tcp_is_reno(const struct tcp_sock *tp)
 866{
 867	return !tcp_is_sack(tp);
 868}
 869
 870static inline bool tcp_is_fack(const struct tcp_sock *tp)
 871{
 872	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
 873}
 874
 875static inline void tcp_enable_fack(struct tcp_sock *tp)
 876{
 877	tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
 878}
 879
 880/* TCP early-retransmit (ER) is similar to but more conservative than
 881 * the thin-dupack feature.  Enable ER only if thin-dupack is disabled.
 882 */
 883static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
 884{
 885	tp->do_early_retrans = sysctl_tcp_early_retrans &&
 886		sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack &&
 887		sysctl_tcp_reordering == 3;
 888}
 889
 890static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
 891{
 892	tp->do_early_retrans = 0;
 893}
 894
 895static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
 896{
 897	return tp->sacked_out + tp->lost_out;
 898}
 899
 900/* This determines how many packets are "in the network" to the best
 901 * of our knowledge.  In many cases it is conservative, but where
 902 * detailed information is available from the receiver (via SACK
 903 * blocks etc.) we can make more aggressive calculations.
 904 *
  905 * Use this for decisions involving congestion control; use just
 906 * tp->packets_out to determine if the send queue is empty or not.
 907 *
 908 * Read this equation as:
 909 *
 910 *	"Packets sent once on transmission queue" MINUS
 911 *	"Packets left network, but not honestly ACKed yet" PLUS
 912 *	"Packets fast retransmitted"
 913 */
 914static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
 915{
 916	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
 917}
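/* Example (illustrative): packets_out == 10, sacked_out == 2, lost_out == 1
 * and retrans_out == 1 give 10 - (2 + 1) + 1 == 8 packets presumed in the
 * network, matching the three-term equation above.
 */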
 918
 919#define TCP_INFINITE_SSTHRESH	0x7fffffff
 920
 921static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
 922{
 923	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
 924}
 925
 926static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
 927{
 928	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
 929	       (1 << inet_csk(sk)->icsk_ca_state);
 930}
 931
 932/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 933 * The exception is cwnd reduction phase, when cwnd is decreasing towards
 934 * ssthresh.
 935 */
 936static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 937{
 938	const struct tcp_sock *tp = tcp_sk(sk);
 939
 940	if (tcp_in_cwnd_reduction(sk))
 941		return tp->snd_ssthresh;
 942	else
 943		return max(tp->snd_ssthresh,
 944			   ((tp->snd_cwnd >> 1) +
 945			    (tp->snd_cwnd >> 2)));
 946}
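/* Note (illustrative): (snd_cwnd >> 1) + (snd_cwnd >> 2) is 3/4 of cwnd,
 * so e.g. snd_cwnd == 40 with snd_ssthresh == 20 yields max(20, 30) == 30.
 */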
 947
 948/* Use define here intentionally to get WARN_ON location shown at the caller */
 949#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
 950
 951void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
 952__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
 953
 954/* The maximum number of MSS of available cwnd for which TSO defers
 955 * sending if not using sysctl_tcp_tso_win_divisor.
 956 */
 957static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
 958{
 959	return 3;
 960}
 961
 962/* Slow start with delack produces 3 packets of burst, so that
 963 * it is safe "de facto".  This will be the default - same as
 964 * the default reordering threshold - but if reordering increases,
 965 * we must be able to allow cwnd to burst at least this much in order
 966 * to not pull it back when holes are filled.
 967 */
 968static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
 969{
 970	return tp->reordering;
 971}
 972
 973/* Returns end sequence number of the receiver's advertised window */
 974static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
 975{
 976	return tp->snd_una + tp->snd_wnd;
 977}
 978bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
 979
 980static inline void tcp_check_probe_timer(struct sock *sk)
 981{
 982	const struct tcp_sock *tp = tcp_sk(sk);
 983	const struct inet_connection_sock *icsk = inet_csk(sk);
 984
 985	if (!tp->packets_out && !icsk->icsk_pending)
 986		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
 987					  icsk->icsk_rto, TCP_RTO_MAX);
 988}
 989
 990static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
 991{
 992	tp->snd_wl1 = seq;
 993}
 994
 995static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
 996{
 997	tp->snd_wl1 = seq;
 998}
 999
1000/*
1001 * Calculate(/check) TCP checksum
1002 */
1003static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1004				   __be32 daddr, __wsum base)
1005{
1006	return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
1007}
1008
1009static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
1010{
1011	return __skb_checksum_complete(skb);
1012}
1013
1014static inline bool tcp_checksum_complete(struct sk_buff *skb)
1015{
1016	return !skb_csum_unnecessary(skb) &&
1017		__tcp_checksum_complete(skb);
1018}
1019
1020/* Prequeue for VJ style copy to user, combined with checksumming. */
1021
1022static inline void tcp_prequeue_init(struct tcp_sock *tp)
1023{
1024	tp->ucopy.task = NULL;
1025	tp->ucopy.len = 0;
1026	tp->ucopy.memory = 0;
1027	skb_queue_head_init(&tp->ucopy.prequeue);
1028#ifdef CONFIG_NET_DMA
1029	tp->ucopy.dma_chan = NULL;
1030	tp->ucopy.wakeup = 0;
1031	tp->ucopy.pinned_list = NULL;
1032	tp->ucopy.dma_cookie = 0;
1033#endif
1034}
1035
1036bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
1037
1038#undef STATE_TRACE
1039
1040#ifdef STATE_TRACE
1041static const char *statename[]={
1042	"Unused","Established","Syn Sent","Syn Recv",
1043	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
1044	"Close Wait","Last ACK","Listen","Closing"
1045};
1046#endif
1047void tcp_set_state(struct sock *sk, int state);
1048
1049void tcp_done(struct sock *sk);
1050
1051static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
1052{
1053	rx_opt->dsack = 0;
1054	rx_opt->num_sacks = 0;
1055}
1056
1057u32 tcp_default_init_rwnd(u32 mss);
1058
1059/* Determine a window scaling and initial window to offer. */
1060void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
1061			       __u32 *window_clamp, int wscale_ok,
1062			       __u8 *rcv_wscale, __u32 init_rcv_wnd);
1063
1064static inline int tcp_win_from_space(int space)
1065{
1066	return sysctl_tcp_adv_win_scale<=0 ?
1067		(space>>(-sysctl_tcp_adv_win_scale)) :
1068		space - (space>>sysctl_tcp_adv_win_scale);
1069}
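/* Example (illustrative, assuming sysctl_tcp_adv_win_scale == 1, the
 * default in this kernel): space == 65536 yields 65536 - (65536 >> 1)
 * == 32768, i.e. half the buffer is offered as window and half is
 * reserved for skb overhead; a scale of 2 would offer 3/4 (49152).
 */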
1070
1071/* Note: caller must be prepared to deal with negative returns */ 
1072static inline int tcp_space(const struct sock *sk)
1073{
1074	return tcp_win_from_space(sk->sk_rcvbuf -
1075				  atomic_read(&sk->sk_rmem_alloc));
1076} 
1077
1078static inline int tcp_full_space(const struct sock *sk)
1079{
1080	return tcp_win_from_space(sk->sk_rcvbuf); 
1081}
1082
1083static inline void tcp_openreq_init(struct request_sock *req,
1084				    struct tcp_options_received *rx_opt,
1085				    struct sk_buff *skb)
1086{
1087	struct inet_request_sock *ireq = inet_rsk(req);
1088
1089	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
1090	req->cookie_ts = 0;
1091	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
1092	tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
1093	tcp_rsk(req)->snt_synack = 0;
1094	req->mss = rx_opt->mss_clamp;
1095	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
1096	ireq->tstamp_ok = rx_opt->tstamp_ok;
1097	ireq->sack_ok = rx_opt->sack_ok;
1098	ireq->snd_wscale = rx_opt->snd_wscale;
1099	ireq->wscale_ok = rx_opt->wscale_ok;
1100	ireq->acked = 0;
1101	ireq->ecn_ok = 0;
1102	ireq->ir_rmt_port = tcp_hdr(skb)->source;
1103	ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
1104}
1105
1106void tcp_enter_memory_pressure(struct sock *sk);
1107
1108static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1109{
1110	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
1111}
1112
1113static inline int keepalive_time_when(const struct tcp_sock *tp)
1114{
1115	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
1116}
1117
1118static inline int keepalive_probes(const struct tcp_sock *tp)
1119{
1120	return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
1121}
1122
1123static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1124{
1125	const struct inet_connection_sock *icsk = &tp->inet_conn;
1126
1127	return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
1128			  tcp_time_stamp - tp->rcv_tstamp);
1129}
1130
1131static inline int tcp_fin_time(const struct sock *sk)
1132{
1133	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
1134	const int rto = inet_csk(sk)->icsk_rto;
1135
1136	if (fin_timeout < (rto << 2) - (rto >> 1))
1137		fin_timeout = (rto << 2) - (rto >> 1);
1138
1139	return fin_timeout;
1140}
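/* Note (illustrative): (rto << 2) - (rto >> 1) is 3.5 * RTO, so the
 * FIN-WAIT-2 timeout is floored at three and a half retransmission
 * timeouts whatever linger2 or the sysctl say.
 */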
1141
1142static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1143				  int paws_win)
1144{
1145	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1146		return true;
1147	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
1148		return true;
1149	/*
 1150	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0;
 1151	 * subsequent TCP messages then carry valid values. Ignore a 0 value,
 1152	 * or else a 'negative' tsval might cause us to reject their packets.
1153	 */
1154	if (!rx_opt->ts_recent)
1155		return true;
1156	return false;
1157}
1158
1159static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1160				   int rst)
1161{
1162	if (tcp_paws_check(rx_opt, 0))
1163		return false;
1164
 1165	/* RST segments are not recommended to carry a timestamp,
 1166	   and, if they do, it is recommended to ignore PAWS because
 1167	   "their cleanup function should take precedence over timestamps."
 1168	   Certainly, this is a mistake. It is necessary to understand the
 1169	   reasons for this constraint before relaxing it: if the peer
 1170	   reboots, its clock may go out-of-sync and half-open connections
 1171	   will not be reset. Actually, the problem would not exist if all
 1172	   implementations followed the draft about maintaining clock state
 1173	   across reboots. Linux-2.2 DOES NOT!
1174
1175	   However, we can relax time bounds for RST segments to MSL.
1176	 */
1177	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
1178		return false;
1179	return true;
1180}
1181
1182static inline void tcp_mib_init(struct net *net)
1183{
1184	/* See RFC 2012 */
1185	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
1186	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1187	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1188	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
1189}
1190
1191/* from STCP */
1192static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1193{
1194	tp->lost_skb_hint = NULL;
1195}
1196
1197static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1198{
1199	tcp_clear_retrans_hints_partial(tp);
1200	tp->retransmit_skb_hint = NULL;
1201}
1202
1203/* MD5 Signature */
1204struct crypto_hash;
1205
1206union tcp_md5_addr {
1207	struct in_addr  a4;
1208#if IS_ENABLED(CONFIG_IPV6)
1209	struct in6_addr	a6;
1210#endif
1211};
1212
1213/* - key database */
1214struct tcp_md5sig_key {
1215	struct hlist_node	node;
1216	u8			keylen;
1217	u8			family; /* AF_INET or AF_INET6 */
1218	union tcp_md5_addr	addr;
1219	u8			key[TCP_MD5SIG_MAXKEYLEN];
1220	struct rcu_head		rcu;
1221};
1222
1223/* - sock block */
1224struct tcp_md5sig_info {
1225	struct hlist_head	head;
1226	struct rcu_head		rcu;
1227};
1228
1229/* - pseudo header */
1230struct tcp4_pseudohdr {
1231	__be32		saddr;
1232	__be32		daddr;
1233	__u8		pad;
1234	__u8		protocol;
1235	__be16		len;
1236};
1237
1238struct tcp6_pseudohdr {
1239	struct in6_addr	saddr;
1240	struct in6_addr daddr;
1241	__be32		len;
1242	__be32		protocol;	/* including padding */
1243};
1244
1245union tcp_md5sum_block {
1246	struct tcp4_pseudohdr ip4;
1247#if IS_ENABLED(CONFIG_IPV6)
1248	struct tcp6_pseudohdr ip6;
1249#endif
1250};
1251
1252/* - pool: digest algorithm, hash description and scratch buffer */
1253struct tcp_md5sig_pool {
1254	struct hash_desc	md5_desc;
1255	union tcp_md5sum_block	md5_blk;
1256};
1257
1258/* - functions */
1259int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1260			const struct sock *sk, const struct request_sock *req,
1261			const struct sk_buff *skb);
1262int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1263		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
1264int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1265		   int family);
1266struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
1267					 struct sock *addr_sk);
1268
1269#ifdef CONFIG_TCP_MD5SIG
1270struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
1271					 const union tcp_md5_addr *addr,
1272					 int family);
1273#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
1274#else
1275static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
1276					 const union tcp_md5_addr *addr,
1277					 int family)
1278{
1279	return NULL;
1280}
1281#define tcp_twsk_md5_key(twsk)	NULL
1282#endif
1283
1284bool tcp_alloc_md5sig_pool(void);
1285
1286struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
1287static inline void tcp_put_md5sig_pool(void)
1288{
1289	local_bh_enable();
1290}
1291
1292int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
1293int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
1294			  unsigned int header_len);
1295int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1296		     const struct tcp_md5sig_key *key);
1297
1298/* From tcp_fastopen.c */
1299void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1300			    struct tcp_fastopen_cookie *cookie, int *syn_loss,
1301			    unsigned long *last_syn_loss);
1302void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1303			    struct tcp_fastopen_cookie *cookie, bool syn_lost);
1304struct tcp_fastopen_request {
1305	/* Fast Open cookie. Size 0 means a cookie request */
1306	struct tcp_fastopen_cookie	cookie;
1307	struct msghdr			*data;  /* data in MSG_FASTOPEN */
1308	size_t				size;
1309	int				copied;	/* queued in tcp_connect() */
1310};
1311void tcp_free_fastopen_req(struct tcp_sock *tp);
1312
1313extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
1314int tcp_fastopen_reset_cipher(void *key, unsigned int len);
1315void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
1316			     struct tcp_fastopen_cookie *foc);
1317void tcp_fastopen_init_key_once(bool publish);
1318#define TCP_FASTOPEN_KEY_LENGTH 16
1319
1320/* Fastopen key context */
1321struct tcp_fastopen_context {
1322	struct crypto_cipher	*tfm;
1323	__u8			key[TCP_FASTOPEN_KEY_LENGTH];
1324	struct rcu_head		rcu;
1325};
1326
1327/* write queue abstraction */
1328static inline void tcp_write_queue_purge(struct sock *sk)
1329{
1330	struct sk_buff *skb;
1331
1332	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
1333		sk_wmem_free_skb(sk, skb);
1334	sk_mem_reclaim(sk);
1335	tcp_clear_all_retrans_hints(tcp_sk(sk));
1336}
1337
1338static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
1339{
1340	return skb_peek(&sk->sk_write_queue);
1341}
1342
1343static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1344{
1345	return skb_peek_tail(&sk->sk_write_queue);
1346}
1347
1348static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
1349						   const struct sk_buff *skb)
1350{
1351	return skb_queue_next(&sk->sk_write_queue, skb);
1352}
1353
1354static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
1355						   const struct sk_buff *skb)
1356{
1357	return skb_queue_prev(&sk->sk_write_queue, skb);
1358}
1359
1360#define tcp_for_write_queue(skb, sk)					\
1361	skb_queue_walk(&(sk)->sk_write_queue, skb)
1362
1363#define tcp_for_write_queue_from(skb, sk)				\
1364	skb_queue_walk_from(&(sk)->sk_write_queue, skb)
1365
1366#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
1367	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1368
1369static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1370{
1371	return sk->sk_send_head;
1372}
1373
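/* Example (illustrative sketch): retransmission code in tcp_output.c walks
 * the sent-but-unacked part of the queue by combining the iterators above
 * with tcp_send_head() as the stop mark:
 *
 *	struct sk_buff *skb;
 *
 *	tcp_for_write_queue(skb, sk) {
 *		if (skb == tcp_send_head(sk))
 *			break;
 *		(skbs before this point have been transmitted at least once)
 *	}
 */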
1374static inline bool tcp_skb_is_last(const struct sock *sk,
1375				   const struct sk_buff *skb)
1376{
1377	return skb_queue_is_last(&sk->sk_write_queue, skb);
1378}
1379
1380static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
1381{
1382	if (tcp_skb_is_last(sk, skb))
1383		sk->sk_send_head = NULL;
1384	else
1385		sk->sk_send_head = tcp_write_queue_next(sk, skb);
1386}
1387
1388static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
1389{
1390	if (sk->sk_send_head == skb_unlinked)
1391		sk->sk_send_head = NULL;
1392}
1393
1394static inline void tcp_init_send_head(struct sock *sk)
1395{
1396	sk->sk_send_head = NULL;
1397}
1398
1399static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1400{
1401	__skb_queue_tail(&sk->sk_write_queue, skb);
1402}
1403
1404static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1405{
1406	__tcp_add_write_queue_tail(sk, skb);
1407
1408	/* Queue it, remembering where we must start sending. */
1409	if (sk->sk_send_head == NULL) {
1410		sk->sk_send_head = skb;
1411
1412		if (tcp_sk(sk)->highest_sack == NULL)
1413			tcp_sk(sk)->highest_sack = skb;
1414	}
1415}
1416
1417static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
1418{
1419	__skb_queue_head(&sk->sk_write_queue, skb);
1420}
1421
1422/* Insert buff after skb on the write queue of sk.  */
1423static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
1424						struct sk_buff *buff,
1425						struct sock *sk)
1426{
1427	__skb_queue_after(&sk->sk_write_queue, skb, buff);
1428}
1429
1430/* Insert new before skb on the write queue of sk.  */
1431static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1432						  struct sk_buff *skb,
1433						  struct sock *sk)
1434{
1435	__skb_queue_before(&sk->sk_write_queue, skb, new);
1436
1437	if (sk->sk_send_head == skb)
1438		sk->sk_send_head = new;
1439}
1440
1441static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1442{
1443	__skb_unlink(skb, &sk->sk_write_queue);
1444}
1445
1446static inline bool tcp_write_queue_empty(struct sock *sk)
1447{
1448	return skb_queue_empty(&sk->sk_write_queue);
1449}
1450
1451static inline void tcp_push_pending_frames(struct sock *sk)
1452{
1453	if (tcp_send_head(sk)) {
1454		struct tcp_sock *tp = tcp_sk(sk);
1455
1456		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
1457	}
1458}
1459
1460/* Start sequence of the skb just after the highest skb with SACKed
1461 * bit, valid only if sacked_out > 0 or when the caller has ensured
1462 * validity by itself.
1463 */
1464static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1465{
1466	if (!tp->sacked_out)
1467		return tp->snd_una;
1468
1469	if (tp->highest_sack == NULL)
1470		return tp->snd_nxt;
1471
1472	return TCP_SKB_CB(tp->highest_sack)->seq;
1473}
1474
1475static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
1476{
1477	tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
1478						tcp_write_queue_next(sk, skb);
1479}
1480
1481static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
1482{
1483	return tcp_sk(sk)->highest_sack;
1484}
1485
1486static inline void tcp_highest_sack_reset(struct sock *sk)
1487{
1488	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
1489}
1490
1491/* Called when old skb is about to be deleted (to be combined with new skb) */
1492static inline void tcp_highest_sack_combine(struct sock *sk,
1493					    struct sk_buff *old,
1494					    struct sk_buff *new)
1495{
1496	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
1497		tcp_sk(sk)->highest_sack = new;
1498}
1499
1500/* Determines whether this is a thin stream (which may suffer from
1501 * increased latency). Used to trigger latency-reducing mechanisms.
1502 */
1503static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
1504{
1505	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
1506}
1507
1508/* /proc */
1509enum tcp_seq_states {
1510	TCP_SEQ_STATE_LISTENING,
1511	TCP_SEQ_STATE_OPENREQ,
1512	TCP_SEQ_STATE_ESTABLISHED,
1513};
1514
1515int tcp_seq_open(struct inode *inode, struct file *file);
1516
1517struct tcp_seq_afinfo {
1518	char				*name;
1519	sa_family_t			family;
1520	const struct file_operations	*seq_fops;
1521	struct seq_operations		seq_ops;
1522};
1523
1524struct tcp_iter_state {
1525	struct seq_net_private	p;
1526	sa_family_t		family;
1527	enum tcp_seq_states	state;
1528	struct sock		*syn_wait_sk;
1529	int			bucket, offset, sbucket, num;
1530	kuid_t			uid;
1531	loff_t			last_pos;
1532};
1533
1534int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
1535void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
1536
1537extern struct request_sock_ops tcp_request_sock_ops;
1538extern struct request_sock_ops tcp6_request_sock_ops;
1539
1540void tcp_v4_destroy_sock(struct sock *sk);
1541
1542struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
1543				netdev_features_t features);
1544struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
1545int tcp_gro_complete(struct sk_buff *skb);
1546
1547void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
1548
1549static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
1550{
1551	return tp->notsent_lowat ?: sysctl_tcp_notsent_lowat;
1552}
1553
1554static inline bool tcp_stream_memory_free(const struct sock *sk)
1555{
1556	const struct tcp_sock *tp = tcp_sk(sk);
1557	u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
1558
1559	return notsent_bytes < tcp_notsent_lowat(tp);
1560}
1561
1562#ifdef CONFIG_PROC_FS
1563int tcp4_proc_init(void);
1564void tcp4_proc_exit(void);
1565#endif
1566
1567/* TCP af-specific functions */
1568struct tcp_sock_af_ops {
1569#ifdef CONFIG_TCP_MD5SIG
1570	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
1571						struct sock *addr_sk);
1572	int			(*calc_md5_hash) (char *location,
1573						  struct tcp_md5sig_key *md5,
1574						  const struct sock *sk,
1575						  const struct request_sock *req,
1576						  const struct sk_buff *skb);
1577	int			(*md5_parse) (struct sock *sk,
1578					      char __user *optval,
1579					      int optlen);
1580#endif
1581};
1582
1583struct tcp_request_sock_ops {
1584#ifdef CONFIG_TCP_MD5SIG
1585	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
1586						struct request_sock *req);
1587	int			(*calc_md5_hash) (char *location,
1588						  struct tcp_md5sig_key *md5,
1589						  const struct sock *sk,
1590						  const struct request_sock *req,
1591						  const struct sk_buff *skb);
1592#endif
1593};
1594
1595int tcpv4_offload_init(void);
1596
1597void tcp_v4_init(void);
1598void tcp_init(void);
1599
1600#endif	/* _TCP_H */
v4.10.11
   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		Definitions for the TCP module.
   7 *
   8 * Version:	@(#)tcp.h	1.0.5	05/23/93
   9 *
  10 * Authors:	Ross Biro
  11 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12 *
  13 *		This program is free software; you can redistribute it and/or
  14 *		modify it under the terms of the GNU General Public License
  15 *		as published by the Free Software Foundation; either version
  16 *		2 of the License, or (at your option) any later version.
  17 */
  18#ifndef _TCP_H
  19#define _TCP_H
  20
  21#define FASTRETRANS_DEBUG 1
  22
  23#include <linux/list.h>
  24#include <linux/tcp.h>
  25#include <linux/bug.h>
  26#include <linux/slab.h>
  27#include <linux/cache.h>
  28#include <linux/percpu.h>
  29#include <linux/skbuff.h>
  30#include <linux/cryptohash.h>
  31#include <linux/kref.h>
  32#include <linux/ktime.h>
  33
  34#include <net/inet_connection_sock.h>
  35#include <net/inet_timewait_sock.h>
  36#include <net/inet_hashtables.h>
  37#include <net/checksum.h>
  38#include <net/request_sock.h>
  39#include <net/sock.h>
  40#include <net/snmp.h>
  41#include <net/ip.h>
  42#include <net/tcp_states.h>
  43#include <net/inet_ecn.h>
  44#include <net/dst.h>
  45
  46#include <linux/seq_file.h>
  47#include <linux/memcontrol.h>
  48
  49extern struct inet_hashinfo tcp_hashinfo;
  50
  51extern struct percpu_counter tcp_orphan_count;
  52void tcp_time_wait(struct sock *sk, int state, int timeo);
  53
  54#define MAX_TCP_HEADER	(128 + MAX_HEADER)
  55#define MAX_TCP_OPTION_SPACE 40
  56
  57/*
  58 * Never offer a window over 32767 without using window scaling. Some
  59 * poor stacks do signed 16bit maths!
  60 */
  61#define MAX_TCP_WINDOW		32767U
  62
  63/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
  64#define TCP_MIN_MSS		88U
  65
  66/* The least MTU to use for probing */
  67#define TCP_BASE_MSS		1024
  68
  69/* Probing interval, defaults to 10 minutes as per RFC4821 */
  70#define TCP_PROBE_INTERVAL	600
  71
  72/* Stop tcp mtu probing once the search range narrows below this (bytes) */
  73#define TCP_PROBE_THRESHOLD	8
  74
  75/* After receiving this amount of duplicate ACKs fast retransmit starts. */
  76#define TCP_FASTRETRANS_THRESH 3
  77
  78/* Maximal number of ACKs sent quickly to accelerate slow-start. */
  79#define TCP_MAX_QUICKACKS	16U
  80
  81/* urg_data states */
  82#define TCP_URG_VALID	0x0100
  83#define TCP_URG_NOTYET	0x0200
  84#define TCP_URG_READ	0x0400
  85
  86#define TCP_RETR1	3	/*
  87				 * This is how many retries it does before it
  88				 * tries to figure out if the gateway is
  89				 * down. Minimal RFC value is 3; it corresponds
  90				 * to ~3sec-8min depending on RTO.
  91				 */
  92
  93#define TCP_RETR2	15	/*
  94				 * This should take at least
  95				 * 90 minutes to time out.
  96				 * RFC1122 says that the limit is 100 sec.
  97				 * 15 is ~13-30min depending on RTO.
  98				 */
  99
 100#define TCP_SYN_RETRIES	 6	/* This is how many retries are done
 101				 * when active opening a connection.
 102				 * RFC1122 says the minimum retry MUST
 103				 * be at least 180secs.  Nevertheless
 104				 * this value corresponds to
 105				 * 63secs of retransmission with the
 106				 * current initial RTO.
 107				 */
 108
 109#define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
 110				 * when passive opening a connection.
 111				 * This corresponds to 31secs of
 112				 * retransmission with the current
 113				 * initial RTO.
 114				 */
 115
 116#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
 117				  * state, about 60 seconds	*/
 118#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
 119                                 /* BSD style FIN_WAIT2 deadlock breaker.
 120				  * It used to be 3min, new value is 60sec,
 121				  * to combine FIN-WAIT-2 timeout with
 122				  * TIME-WAIT timer.
 123				  */
 124
 125#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
 126#if HZ >= 100
 127#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
 128#define TCP_ATO_MIN	((unsigned)(HZ/25))
 129#else
 130#define TCP_DELACK_MIN	4U
 131#define TCP_ATO_MIN	4U
 132#endif
 133#define TCP_RTO_MAX	((unsigned)(120*HZ))
 134#define TCP_RTO_MIN	((unsigned)(HZ/5))
 135#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
 136#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
 137						 * used as a fallback RTO for the
 138						 * initial data transmission if no
 139						 * valid RTT sample has been acquired,
 140						 * most likely due to retrans in 3WHS.
 141						 */
 142
 143#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
 144					                 * for local resources.
 145					                 */
 146
 147#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
 148#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
 149#define TCP_KEEPALIVE_INTVL	(75*HZ)
 150
 151#define MAX_TCP_KEEPIDLE	32767
 152#define MAX_TCP_KEEPINTVL	32767
 153#define MAX_TCP_KEEPCNT		127
 154#define MAX_TCP_SYNCNT		127
 155
 156#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
 157
 158#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
 159#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
 160					 * after this time. It should be equal
 161					 * to (or greater than) TCP_TIMEWAIT_LEN
 162					 * to provide reliability equal to that
 163					 * provided by the timewait state.
 164					 */
 165#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
 166					 * timestamps. It must be less than
 167					 * minimal timewait lifetime.
 168					 */
 169/*
 170 *	TCP option
 171 */
 172
 173#define TCPOPT_NOP		1	/* Padding */
 174#define TCPOPT_EOL		0	/* End of options */
 175#define TCPOPT_MSS		2	/* Segment size negotiating */
 176#define TCPOPT_WINDOW		3	/* Window scaling */
 177#define TCPOPT_SACK_PERM        4       /* SACK Permitted */
 178#define TCPOPT_SACK             5       /* SACK Block */
 179#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
 180#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
 181#define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
 182#define TCPOPT_EXP		254	/* Experimental */
 183/* Magic number placed after the option value for sharing TCP
 184 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 185 */
 186#define TCPOPT_FASTOPEN_MAGIC	0xF989
 187
 188/*
 189 *     TCP option lengths
 190 */
 191
 192#define TCPOLEN_MSS            4
 193#define TCPOLEN_WINDOW         3
 194#define TCPOLEN_SACK_PERM      2
 195#define TCPOLEN_TIMESTAMP      10
 196#define TCPOLEN_MD5SIG         18
 197#define TCPOLEN_FASTOPEN_BASE  2
 198#define TCPOLEN_EXP_FASTOPEN_BASE  4
 199
 200/* But this is what stacks really send out. */
 201#define TCPOLEN_TSTAMP_ALIGNED		12
 202#define TCPOLEN_WSCALE_ALIGNED		4
 203#define TCPOLEN_SACKPERM_ALIGNED	4
 204#define TCPOLEN_SACK_BASE		2
 205#define TCPOLEN_SACK_BASE_ALIGNED	4
 206#define TCPOLEN_SACK_PERBLOCK		8
 207#define TCPOLEN_MD5SIG_ALIGNED		20
 208#define TCPOLEN_MSS_ALIGNED		4
 209
 210/* Flags in tp->nonagle */
 211#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
 212#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
 213#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */
 214
 215/* TCP thin-stream limits */
 216#define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */
 217
 218/* TCP initial congestion window as per rfc6928 */
 219#define TCP_INIT_CWND		10
 220
 221/* Bit Flags for sysctl_tcp_fastopen */
 222#define	TFO_CLIENT_ENABLE	1
 223#define	TFO_SERVER_ENABLE	2
 224#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */
 225
 226/* Accept SYN data w/o any cookie option */
 227#define	TFO_SERVER_COOKIE_NOT_REQD	0x200
 228
 229/* Force enable TFO on all listeners, i.e., not requiring the
 230 * TCP_FASTOPEN socket option.
 231 */
 232#define	TFO_SERVER_WO_SOCKOPT1	0x400
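
/* Editorial sketch: the bits above combine in the sysctl_tcp_fastopen value,
 * e.g. 0x1 | 0x2 = 3 enables both client and server support. A listener
 * still opts in per socket via TCP_FASTOPEN, whose argument is the queue
 * length for pending SYN-data requests; "fd" is a hypothetical listener.
 */
#if 0
	int qlen = 16;	/* max outstanding TFO requests */

	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
#endif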
 233
 234extern struct inet_timewait_death_row tcp_death_row;
 235
 236/* sysctl variables for tcp */
 237extern int sysctl_tcp_timestamps;
 238extern int sysctl_tcp_window_scaling;
 239extern int sysctl_tcp_sack;
 240extern int sysctl_tcp_fastopen;
 241extern int sysctl_tcp_retrans_collapse;
 242extern int sysctl_tcp_stdurg;
 243extern int sysctl_tcp_rfc1337;
 244extern int sysctl_tcp_abort_on_overflow;
 245extern int sysctl_tcp_max_orphans;
 246extern int sysctl_tcp_fack;
 247extern int sysctl_tcp_reordering;
 248extern int sysctl_tcp_max_reordering;
 249extern int sysctl_tcp_dsack;
 250extern long sysctl_tcp_mem[3];
 251extern int sysctl_tcp_wmem[3];
 252extern int sysctl_tcp_rmem[3];
 253extern int sysctl_tcp_app_win;
 254extern int sysctl_tcp_adv_win_scale;
 255extern int sysctl_tcp_frto;
 256extern int sysctl_tcp_low_latency;
 257extern int sysctl_tcp_nometrics_save;
 258extern int sysctl_tcp_moderate_rcvbuf;
 259extern int sysctl_tcp_tso_win_divisor;
 260extern int sysctl_tcp_workaround_signed_windows;
 261extern int sysctl_tcp_slow_start_after_idle;
 262extern int sysctl_tcp_thin_linear_timeouts;
 263extern int sysctl_tcp_thin_dupack;
 264extern int sysctl_tcp_early_retrans;
 265extern int sysctl_tcp_limit_output_bytes;
 266extern int sysctl_tcp_challenge_ack_limit;
 267extern int sysctl_tcp_min_tso_segs;
 268extern int sysctl_tcp_min_rtt_wlen;
 269extern int sysctl_tcp_autocorking;
 270extern int sysctl_tcp_invalid_ratelimit;
 271extern int sysctl_tcp_pacing_ss_ratio;
 272extern int sysctl_tcp_pacing_ca_ratio;
 273
 274extern atomic_long_t tcp_memory_allocated;
 275extern struct percpu_counter tcp_sockets_allocated;
 276extern int tcp_memory_pressure;
 277
 278/* optimized version of sk_under_memory_pressure() for TCP sockets */
 279static inline bool tcp_under_memory_pressure(const struct sock *sk)
 280{
 281	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 282	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 283		return true;
 284
 285	return tcp_memory_pressure;
 286}
 287/*
 288 * The next routines deal with comparing 32 bit unsigned ints
 289 * and worry about wraparound (automatic with unsigned arithmetic).
 290 */
 291
 292static inline bool before(__u32 seq1, __u32 seq2)
 293{
 294	return (__s32)(seq1 - seq2) < 0;
 295}
 296#define after(seq2, seq1) 	before(seq1, seq2)
 297
 298/* is s2<=s1<=s3 ? */
 299static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
 300{
 301	return seq3 - seq2 >= seq1 - seq2;
 302}
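
/* Editorial sketch: unsigned wraparound is what makes these comparisons
 * behave across the 2^32 sequence-space boundary.
 */
#if 0
	/* 0xFFFFFFF0 - 0x10 = 0xFFFFFFE0 is negative as __s32, so a
	 * sequence just below the wrap point is "before" one just past it. */
	BUG_ON(!before(0xFFFFFFF0U, 0x10U));

	/* 0x5 lies inside the wrapped range [0xFFFFFFF0, 0x10]. */
	BUG_ON(!between(0x5U, 0xFFFFFFF0U, 0x10U));
#endif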
 303
 304static inline bool tcp_out_of_memory(struct sock *sk)
 305{
 306	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
 307	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
 308		return true;
 309	return false;
 310}
 311
 312void sk_forced_mem_schedule(struct sock *sk, int size);
 313
 314static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
 315{
 316	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
 317	int orphans = percpu_counter_read_positive(ocp);
 318
 319	if (orphans << shift > sysctl_tcp_max_orphans) {
 320		orphans = percpu_counter_sum_positive(ocp);
 321		if (orphans << shift > sysctl_tcp_max_orphans)
 322			return true;
 323	}
 324	return false;
 325}
 326
 327bool tcp_check_oom(struct sock *sk, int shift);
 328
 329
 330extern struct proto tcp_prot;
 331
 332#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
 333#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
 334#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
 335#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
 336
 337void tcp_tasklet_init(void);
 338
 339void tcp_v4_err(struct sk_buff *skb, u32);
 340
 341void tcp_shutdown(struct sock *sk, int how);
 342
 343void tcp_v4_early_demux(struct sk_buff *skb);
 344int tcp_v4_rcv(struct sk_buff *skb);
 345
 346int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
 347int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 348int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
 349		 int flags);
 350void tcp_release_cb(struct sock *sk);
 351void tcp_wfree(struct sk_buff *skb);
 352void tcp_write_timer_handler(struct sock *sk);
 353void tcp_delack_timer_handler(struct sock *sk);
 354int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 355int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
 356void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 357			 const struct tcphdr *th, unsigned int len);
 358void tcp_rcv_space_adjust(struct sock *sk);
 359int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
 360void tcp_twsk_destructor(struct sock *sk);
 361ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
 362			struct pipe_inode_info *pipe, size_t len,
 363			unsigned int flags);
 364
 365static inline void tcp_dec_quickack_mode(struct sock *sk,
 366					 const unsigned int pkts)
 367{
 368	struct inet_connection_sock *icsk = inet_csk(sk);
 369
 370	if (icsk->icsk_ack.quick) {
 371		if (pkts >= icsk->icsk_ack.quick) {
 372			icsk->icsk_ack.quick = 0;
 373			/* Leaving quickack mode we deflate ATO. */
 374			icsk->icsk_ack.ato   = TCP_ATO_MIN;
 375		} else
 376			icsk->icsk_ack.quick -= pkts;
 377	}
 378}
 379
 380#define	TCP_ECN_OK		1
 381#define	TCP_ECN_QUEUE_CWR	2
 382#define	TCP_ECN_DEMAND_CWR	4
 383#define	TCP_ECN_SEEN		8
 384
 385enum tcp_tw_status {
 386	TCP_TW_SUCCESS = 0,
 387	TCP_TW_RST = 1,
 388	TCP_TW_ACK = 2,
 389	TCP_TW_SYN = 3
 390};
 391
 392
 393enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
 394					      struct sk_buff *skb,
 395					      const struct tcphdr *th);
 396struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 397			   struct request_sock *req, bool fastopen);
 398int tcp_child_process(struct sock *parent, struct sock *child,
 399		      struct sk_buff *skb);
 400void tcp_enter_loss(struct sock *sk);
 401void tcp_clear_retrans(struct tcp_sock *tp);
 402void tcp_update_metrics(struct sock *sk);
 403void tcp_init_metrics(struct sock *sk);
 404void tcp_metrics_init(void);
 405bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
 406			bool paws_check, bool timestamps);
 407bool tcp_remember_stamp(struct sock *sk);
 408bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
 409void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
 410void tcp_disable_fack(struct tcp_sock *tp);
 411void tcp_close(struct sock *sk, long timeout);
 412void tcp_init_sock(struct sock *sk);
 413unsigned int tcp_poll(struct file *file, struct socket *sock,
 414		      struct poll_table_struct *wait);
 415int tcp_getsockopt(struct sock *sk, int level, int optname,
 416		   char __user *optval, int __user *optlen);
 417int tcp_setsockopt(struct sock *sk, int level, int optname,
 418		   char __user *optval, unsigned int optlen);
 419int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
 420			  char __user *optval, int __user *optlen);
 421int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
 422			  char __user *optval, unsigned int optlen);
 423void tcp_set_keepalive(struct sock *sk, int val);
 424void tcp_syn_ack_timeout(const struct request_sock *req);
 425int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 426		int flags, int *addr_len);
 427void tcp_parse_options(const struct sk_buff *skb,
 428		       struct tcp_options_received *opt_rx,
 429		       int estab, struct tcp_fastopen_cookie *foc);
 430const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 431
 432/*
 433 *	TCP v4 functions exported for the inet6 API
 434 */
 435
 436void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 437void tcp_v4_mtu_reduced(struct sock *sk);
 438void tcp_req_err(struct sock *sk, u32 seq, bool abort);
 439int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 440struct sock *tcp_create_openreq_child(const struct sock *sk,
 441				      struct request_sock *req,
 442				      struct sk_buff *skb);
 443void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
 444struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 445				  struct request_sock *req,
 446				  struct dst_entry *dst,
 447				  struct request_sock *req_unhash,
 448				  bool *own_req);
 449int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 450int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 451int tcp_connect(struct sock *sk);
 452enum tcp_synack_type {
 453	TCP_SYNACK_NORMAL,
 454	TCP_SYNACK_FASTOPEN,
 455	TCP_SYNACK_COOKIE,
 456};
 457struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 458				struct request_sock *req,
 459				struct tcp_fastopen_cookie *foc,
 460				enum tcp_synack_type synack_type);
 461int tcp_disconnect(struct sock *sk, int flags);
 462
 463void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
 464int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
 465void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
 466
 467/* From syncookies.c */
 468struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
 469				 struct request_sock *req,
 470				 struct dst_entry *dst);
 471int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
 472		      u32 cookie);
 473struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
 474#ifdef CONFIG_SYN_COOKIES
 475
 476/* Syncookies use a monotonic timer which increments every 60 seconds.
 477 * This counter is used both as a hash input and partially encoded into
 478 * the cookie value.  A cookie is only validated further if the delta
 479 * between the current counter value and the encoded one is less than this,
 480 * i.e. a sent cookie is valid for at most 2*60 seconds (or less if
 481 * the counter advances immediately after a cookie is generated).
 482 */
 483#define MAX_SYNCOOKIE_AGE	2
 484#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
 485#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
 486
 487/* syncookies: remember time of last synqueue overflow
 488 * But do not dirty this field too often (once per second is enough)
 489 * It is racy as we do not hold a lock, but race is very minor.
 490 */
 491static inline void tcp_synq_overflow(const struct sock *sk)
 492{
 493	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
 494	unsigned long now = jiffies;
 495
 496	if (time_after(now, last_overflow + HZ))
 497		tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
 498}
 499
 500/* syncookies: no recent synqueue overflow on this listening socket? */
 501static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
 502{
 503	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
 504
 505	return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
 506}
 507
 508static inline u32 tcp_cookie_time(void)
 509{
 510	u64 val = get_jiffies_64();
 511
 512	do_div(val, TCP_SYNCOOKIE_PERIOD);
 513	return val;
 514}
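
/* Editorial sketch: a staleness check over the counter, matching the
 * comment above; "cookie_counter" stands in for the counter value decoded
 * from a received cookie.
 */
#if 0
	u32 now = tcp_cookie_time();

	/* Unsigned subtraction also rejects counters "from the future". */
	bool valid = (now - cookie_counter) < MAX_SYNCOOKIE_AGE;
#endif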
 515
 516u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
 517			      u16 *mssp);
 518__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
 519__u32 cookie_init_timestamp(struct request_sock *req);
 520bool cookie_timestamp_decode(struct tcp_options_received *opt);
 521bool cookie_ecn_ok(const struct tcp_options_received *opt,
 522		   const struct net *net, const struct dst_entry *dst);
 523
 524/* From net/ipv6/syncookies.c */
 525int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
 526		      u32 cookie);
 527struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
 528
 529u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
 530			      const struct tcphdr *th, u16 *mssp);
 531__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
 532#endif
 533/* tcp_output.c */
 534
 535u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
 536		     int min_tso_segs);
 537void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
 538			       int nonagle);
 539bool tcp_may_send_now(struct sock *sk);
 540int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
 541int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
 542void tcp_retransmit_timer(struct sock *sk);
 543void tcp_xmit_retransmit_queue(struct sock *);
 544void tcp_simple_retransmit(struct sock *);
 545int tcp_trim_head(struct sock *, struct sk_buff *, u32);
 546int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
 547
 548void tcp_send_probe0(struct sock *);
 549void tcp_send_partial(struct sock *);
 550int tcp_write_wakeup(struct sock *, int mib);
 551void tcp_send_fin(struct sock *sk);
 552void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 553int tcp_send_synack(struct sock *);
 554void tcp_push_one(struct sock *, unsigned int mss_now);
 555void tcp_send_ack(struct sock *sk);
 556void tcp_send_delayed_ack(struct sock *sk);
 557void tcp_send_loss_probe(struct sock *sk);
 558bool tcp_schedule_loss_probe(struct sock *sk);
 559void tcp_skb_collapse_tstamp(struct sk_buff *skb,
 560			     const struct sk_buff *next_skb);
 561
 562/* tcp_input.c */
 563void tcp_resume_early_retransmit(struct sock *sk);
 564void tcp_rearm_rto(struct sock *sk);
 565void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
 566void tcp_reset(struct sock *sk);
 567void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
 568void tcp_fin(struct sock *sk);
 569
 570/* tcp_timer.c */
 571void tcp_init_xmit_timers(struct sock *);
 572static inline void tcp_clear_xmit_timers(struct sock *sk)
 573{
 574	inet_csk_clear_xmit_timers(sk);
 575}
 576
 577unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
 578unsigned int tcp_current_mss(struct sock *sk);
 579
 580/* Bound MSS / TSO packet size with the half of the window */
 581static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
 582{
 583	int cutoff;
 584
 585	/* When peer uses tiny windows, there is no use in packetizing
 586	 * to sub-MSS pieces for the sake of SWS or making sure there
 587	 * are enough packets in the pipe for fast recovery.
 588	 *
 589	 * On the other hand, for extremely large MSS devices, handling
 590	 * smaller than MSS windows in this way does make sense.
 591	 */
 592	if (tp->max_window > TCP_MSS_DEFAULT)
 593		cutoff = (tp->max_window >> 1);
 594	else
 595		cutoff = tp->max_window;
 596
 597	if (cutoff && pktsize > cutoff)
 598		return max_t(int, cutoff, 68U - tp->tcp_header_len);
 599	else
 600		return pktsize;
 601}
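
/* Editorial sketch with concrete numbers: a peer whose max_window is 64KB
 * (above TCP_MSS_DEFAULT) yields a cutoff of about 32KB, so a 48KB TSO
 * packet would be bounded to roughly half the window.
 */
#if 0
	int bounded = tcp_bound_to_half_wnd(tp, 48 * 1024);	/* ~32KB */
#endif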
 602
 603/* tcp.c */
 604void tcp_get_info(struct sock *, struct tcp_info *);
 605
 606/* Read 'sendfile()'-style from a TCP socket */
 607int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 608		  sk_read_actor_t recv_actor);
 609
 610void tcp_initialize_rcv_mss(struct sock *sk);
 611
 612int tcp_mtu_to_mss(struct sock *sk, int pmtu);
 613int tcp_mss_to_mtu(struct sock *sk, int mss);
 614void tcp_mtup_init(struct sock *sk);
 615void tcp_init_buffer_space(struct sock *sk);
 616
 617static inline void tcp_bound_rto(const struct sock *sk)
 618{
 619	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
 620		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
 621}
 622
 623static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
 624{
 625	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
 626}
 627
 628static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
 629{
 630	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
 631			       ntohl(TCP_FLAG_ACK) |
 632			       snd_wnd);
 633}
 634
 635static inline void tcp_fast_path_on(struct tcp_sock *tp)
 636{
 637	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
 638}
 639
 640static inline void tcp_fast_path_check(struct sock *sk)
 641{
 642	struct tcp_sock *tp = tcp_sk(sk);
 643
 644	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
 645	    tp->rcv_wnd &&
 646	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
 647	    !tp->urg_data)
 648		tcp_fast_path_on(tp);
 649}
 650
 651/* Compute the actual rto_min value */
 652static inline u32 tcp_rto_min(struct sock *sk)
 653{
 654	const struct dst_entry *dst = __sk_dst_get(sk);
 655	u32 rto_min = TCP_RTO_MIN;
 656
 657	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
 658		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
 659	return rto_min;
 660}
 661
 662static inline u32 tcp_rto_min_us(struct sock *sk)
 663{
 664	return jiffies_to_usecs(tcp_rto_min(sk));
 665}
 666
 667static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
 668{
 669	return dst_metric_locked(dst, RTAX_CC_ALGO);
 670}
 671
 672/* Minimum RTT in usec. ~0 means not available. */
 673static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
 674{
 675	return minmax_get(&tp->rtt_min);
 676}
 677
 678/* Compute the actual receive window we are currently advertising.
 679 * Rcv_nxt can be after the window if our peer pushes more data
 680 * than the offered window.
 681 */
 682static inline u32 tcp_receive_window(const struct tcp_sock *tp)
 683{
 684	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
 685
 686	if (win < 0)
 687		win = 0;
 688	return (u32) win;
 689}
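
/* Editorial sketch: with rcv_wup = 1000, rcv_wnd = 500 and rcv_nxt = 1200,
 * 300 bytes of the advertised window remain; had the peer pushed on to
 * rcv_nxt = 1600, the negative intermediate result would clamp to 0.
 */
#if 0
	u32 remaining = tcp_receive_window(tp);
#endif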
 690
 691/* Choose a new window, without checks for shrinking, and without
 692 * scaling applied to the result.  The caller does these things
 693 * if necessary.  This is a "raw" window selection.
 694 */
 695u32 __tcp_select_window(struct sock *sk);
 696
 697void tcp_send_window_probe(struct sock *sk);
 698
 699/* TCP timestamps are only 32-bits, this causes a slight
 700 * complication on 64-bit systems since we store a snapshot
 701 * of jiffies in the buffer control blocks below.  We decided
 702 * to use only the low 32-bits of jiffies and hide the ugly
 703 * casts with the following macro.
 704 */
 705#define tcp_time_stamp		((__u32)(jiffies))
 706
 707static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
 708{
 709	return skb->skb_mstamp.stamp_jiffies;
 710}
 711
 712
 713#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
 714
 715#define TCPHDR_FIN 0x01
 716#define TCPHDR_SYN 0x02
 717#define TCPHDR_RST 0x04
 718#define TCPHDR_PSH 0x08
 719#define TCPHDR_ACK 0x10
 720#define TCPHDR_URG 0x20
 721#define TCPHDR_ECE 0x40
 722#define TCPHDR_CWR 0x80
 723
 724#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
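
/* Editorial sketch: tcp_flag_byte() reads the flag octet directly, so masks
 * built from the bits above can classify a header in a single test.
 */
#if 0
	const struct tcphdr *th = tcp_hdr(skb);
	bool is_synack = (tcp_flag_byte(th) & (TCPHDR_SYN | TCPHDR_ACK)) ==
			 (TCPHDR_SYN | TCPHDR_ACK);
#endif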
 725
 726/* This is what the send packet queuing engine uses to pass
 727 * TCP per-packet control information to the transmission code.
 728 * We also store the host-order sequence numbers here.
 729 * This is 44 bytes if IPV6 is enabled.
 730 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 731 */
 732struct tcp_skb_cb {
 733	__u32		seq;		/* Starting sequence number	*/
 734	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
 735	union {
 736		/* Note : tcp_tw_isn is used in input path only
 737		 *	  (isn chosen by tcp_timewait_state_process())
 738		 *
 739		 * 	  tcp_gso_segs/size are used in write queue only,
 740		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
 741		 */
 742		__u32		tcp_tw_isn;
 743		struct {
 744			u16	tcp_gso_segs;
 745			u16	tcp_gso_size;
 746		};
 747	};
 748	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/
 749
 750	__u8		sacked;		/* State flags for SACK/FACK.	*/
 751#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
 752#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
 753#define TCPCB_LOST		0x04	/* SKB is lost			*/
 754#define TCPCB_TAGBITS		0x07	/* All tag bits			*/
 755#define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp)	*/
 756#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
 757#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
 758				TCPCB_REPAIRED)
 759
 760	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
 761	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
 762			eor:1,		/* Is skb MSG_EOR marked? */
 763			unused:6;
 764	__u32		ack_seq;	/* Sequence number ACK'd	*/
 765	union {
 766		struct {
 767			/* There is space for up to 24 bytes */
 768			__u32 in_flight:30,/* Bytes in flight at transmit */
 769			      is_app_limited:1, /* cwnd not fully used? */
 770			      unused:1;
 771			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
 772			__u32 delivered;
 773			/* start of send pipeline phase */
 774			struct skb_mstamp first_tx_mstamp;
 775			/* when we reached the "delivered" count */
 776			struct skb_mstamp delivered_mstamp;
 777		} tx;   /* only used for outgoing skbs */
 778		union {
 779			struct inet_skb_parm	h4;
 780#if IS_ENABLED(CONFIG_IPV6)
 781			struct inet6_skb_parm	h6;
 782#endif
 783		} header;	/* For incoming skbs */
 784	};
 785};
 786
 787#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
 788
 789
 790#if IS_ENABLED(CONFIG_IPV6)
 791/* This is the variant of inet6_iif() that must be used by TCP,
 792 * as TCP moves IP6CB into a different location in skb->cb[]
 793 */
 794static inline int tcp_v6_iif(const struct sk_buff *skb)
 795{
 796	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
 797
 798	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
 799}
 800#endif
 801
 802/* TCP_SKB_CB reference means this cannot be used from early demux */
 803static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
 804{
 805#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
 806	if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
 807	    skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
 808		return true;
 809#endif
 810	return false;
 811}
 812
 813/* Due to TSO, an SKB can be composed of multiple actual
 814 * packets.  To keep these tracked properly, we use this.
 815 */
 816static inline int tcp_skb_pcount(const struct sk_buff *skb)
 817{
 818	return TCP_SKB_CB(skb)->tcp_gso_segs;
 819}
 820
 821static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
 822{
 823	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
 824}
 825
 826static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
 827{
 828	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
 829}
 830
 831/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
 832static inline int tcp_skb_mss(const struct sk_buff *skb)
 833{
 834	return TCP_SKB_CB(skb)->tcp_gso_size;
 835}
 836
 837static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
 838{
 839	return likely(!TCP_SKB_CB(skb)->eor);
 840}
 841
 842/* Events passed to congestion control interface */
 843enum tcp_ca_event {
 844	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
 845	CA_EVENT_CWND_RESTART,	/* congestion window restart */
 846	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
 847	CA_EVENT_LOSS,		/* loss timeout */
 848	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
 849	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
 850	CA_EVENT_DELAYED_ACK,	/* Delayed ack is sent */
 851	CA_EVENT_NON_DELAYED_ACK,
 852};
 853
 854/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
 855enum tcp_ca_ack_event_flags {
 856	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
 857	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
 858	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
 859};
 860
 861/*
 862 * Interface for adding new TCP congestion control handlers
 863 */
 864#define TCP_CA_NAME_MAX	16
 865#define TCP_CA_MAX	128
 866#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)
 867
 868#define TCP_CA_UNSPEC	0
 869
 870/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
 871#define TCP_CONG_NON_RESTRICTED 0x1
 872/* Requires ECN/ECT set on all packets */
 873#define TCP_CONG_NEEDS_ECN	0x2
 874
 875union tcp_cc_info;
 876
 877struct ack_sample {
 878	u32 pkts_acked;
 879	s32 rtt_us;
 880	u32 in_flight;
 881};
 882
 883/* A rate sample measures the number of (original/retransmitted) data
 884 * packets delivered "delivered" over an interval of time "interval_us".
 885 * The tcp_rate.c code fills in the rate sample, and congestion
 886 * control modules that define a cong_control function to run at the end
 887 * of ACK processing can optionally choose to consult this sample when
 888 * setting cwnd and pacing rate.
 889 * A sample is invalid if "delivered" or "interval_us" is negative.
 890 */
 891struct rate_sample {
 892	struct	skb_mstamp prior_mstamp; /* starting timestamp for interval */
 893	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
 894	s32  delivered;		/* number of packets delivered over interval */
 895	long interval_us;	/* time for tp->delivered to incr "delivered" */
 896	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
 897	int  losses;		/* number of packets marked lost upon ACK */
 898	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
 899	u32  prior_in_flight;	/* in flight before this ACK */
 900	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
 901	bool is_retrans;	/* is sample from retransmission? */
 902};
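
/* Editorial sketch: a cong_control hook might turn a valid sample into a
 * rough bandwidth estimate along these lines ("tp" being the tcp_sock
 * under ACK processing).
 */
#if 0
	if (rs->delivered > 0 && rs->interval_us > 0) {
		u64 bw = (u64)rs->delivered * tp->mss_cache * USEC_PER_SEC;

		do_div(bw, (u32)rs->interval_us);	/* bytes per second */
	}
#endif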
 903
 904struct tcp_congestion_ops {
 905	struct list_head	list;
 906	u32 key;
 907	u32 flags;
 908
 909	/* initialize private data (optional) */
 910	void (*init)(struct sock *sk);
 911	/* cleanup private data  (optional) */
 912	void (*release)(struct sock *sk);
 913
 914	/* return slow start threshold (required) */
 915	u32 (*ssthresh)(struct sock *sk);
 916	/* do new cwnd calculation (required) */
 917	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
 918	/* call before changing ca_state (optional) */
 919	void (*set_state)(struct sock *sk, u8 new_state);
 920	/* call when cwnd event occurs (optional) */
 921	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
 922	/* call when ack arrives (optional) */
 923	void (*in_ack_event)(struct sock *sk, u32 flags);
 924	/* new value of cwnd after loss (optional) */
 925	u32  (*undo_cwnd)(struct sock *sk);
 926	/* hook for packet ack accounting (optional) */
 927	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
 928	/* suggest number of segments for each skb to transmit (optional) */
 929	u32 (*tso_segs_goal)(struct sock *sk);
 930	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
 931	u32 (*sndbuf_expand)(struct sock *sk);
 932	/* call when packets are delivered to update cwnd and pacing rate,
 933	 * after all the ca_state processing. (optional)
 934	 */
 935	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
 936	/* get info for inet_diag (optional) */
 937	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
 938			   union tcp_cc_info *info);
 939
 940	char 		name[TCP_CA_NAME_MAX];
 941	struct module 	*owner;
 942};
 943
 944int tcp_register_congestion_control(struct tcp_congestion_ops *type);
 945void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
 946
 947void tcp_assign_congestion_control(struct sock *sk);
 948void tcp_init_congestion_control(struct sock *sk);
 949void tcp_cleanup_congestion_control(struct sock *sk);
 950int tcp_set_default_congestion_control(const char *name);
 951void tcp_get_default_congestion_control(char *name);
 952void tcp_get_available_congestion_control(char *buf, size_t len);
 953void tcp_get_allowed_congestion_control(char *buf, size_t len);
 954int tcp_set_allowed_congestion_control(char *allowed);
 955int tcp_set_congestion_control(struct sock *sk, const char *name);
 956u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
 957void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
 958
 959u32 tcp_reno_ssthresh(struct sock *sk);
 960u32 tcp_reno_undo_cwnd(struct sock *sk);
 961void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
 962extern struct tcp_congestion_ops tcp_reno;
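
/* Editorial sketch: the smallest plausible congestion control module,
 * reusing the exported Reno helpers. "demo" is a hypothetical name; only
 * ssthresh and cong_avoid are required callbacks.
 */
#if 0
static struct tcp_congestion_ops tcp_demo __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.name		= "demo",
	.owner		= THIS_MODULE,
};

static int __init tcp_demo_init(void)
{
	return tcp_register_congestion_control(&tcp_demo);
}
#endif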
 963
 964struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
 965u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
 966#ifdef CONFIG_INET
 967char *tcp_ca_get_name_by_key(u32 key, char *buffer);
 968#else
 969static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
 970{
 971	return NULL;
 972}
 973#endif
 974
 975static inline bool tcp_ca_needs_ecn(const struct sock *sk)
 976{
 977	const struct inet_connection_sock *icsk = inet_csk(sk);
 978
 979	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
 980}
 981
 982static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
 983{
 984	struct inet_connection_sock *icsk = inet_csk(sk);
 985
 986	if (icsk->icsk_ca_ops->set_state)
 987		icsk->icsk_ca_ops->set_state(sk, ca_state);
 988	icsk->icsk_ca_state = ca_state;
 989}
 990
 991static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
 992{
 993	const struct inet_connection_sock *icsk = inet_csk(sk);
 994
 995	if (icsk->icsk_ca_ops->cwnd_event)
 996		icsk->icsk_ca_ops->cwnd_event(sk, event);
 997}
 998
 999/* From tcp_rate.c */
1000void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1001void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1002			    struct rate_sample *rs);
1003void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1004		  struct skb_mstamp *now, struct rate_sample *rs);
1005void tcp_rate_check_app_limited(struct sock *sk);
1006
1007/* These functions determine how the current flow behaves with respect to SACK
1008 * handling. SACK is negotiated with the peer, and therefore it can vary
1009 * between different flows.
1010 *
1011 * tcp_is_sack - SACK enabled
1012 * tcp_is_reno - No SACK
1013 * tcp_is_fack - FACK enabled, implies SACK enabled
1014 */
1015static inline int tcp_is_sack(const struct tcp_sock *tp)
1016{
1017	return tp->rx_opt.sack_ok;
1018}
1019
1020static inline bool tcp_is_reno(const struct tcp_sock *tp)
1021{
1022	return !tcp_is_sack(tp);
1023}
1024
1025static inline bool tcp_is_fack(const struct tcp_sock *tp)
1026{
1027	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
1028}
1029
1030static inline void tcp_enable_fack(struct tcp_sock *tp)
1031{
1032	tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
1033}
1034
1035/* TCP early-retransmit (ER) is similar to but more conservative than
1036 * the thin-dupack feature.  Enable ER only if thin-dupack is disabled.
1037 */
1038static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
1039{
1040	struct net *net = sock_net((struct sock *)tp);
1041
1042	tp->do_early_retrans = sysctl_tcp_early_retrans &&
1043		sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack &&
1044		net->ipv4.sysctl_tcp_reordering == 3;
1045}
1046
1047static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
1048{
1049	tp->do_early_retrans = 0;
1050}
1051
1052static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
1053{
1054	return tp->sacked_out + tp->lost_out;
1055}
1056
1057/* This determines how many packets are "in the network" to the best
1058 * of our knowledge.  In many cases it is conservative, but where
1059 * detailed information is available from the receiver (via SACK
1060 * blocks etc.) we can make more aggressive calculations.
1061 *
1062 * Use this for decisions involving congestion control, use just
1063 * tp->packets_out to determine if the send queue is empty or not.
1064 *
1065 * Read this equation as:
1066 *
1067 *	"Packets sent once on transmission queue" MINUS
1068 *	"Packets left network, but not honestly ACKed yet" PLUS
1069 *	"Packets fast retransmitted"
1070 */
1071static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
1072{
1073	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
1074}
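
/* Editorial sketch of the equation above: packets_out = 10, sacked_out = 2,
 * lost_out = 1 and retrans_out = 1 give left_out = 3 and therefore
 * in_flight = 10 - 3 + 1 = 8 packets.
 */
#if 0
	u32 in_flight = tcp_packets_in_flight(tp);	/* -> 8 */
#endif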
1075
1076#define TCP_INFINITE_SSTHRESH	0x7fffffff
1077
1078static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
1079{
1080	return tp->snd_cwnd < tp->snd_ssthresh;
1081}
1082
1083static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
1084{
1085	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
1086}
1087
1088static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
1089{
1090	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
1091	       (1 << inet_csk(sk)->icsk_ca_state);
1092}
1093
1094/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1095 * The exception is cwnd reduction phase, when cwnd is decreasing towards
1096 * ssthresh.
1097 */
1098static inline __u32 tcp_current_ssthresh(const struct sock *sk)
1099{
1100	const struct tcp_sock *tp = tcp_sk(sk);
1101
1102	if (tcp_in_cwnd_reduction(sk))
1103		return tp->snd_ssthresh;
1104	else
1105		return max(tp->snd_ssthresh,
1106			   ((tp->snd_cwnd >> 1) +
1107			    (tp->snd_cwnd >> 2)));
1108}
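
/* Editorial sketch: outside cwnd reduction, snd_cwnd = 40 with
 * snd_ssthresh = 20 yields (40 >> 1) + (40 >> 2) = 30, i.e. ssthresh is
 * lifted to 3/4 of the current cwnd.
 */
#if 0
	__u32 ssthresh = tcp_current_ssthresh(sk);	/* -> 30 */
#endif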
1109
1110/* Use define here intentionally to get WARN_ON location shown at the caller */
1111#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
1112
1113void tcp_enter_cwr(struct sock *sk);
1114__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
1115
1116/* The maximum number of MSS of available cwnd for which TSO defers
1117 * sending if not using sysctl_tcp_tso_win_divisor.
1118 */
1119static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
1120{
1121	return 3;
1122}
1123
1124/* Returns end sequence number of the receiver's advertised window */
1125static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
1126{
1127	return tp->snd_una + tp->snd_wnd;
1128}
1129
1130/* We follow the spirit of RFC2861 to validate cwnd but implement a more
1131 * flexible approach. The RFC suggests cwnd should not be raised unless
1132 * it was fully used previously. And that's exactly what we do in
1133 * congestion avoidance mode. But in slow start we allow cwnd to grow
1134 * as long as the application has used half the cwnd.
1135 * Example :
1136 *    cwnd is 10 (IW10), but application sends 9 frames.
1137 *    We allow cwnd to reach 18 when all frames are ACKed.
1138 * This check is safe because it's as aggressive as slow start which already
1139 * risks 100% overshoot. The advantage is that we discourage applications
1140 * from sending filler packets or data just to artificially blow up the cwnd
1141 * usage, and allow an application-limited process to probe bw more aggressively.
1142 */
1143static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1144{
1145	const struct tcp_sock *tp = tcp_sk(sk);
1146
1147	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
1148	if (tcp_in_slow_start(tp))
1149		return tp->snd_cwnd < 2 * tp->max_packets_out;
1150
1151	return tp->is_cwnd_limited;
1152}
1153
1154/* Something is really bad: we could not queue an additional packet,
1155 * because the qdisc is full or the receiver sent a zero window.
1156 * We do not want to add fuel to the fire, or abort too early,
1157 * so make sure the timer we arm now is at least 200ms in the future,
1158 * regardless of current icsk_rto value (as it could be ~2ms)
1159 */
1160static inline unsigned long tcp_probe0_base(const struct sock *sk)
1161{
1162	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
1163}
1164
1165/* Variant of inet_csk_rto_backoff() used for zero window probes */
1166static inline unsigned long tcp_probe0_when(const struct sock *sk,
1167					    unsigned long max_when)
1168{
1169	u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;
1170
1171	return (unsigned long)min_t(u64, when, max_when);
1172}
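
/* Editorial sketch (assuming HZ = 1000): an icsk_rto of ~2ms is floored by
 * tcp_probe0_base() at TCP_RTO_MIN (200ms); after three backoffs the probe
 * fires at min(200ms << 3, max_when).
 */
#if 0
	unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
#endif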
1173
1174static inline void tcp_check_probe_timer(struct sock *sk)
1175{
1176	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
1177		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
1178					  tcp_probe0_base(sk), TCP_RTO_MAX);
1179}
1180
1181static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
1182{
1183	tp->snd_wl1 = seq;
1184}
1185
1186static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
1187{
1188	tp->snd_wl1 = seq;
1189}
1190
1191/*
1192 * Calculate(/check) TCP checksum
1193 */
1194static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1195				   __be32 daddr, __wsum base)
1196{
1197	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
1198}
1199
1200static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
1201{
1202	return __skb_checksum_complete(skb);
1203}
1204
1205static inline bool tcp_checksum_complete(struct sk_buff *skb)
1206{
1207	return !skb_csum_unnecessary(skb) &&
1208		__tcp_checksum_complete(skb);
1209}
1210
1211/* Prequeue for VJ style copy to user, combined with checksumming. */
1212
1213static inline void tcp_prequeue_init(struct tcp_sock *tp)
1214{
1215	tp->ucopy.task = NULL;
1216	tp->ucopy.len = 0;
1217	tp->ucopy.memory = 0;
1218	skb_queue_head_init(&tp->ucopy.prequeue);
1219}
1220
1221bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
1222bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
1223int tcp_filter(struct sock *sk, struct sk_buff *skb);
1224
1225#undef STATE_TRACE
1226
1227#ifdef STATE_TRACE
1228static const char *statename[] = {
1229	"Unused","Established","Syn Sent","Syn Recv",
1230	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
1231	"Close Wait","Last ACK","Listen","Closing"
1232};
1233#endif
1234void tcp_set_state(struct sock *sk, int state);
1235
1236void tcp_done(struct sock *sk);
1237
1238int tcp_abort(struct sock *sk, int err);
1239
1240static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
1241{
1242	rx_opt->dsack = 0;
1243	rx_opt->num_sacks = 0;
1244}
1245
1246u32 tcp_default_init_rwnd(u32 mss);
1247void tcp_cwnd_restart(struct sock *sk, s32 delta);
1248
1249static inline void tcp_slow_start_after_idle_check(struct sock *sk)
1250{
1251	struct tcp_sock *tp = tcp_sk(sk);
1252	s32 delta;
1253
1254	if (!sysctl_tcp_slow_start_after_idle || tp->packets_out)
1255		return;
1256	delta = tcp_time_stamp - tp->lsndtime;
1257	if (delta > inet_csk(sk)->icsk_rto)
1258		tcp_cwnd_restart(sk, delta);
1259}
1260
1261/* Determine a window scaling and initial window to offer. */
1262void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
1263			       __u32 *window_clamp, int wscale_ok,
1264			       __u8 *rcv_wscale, __u32 init_rcv_wnd);
1265
1266static inline int tcp_win_from_space(int space)
1267{
1268	return sysctl_tcp_adv_win_scale <= 0 ?
1269		(space >> (-sysctl_tcp_adv_win_scale)) :
1270		space - (space >> sysctl_tcp_adv_win_scale);
1271}
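
/* Editorial sketch: with sysctl_tcp_adv_win_scale = 1, half of the buffer
 * space is advertised (space - space/2); a scale of -1 advertises space/2
 * through the shift branch, the same amount computed the inverse way.
 */
#if 0
	int win = tcp_win_from_space(65536);	/* -> 32768 at scale 1 */
#endif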
1272
1273/* Note: caller must be prepared to deal with negative returns */
1274static inline int tcp_space(const struct sock *sk)
1275{
1276	return tcp_win_from_space(sk->sk_rcvbuf -
1277				  atomic_read(&sk->sk_rmem_alloc));
1278}
1279
1280static inline int tcp_full_space(const struct sock *sk)
1281{
1282	return tcp_win_from_space(sk->sk_rcvbuf);
1283}
1284
1285extern void tcp_openreq_init_rwin(struct request_sock *req,
1286				  const struct sock *sk_listener,
1287				  const struct dst_entry *dst);
1288
1289void tcp_enter_memory_pressure(struct sock *sk);
1290
1291static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1292{
1293	struct net *net = sock_net((struct sock *)tp);
1294
1295	return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
1296}
1297
1298static inline int keepalive_time_when(const struct tcp_sock *tp)
1299{
1300	struct net *net = sock_net((struct sock *)tp);
1301
1302	return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
1303}
1304
1305static inline int keepalive_probes(const struct tcp_sock *tp)
1306{
1307	struct net *net = sock_net((struct sock *)tp);
1308
1309	return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
1310}
1311
1312static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1313{
1314	const struct inet_connection_sock *icsk = &tp->inet_conn;
1315
1316	return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
1317			  tcp_time_stamp - tp->rcv_tstamp);
1318}
1319
1320static inline int tcp_fin_time(const struct sock *sk)
1321{
1322	int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
1323	const int rto = inet_csk(sk)->icsk_rto;
1324
1325	if (fin_timeout < (rto << 2) - (rto >> 1))
1326		fin_timeout = (rto << 2) - (rto >> 1);
1327
1328	return fin_timeout;
1329}
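
/* Editorial sketch (assuming HZ = 1000): with icsk_rto = 200ms, a small
 * configured linger2 is floored at (200 << 2) - (200 >> 1) = 700ms,
 * i.e. 3.5 * RTO.
 */
#if 0
	int fin_timeout = tcp_fin_time(sk);
#endif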
1330
1331static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1332				  int paws_win)
1333{
1334	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1335		return true;
1336	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
1337		return true;
1338	/*
1339	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
1340	 * then following tcp messages have valid values. Ignore 0 value,
1341	 * or else 'negative' tsval might forbid us to accept their packets.
1342	 */
1343	if (!rx_opt->ts_recent)
1344		return true;
1345	return false;
1346}
1347
1348static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1349				   int rst)
1350{
1351	if (tcp_paws_check(rx_opt, 0))
1352		return false;
1353
1354	/* RST segments are not recommended to carry timestamp,
1355	   and, if they do, it is recommended to ignore PAWS because
1356	   "their cleanup function should take precedence over timestamps."
1357	   Certainly, it is a mistake. It is necessary to understand the
1358	   reasons for this constraint to relax it: if the peer reboots, its
1359	   clock may go out-of-sync and half-open connections will not be reset.
1360	   Actually, the problem would not exist if all the implementations
1361	   followed the draft about maintaining clocks across reboots.
1362	   Linux-2.2 DOES NOT!
1363
1364	   However, we can relax time bounds for RST segments to MSL.
1365	 */
1366	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
1367		return false;
1368	return true;
1369}
1370
1371bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
1372			  int mib_idx, u32 *last_oow_ack_time);
1373
1374static inline void tcp_mib_init(struct net *net)
1375{
1376	/* See RFC 2012 */
1377	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
1378	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1379	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1380	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
1381}
1382
1383/* from STCP */
1384static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1385{
1386	tp->lost_skb_hint = NULL;
1387}
1388
1389static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1390{
1391	tcp_clear_retrans_hints_partial(tp);
1392	tp->retransmit_skb_hint = NULL;
1393}
1394
1395union tcp_md5_addr {
1396	struct in_addr  a4;
1397#if IS_ENABLED(CONFIG_IPV6)
1398	struct in6_addr	a6;
1399#endif
1400};
1401
1402/* - key database */
1403struct tcp_md5sig_key {
1404	struct hlist_node	node;
1405	u8			keylen;
1406	u8			family; /* AF_INET or AF_INET6 */
1407	union tcp_md5_addr	addr;
1408	u8			key[TCP_MD5SIG_MAXKEYLEN];
1409	struct rcu_head		rcu;
1410};
1411
1412/* - sock block */
1413struct tcp_md5sig_info {
1414	struct hlist_head	head;
1415	struct rcu_head		rcu;
1416};
1417
1418/* - pseudo header */
1419struct tcp4_pseudohdr {
1420	__be32		saddr;
1421	__be32		daddr;
1422	__u8		pad;
1423	__u8		protocol;
1424	__be16		len;
1425};
1426
1427struct tcp6_pseudohdr {
1428	struct in6_addr	saddr;
1429	struct in6_addr daddr;
1430	__be32		len;
1431	__be32		protocol;	/* including padding */
1432};
1433
1434union tcp_md5sum_block {
1435	struct tcp4_pseudohdr ip4;
1436#if IS_ENABLED(CONFIG_IPV6)
1437	struct tcp6_pseudohdr ip6;
1438#endif
1439};
1440
1441/* - pool: digest algorithm, hash description and scratch buffer */
1442struct tcp_md5sig_pool {
1443	struct ahash_request	*md5_req;
1444	void			*scratch;
1445};
1446
1447/* - functions */
1448int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1449			const struct sock *sk, const struct sk_buff *skb);
1450int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1451		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
1452int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1453		   int family);
1454struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1455					 const struct sock *addr_sk);
1456
1457#ifdef CONFIG_TCP_MD5SIG
1458struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
1459					 const union tcp_md5_addr *addr,
1460					 int family);
1461#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
1462#else
1463static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
1464					 const union tcp_md5_addr *addr,
1465					 int family)
1466{
1467	return NULL;
1468}
1469#define tcp_twsk_md5_key(twsk)	NULL
1470#endif
1471
1472bool tcp_alloc_md5sig_pool(void);
1473
1474struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
1475static inline void tcp_put_md5sig_pool(void)
1476{
1477	local_bh_enable();
1478}
1479
1480int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
1481			  unsigned int header_len);
1482int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1483		     const struct tcp_md5sig_key *key);
1484
1485/* From tcp_fastopen.c */
1486void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1487			    struct tcp_fastopen_cookie *cookie, int *syn_loss,
1488			    unsigned long *last_syn_loss);
1489void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1490			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
1491			    u16 try_exp);
1492struct tcp_fastopen_request {
1493	/* Fast Open cookie. Size 0 means a cookie request */
1494	struct tcp_fastopen_cookie	cookie;
1495	struct msghdr			*data;  /* data in MSG_FASTOPEN */
1496	size_t				size;
1497	int				copied;	/* queued in tcp_connect() */
1498};
1499void tcp_free_fastopen_req(struct tcp_sock *tp);
1500
1501extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
1502int tcp_fastopen_reset_cipher(void *key, unsigned int len);
1503void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
1504struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1505			      struct request_sock *req,
1506			      struct tcp_fastopen_cookie *foc,
1507			      struct dst_entry *dst);
1508void tcp_fastopen_init_key_once(bool publish);
1509#define TCP_FASTOPEN_KEY_LENGTH 16
1510
1511/* Fastopen key context */
1512struct tcp_fastopen_context {
1513	struct crypto_cipher	*tfm;
1514	__u8			key[TCP_FASTOPEN_KEY_LENGTH];
1515	struct rcu_head		rcu;
1516};
1517
1518/* Latencies incurred by various limits for a sender. They are
1519 * chronograph-like stats that are mutually exclusive.
1520 */
1521enum tcp_chrono {
1522	TCP_CHRONO_UNSPEC,
1523	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
1524	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
1525	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
1526	__TCP_CHRONO_MAX,
1527};
1528
1529void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
1530void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
1531
1532/* write queue abstraction */
1533static inline void tcp_write_queue_purge(struct sock *sk)
1534{
1535	struct sk_buff *skb;
1536
1537	tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
1538	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
1539		sk_wmem_free_skb(sk, skb);
1540	sk_mem_reclaim(sk);
1541	tcp_clear_all_retrans_hints(tcp_sk(sk));
1542}
1543
1544static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
1545{
1546	return skb_peek(&sk->sk_write_queue);
1547}
1548
1549static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1550{
1551	return skb_peek_tail(&sk->sk_write_queue);
1552}
1553
1554static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
1555						   const struct sk_buff *skb)
1556{
1557	return skb_queue_next(&sk->sk_write_queue, skb);
1558}
1559
1560static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
1561						   const struct sk_buff *skb)
1562{
1563	return skb_queue_prev(&sk->sk_write_queue, skb);
1564}
1565
1566#define tcp_for_write_queue(skb, sk)					\
1567	skb_queue_walk(&(sk)->sk_write_queue, skb)
1568
1569#define tcp_for_write_queue_from(skb, sk)				\
1570	skb_queue_walk_from(&(sk)->sk_write_queue, skb)
1571
1572#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
1573	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
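
/* Editorial sketch: summing the queued payload with the walk helper above
 * (the caller is assumed to hold the socket lock).
 */
#if 0
	struct sk_buff *skb;
	unsigned int queued = 0;

	tcp_for_write_queue(skb, sk)
		queued += skb->len;
#endif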
1574
1575static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1576{
1577	return sk->sk_send_head;
1578}
1579
1580static inline bool tcp_skb_is_last(const struct sock *sk,
1581				   const struct sk_buff *skb)
1582{
1583	return skb_queue_is_last(&sk->sk_write_queue, skb);
1584}
1585
1586static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
1587{
1588	if (tcp_skb_is_last(sk, skb))
1589		sk->sk_send_head = NULL;
1590	else
1591		sk->sk_send_head = tcp_write_queue_next(sk, skb);
1592}
1593
1594static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
1595{
1596	if (sk->sk_send_head == skb_unlinked) {
1597		sk->sk_send_head = NULL;
1598		tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
1599	}
1600	if (tcp_sk(sk)->highest_sack == skb_unlinked)
1601		tcp_sk(sk)->highest_sack = NULL;
1602}
1603
1604static inline void tcp_init_send_head(struct sock *sk)
1605{
1606	sk->sk_send_head = NULL;
1607}
1608
1609static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1610{
1611	__skb_queue_tail(&sk->sk_write_queue, skb);
1612}
1613
1614static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1615{
1616	__tcp_add_write_queue_tail(sk, skb);
1617
1618	/* Queue it, remembering where we must start sending. */
1619	if (sk->sk_send_head == NULL) {
1620		sk->sk_send_head = skb;
1621		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
1622
1623		if (tcp_sk(sk)->highest_sack == NULL)
1624			tcp_sk(sk)->highest_sack = skb;
1625	}
1626}
1627
1628static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
1629{
1630	__skb_queue_head(&sk->sk_write_queue, skb);
1631}
1632
1633/* Insert buff after skb on the write queue of sk.  */
1634static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
1635						struct sk_buff *buff,
1636						struct sock *sk)
1637{
1638	__skb_queue_after(&sk->sk_write_queue, skb, buff);
1639}
1640
1641/* Insert new before skb on the write queue of sk.  */
1642static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1643						  struct sk_buff *skb,
1644						  struct sock *sk)
1645{
1646	__skb_queue_before(&sk->sk_write_queue, skb, new);
1647
1648	if (sk->sk_send_head == skb)
1649		sk->sk_send_head = new;
1650}
1651
1652static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1653{
1654	__skb_unlink(skb, &sk->sk_write_queue);
1655}
1656
1657static inline bool tcp_write_queue_empty(struct sock *sk)
1658{
1659	return skb_queue_empty(&sk->sk_write_queue);
1660}
1661
1662static inline void tcp_push_pending_frames(struct sock *sk)
1663{
1664	if (tcp_send_head(sk)) {
1665		struct tcp_sock *tp = tcp_sk(sk);
1666
1667		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
1668	}
1669}
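
/* Usage sketch (illustrative): the canonical transmit pattern is to
 * append new data to the write queue and then let TCP decide what can
 * actually go out now (subject to Nagle, cwnd and rwnd).  Real callers
 * such as tcp_sendmsg() also charge the skb to the socket's memory
 * accounting, which is omitted here.
 */
static inline void example_queue_and_push(struct sock *sk, struct sk_buff *skb)
{
	tcp_add_write_queue_tail(sk, skb);
	tcp_push_pending_frames(sk);
}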
1670
1671/* Start sequence of the skb just after the highest skb with SACKed
1672 * bit; valid only if sacked_out > 0 or when the caller has itself
1673 * ensured validity.
1674 */
1675static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1676{
1677	if (!tp->sacked_out)
1678		return tp->snd_una;
1679
1680	if (tp->highest_sack == NULL)
1681		return tp->snd_nxt;
1682
1683	return TCP_SKB_CB(tp->highest_sack)->seq;
1684}
1685
1686static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
1687{
1688	tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
1689						tcp_write_queue_next(sk, skb);
1690}
1691
1692static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
1693{
1694	return tcp_sk(sk)->highest_sack;
1695}
1696
1697static inline void tcp_highest_sack_reset(struct sock *sk)
1698{
1699	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
1700}
1701
1702/* Called when old skb is about to be deleted (to be combined with new skb) */
1703static inline void tcp_highest_sack_combine(struct sock *sk,
1704					    struct sk_buff *old,
1705					    struct sk_buff *new)
1706{
1707	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
1708		tcp_sk(sk)->highest_sack = new;
1709}
1710
1711/* This helper checks whether the socket has IP_TRANSPARENT set */
1712static inline bool inet_sk_transparent(const struct sock *sk)
1713{
1714	switch (sk->sk_state) {
1715	case TCP_TIME_WAIT:
1716		return inet_twsk(sk)->tw_transparent;
1717	case TCP_NEW_SYN_RECV:
1718		return inet_rsk(inet_reqsk(sk))->no_srccheck;
1719	}
1720	return inet_sk(sk)->transparent;
1721}
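
/* Usage note (userspace side, illustrative): IP_TRANSPARENT is the flag
 * TPROXY-style transparent proxies set, and it requires CAP_NET_ADMIN:
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_IP, IP_TRANSPARENT, &one, sizeof(one));
 */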
1722
1723/* Determines whether this is a thin stream (which may suffer from
1724 * increased latency). Used to trigger latency-reducing mechanisms.
1725 */
1726static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
1727{
1728	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
1729}
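
/* Rationale: with fewer than four packets in flight, three duplicate
 * ACKs can never arrive, so classic fast retransmit cannot fire; the
 * thin-stream mechanisms (see the tcp_thin_* sysctls) compensate.
 */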
1730
1731/* /proc */
1732enum tcp_seq_states {
1733	TCP_SEQ_STATE_LISTENING,
1734	TCP_SEQ_STATE_ESTABLISHED,
1735};
1736
1737int tcp_seq_open(struct inode *inode, struct file *file);
1738
1739struct tcp_seq_afinfo {
1740	char				*name;
1741	sa_family_t			family;
1742	const struct file_operations	*seq_fops;
1743	struct seq_operations		seq_ops;
1744};
1745
1746struct tcp_iter_state {
1747	struct seq_net_private	p;
1748	sa_family_t		family;
1749	enum tcp_seq_states	state;
1750	struct sock		*syn_wait_sk;
1751	int			bucket, offset, sbucket, num;
1752	loff_t			last_pos;
1753};
1754
1755int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
1756void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
1757
1758extern struct request_sock_ops tcp_request_sock_ops;
1759extern struct request_sock_ops tcp6_request_sock_ops;
1760
1761void tcp_v4_destroy_sock(struct sock *sk);
1762
1763struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
1764				netdev_features_t features);
1765struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
1766int tcp_gro_complete(struct sk_buff *skb);
1767
1768void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
1769
1770static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
1771{
1772	struct net *net = sock_net((struct sock *)tp);
1773	return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
1774}
1775
1776static inline bool tcp_stream_memory_free(const struct sock *sk)
1777{
1778	const struct tcp_sock *tp = tcp_sk(sk);
1779	u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
1780
1781	return notsent_bytes < tcp_notsent_lowat(tp);
1782}
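
/* Usage sketch (userspace side, illustrative): TCP_NOTSENT_LOWAT caps
 * how much not-yet-sent data may accumulate before the socket stops
 * polling as writable:
 *
 *	int lowat = 16384;
 *	setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT, &lowat, sizeof(lowat));
 */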
1783
1784#ifdef CONFIG_PROC_FS
1785int tcp4_proc_init(void);
1786void tcp4_proc_exit(void);
1787#endif
1788
1789int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
1790int tcp_conn_request(struct request_sock_ops *rsk_ops,
1791		     const struct tcp_request_sock_ops *af_ops,
1792		     struct sock *sk, struct sk_buff *skb);
1793
1794/* TCP af-specific functions */
1795struct tcp_sock_af_ops {
1796#ifdef CONFIG_TCP_MD5SIG
1797	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
1798						const struct sock *addr_sk);
1799	int		(*calc_md5_hash)(char *location,
1800					 const struct tcp_md5sig_key *md5,
1801					 const struct sock *sk,
1802					 const struct sk_buff *skb);
1803	int		(*md5_parse)(struct sock *sk,
1804				     char __user *optval,
1805				     int optlen);
1806#endif
1807};
1808
1809struct tcp_request_sock_ops {
1810	u16 mss_clamp;
1811#ifdef CONFIG_TCP_MD5SIG
1812	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
1813						 const struct sock *addr_sk);
1814	int		(*calc_md5_hash) (char *location,
1815					  const struct tcp_md5sig_key *md5,
1816					  const struct sock *sk,
1817					  const struct sk_buff *skb);
1818#endif
1819	void (*init_req)(struct request_sock *req,
1820			 const struct sock *sk_listener,
1821			 struct sk_buff *skb);
1822#ifdef CONFIG_SYN_COOKIES
1823	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
1824				 __u16 *mss);
1825#endif
1826	struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
1827				       const struct request_sock *req,
1828				       bool *strict);
1829	__u32 (*init_seq)(const struct sk_buff *skb, u32 *tsoff);
1830	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
1831			   struct flowi *fl, struct request_sock *req,
1832			   struct tcp_fastopen_cookie *foc,
1833			   enum tcp_synack_type synack_type);
1834};
1835
1836#ifdef CONFIG_SYN_COOKIES
1837static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
1838					 const struct sock *sk, struct sk_buff *skb,
1839					 __u16 *mss)
1840{
1841	tcp_synq_overflow(sk);
1842	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
1843	return ops->cookie_init_seq(skb, mss);
1844}
1845#else
1846static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
1847					 const struct sock *sk, struct sk_buff *skb,
1848					 __u16 *mss)
1849{
1850	return 0;
1851}
1852#endif
1853
1854int tcpv4_offload_init(void);
1855
1856void tcp_v4_init(void);
1857void tcp_init(void);
1858
1859/* tcp_recovery.c */
1860
1861/* Flags to enable various loss recovery features. See below */
1862extern int sysctl_tcp_recovery;
1863
1864/* Use TCP RACK to detect (some) tail and retransmit losses */
1865#define TCP_RACK_LOST_RETRANS  0x1
1866
1867extern int tcp_rack_mark_lost(struct sock *sk);
1868
1869extern void tcp_rack_advance(struct tcp_sock *tp,
1870			     const struct skb_mstamp *xmit_time, u8 sacked);
1871
1872/*
1873 * Save and compile the IPv4 options and return a pointer to them
1874 */
1875static inline struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
1876{
1877	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
1878	struct ip_options_rcu *dopt = NULL;
1879
1880	if (opt->optlen) {
1881		int opt_size = sizeof(*dopt) + opt->optlen;
1882
1883		dopt = kmalloc(opt_size, GFP_ATOMIC);
1884		if (dopt && __ip_options_echo(&dopt->opt, skb, opt)) {
1885			kfree(dopt);
1886			dopt = NULL;
1887		}
1888	}
1889	return dopt;
1890}
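
/* Note: GFP_ATOMIC above is deliberate; this helper runs in the
 * packet-receive path (softirq context), where sleeping allocations
 * are not allowed.
 */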
1891
1892/* locally generated TCP pure ACKs have skb->truesize == 2
1893 * (see tcp_send_ack() in net/ipv4/tcp_output.c)
1894 * This is much faster than dissecting the packet to find out.
1895 * (Think of GRE encapsulations, IPv4, IPv6, ...)
1896 */
1897static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
1898{
1899	return skb->truesize == 2;
1900}
1901
1902static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
1903{
1904	skb->truesize = 2;
1905}
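
/* Note: a real skb's truesize covers the skb structure plus its data,
 * so it is always far larger than 2; that makes 2 a value which cannot
 * occur naturally and hence a safe sentinel.
 */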
1906
1907static inline int tcp_inq(struct sock *sk)
1908{
1909	struct tcp_sock *tp = tcp_sk(sk);
1910	int answ;
1911
1912	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
1913		answ = 0;
1914	} else if (sock_flag(sk, SOCK_URGINLINE) ||
1915		   !tp->urg_data ||
1916		   before(tp->urg_seq, tp->copied_seq) ||
1917		   !before(tp->urg_seq, tp->rcv_nxt)) {
1918
1919		answ = tp->rcv_nxt - tp->copied_seq;
1920
1921		/* Subtract 1, if FIN was received */
1922		if (answ && sock_flag(sk, SOCK_DONE))
1923			answ--;
1924	} else {
1925		answ = tp->urg_seq - tp->copied_seq;
1926	}
1927
1928	return answ;
1929}
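
/* Usage sketch (userspace side, illustrative): the same count is what
 * applications obtain with the SIOCINQ (a.k.a. FIONREAD) ioctl:
 *
 *	int avail;
 *	ioctl(fd, SIOCINQ, &avail);
 */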
1930
1931int tcp_peek_len(struct socket *sock);
1932
1933static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
1934{
1935	u16 segs_in;
1936
1937	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1938	tp->segs_in += segs_in;
1939	if (skb->len > tcp_hdrlen(skb))
1940		tp->data_segs_in += segs_in;
1941}
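
/* These counters surface to userspace as tcpi_segs_in and
 * tcpi_data_segs_in in the TCP_INFO getsockopt() result.
 */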
1942
1943/*
1944 * TCP listen path runs lockless.
1945 * We forced "struct sock" to be const qualified to make sure
1946 * we don't modify one of its fields by mistake.
1947 * Here, we increment sk_drops which is an atomic_t, so we can safely
1948 * make sock writable again.
1949 */
1950static inline void tcp_listendrop(const struct sock *sk)
1951{
1952	atomic_inc(&((struct sock *)sk)->sk_drops);
1953	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
1954}
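
/* The drop is accounted in the ListenDrops SNMP counter, visible in
 * /proc/net/netstat (e.g. "nstat -az TcpExtListenDrops").
 */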
1955
1956#endif	/* _TCP_H */