/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/indirect_call_wrapper.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock_reuseport.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>
#include <net/mptcp.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/bpf-cgroup.h>
#include <linux/siphash.h>
extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	L1_CACHE_ALIGN(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
#define TCP_MIN_SND_MSS		48
#define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The initial MTU to use for probing */
#define TCP_BASE_MSS		1024

/* probing interval, default to 10 minutes as per RFC4821 */
#define TCP_PROBE_INTERVAL	600

/* MTU probing stops once the gap between the probe search bounds is below this many bytes */
#define TCP_PROBE_THRESHOLD	8

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* Maximal number of window scale according to RFC1323 */
#define TCP_MAX_WSCALE		14U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 6	/* This is how many retries are done
				 * when actively opening a connection.
				 * RFC1122 says the minimum retry MUST
				 * be at least 180secs.  Nevertheless
				 * this value corresponds to 63secs
				 * of retransmission with the
				 * current initial RTO.
				 */

#define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
				 * when passively opening a connection.
				 * This corresponds to 31secs of
				 * retransmission with the current
				 * initial RTO.
				 */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */
#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */
#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * to (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to that
					 * provided by the timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * the minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_MPTCP		30	/* Multipath TCP (RFC6824) */
#define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
#define TCPOPT_EXP		254	/* Experimental */
/* Magic number carried after the option kind and length when sharing the
 * TCP experimental option space. See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC	0xF989
#define TCPOPT_SMC_MAGIC	0xE2D4C3D9

/*
 *     TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18
#define TCPOLEN_FASTOPEN_BASE	2
#define TCPOLEN_EXP_FASTOPEN_BASE	4
#define TCPOLEN_EXP_SMC_BASE	6

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
#define TCPOLEN_EXP_SMC_BASE_ALIGNED	8

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per rfc6928 */
#define TCP_INIT_CWND		10

/* Bit Flags for sysctl_tcp_fastopen */
#define	TFO_CLIENT_ENABLE	1
#define	TFO_SERVER_ENABLE	2
#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define	TFO_SERVER_COOKIE_NOT_REQD	0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option.
 */
#define	TFO_SERVER_WO_SOCKOPT1	0x400

/* sysctl variables for tcp */
extern int sysctl_tcp_max_orphans;
extern long sysctl_tcp_mem[3];

#define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
#define TCP_RACK_STATIC_REO_WND  0x2 /* Use static RACK reo wnd */
#define TCP_RACK_NO_DUPTHRESH    0x4 /* Do not use DUPACK threshold in RACK */

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern unsigned long tcp_memory_pressure;

/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
		return true;

	return READ_ONCE(tcp_memory_pressure);
}
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1) 	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
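
/* Illustrative example, not part of the upstream header: the names below
 * are hypothetical. Because the subtraction is done modulo 2^32 before the
 * signed cast, a sequence number just past the 0xffffffff wrap still
 * compares as "after" an older one.
 */
static inline void tcp_seq_wrap_example(void)
{
	__u32 old_seq = 0xfffffff0U;
	__u32 new_seq = 0x00000010U;	/* 0x20 bytes later, post wrap */

	WARN_ON(!before(old_seq, new_seq));	/* (s32)(old - new) < 0 */
	WARN_ON(!after(new_seq, old_seq));
	WARN_ON(!between(0xfffffff8U, old_seq, new_seq));
}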

static inline bool tcp_out_of_memory(struct sock *sk)
{
	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
		return true;
	return false;
}

void sk_forced_mem_schedule(struct sock *sk, int size);

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
	int orphans = percpu_counter_read_positive(ocp);

	if (orphans << shift > sysctl_tcp_max_orphans) {
		orphans = percpu_counter_sum_positive(ocp);
		if (orphans << shift > sysctl_tcp_max_orphans)
			return true;
	}
	return false;
}

bool tcp_check_oom(struct sock *sk, int shift);

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

void tcp_tasklet_init(void);

int tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb);
int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
		 int flags);
int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
			size_t size, int flags);
struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
			       struct page *page, int offset, size_t *size);
ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
			 size_t size, int flags);
int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
	      int size_goal);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags);

void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato   = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4
#define	TCP_ECN_SEEN		8

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};

enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
					      struct sk_buff *skb,
					      const struct tcphdr *th);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req, bool fastopen,
			   bool *lost_race);
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void __tcp_close(struct sock *sk, long timeout);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
__poll_t tcp_poll(struct file *file, struct socket *sock,
		  struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen);
bool tcp_bpf_bypass_getsockopt(int level, int optname);
int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		   unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
		int flags, int *addr_len);
int tcp_set_rcvlowat(struct sock *sk, int val);
int tcp_set_window_clamp(struct sock *sk, int val);
void tcp_update_recv_tstamps(struct sk_buff *skb,
			     struct scm_timestamping_internal *tss);
void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
			struct scm_timestamping_internal *tss);
void tcp_data_ready(struct sock *sk);
#ifdef CONFIG_MMU
int tcp_mmap(struct file *file, struct socket *sock,
	     struct vm_area_struct *vma);
#endif
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx,
		       int estab, struct tcp_fastopen_cookie *foc);
const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);

/*
 *	BPF SKB-less helpers
 */
u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
			  const struct tcp_request_sock_ops *af_ops,
			  struct sock *sk, struct tcphdr *th);
/*
 *	TCP v4 functions exported for the inet6 API
 */

void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
enum tcp_synack_type {
	TCP_SYNACK_NORMAL,
	TCP_SYNACK_FASTOPEN,
	TCP_SYNACK_COOKIE,
};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
				struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				enum tcp_synack_type synack_type,
				struct sk_buff *syn_skb);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
				 struct dst_entry *dst, u32 tsoff);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
					    struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES

/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash input and partially encoded into
 * the cookie value.  A cookie is only validated further if the delta
 * between the current counter value and the encoded one is less than this,
 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 * the counter advances immediately after a cookie is generated).
 */
#define MAX_SYNCOOKIE_AGE	2
#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)

/* syncookies: remember time of last synqueue overflow
 * But do not dirty this field too often (once per second is enough)
 * It is racy as we do not hold a lock, but race is very minor.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			if (!time_between32(now, last_overflow,
					    last_overflow + HZ))
				WRITE_ONCE(reuse->synq_overflow_ts, now);
			return;
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
	if (!time_between32(now, last_overflow, last_overflow + HZ))
		WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now);
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			return !time_between32(now, last_overflow - HZ,
					       last_overflow +
					       TCP_SYNCOOKIE_VALID);
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);

	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
	 * then we're under synflood. However, we have to use
	 * 'last_overflow - HZ' as lower bound. That's because a concurrent
	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
	 * jiffies but before we store .ts_recent_stamp into last_overflow,
	 * which could lead to rejecting a valid syncookie.
	 */
	return !time_between32(now, last_overflow - HZ,
			       last_overflow + TCP_SYNCOOKIE_VALID);
}

static inline u32 tcp_cookie_time(void)
{
	u64 val = get_jiffies_64();

	do_div(val, TCP_SYNCOOKIE_PERIOD);
	return val;
}
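
/* Illustrative example, not part of the upstream header (the helper name
 * is hypothetical): tcp_cookie_time() counts 60-second periods, so a
 * cookie minted at counter value C stays acceptable while the current
 * counter is within MAX_SYNCOOKIE_AGE of C. The unsigned subtraction is
 * modulo 2^32, so the comparison survives counter wraparound.
 */
static inline bool tcp_cookie_counter_ok_example(u32 minted, u32 now)
{
	return (now - minted) < MAX_SYNCOOKIE_AGE;
}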
 
u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
			     struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
		   const struct net *net, const struct dst_entry *dst);

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */

void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
enum tcp_queue {
	TCP_FRAG_IN_WRITE_QUEUE,
	TCP_FRAG_IN_RTX_QUEUE,
};
int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
		 struct sk_buff *skb, u32 len,
		 unsigned int mss_now, gfp_t gfp);

void tcp_send_probe0(struct sock *);
void tcp_send_partial(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
			     const struct sk_buff *next_skb);

/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk, struct sk_buff *skb);
void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
void tcp_fin(struct sock *sk);

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
		__sock_put(sk);

	if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
		__sock_put(sk);

	inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window > TCP_MSS_DEFAULT)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
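
/* Illustrative example, not part of the upstream header (the helper name
 * is hypothetical): with a 64KB peer window, which is larger than
 * TCP_MSS_DEFAULT, the cutoff is half the window, so an oversized
 * 64000-byte pktsize is clamped down to 32767 bytes.
 */
static inline void tcp_bound_to_half_wnd_example(struct tcp_sock *tp)
{
	tp->max_window = 65535;
	tp->tcp_header_len = sizeof(struct tcphdr);

	WARN_ON(tcp_bound_to_half_wnd(tp, 64000) != 32767);
}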

/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	/* mptcp hooks are only on the slow path */
	if (sk_is_mptcp((struct sock *)tp))
		return;

	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = inet_csk(sk)->icsk_rto_min;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

static inline u32 tcp_rto_min_us(struct sock *sk)
{
	return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
	return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
	return minmax_get(&tp->rtt_min);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
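
/* Illustrative example, not part of the upstream header (the helper name
 * is hypothetical): with rcv_wup = 1000 and rcv_wnd = 500, a peer that
 * pushed data up to rcv_nxt = 1600 has overrun the offered window, so
 * the advertised window clamps to 0.
 */
static inline void tcp_receive_window_example(struct tcp_sock *tp)
{
	tp->rcv_wup = 1000;
	tp->rcv_wnd = 500;
	tp->rcv_nxt = 1600;

	WARN_ON(tcp_receive_window(tp) != 0);
}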

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP uses 32bit jiffies to save some space.
 * Note that this is different from tcp_time_stamp, which
 * historically was the same until linux-4.13.
 */
#define tcp_jiffies32 ((u32)jiffies)

/*
 * Deliver a 32bit value for TCP timestamp option (RFC 7323)
 * It is no longer tied to jiffies, but to 1 ms clock.
 * Note: double check if you want to use tcp_jiffies32 instead of this.
 */
#define TCP_TS_HZ	1000

static inline u64 tcp_clock_ns(void)
{
	return ktime_get_ns();
}

static inline u64 tcp_clock_us(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
}

/* This should only be used in contexts where tp->tcp_mstamp is up to date */
static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
{
	return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
}

/* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
static inline u32 tcp_ns_to_ts(u64 ns)
{
	return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
}

/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
static inline u32 tcp_time_stamp_raw(void)
{
	return tcp_ns_to_ts(tcp_clock_ns());
}

void tcp_mstamp_refresh(struct tcp_sock *tp);

static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
{
	return max_t(s64, t1 - t0, 0);
}

static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
	return tcp_ns_to_ts(skb->skb_mstamp_ns);
}

/* provide the departure time in us unit */
static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
{
	return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
}
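
/* Illustrative example, not part of the upstream header (the helper name
 * is hypothetical): the same nanosecond clock feeds both units; 2 ms
 * worth of nanoseconds is 2000 us for the microsecond helpers and, with
 * TCP_TS_HZ == 1000, a TSval delta of 2.
 */
static inline void tcp_ts_units_example(void)
{
	u64 ns = 2 * NSEC_PER_MSEC;

	WARN_ON(div_u64(ns, NSEC_PER_USEC) != 2000);	/* tcp_clock_us() math */
	WARN_ON(tcp_ns_to_ts(ns) != 2);			/* 1 ms TSval ticks */
}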

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
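
/* Illustrative example, not part of the upstream header (the helper name
 * is hypothetical): tcp_flag_byte() reads the raw flag octet, so an
 * ECN-setup SYN (SYN with both ECE and CWR set, per RFC 3168) can be
 * matched with the TCPHDR_SYN_ECN mask. A complete check would also
 * insist that the ACK bit is clear.
 */
static inline bool tcp_hdr_is_ecn_setup_syn_example(const struct tcphdr *th)
{
	return (tcp_flag_byte(th) & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN;
}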

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	union {
		/* Note : tcp_tw_isn is used in input path only
		 *	  (isn chosen by tcp_timewait_state_process())
		 *
		 *	  tcp_gso_segs/size are used in write queue only,
		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
		 */
		__u32		tcp_tw_isn;
		struct {
			u16	tcp_gso_segs;
			u16	tcp_gso_size;
		};
	};
	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/

	__u8		sacked;		/* State flags for SACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/
#define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp_ns)	*/
#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
				TCPCB_REPAIRED)

	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
			eor:1,		/* Is skb MSG_EOR marked? */
			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
			unused:5;
	__u32		ack_seq;	/* Sequence number ACK'd	*/
	union {
		struct {
			/* There is space for up to 24 bytes */
			__u32 in_flight:30,/* Bytes in flight at transmit */
			      is_app_limited:1, /* cwnd not fully used? */
			      unused:1;
			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
			__u32 delivered;
			/* start of send pipeline phase */
			u64 first_tx_mstamp;
			/* when we reached the "delivered" count */
			u64 delivered_mstamp;
		} tx;   /* only used for outgoing skbs */
		union {
			struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_skb_parm	h6;
#endif
		} header;	/* For incoming skbs */
	};
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
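
/* Illustrative example, not part of the upstream header (the helper name
 * is hypothetical): the usual accessor pattern through the TCP_SKB_CB()
 * macro; the cb[] area is only meaningful once TCP owns the skb.
 */
static inline u32 tcp_skb_seq_span_example(const struct sk_buff *skb)
{
	/* sequence space covered by this skb, including SYN/FIN flags */
	return TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
}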

extern const struct inet_connection_sock_af_ops ipv4_specific;

#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[]
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->header.h6.iif;
}

static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);

	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}

/* A TCP_SKB_CB reference means this cannot be used from early demux */
static inline int tcp_v6_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
		return TCP_SKB_CB(skb)->header.h6.iif;
#endif
	return 0;
}

extern const struct inet_connection_sock_af_ops ipv6_specific;

INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb));

#endif

/* A TCP_SKB_CB reference means this cannot be used from early demux */
static inline int tcp_v4_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
		return TCP_SKB_CB(skb)->header.h4.iif;
#endif
	return 0;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_size;
}

static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
	return likely(!TCP_SKB_CB(skb)->eor);
}

static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
					const struct sk_buff *from)
{
	return likely(tcp_skb_can_collapse_to(to) &&
		      mptcp_skb_can_collapse(to, from));
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CA_UNSPEC	0

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED 0x1
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN	0x2
#define TCP_CONG_MASK	(TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)

union tcp_cc_info;

struct ack_sample {
	u32 pkts_acked;
	s32 rtt_us;
	u32 in_flight;
};

/* A rate sample measures the number of (original/retransmitted) data
 * packets delivered "delivered" over an interval of time "interval_us".
 * The tcp_rate.c code fills in the rate sample, and congestion
 * control modules that define a cong_control function to run at the end
 * of ACK processing can optionally choose to consult this sample when
 * setting cwnd and pacing rate.
 * A sample is invalid if "delivered" or "interval_us" is negative.
 */
struct rate_sample {
	u64  prior_mstamp;	/* starting timestamp for interval */
	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
	s32  delivered;		/* number of packets delivered over interval */
	long interval_us;	/* time for tp->delivered to incr "delivered" */
	u32 snd_interval_us;	/* snd interval for delivered packets */
	u32 rcv_interval_us;	/* rcv interval for delivered packets */
	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
	int  losses;		/* number of packets marked lost upon ACK */
	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
	u32  prior_in_flight;	/* in flight before this ACK */
	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
	bool is_retrans;	/* is sample from retransmission? */
	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
};

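/* Illustrative sketch, not part of the upstream header (the helper name
 * is hypothetical): how a cong_control-style module might turn a
 * rate_sample into a delivery rate in packets per second, honouring the
 * "negative means invalid" convention described above.
 */
static inline u64 tcp_rate_sample_bw_example(const struct rate_sample *rs)
{
	if (rs->delivered < 0 || rs->interval_us <= 0)
		return 0;				/* invalid sample */

	return div64_u64((u64)rs->delivered * USEC_PER_SEC,
			 (u64)rs->interval_us);
}
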
struct tcp_congestion_ops {
/* fast path fields are put first to fill one cache line */

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);

	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);

	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);

	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);

	/* call when ack arrives (optional) */
	void (*in_ack_event)(struct sock *sk, u32 flags);

	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);

	/* override sysctl_tcp_min_tso_segs */
	u32 (*min_tso_segs)(struct sock *sk);

	/* call when packets are delivered to update cwnd and pacing rate,
	 * after all the ca_state processing. (optional)
	 */
	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);

	/* new value of cwnd after loss (required) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
	u32 (*sndbuf_expand)(struct sock *sk);

/* control/slow paths put last */
	/* get info for inet_diag (optional) */
	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info);

	char			name[TCP_CA_NAME_MAX];
	struct module		*owner;
	struct list_head	list;
	u32			key;
	u32			flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data  (optional) */
	void (*release)(struct sock *sk);
} ____cacheline_aligned_in_smp;

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(struct net *net, const char *name);
void tcp_get_default_congestion_control(struct net *net, char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
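
/* Illustrative sketch, not part of the upstream header: the minimal shape
 * of a congestion control module built on tcp_congestion_ops. The
 * "example" name is a placeholder, and the reno helpers above are wired
 * in just to satisfy the required hooks.
 */
#if 0	/* example only */
static struct tcp_congestion_ops tcp_example __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,	/* required */
	.cong_avoid	= tcp_reno_cong_avoid,	/* required */
	.undo_cwnd	= tcp_reno_undo_cwnd,	/* required */
	.name		= "example",
	.owner		= THIS_MODULE,
};

static int __init tcp_example_register(void)
{
	return tcp_register_congestion_control(&tcp_example);
}
module_init(tcp_example_register);
#endif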

struct tcp_congestion_ops *tcp_ca_find(const char *name);
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	return NULL;
}
#endif

static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* From tcp_rate.c */
void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
			    struct rate_sample *rs);
void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
		  bool is_sack_reneg, struct rate_sample *rs);
void tcp_rate_check_app_limited(struct sock *sk);

/* These functions determine how the current flow behaves with respect to
 * SACK handling. SACK is negotiated with the peer, and therefore it can
 * vary between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return likely(tp->rx_opt.sack_ok);
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
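
/* Illustrative example, not part of the upstream header (the helper name
 * is hypothetical): with packets_out = 10, sacked_out = 3, lost_out = 2
 * and retrans_out = 1, tcp_left_out() is 5 and tcp_packets_in_flight()
 * reports 10 - 5 + 1 = 6 segments in the network.
 */
static inline void tcp_in_flight_example(struct tcp_sock *tp)
{
	tp->packets_out = 10;
	tp->sacked_out = 3;
	tp->lost_out = 2;
	tp->retrans_out = 1;

	WARN_ON(tcp_packets_in_flight(tp) != 6);
}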

#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
	return tp->snd_cwnd < tp->snd_ssthresh;
}

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
	       (1 << inet_csk(sk)->icsk_ca_state);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is cwnd reduction phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_in_cwnd_reduction(sk))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

void tcp_enter_cwr(struct sock *sk);
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	return 3;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}

/* We follow the spirit of RFC2861 to validate cwnd but implement a more
 * flexible approach. The RFC suggests cwnd should not be raised unless
 * it was fully used previously. And that's exactly what we do in
 * congestion avoidance mode. But in slow start we allow cwnd to grow
 * as long as the application has used half the cwnd.
 * Example :
 *    cwnd is 10 (IW10), but application sends 9 frames.
 *    We allow cwnd to reach 18 when all frames are ACKed.
 * This check is safe because it's as aggressive as slow start which already
 * risks 100% overshoot. The advantage is that we discourage applications
 * from sending filler packets or extra data just to artificially blow up
 * cwnd usage, and allow an application-limited process to probe bw more
 * aggressively.
 */
static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
	if (tcp_in_slow_start(tp))
		return tp->snd_cwnd < 2 * tp->max_packets_out;

	return tp->is_cwnd_limited;
}

/* BBR congestion control needs pacing.
 * Same remark for SO_MAX_PACING_RATE.
 * sch_fq packet scheduler is efficiently handling pacing,
 * but is not always installed/used.
 * Return true if TCP stack should pace packets itself.
 */
static inline bool tcp_needs_internal_pacing(const struct sock *sk)
{
	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}

/* Estimates in how many jiffies next packet for this flow can be sent.
 * Scheduling a retransmit timer too early would be silly.
 */
static inline unsigned long tcp_pacing_delay(const struct sock *sk)
{
	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;

	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
}

static inline void tcp_reset_xmit_timer(struct sock *sk,
					const int what,
					unsigned long when,
					const unsigned long max_when)
{
	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
				  max_when);
}

/* Something is really bad, we could not queue an additional packet,
 * because qdisc is full or receiver sent a 0 window, or we are paced.
 * We do not want to add fuel to the fire, or abort too early,
 * so make sure the timer we arm now is at least 200ms in the future,
 * regardless of current icsk_rto value (as it could be ~2ms)
 */
static inline unsigned long tcp_probe0_base(const struct sock *sk)
{
	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
}

/* Variant of inet_csk_rto_backoff() used for zero window probes */
static inline unsigned long tcp_probe0_when(const struct sock *sk,
					    unsigned long max_when)
{
	u8 backoff = min_t(u8, ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1,
			   inet_csk(sk)->icsk_backoff);
	u64 when = (u64)tcp_probe0_base(sk) << backoff;

	return (unsigned long)min_t(u64, when, max_when);
}
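
/* Illustrative example, not part of the upstream header (the helper name
 * is hypothetical): with icsk_backoff = 3 and the 200ms TCP_RTO_MIN base,
 * the zero-window probe would be armed base << 3 = 8 * 200ms = 1.6s out,
 * subject to the ilog2() cap and the min_t() clamp against max_when.
 */
static inline u64 tcp_probe0_backoff_example(void)
{
	return (u64)TCP_RTO_MIN << 3;	/* 1.6 seconds, in jiffies */
}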

static inline void tcp_check_probe_timer(struct sock *sk)
{
	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
				     tcp_probe0_base(sk), TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__skb_checksum_complete(skb);
}

bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
int tcp_filter(struct sock *sk, struct sk_buff *skb);
void tcp_set_state(struct sock *sk, int state);
void tcp_done(struct sock *sk);
int tcp_abort(struct sock *sk, int err);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

void tcp_cwnd_restart(struct sock *sk, s32 delta);

static inline void tcp_slow_start_after_idle_check(struct sock *sk)
{
	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta;

	if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out ||
	    ca_ops->cong_control)
		return;
	delta = tcp_jiffies32 - tp->lsndtime;
	if (delta > inet_csk(sk)->icsk_rto)
		tcp_cwnd_restart(sk, delta);
}

/* Determine a window scaling and initial window to offer. */
void tcp_select_initial_window(const struct sock *sk, int __space,
			       __u32 mss, __u32 *rcv_wnd,
			       __u32 *window_clamp, int wscale_ok,
			       __u8 *rcv_wscale, __u32 init_rcv_wnd);

static inline int tcp_win_from_space(const struct sock *sk, int space)
{
	int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;

	return tcp_adv_win_scale <= 0 ?
		(space>>(-tcp_adv_win_scale)) :
		space - (space>>tcp_adv_win_scale);
}
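
/* Illustrative sketch, not part of the upstream header (the helper name
 * is hypothetical): the same arithmetic as tcp_win_from_space() with the
 * scale passed in directly. For example, 65536 bytes of space advertise
 * 32768 with the default tcp_adv_win_scale of 1, and 16384 with -2.
 */
static inline int tcp_win_from_space_demo(int space, int adv_win_scale)
{
	return adv_win_scale <= 0 ?
		(space >> (-adv_win_scale)) :
		space - (space >> adv_win_scale);
}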

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
				  READ_ONCE(sk->sk_backlog.len) -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}

void tcp_cleanup_rbuf(struct sock *sk, int copied);

/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
 * If 87.5% (7/8) of the space has been consumed, we want to override
 * SO_RCVLOWAT constraint, since we are receiving skbs with too small
 * len/truesize ratio.
 */
static inline bool tcp_rmem_pressure(const struct sock *sk)
{
	int rcvbuf, threshold;

	if (tcp_under_memory_pressure(sk))
		return true;

	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
	threshold = rcvbuf - (rcvbuf >> 3);

	return atomic_read(&sk->sk_rmem_alloc) > threshold;
}

static inline bool tcp_epollin_ready(const struct sock *sk, int target)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);

	if (avail <= 0)
		return false;

	return (avail >= target) || tcp_rmem_pressure(sk) ||
	       (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
}

extern void tcp_openreq_init_rwin(struct request_sock *req,
				  const struct sock *sk_listener,
				  const struct dst_entry *dst);

void tcp_enter_memory_pressure(struct sock *sk);
void tcp_leave_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
}
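
/* Illustrative example, not part of the upstream header (the helper name
 * is hypothetical): worst-case keepalive detection time in seconds. With
 * the defaults above (7200s idle time, 9 probes every 75s), a dead peer
 * is declared gone after roughly 7200 + 9 * 75 = 7875 seconds.
 */
static inline unsigned int tcp_keepalive_worst_case_example(const struct tcp_sock *tp)
{
	/* the helpers return jiffies-based values, so scale down by HZ */
	return keepalive_time_when(tp) / HZ +
	       keepalive_probes(tp) * (keepalive_intvl_when(tp) / HZ);
}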

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
			  tcp_jiffies32 - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}

static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
				  int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return true;
	if (unlikely(!time_before32(ktime_get_seconds(),
				    rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
		return true;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0.
	 * Subsequent tcp messages then carry valid values. Ignore the 0
	 * value, or else a 'negative' tsval might forbid us from accepting
	 * their packets.
	 */
	if (!rx_opt->ts_recent)
		return true;
	return false;
}

static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
				   int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return false;

	/* RST segments are not recommended to carry timestamp,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, this is a mistake. It is necessary to understand the
	   reasons for this constraint before relaxing it: if the peer
	   reboots, its clock may go out-of-sync and half-open connections
	   will not be reset.
	   Actually, the problem would not exist if all implementations
	   followed the draft about maintaining clocks across reboots.
	   Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && !time_before32(ktime_get_seconds(),
				  rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
		return false;
	return true;
}

bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
			  int mib_idx, u32 *last_oow_ack_time);

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
}
1553
1554/* from STCP */
1555static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1556{
1557	tp->lost_skb_hint = NULL;
1558}
1559
1560static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1561{
1562	tcp_clear_retrans_hints_partial(tp);
1563	tp->retransmit_skb_hint = NULL;
1564}
1565
1566union tcp_md5_addr {
1567	struct in_addr  a4;
1568#if IS_ENABLED(CONFIG_IPV6)
1569	struct in6_addr	a6;
1570#endif
1571};
1572
1573/* - key database */
1574struct tcp_md5sig_key {
1575	struct hlist_node	node;
1576	u8			keylen;
1577	u8			family; /* AF_INET or AF_INET6 */
1578	u8			prefixlen;
1579	union tcp_md5_addr	addr;
1580	int			l3index; /* set if key added with L3 scope */
1581	u8			key[TCP_MD5SIG_MAXKEYLEN];
1582	struct rcu_head		rcu;
1583};
1584
1585/* - sock block */
1586struct tcp_md5sig_info {
1587	struct hlist_head	head;
1588	struct rcu_head		rcu;
1589};
1590
1591/* - pseudo header */
1592struct tcp4_pseudohdr {
1593	__be32		saddr;
1594	__be32		daddr;
1595	__u8		pad;
1596	__u8		protocol;
1597	__be16		len;
1598};
1599
1600struct tcp6_pseudohdr {
1601	struct in6_addr	saddr;
1602	struct in6_addr daddr;
1603	__be32		len;
1604	__be32		protocol;	/* including padding */
1605};
1606
1607union tcp_md5sum_block {
1608	struct tcp4_pseudohdr ip4;
1609#if IS_ENABLED(CONFIG_IPV6)
1610	struct tcp6_pseudohdr ip6;
1611#endif
1612};
1613
1614/* - pool: digest algorithm, hash description and scratch buffer */
1615struct tcp_md5sig_pool {
1616	struct ahash_request	*md5_req;
1617	void			*scratch;
1618};
1619
1620/* - functions */
1621int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1622			const struct sock *sk, const struct sk_buff *skb);
1623int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1624		   int family, u8 prefixlen, int l3index,
1625		   const u8 *newkey, u8 newkeylen, gfp_t gfp);
1626int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1627		   int family, u8 prefixlen, int l3index);
1628struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1629					 const struct sock *addr_sk);
1630
1631#ifdef CONFIG_TCP_MD5SIG
1632#include <linux/jump_label.h>
1633extern struct static_key_false tcp_md5_needed;
1634struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1635					   const union tcp_md5_addr *addr,
1636					   int family);
1637static inline struct tcp_md5sig_key *
1638tcp_md5_do_lookup(const struct sock *sk, int l3index,
1639		  const union tcp_md5_addr *addr, int family)
1640{
1641	if (!static_branch_unlikely(&tcp_md5_needed))
1642		return NULL;
1643	return __tcp_md5_do_lookup(sk, l3index, addr, family);
1644}
1645
1646#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
1647#else
1648static inline struct tcp_md5sig_key *
1649tcp_md5_do_lookup(const struct sock *sk, int l3index,
1650		  const union tcp_md5_addr *addr, int family)
1651{
1652	return NULL;
1653}
1654#define tcp_twsk_md5_key(twsk)	NULL
1655#endif
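
/* Editor's note: tcp_md5_do_lookup() above is guarded by a static branch so
 * that kernels which never add an MD5 key pay only one patched-out jump per
 * lookup.  A minimal sketch of the same pattern, with hypothetical names:
 */
#if 0	/* illustrative only, not compiled */
DEFINE_STATIC_KEY_FALSE(example_feature_needed);

static inline struct example_state *example_lookup(const struct sock *sk)
{
	if (!static_branch_unlikely(&example_feature_needed))
		return NULL;		/* fast path: a NOP until enabled */
	return __example_lookup(sk);	/* slow path, feature in use */
}
/* Enable once, e.g. when the first key is installed:
 *	static_branch_inc(&example_feature_needed);
 */
#endif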
1656
1657bool tcp_alloc_md5sig_pool(void);
1658
1659struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
1660static inline void tcp_put_md5sig_pool(void)
1661{
1662	local_bh_enable();
1663}
1664
1665int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
1666			  unsigned int header_len);
1667int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1668		     const struct tcp_md5sig_key *key);
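
/* Editor's sketch (not part of the original header): the usual calling
 * pattern for the MD5 pool.  tcp_get_md5sig_pool() returns a per-CPU pool
 * with BH disabled, so the caller must not sleep before the matching
 * tcp_put_md5sig_pool().  Error handling is heavily simplified and the
 * helper name is hypothetical.
 */
static inline int tcp_example_md5_hash(const struct tcp_md5sig_key *key,
				       const struct sk_buff *skb,
				       unsigned int header_len)
{
	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
	int err = 1;

	if (!hp)
		return 1;
	if (!tcp_md5_hash_skb_data(hp, skb, header_len) &&
	    !tcp_md5_hash_key(hp, key))
		err = 0;
	tcp_put_md5sig_pool();	/* re-enables BH */
	return err;
}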
1669
1670/* From tcp_fastopen.c */
1671void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1672			    struct tcp_fastopen_cookie *cookie);
1673void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1674			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
1675			    u16 try_exp);
1676struct tcp_fastopen_request {
1677	/* Fast Open cookie. Size 0 means a cookie request */
1678	struct tcp_fastopen_cookie	cookie;
1679	struct msghdr			*data;  /* data in MSG_FASTOPEN */
1680	size_t				size;
1681	int				copied;	/* queued in tcp_connect() */
1682	struct ubuf_info		*uarg;
1683};
1684void tcp_free_fastopen_req(struct tcp_sock *tp);
1685void tcp_fastopen_destroy_cipher(struct sock *sk);
1686void tcp_fastopen_ctx_destroy(struct net *net);
1687int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
1688			      void *primary_key, void *backup_key);
1689int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
1690			    u64 *key);
1691void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
1692struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1693			      struct request_sock *req,
1694			      struct tcp_fastopen_cookie *foc,
1695			      const struct dst_entry *dst);
1696void tcp_fastopen_init_key_once(struct net *net);
1697bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
1698			     struct tcp_fastopen_cookie *cookie);
1699bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
1700#define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
1701#define TCP_FASTOPEN_KEY_MAX 2
1702#define TCP_FASTOPEN_KEY_BUF_LENGTH \
1703	(TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)
1704
1705/* Fastopen key context */
1706struct tcp_fastopen_context {
1707	siphash_key_t	key[TCP_FASTOPEN_KEY_MAX];
1708	int		num;
1709	struct rcu_head	rcu;
1710};
1711
1712void tcp_fastopen_active_disable(struct sock *sk);
1713bool tcp_fastopen_active_should_disable(struct sock *sk);
1714void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
1715void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
1716
1717/* Caller needs to wrap with rcu_read_(un)lock() */
1718static inline
1719struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
1720{
1721	struct tcp_fastopen_context *ctx;
1722
1723	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
1724	if (!ctx)
1725		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
1726	return ctx;
1727}
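
/* Editor's sketch (not part of the original header): per the comment above,
 * the lookup must run under rcu_read_lock().  Hypothetical caller:
 */
static inline int tcp_example_fastopen_num_keys(const struct sock *sk)
{
	struct tcp_fastopen_context *ctx;
	int num = 0;

	rcu_read_lock();
	ctx = tcp_fastopen_get_ctx(sk);
	if (ctx)
		num = ctx->num;	/* number of installed fastopen keys */
	rcu_read_unlock();
	return num;
}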
1728
1729static inline
1730bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
1731			       const struct tcp_fastopen_cookie *orig)
1732{
1733	if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
1734	    orig->len == foc->len &&
1735	    !memcmp(orig->val, foc->val, foc->len))
1736		return true;
1737	return false;
1738}
1739
1740static inline
1741int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
1742{
1743	return ctx->num;
1744}
1745
1746/* Latencies incurred by various limits for a sender. They are
1747 * chronograph-like stats that are mutually exclusive.
1748 */
1749enum tcp_chrono {
1750	TCP_CHRONO_UNSPEC,
1751	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
1752	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
1753	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
1754	__TCP_CHRONO_MAX,
1755};
1756
1757void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
1758void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
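
/* Editor's note (illustrative): a sender brackets each mutually exclusive
 * wait state with these hooks, e.g.:
 *
 *	tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
 *	... stalled until send buffer space frees up ...
 *	tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
 *
 * The accumulated busy/rwnd-limited/sndbuf-limited times are exported to
 * userspace via tcp_info.
 */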
1759
1760	/* This helper is needed because skb->tcp_tsorted_anchor uses
1761	 * the same memory storage as skb->destructor/_skb_refdst
1762	 */
1763static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
1764{
1765	skb->destructor = NULL;
1766	skb->_skb_refdst = 0UL;
1767}
1768
1769#define tcp_skb_tsorted_save(skb) {		\
1770	unsigned long _save = skb->_skb_refdst;	\
1771	skb->_skb_refdst = 0UL;
1772
1773#define tcp_skb_tsorted_restore(skb)		\
1774	skb->_skb_refdst = _save;		\
1775}
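
/* Editor's sketch (not part of the original header): the two macros above
 * open and close a block, stashing the skb->_skb_refdst words (which
 * overlap the tsorted list anchor) around a call that would otherwise
 * clobber them:
 *
 *	tcp_skb_tsorted_save(oskb) {
 *		err = example_transmit(oskb);	// hypothetical callee
 *	} tcp_skb_tsorted_restore(oskb);
 */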
1776
1777void tcp_write_queue_purge(struct sock *sk);
1778
1779static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
1780{
1781	return skb_rb_first(&sk->tcp_rtx_queue);
1782}
1783
1784static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
1785{
1786	return skb_rb_last(&sk->tcp_rtx_queue);
1787}
1788
1789static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
1790{
1791	return skb_peek(&sk->sk_write_queue);
1792}
1793
1794static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1795{
1796	return skb_peek_tail(&sk->sk_write_queue);
1797}
1798
1799#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
1800	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1801
1802static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1803{
1804	return skb_peek(&sk->sk_write_queue);
1805}
1806
1807static inline bool tcp_skb_is_last(const struct sock *sk,
1808				   const struct sk_buff *skb)
1809{
1810	return skb_queue_is_last(&sk->sk_write_queue, skb);
1811}
1812
1813/**
1814 * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue
1815 * @sk: socket
1816 *
1817	 * Since the write queue can contain a temporarily empty skb,
1818 * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
1819 */
1820static inline bool tcp_write_queue_empty(const struct sock *sk)
1821{
1822	const struct tcp_sock *tp = tcp_sk(sk);
1823
1824	return tp->write_seq == tp->snd_nxt;
1825}
1826
1827static inline bool tcp_rtx_queue_empty(const struct sock *sk)
1828{
1829	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
1830}
1831
1832static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
1833{
1834	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
1835}
1836
1837static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1838{
1839	__skb_queue_tail(&sk->sk_write_queue, skb);
1840
1841	/* Queue it, remembering where we must start sending. */
1842	if (sk->sk_write_queue.next == skb)
1843		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
1844}
1845
1846/* Insert new before skb on the write queue of sk.  */
1847static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1848						  struct sk_buff *skb,
1849						  struct sock *sk)
1850{
1851	__skb_queue_before(&sk->sk_write_queue, skb, new);
1852}
1853
1854static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1855{
1856	tcp_skb_tsorted_anchor_cleanup(skb);
1857	__skb_unlink(skb, &sk->sk_write_queue);
1858}
1859
1860void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);
1861
1862static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
1863{
1864	tcp_skb_tsorted_anchor_cleanup(skb);
1865	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
1866}
1867
1868static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
1869{
1870	list_del(&skb->tcp_tsorted_anchor);
1871	tcp_rtx_queue_unlink(skb, sk);
1872	sk_wmem_free_skb(sk, skb);
1873}
1874
1875static inline void tcp_push_pending_frames(struct sock *sk)
1876{
1877	if (tcp_send_head(sk)) {
1878		struct tcp_sock *tp = tcp_sk(sk);
1879
1880		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
1881	}
1882}
1883
1884	/* Start sequence of the skb just after the highest skb with the
1885	 * SACKed bit set, valid only if sacked_out > 0 or when the caller
1886	 * has itself ensured validity.
1887	 */
1888static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1889{
1890	if (!tp->sacked_out)
1891		return tp->snd_una;
1892
1893	if (tp->highest_sack == NULL)
1894		return tp->snd_nxt;
1895
1896	return TCP_SKB_CB(tp->highest_sack)->seq;
1897}
1898
1899static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
1900{
1901	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
1902}
1903
1904static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
1905{
1906	return tcp_sk(sk)->highest_sack;
1907}
1908
1909static inline void tcp_highest_sack_reset(struct sock *sk)
1910{
1911	tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
1912}
1913
1914/* Called when old skb is about to be deleted and replaced by new skb */
1915static inline void tcp_highest_sack_replace(struct sock *sk,
1916					    struct sk_buff *old,
1917					    struct sk_buff *new)
1918{
1919	if (old == tcp_highest_sack(sk))
1920		tcp_sk(sk)->highest_sack = new;
1921}
1922
1923/* This helper checks if socket has IP_TRANSPARENT set */
1924static inline bool inet_sk_transparent(const struct sock *sk)
1925{
1926	switch (sk->sk_state) {
1927	case TCP_TIME_WAIT:
1928		return inet_twsk(sk)->tw_transparent;
1929	case TCP_NEW_SYN_RECV:
1930		return inet_rsk(inet_reqsk(sk))->no_srccheck;
1931	}
1932	return inet_sk(sk)->transparent;
1933}
1934
1935/* Determines whether this is a thin stream (which may suffer from
1936 * increased latency). Used to trigger latency-reducing mechanisms.
1937 */
1938static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
1939{
1940	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
1941}
1942
1943/* /proc */
1944enum tcp_seq_states {
1945	TCP_SEQ_STATE_LISTENING,
1946	TCP_SEQ_STATE_ESTABLISHED,
1947};
1948
1949void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
1950void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
1951void tcp_seq_stop(struct seq_file *seq, void *v);
1952
1953struct tcp_seq_afinfo {
1954	sa_family_t			family;
1955};
1956
1957struct tcp_iter_state {
1958	struct seq_net_private	p;
1959	enum tcp_seq_states	state;
1960	struct sock		*syn_wait_sk;
1961	struct tcp_seq_afinfo	*bpf_seq_afinfo;
1962	int			bucket, offset, sbucket, num;
1963	loff_t			last_pos;
1964};
1965
1966extern struct request_sock_ops tcp_request_sock_ops;
1967extern struct request_sock_ops tcp6_request_sock_ops;
1968
1969void tcp_v4_destroy_sock(struct sock *sk);
1970
1971struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
1972				netdev_features_t features);
1973struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
1974INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
1975INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
1976INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
1977INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
1978int tcp_gro_complete(struct sk_buff *skb);
1979
1980void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
1981
1982static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
1983{
1984	struct net *net = sock_net((struct sock *)tp);
1985	return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
1986}
1987
1988bool tcp_stream_memory_free(const struct sock *sk, int wake);
1989
1990#ifdef CONFIG_PROC_FS
1991int tcp4_proc_init(void);
1992void tcp4_proc_exit(void);
1993#endif
1994
1995int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
1996int tcp_conn_request(struct request_sock_ops *rsk_ops,
1997		     const struct tcp_request_sock_ops *af_ops,
1998		     struct sock *sk, struct sk_buff *skb);
1999
2000/* TCP af-specific functions */
2001struct tcp_sock_af_ops {
2002#ifdef CONFIG_TCP_MD5SIG
2003	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
2004						const struct sock *addr_sk);
2005	int		(*calc_md5_hash)(char *location,
2006					 const struct tcp_md5sig_key *md5,
2007					 const struct sock *sk,
2008					 const struct sk_buff *skb);
2009	int		(*md5_parse)(struct sock *sk,
2010				     int optname,
2011				     sockptr_t optval,
2012				     int optlen);
2013#endif
2014};
2015
2016struct tcp_request_sock_ops {
2017	u16 mss_clamp;
2018#ifdef CONFIG_TCP_MD5SIG
2019	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
2020						 const struct sock *addr_sk);
2021	int		(*calc_md5_hash) (char *location,
2022					  const struct tcp_md5sig_key *md5,
2023					  const struct sock *sk,
2024					  const struct sk_buff *skb);
2025#endif
2026#ifdef CONFIG_SYN_COOKIES
2027	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
2028				 __u16 *mss);
2029#endif
2030	struct dst_entry *(*route_req)(const struct sock *sk,
2031				       struct sk_buff *skb,
2032				       struct flowi *fl,
2033				       struct request_sock *req);
2034	u32 (*init_seq)(const struct sk_buff *skb);
2035	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
2036	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
2037			   struct flowi *fl, struct request_sock *req,
2038			   struct tcp_fastopen_cookie *foc,
2039			   enum tcp_synack_type synack_type,
2040			   struct sk_buff *syn_skb);
2041};
2042
2043extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
2044#if IS_ENABLED(CONFIG_IPV6)
2045extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
2046#endif
2047
2048#ifdef CONFIG_SYN_COOKIES
2049static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2050					 const struct sock *sk, struct sk_buff *skb,
2051					 __u16 *mss)
2052{
2053	tcp_synq_overflow(sk);
2054	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
2055	return ops->cookie_init_seq(skb, mss);
2056}
2057#else
2058static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2059					 const struct sock *sk, struct sk_buff *skb,
2060					 __u16 *mss)
2061{
2062	return 0;
2063}
2064#endif
2065
2066int tcpv4_offload_init(void);
2067
2068void tcp_v4_init(void);
2069void tcp_init(void);
2070
2071/* tcp_recovery.c */
2072void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
2073void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
2074extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
2075				u32 reo_wnd);
2076extern bool tcp_rack_mark_lost(struct sock *sk);
2077extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
2078			     u64 xmit_time);
2079extern void tcp_rack_reo_timeout(struct sock *sk);
2080extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);
2081
2082/* At how many usecs into the future should the RTO fire? */
2083static inline s64 tcp_rto_delta_us(const struct sock *sk)
2084{
2085	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
2086	u32 rto = inet_csk(sk)->icsk_rto;
2087	u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
2088
2089	return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
2090}
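
/* Editor's note (illustrative): if the rtx-queue head was last sent at t0
 * and icsk_rto is 200ms, the RTO should fire at t0 + 200ms; the helper
 * returns that instant minus tcp_mstamp, so a negative result means the
 * timer is already overdue and should fire immediately.
 */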
2091
2092	/*
2093	 * Save and compile IPv4 options, returning a pointer to the copy
2094	 */
2095static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
2096							 struct sk_buff *skb)
2097{
2098	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
2099	struct ip_options_rcu *dopt = NULL;
2100
2101	if (opt->optlen) {
2102		int opt_size = sizeof(*dopt) + opt->optlen;
2103
2104		dopt = kmalloc(opt_size, GFP_ATOMIC);
2105		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
2106			kfree(dopt);
2107			dopt = NULL;
2108		}
2109	}
2110	return dopt;
2111}
2112
2113	/* locally generated TCP pure ACKs have skb->truesize == 2
2114	 * (check tcp_send_ack() in net/ipv4/tcp_output.c)
2115	 * This is much faster than dissecting the packet to find out.
2116	 * (Think of GRE encapsulations, IPv4, IPv6, ...)
2117	 */
2118static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
2119{
2120	return skb->truesize == 2;
2121}
2122
2123static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
2124{
2125	skb->truesize = 2;
2126}
2127
2128static inline int tcp_inq(struct sock *sk)
2129{
2130	struct tcp_sock *tp = tcp_sk(sk);
2131	int answ;
2132
2133	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
2134		answ = 0;
2135	} else if (sock_flag(sk, SOCK_URGINLINE) ||
2136		   !tp->urg_data ||
2137		   before(tp->urg_seq, tp->copied_seq) ||
2138		   !before(tp->urg_seq, tp->rcv_nxt)) {
2139
2140		answ = tp->rcv_nxt - tp->copied_seq;
2141
2142		/* Subtract 1, if FIN was received */
2143		if (answ && sock_flag(sk, SOCK_DONE))
2144			answ--;
2145	} else {
2146		answ = tp->urg_seq - tp->copied_seq;
2147	}
2148
2149	return answ;
2150}
2151
2152int tcp_peek_len(struct socket *sock);
2153
2154static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
2155{
2156	u16 segs_in;
2157
2158	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
2159	tp->segs_in += segs_in;
2160	if (skb->len > tcp_hdrlen(skb))
2161		tp->data_segs_in += segs_in;
2162}
2163
2164	/*
2165	 * The TCP listen path runs lockless.
2166	 * We forced "struct sock" to be const qualified to make sure
2167	 * we don't modify one of its fields by mistake.
2168	 * Here, we only increment sk_drops, which is an atomic_t, so it is
2169	 * safe to cast the sock back to writable.
2170	 */
2171static inline void tcp_listendrop(const struct sock *sk)
2172{
2173	atomic_inc(&((struct sock *)sk)->sk_drops);
2174	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
2175}
2176
2177enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
2178
2179/*
2180 * Interface for adding Upper Level Protocols over TCP
2181 */
2182
2183#define TCP_ULP_NAME_MAX	16
2184#define TCP_ULP_MAX		128
2185#define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)
2186
2187struct tcp_ulp_ops {
2188	struct list_head	list;
2189
2190	/* initialize ulp */
2191	int (*init)(struct sock *sk);
2192	/* update ulp */
2193	void (*update)(struct sock *sk, struct proto *p,
2194		       void (*write_space)(struct sock *sk));
2195	/* cleanup ulp */
2196	void (*release)(struct sock *sk);
2197	/* diagnostic */
2198	int (*get_info)(const struct sock *sk, struct sk_buff *skb);
2199	size_t (*get_info_size)(const struct sock *sk);
2200	/* clone ulp */
2201	void (*clone)(const struct request_sock *req, struct sock *newsk,
2202		      const gfp_t priority);
2203
2204	char		name[TCP_ULP_NAME_MAX];
2205	struct module	*owner;
2206};
2207int tcp_register_ulp(struct tcp_ulp_ops *type);
2208void tcp_unregister_ulp(struct tcp_ulp_ops *type);
2209int tcp_set_ulp(struct sock *sk, const char *name);
2210void tcp_get_available_ulp(char *buf, size_t len);
2211void tcp_cleanup_ulp(struct sock *sk);
2212void tcp_update_ulp(struct sock *sk, struct proto *p,
2213		    void (*write_space)(struct sock *sk));
2214
2215#define MODULE_ALIAS_TCP_ULP(name)				\
2216	__MODULE_INFO(alias, alias_userspace, name);		\
2217	__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
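
/* Editor's sketch (not part of the original header): skeleton of a ULP
 * module built on the ops above; every "example" name is hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static struct tcp_ulp_ops example_ulp_ops = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.init		= example_ulp_init,	/* hypothetical */
	.release	= example_ulp_release,	/* hypothetical */
};

static int __init example_ulp_module_init(void)
{
	return tcp_register_ulp(&example_ulp_ops);
}
module_init(example_ulp_module_init);
MODULE_ALIAS_TCP_ULP("example");	/* lets setsockopt(TCP_ULP) autoload it */
#endif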
2218
2219#ifdef CONFIG_NET_SOCK_MSG
2220struct sk_msg;
2221struct sk_psock;
2222
2223#ifdef CONFIG_BPF_SYSCALL
2224struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
2225int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
2226void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
2227#endif /* CONFIG_BPF_SYSCALL */
2228
2229int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes,
2230			  int flags);
2231#endif /* CONFIG_NET_SOCK_MSG */
2232
2233#if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
2234static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
2235{
2236}
2237#endif
2238
2239#ifdef CONFIG_CGROUP_BPF
2240static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2241				      struct sk_buff *skb,
2242				      unsigned int end_offset)
2243{
2244	skops->skb = skb;
2245	skops->skb_data_end = skb->data + end_offset;
2246}
2247#else
2248static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2249				      struct sk_buff *skb,
2250				      unsigned int end_offset)
2251{
2252}
2253#endif
2254
2255/* Call BPF_SOCK_OPS program that returns an int. If the return value
2256 * is < 0, then the BPF op failed (for example if the loaded BPF
2257 * program does not support the chosen operation or there is no BPF
2258 * program loaded).
2259 */
2260#ifdef CONFIG_BPF
2261static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2262{
2263	struct bpf_sock_ops_kern sock_ops;
2264	int ret;
2265
2266	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
2267	if (sk_fullsock(sk)) {
2268		sock_ops.is_fullsock = 1;
2269		sock_owned_by_me(sk);
2270	}
2271
2272	sock_ops.sk = sk;
2273	sock_ops.op = op;
2274	if (nargs > 0)
2275		memcpy(sock_ops.args, args, nargs * sizeof(*args));
2276
2277	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
2278	if (ret == 0)
2279		ret = sock_ops.reply;
2280	else
2281		ret = -1;
2282	return ret;
2283}
2284
2285static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2286{
2287	u32 args[2] = {arg1, arg2};
2288
2289	return tcp_call_bpf(sk, op, 2, args);
2290}
2291
2292static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2293				    u32 arg3)
2294{
2295	u32 args[3] = {arg1, arg2, arg3};
2296
2297	return tcp_call_bpf(sk, op, 3, args);
2298}
2299
2300#else
2301static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2302{
2303	return -EPERM;
2304}
2305
2306static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2307{
2308	return -EPERM;
2309}
2310
2311static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2312				    u32 arg3)
2313{
2314	return -EPERM;
2315}
2316
2317#endif
2318
2319static inline u32 tcp_timeout_init(struct sock *sk)
2320{
2321	int timeout;
2322
2323	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
2324
2325	if (timeout <= 0)
2326		timeout = TCP_TIMEOUT_INIT;
2327	return timeout;
2328}
2329
2330static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
2331{
2332	int rwnd;
2333
2334	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
2335
2336	if (rwnd < 0)
2337		rwnd = 0;
2338	return rwnd;
2339}
2340
2341static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
2342{
2343	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
2344}
2345
2346static inline void tcp_bpf_rtt(struct sock *sk)
2347{
2348	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
2349		tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, NULL);
2350}
2351
2352#if IS_ENABLED(CONFIG_SMC)
2353extern struct static_key_false tcp_have_smc;
2354#endif
2355
2356#if IS_ENABLED(CONFIG_TLS_DEVICE)
2357void clean_acked_data_enable(struct inet_connection_sock *icsk,
2358			     void (*cad)(struct sock *sk, u32 ack_seq));
2359void clean_acked_data_disable(struct inet_connection_sock *icsk);
2360void clean_acked_data_flush(void);
2361#endif
2362
2363DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
2364static inline void tcp_add_tx_delay(struct sk_buff *skb,
2365				    const struct tcp_sock *tp)
2366{
2367	if (static_branch_unlikely(&tcp_tx_delay_enabled))
2368		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
2369}
2370
2371	/* Compute the Earliest Departure Time for some control packets
2372	 * like ACKs or RSTs for TIME_WAIT or non-ESTABLISHED sockets.
2373	 */
2374static inline u64 tcp_transmit_time(const struct sock *sk)
2375{
2376	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
2377		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
2378			tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;
2379
2380		return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
2381	}
2382	return 0;
2383}
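
/* Editor's note (illustrative): with tcp_tx_delay set to 1000 usec, a
 * TIME_WAIT ACK gets an earliest departure time of tcp_clock_ns() + 1ms;
 * EDT-aware pacing qdiscs such as sch_fq hold the skb until that instant.
 */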
2384
2385#endif	/* _TCP_H */
v3.5.6
 
   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		Definitions for the TCP module.
   7 *
   8 * Version:	@(#)tcp.h	1.0.5	05/23/93
   9 *
  10 * Authors:	Ross Biro
  11 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12 *
  13 *		This program is free software; you can redistribute it and/or
  14 *		modify it under the terms of the GNU General Public License
  15 *		as published by the Free Software Foundation; either version
  16 *		2 of the License, or (at your option) any later version.
  17 */
  18#ifndef _TCP_H
  19#define _TCP_H
  20
  21#define FASTRETRANS_DEBUG 1
  22
  23#include <linux/list.h>
  24#include <linux/tcp.h>
  25#include <linux/bug.h>
  26#include <linux/slab.h>
  27#include <linux/cache.h>
  28#include <linux/percpu.h>
  29#include <linux/skbuff.h>
  30#include <linux/dmaengine.h>
  31#include <linux/crypto.h>
  32#include <linux/cryptohash.h>
  33#include <linux/kref.h>
 
 
  34
  35#include <net/inet_connection_sock.h>
  36#include <net/inet_timewait_sock.h>
  37#include <net/inet_hashtables.h>
  38#include <net/checksum.h>
  39#include <net/request_sock.h>
 
  40#include <net/sock.h>
  41#include <net/snmp.h>
  42#include <net/ip.h>
  43#include <net/tcp_states.h>
  44#include <net/inet_ecn.h>
  45#include <net/dst.h>
 
  46
  47#include <linux/seq_file.h>
  48#include <linux/memcontrol.h>
 
 
  49
  50extern struct inet_hashinfo tcp_hashinfo;
  51
  52extern struct percpu_counter tcp_orphan_count;
  53extern void tcp_time_wait(struct sock *sk, int state, int timeo);
  54
  55#define MAX_TCP_HEADER	(128 + MAX_HEADER)
  56#define MAX_TCP_OPTION_SPACE 40
 
 
  57
  58/* 
  59 * Never offer a window over 32767 without using window scaling. Some
  60 * poor stacks do signed 16bit maths! 
  61 */
  62#define MAX_TCP_WINDOW		32767U
  63
  64/* Offer an initial receive window of 10 mss. */
  65#define TCP_DEFAULT_INIT_RCVWND	10
  66
  67/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
  68#define TCP_MIN_MSS		88U
  69
  70/* The least MTU to use for probing */
  71#define TCP_BASE_MSS		512
 
 
 
 
 
 
  72
  73/* After receiving this amount of duplicate ACKs fast retransmit starts. */
  74#define TCP_FASTRETRANS_THRESH 3
  75
  76/* Maximal reordering. */
  77#define TCP_MAX_REORDERING	127
  78
  79/* Maximal number of ACKs sent quickly to accelerate slow-start. */
  80#define TCP_MAX_QUICKACKS	16U
  81
 
 
 
  82/* urg_data states */
  83#define TCP_URG_VALID	0x0100
  84#define TCP_URG_NOTYET	0x0200
  85#define TCP_URG_READ	0x0400
  86
  87#define TCP_RETR1	3	/*
  88				 * This is how many retries it does before it
  89				 * tries to figure out if the gateway is
  90				 * down. Minimal RFC value is 3; it corresponds
  91				 * to ~3sec-8min depending on RTO.
  92				 */
  93
  94#define TCP_RETR2	15	/*
  95				 * This should take at least
  96				 * 90 minutes to time out.
  97				 * RFC1122 says that the limit is 100 sec.
  98				 * 15 is ~13-30min depending on RTO.
  99				 */
 100
 101#define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
 102				 * connection: ~180sec is RFC minimum	*/
 
 
 
 
 
 
 103
 104#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
 105				 * connection: ~180sec is RFC minimum	*/
 
 
 
 
 106
 107#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
 108				  * state, about 60 seconds	*/
 109#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
 110                                 /* BSD style FIN_WAIT2 deadlock breaker.
 111				  * It used to be 3min, new value is 60sec,
 112				  * to combine FIN-WAIT-2 timeout with
 113				  * TIME-WAIT timer.
 114				  */
 
 115
 116#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
 117#if HZ >= 100
 118#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
 119#define TCP_ATO_MIN	((unsigned)(HZ/25))
 120#else
 121#define TCP_DELACK_MIN	4U
 122#define TCP_ATO_MIN	4U
 123#endif
 124#define TCP_RTO_MAX	((unsigned)(120*HZ))
 125#define TCP_RTO_MIN	((unsigned)(HZ/5))
 
 126#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
 127#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
 128						 * used as a fallback RTO for the
 129						 * initial data transmission if no
 130						 * valid RTT sample has been acquired,
 131						 * most likely due to retrans in 3WHS.
 132						 */
 133
 134#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
 135					                 * for local resources.
 136					                 */
 137
 138#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
 139#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
 140#define TCP_KEEPALIVE_INTVL	(75*HZ)
 141
 142#define MAX_TCP_KEEPIDLE	32767
 143#define MAX_TCP_KEEPINTVL	32767
 144#define MAX_TCP_KEEPCNT		127
 145#define MAX_TCP_SYNCNT		127
 146
 147#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
 148
 149#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
 150#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
 151					 * after this time. It should be equal
 152					 * (or greater than) TCP_TIMEWAIT_LEN
 153					 * to provide reliability equal to one
 154					 * provided by timewait state.
 155					 */
 156#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
 157					 * timestamps. It must be less than
 158					 * minimal timewait lifetime.
 159					 */
 160/*
 161 *	TCP option
 162 */
 163 
 164#define TCPOPT_NOP		1	/* Padding */
 165#define TCPOPT_EOL		0	/* End of options */
 166#define TCPOPT_MSS		2	/* Segment size negotiating */
 167#define TCPOPT_WINDOW		3	/* Window scaling */
 168#define TCPOPT_SACK_PERM        4       /* SACK Permitted */
 169#define TCPOPT_SACK             5       /* SACK Block */
 170#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
 171#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
 172#define TCPOPT_COOKIE		253	/* Cookie extension (experimental) */
 
 
 
 
 
 
 
 173
 174/*
 175 *     TCP option lengths
 176 */
 177
 178#define TCPOLEN_MSS            4
 179#define TCPOLEN_WINDOW         3
 180#define TCPOLEN_SACK_PERM      2
 181#define TCPOLEN_TIMESTAMP      10
 182#define TCPOLEN_MD5SIG         18
 183#define TCPOLEN_COOKIE_BASE    2	/* Cookie-less header extension */
 184#define TCPOLEN_COOKIE_PAIR    3	/* Cookie pair header extension */
 185#define TCPOLEN_COOKIE_MIN     (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MIN)
 186#define TCPOLEN_COOKIE_MAX     (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MAX)
 187
 188/* But this is what stacks really send out. */
 189#define TCPOLEN_TSTAMP_ALIGNED		12
 190#define TCPOLEN_WSCALE_ALIGNED		4
 191#define TCPOLEN_SACKPERM_ALIGNED	4
 192#define TCPOLEN_SACK_BASE		2
 193#define TCPOLEN_SACK_BASE_ALIGNED	4
 194#define TCPOLEN_SACK_PERBLOCK		8
 195#define TCPOLEN_MD5SIG_ALIGNED		20
 196#define TCPOLEN_MSS_ALIGNED		4
 
 197
 198/* Flags in tp->nonagle */
 199#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
 200#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
 201#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */
 202
 203/* TCP thin-stream limits */
 204#define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */
 205
 206/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
 207#define TCP_INIT_CWND		10
 208
 209extern struct inet_timewait_death_row tcp_death_row;
 
 
 
 
 
 
 
 
 
 
 
 
 210
 211/* sysctl variables for tcp */
 212extern int sysctl_tcp_timestamps;
 213extern int sysctl_tcp_window_scaling;
 214extern int sysctl_tcp_sack;
 215extern int sysctl_tcp_fin_timeout;
 216extern int sysctl_tcp_keepalive_time;
 217extern int sysctl_tcp_keepalive_probes;
 218extern int sysctl_tcp_keepalive_intvl;
 219extern int sysctl_tcp_syn_retries;
 220extern int sysctl_tcp_synack_retries;
 221extern int sysctl_tcp_retries1;
 222extern int sysctl_tcp_retries2;
 223extern int sysctl_tcp_orphan_retries;
 224extern int sysctl_tcp_syncookies;
 225extern int sysctl_tcp_retrans_collapse;
 226extern int sysctl_tcp_stdurg;
 227extern int sysctl_tcp_rfc1337;
 228extern int sysctl_tcp_abort_on_overflow;
 229extern int sysctl_tcp_max_orphans;
 230extern int sysctl_tcp_fack;
 231extern int sysctl_tcp_reordering;
 232extern int sysctl_tcp_ecn;
 233extern int sysctl_tcp_dsack;
 234extern int sysctl_tcp_wmem[3];
 235extern int sysctl_tcp_rmem[3];
 236extern int sysctl_tcp_app_win;
 237extern int sysctl_tcp_adv_win_scale;
 238extern int sysctl_tcp_tw_reuse;
 239extern int sysctl_tcp_frto;
 240extern int sysctl_tcp_frto_response;
 241extern int sysctl_tcp_low_latency;
 242extern int sysctl_tcp_dma_copybreak;
 243extern int sysctl_tcp_nometrics_save;
 244extern int sysctl_tcp_moderate_rcvbuf;
 245extern int sysctl_tcp_tso_win_divisor;
 246extern int sysctl_tcp_abc;
 247extern int sysctl_tcp_mtu_probing;
 248extern int sysctl_tcp_base_mss;
 249extern int sysctl_tcp_workaround_signed_windows;
 250extern int sysctl_tcp_slow_start_after_idle;
 251extern int sysctl_tcp_max_ssthresh;
 252extern int sysctl_tcp_cookie_size;
 253extern int sysctl_tcp_thin_linear_timeouts;
 254extern int sysctl_tcp_thin_dupack;
 255extern int sysctl_tcp_early_retrans;
 256
 257extern atomic_long_t tcp_memory_allocated;
 258extern struct percpu_counter tcp_sockets_allocated;
 259extern int tcp_memory_pressure;
 
 
 
 
 
 
 
 260
 
 
 261/*
 262 * The next routines deal with comparing 32 bit unsigned ints
 263 * and worry about wraparound (automatic with unsigned arithmetic).
 264 */
 265
 266static inline bool before(__u32 seq1, __u32 seq2)
 267{
 268        return (__s32)(seq1-seq2) < 0;
 269}
 270#define after(seq2, seq1) 	before(seq1, seq2)
 271
 272/* is s2<=s1<=s3 ? */
 273static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
 274{
 275	return seq3 - seq2 >= seq1 - seq2;
 276}
 277
 278static inline bool tcp_out_of_memory(struct sock *sk)
 279{
 280	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
 281	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
 282		return true;
 283	return false;
 284}
 285
 
 
 286static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
 287{
 288	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
 289	int orphans = percpu_counter_read_positive(ocp);
 290
 291	if (orphans << shift > sysctl_tcp_max_orphans) {
 292		orphans = percpu_counter_sum_positive(ocp);
 293		if (orphans << shift > sysctl_tcp_max_orphans)
 294			return true;
 295	}
 296	return false;
 297}
 298
 299extern bool tcp_check_oom(struct sock *sk, int shift);
 300
 301/* syncookies: remember time of last synqueue overflow */
 302static inline void tcp_synq_overflow(struct sock *sk)
 303{
 304	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
 305}
 306
 307/* syncookies: no recent synqueue overflow on this listening socket? */
 308static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
 309{
 310	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
 311	return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
 312}
 313
 314extern struct proto tcp_prot;
 315
 316#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
 317#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
 318#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
 319#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
 320#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
 321
 322extern void tcp_init_mem(struct net *net);
 323
 324extern void tcp_v4_err(struct sk_buff *skb, u32);
 325
 326extern void tcp_shutdown (struct sock *sk, int how);
 327
 328extern int tcp_v4_rcv(struct sk_buff *skb);
 
 329
 330extern struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it);
 331extern void *tcp_v4_tw_get_peer(struct sock *sk);
 332extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
 333extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 334		       size_t size);
 335extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
 
 336			size_t size, int flags);
 337extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 338extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 339				 const struct tcphdr *th, unsigned int len);
 340extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 341			       const struct tcphdr *th, unsigned int len);
 342extern void tcp_rcv_space_adjust(struct sock *sk);
 343extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
 344extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
 345extern void tcp_twsk_destructor(struct sock *sk);
 346extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
 347			       struct pipe_inode_info *pipe, size_t len,
 348			       unsigned int flags);
 
 
 
 
 
 
 
 
 349
 
 350static inline void tcp_dec_quickack_mode(struct sock *sk,
 351					 const unsigned int pkts)
 352{
 353	struct inet_connection_sock *icsk = inet_csk(sk);
 354
 355	if (icsk->icsk_ack.quick) {
 356		if (pkts >= icsk->icsk_ack.quick) {
 357			icsk->icsk_ack.quick = 0;
 358			/* Leaving quickack mode we deflate ATO. */
 359			icsk->icsk_ack.ato   = TCP_ATO_MIN;
 360		} else
 361			icsk->icsk_ack.quick -= pkts;
 362	}
 363}
 364
 365#define	TCP_ECN_OK		1
 366#define	TCP_ECN_QUEUE_CWR	2
 367#define	TCP_ECN_DEMAND_CWR	4
 368#define	TCP_ECN_SEEN		8
 369
 370enum tcp_tw_status {
 371	TCP_TW_SUCCESS = 0,
 372	TCP_TW_RST = 1,
 373	TCP_TW_ACK = 2,
 374	TCP_TW_SYN = 3
 375};
 376
 377
 378extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
 379						     struct sk_buff *skb,
 380						     const struct tcphdr *th);
 381extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
 382				   struct request_sock *req,
 383				   struct request_sock **prev);
 384extern int tcp_child_process(struct sock *parent, struct sock *child,
 385			     struct sk_buff *skb);
 386extern bool tcp_use_frto(struct sock *sk);
 387extern void tcp_enter_frto(struct sock *sk);
 388extern void tcp_enter_loss(struct sock *sk, int how);
 389extern void tcp_clear_retrans(struct tcp_sock *tp);
 390extern void tcp_update_metrics(struct sock *sk);
 391extern void tcp_close(struct sock *sk, long timeout);
 392extern void tcp_init_sock(struct sock *sk);
 393extern unsigned int tcp_poll(struct file * file, struct socket *sock,
 394			     struct poll_table_struct *wait);
 395extern int tcp_getsockopt(struct sock *sk, int level, int optname,
 396			  char __user *optval, int __user *optlen);
 397extern int tcp_setsockopt(struct sock *sk, int level, int optname,
 398			  char __user *optval, unsigned int optlen);
 399extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
 400				 char __user *optval, int __user *optlen);
 401extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
 402				 char __user *optval, unsigned int optlen);
 403extern void tcp_set_keepalive(struct sock *sk, int val);
 404extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
 405extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 406		       size_t len, int nonblock, int flags, int *addr_len);
 407extern void tcp_parse_options(const struct sk_buff *skb,
 408			      struct tcp_options_received *opt_rx, const u8 **hvpp,
 409			      int estab);
 410extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 
 
 
 
 
 
 
 
 
 
 
 
 411
 412/*
 
 
 
 
 
 
 
 
 
 
 413 *	TCP v4 functions exported for the inet6 API
 414 */
 415
 416extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 417extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 418extern struct sock * tcp_create_openreq_child(struct sock *sk,
 419					      struct request_sock *req,
 420					      struct sk_buff *skb);
 421extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 422					  struct request_sock *req,
 423					  struct dst_entry *dst);
 424extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 425extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
 426			  int addr_len);
 427extern int tcp_connect(struct sock *sk);
 428extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 429					struct request_sock *req,
 430					struct request_values *rvp);
 431extern int tcp_disconnect(struct sock *sk, int flags);
 
 
 
 
 
 
 
 
 
 
 
 
 432
 433void tcp_connect_init(struct sock *sk);
 434void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
 435int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
 
 436
 437/* From syncookies.c */
 438extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
 439extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, 
 440				    struct ip_options *opt);
 
 
 
 
 
 441#ifdef CONFIG_SYN_COOKIES
 442extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, 
 443				     __u16 *mss);
 444#else
 445static inline __u32 cookie_v4_init_sequence(struct sock *sk,
 446					    struct sk_buff *skb,
 447					    __u16 *mss)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 448{
 449	return 0;
 
 
 
 450}
 451#endif
 452
 453extern __u32 cookie_init_timestamp(struct request_sock *req);
 454extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);
 
 
 
 
 
 
 455
 456/* From net/ipv6/syncookies.c */
 457extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
 458#ifdef CONFIG_SYN_COOKIES
 459extern __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
 460				     __u16 *mss);
 461#else
 462static inline __u32 cookie_v6_init_sequence(struct sock *sk,
 463					    struct sk_buff *skb,
 464					    __u16 *mss)
 465{
 466	return 0;
 467}
 468#endif
 469/* tcp_output.c */
 470
 471extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
 472				      int nonagle);
 473extern bool tcp_may_send_now(struct sock *sk);
 474extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
 475extern void tcp_retransmit_timer(struct sock *sk);
 476extern void tcp_xmit_retransmit_queue(struct sock *);
 477extern void tcp_simple_retransmit(struct sock *);
 478extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
 479extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
 480
 481extern void tcp_send_probe0(struct sock *);
 482extern void tcp_send_partial(struct sock *);
 483extern int tcp_write_wakeup(struct sock *);
 484extern void tcp_send_fin(struct sock *sk);
 485extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 486extern int tcp_send_synack(struct sock *);
 487extern bool tcp_syn_flood_action(struct sock *sk,
 488				 const struct sk_buff *skb,
 489				 const char *proto);
 490extern void tcp_push_one(struct sock *, unsigned int mss_now);
 491extern void tcp_send_ack(struct sock *sk);
 492extern void tcp_send_delayed_ack(struct sock *sk);
 
 
 
 
 
 
 
 
 
 493
 494/* tcp_input.c */
 495extern void tcp_cwnd_application_limited(struct sock *sk);
 496extern void tcp_resume_early_retransmit(struct sock *sk);
 497extern void tcp_rearm_rto(struct sock *sk);
 
 
 498
 499/* tcp_timer.c */
 500extern void tcp_init_xmit_timers(struct sock *);
 501static inline void tcp_clear_xmit_timers(struct sock *sk)
 502{
 
 
 
 
 
 
 503	inet_csk_clear_xmit_timers(sk);
 504}
 505
 506extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
 507extern unsigned int tcp_current_mss(struct sock *sk);
 
 508
 509/* Bound MSS / TSO packet size with the half of the window */
 510static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
 511{
 512	int cutoff;
 513
 514	/* When peer uses tiny windows, there is no use in packetizing
 515	 * to sub-MSS pieces for the sake of SWS or making sure there
 516	 * are enough packets in the pipe for fast recovery.
 517	 *
 518	 * On the other hand, for extremely large MSS devices, handling
 519	 * smaller than MSS windows in this way does make sense.
 520	 */
 521	if (tp->max_window >= 512)
 522		cutoff = (tp->max_window >> 1);
 523	else
 524		cutoff = tp->max_window;
 525
 526	if (cutoff && pktsize > cutoff)
 527		return max_t(int, cutoff, 68U - tp->tcp_header_len);
 528	else
 529		return pktsize;
 530}
 531
 532/* tcp.c */
 533extern void tcp_get_info(const struct sock *, struct tcp_info *);
 534
 535/* Read 'sendfile()'-style from a TCP socket */
 536typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
 537				unsigned int, size_t);
 538extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 539			 sk_read_actor_t recv_actor);
 540
 541extern void tcp_initialize_rcv_mss(struct sock *sk);
 542
 543extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
 544extern int tcp_mss_to_mtu(struct sock *sk, int mss);
 545extern void tcp_mtup_init(struct sock *sk);
 546extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);
 547
 548static inline void tcp_bound_rto(const struct sock *sk)
 549{
 550	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
 551		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
 552}
 553
 554static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
 555{
 556	return (tp->srtt >> 3) + tp->rttvar;
 557}
 558
 559static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
 560{
 
 
 
 
 561	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
 562			       ntohl(TCP_FLAG_ACK) |
 563			       snd_wnd);
 564}
 565
 566static inline void tcp_fast_path_on(struct tcp_sock *tp)
 567{
 568	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
 569}
 570
 571static inline void tcp_fast_path_check(struct sock *sk)
 572{
 573	struct tcp_sock *tp = tcp_sk(sk);
 574
 575	if (skb_queue_empty(&tp->out_of_order_queue) &&
 576	    tp->rcv_wnd &&
 577	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
 578	    !tp->urg_data)
 579		tcp_fast_path_on(tp);
 580}
 581
 582/* Compute the actual rto_min value */
 583static inline u32 tcp_rto_min(struct sock *sk)
 584{
 585	const struct dst_entry *dst = __sk_dst_get(sk);
 586	u32 rto_min = TCP_RTO_MIN;
 587
 588	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
 589		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
 590	return rto_min;
 591}
 592
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 593/* Compute the actual receive window we are currently advertising.
 594 * Rcv_nxt can be after the window if our peer push more data
 595 * than the offered window.
 596 */
 597static inline u32 tcp_receive_window(const struct tcp_sock *tp)
 598{
 599	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
 600
 601	if (win < 0)
 602		win = 0;
 603	return (u32) win;
 604}
 605
 606/* Choose a new window, without checks for shrinking, and without
 607 * scaling applied to the result.  The caller does these things
 608 * if necessary.  This is a "raw" window selection.
 609 */
 610extern u32 __tcp_select_window(struct sock *sk);
 611
 612void tcp_send_window_probe(struct sock *sk);
 613
 614/* TCP timestamps are only 32-bits, this causes a slight
 615 * complication on 64-bit systems since we store a snapshot
 616 * of jiffies in the buffer control blocks below.  We decided
 617 * to use only the low 32-bits of jiffies and hide the ugly
 618 * casts with the following macro.
 
 
 
 
 
 619 */
 620#define tcp_time_stamp		((__u32)(jiffies))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 621
 622#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
 623
 624#define TCPHDR_FIN 0x01
 625#define TCPHDR_SYN 0x02
 626#define TCPHDR_RST 0x04
 627#define TCPHDR_PSH 0x08
 628#define TCPHDR_ACK 0x10
 629#define TCPHDR_URG 0x20
 630#define TCPHDR_ECE 0x40
 631#define TCPHDR_CWR 0x80
 632
 
 
 633/* This is what the send packet queuing engine uses to pass
 634 * TCP per-packet control information to the transmission code.
 635 * We also store the host-order sequence numbers in here too.
 636 * This is 44 bytes if IPV6 is enabled.
 637 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 638 */
 639struct tcp_skb_cb {
 640	union {
 641		struct inet_skb_parm	h4;
 642#if IS_ENABLED(CONFIG_IPV6)
 643		struct inet6_skb_parm	h6;
 644#endif
 645	} header;	/* For incoming frames		*/
 646	__u32		seq;		/* Starting sequence number	*/
 647	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
 648	__u32		when;		/* used to compute rtt's	*/
 
 
 
 
 
 
 
 
 
 
 
 
 649	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/
 650
 651	__u8		sacked;		/* State flags for SACK/FACK.	*/
 652#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
 653#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
 654#define TCPCB_LOST		0x04	/* SKB is lost			*/
 655#define TCPCB_TAGBITS		0x07	/* All tag bits			*/
 
 656#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
 657#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
 
 658
 659	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
 660	/* 1 byte hole */
 
 
 
 661	__u32		ack_seq;	/* Sequence number ACK'd	*/
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 662};
 663
 664#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
 665
 666/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set
 667 *
 668 * If we receive a SYN packet with these bits set, it means a network is
 669 * playing bad games with TOS bits. In order to avoid possible false congestion
 670 * notifications, we disable TCP ECN negociation.
 671 */
 672static inline void
 673TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb)
 674{
 675	const struct tcphdr *th = tcp_hdr(skb);
 676
 677	if (sysctl_tcp_ecn && th->ece && th->cwr &&
 678	    INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield))
 679		inet_rsk(req)->ecn_ok = 1;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 680}
 681
 682/* Due to TSO, an SKB can be composed of multiple actual
 683 * packets.  To keep these tracked properly, we use this.
 684 */
 685static inline int tcp_skb_pcount(const struct sk_buff *skb)
 686{
 687	return skb_shinfo(skb)->gso_segs;
 
 
 
 
 
 
 
 
 
 
 688}
 689
 690/* This is valid iff tcp_skb_pcount() > 1. */
 691static inline int tcp_skb_mss(const struct sk_buff *skb)
 692{
 693	return skb_shinfo(skb)->gso_size;
 
 
 
 
 
 
 
 
 
 
 
 
 694}
 695
 696/* Events passed to congestion control interface */
 697enum tcp_ca_event {
 698	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
 699	CA_EVENT_CWND_RESTART,	/* congestion window restart */
 700	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
 701	CA_EVENT_FRTO,		/* fast recovery timeout */
 702	CA_EVENT_LOSS,		/* loss timeout */
 703	CA_EVENT_FAST_ACK,	/* in sequence ack */
 704	CA_EVENT_SLOW_ACK,	/* other ack */
 
 
 
 
 
 
 
 705};
 706
 707/*
 708 * Interface for adding new TCP congestion control handlers
 709 */
 710#define TCP_CA_NAME_MAX	16
 711#define TCP_CA_MAX	128
 712#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)
 713
 
 
 
 714#define TCP_CONG_NON_RESTRICTED 0x1
 715#define TCP_CONG_RTT_STAMP	0x2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 716
 717struct tcp_congestion_ops {
 718	struct list_head	list;
 719	unsigned long flags;
 720
 721	/* initialize private data (optional) */
 722	void (*init)(struct sock *sk);
 723	/* cleanup private data  (optional) */
 724	void (*release)(struct sock *sk);
 725
 726	/* return slow start threshold (required) */
 727	u32 (*ssthresh)(struct sock *sk);
 728	/* lower bound for congestion window (optional) */
 729	u32 (*min_cwnd)(const struct sock *sk);
 730	/* do new cwnd calculation (required) */
 731	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
 
 732	/* call before changing ca_state (optional) */
 733	void (*set_state)(struct sock *sk, u8 new_state);
 
 734	/* call when cwnd event occurs (optional) */
 735	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
 736	/* new value of cwnd after loss (optional) */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 737	u32  (*undo_cwnd)(struct sock *sk);
 738	/* hook for packet ack accounting (optional) */
 739	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
 
 
 740	/* get info for inet_diag (optional) */
 741	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);
 
 
 
 
 
 
 
 742
 743	char 		name[TCP_CA_NAME_MAX];
 744	struct module 	*owner;
 745};
 
 
 746
 747extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
 748extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
 749
 750extern void tcp_init_congestion_control(struct sock *sk);
 751extern void tcp_cleanup_congestion_control(struct sock *sk);
 752extern int tcp_set_default_congestion_control(const char *name);
 753extern void tcp_get_default_congestion_control(char *name);
 754extern void tcp_get_available_congestion_control(char *buf, size_t len);
 755extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
 756extern int tcp_set_allowed_congestion_control(char *allowed);
 757extern int tcp_set_congestion_control(struct sock *sk, const char *name);
 758extern void tcp_slow_start(struct tcp_sock *tp);
 759extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
 760
 761extern struct tcp_congestion_ops tcp_init_congestion_ops;
 762extern u32 tcp_reno_ssthresh(struct sock *sk);
 763extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
 764extern u32 tcp_reno_min_cwnd(const struct sock *sk);
 765extern struct tcp_congestion_ops tcp_reno;
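/* Usage sketch (illustrative only): a minimal congestion control module
 * built from the Reno helpers declared above.  The "example" name and the
 * module boilerplate are hypothetical and assume <linux/module.h>.
 *
 *	static struct tcp_congestion_ops tcp_example __read_mostly = {
 *		.flags		= TCP_CONG_NON_RESTRICTED,
 *		.ssthresh	= tcp_reno_ssthresh,	// required hook
 *		.cong_avoid	= tcp_reno_cong_avoid,	// required hook
 *		.min_cwnd	= tcp_reno_min_cwnd,	// optional hook
 *		.owner		= THIS_MODULE,
 *		.name		= "example",
 *	};
 *
 *	static int __init tcp_example_register(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 *
 *	static void __exit tcp_example_unregister(void)
 *	{
 *		tcp_unregister_congestion_control(&tcp_example);
 *	}
 *
 *	module_init(tcp_example_register);
 *	module_exit(tcp_example_unregister);
 */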
 766
 767static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
 768{
 769	struct inet_connection_sock *icsk = inet_csk(sk);
 770
 771	if (icsk->icsk_ca_ops->set_state)
 772		icsk->icsk_ca_ops->set_state(sk, ca_state);
 773	icsk->icsk_ca_state = ca_state;
 774}
 775
 776static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
 777{
 778	const struct inet_connection_sock *icsk = inet_csk(sk);
 779
 780	if (icsk->icsk_ca_ops->cwnd_event)
 781		icsk->icsk_ca_ops->cwnd_event(sk, event);
 782}
 783
 784/* These functions determine how the current flow behaves with respect to SACK
 785 * handling. SACK is negotiated with the peer, and therefore it can vary
 786 * between different flows.
 787 *
 788 * tcp_is_sack - SACK enabled
 789 * tcp_is_reno - No SACK
 790 * tcp_is_fack - FACK enabled, implies SACK enabled
 791 */
 792static inline int tcp_is_sack(const struct tcp_sock *tp)
 793{
 794	return tp->rx_opt.sack_ok;
 795}
 796
 797static inline bool tcp_is_reno(const struct tcp_sock *tp)
 798{
 799	return !tcp_is_sack(tp);
 800}
 801
 802static inline bool tcp_is_fack(const struct tcp_sock *tp)
 803{
 804	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
 805}
 806
 807static inline void tcp_enable_fack(struct tcp_sock *tp)
 808{
 809	tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
 810}
 811
 812/* TCP early-retransmit (ER) is similar to but more conservative than
 813 * the thin-dupack feature.  Enable ER only if thin-dupack is disabled.
 814 */
 815static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
 816{
 817	tp->do_early_retrans = sysctl_tcp_early_retrans &&
 818		!sysctl_tcp_thin_dupack && sysctl_tcp_reordering == 3;
 819	tp->early_retrans_delayed = 0;
 820}
 821
 822static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
 823{
 824	tp->do_early_retrans = 0;
 825}
 826
 827static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
 828{
 829	return tp->sacked_out + tp->lost_out;
 830}
 831
 832/* This determines how many packets are "in the network" to the best
 833 * of our knowledge.  In many cases it is conservative, but where
 834 * detailed information is available from the receiver (via SACK
 835 * blocks etc.) we can make more aggressive calculations.
 836 *
 837 * Use this for decisions involving congestion control, use just
 838 * tp->packets_out to determine if the send queue is empty or not.
 839 *
 840 * Read this equation as:
 841 *
 842 *	"Packets sent at least once on the transmission queue" MINUS
 843 *	"Packets that have left the network, but are not honestly ACKed yet" PLUS
 844 *	"Packets fast retransmitted"
 845 */
 846static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
 847{
 848	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
 849}
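/* Worked example (illustrative): with packets_out = 10, sacked_out = 2,
 * lost_out = 1 and retrans_out = 1, tcp_left_out() is 2 + 1 = 3 and
 * tcp_packets_in_flight() is 10 - 3 + 1 = 8 segments still in the network.
 */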
 850
 851#define TCP_INFINITE_SSTHRESH	0x7fffffff
 852
 853static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
 854{
 855	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
 856}
 857
 858/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 859 * The exception is rate halving phase, when cwnd is decreasing towards
 860 * ssthresh.
 861 */
 862static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 863{
 864	const struct tcp_sock *tp = tcp_sk(sk);
 865
 866	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
 867		return tp->snd_ssthresh;
 868	else
 869		return max(tp->snd_ssthresh,
 870			   ((tp->snd_cwnd >> 1) +
 871			    (tp->snd_cwnd >> 2)));
 872}
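/* Worked example (illustrative): outside CWR/Recovery with snd_cwnd = 16,
 * (16 >> 1) + (16 >> 2) = 8 + 4 = 12, i.e. ssthresh is raised to roughly
 * three quarters of cwnd whenever that exceeds the current snd_ssthresh.
 */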
 873
 874/* Use a define here intentionally so the WARN_ON location is shown at the caller */
 875#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
 876
 877extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
 878extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
 879
 880/* The maximum number of MSS of available cwnd for which TSO defers
 881 * sending if not using sysctl_tcp_tso_win_divisor.
 882 */
 883static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
 884{
 885	return 3;
 886}
 887
 888/* Slow start with delayed ACKs produces a burst of 3 packets, so
 889 * it is safe "de facto".  This will be the default - same as
 890 * the default reordering threshold - but if reordering increases,
 891 * we must be able to allow cwnd to burst at least this much in order
 892 * to not pull it back when holes are filled.
 893 */
 894static inline __u32 tcp_max_burst(const struct tcp_sock *tp)
 895{
 896	return tp->reordering;
 897}
 898
 899/* Returns end sequence number of the receiver's advertised window */
 900static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
 901{
 902	return tp->snd_una + tp->snd_wnd;
 903}
 904extern bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
 905
 906static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
 907				       const struct sk_buff *skb)
 908{
 909	if (skb->len < mss)
 910		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
 911}
 912
 913static inline void tcp_check_probe_timer(struct sock *sk)
 914{
 915	const struct tcp_sock *tp = tcp_sk(sk);
 916	const struct inet_connection_sock *icsk = inet_csk(sk);
 917
 918	if (!tp->packets_out && !icsk->icsk_pending)
 919		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
 920					  icsk->icsk_rto, TCP_RTO_MAX);
 921}
 922
 923static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
 924{
 925	tp->snd_wl1 = seq;
 926}
 927
 928static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
 929{
 930	tp->snd_wl1 = seq;
 931}
 932
 933/*
 934 * Calculate (or check) the TCP checksum
 935 */
 936static inline __sum16 tcp_v4_check(int len, __be32 saddr,
 937				   __be32 daddr, __wsum base)
 938{
 939	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
 940}
 941
 942static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
 943{
 944	return __skb_checksum_complete(skb);
 945}
 946
 947static inline bool tcp_checksum_complete(struct sk_buff *skb)
 948{
 949	return !skb_csum_unnecessary(skb) &&
 950		__tcp_checksum_complete(skb);
 951}
 952
 953/* Prequeue for VJ-style copy to user, combined with checksumming. */
 954
 955static inline void tcp_prequeue_init(struct tcp_sock *tp)
 956{
 957	tp->ucopy.task = NULL;
 958	tp->ucopy.len = 0;
 959	tp->ucopy.memory = 0;
 960	skb_queue_head_init(&tp->ucopy.prequeue);
 961#ifdef CONFIG_NET_DMA
 962	tp->ucopy.dma_chan = NULL;
 963	tp->ucopy.wakeup = 0;
 964	tp->ucopy.pinned_list = NULL;
 965	tp->ucopy.dma_cookie = 0;
 966#endif
 967}
 968
 969/* Packet is added to VJ-style prequeue for processing in process
 970 * context, if a reader task is waiting. Apparently, this exciting
 971 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 972 * failed somewhere. Latency? Burstiness? Well, at least now we will
 973 * see why it failed. 8)8)				  --ANK
 974 *
 975 * NOTE: is this not too big to inline?
 976 */
 977static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 978{
 979	struct tcp_sock *tp = tcp_sk(sk);
 980
 981	if (sysctl_tcp_low_latency || !tp->ucopy.task)
 982		return false;
 983
 984	__skb_queue_tail(&tp->ucopy.prequeue, skb);
 985	tp->ucopy.memory += skb->truesize;
 986	if (tp->ucopy.memory > sk->sk_rcvbuf) {
 987		struct sk_buff *skb1;
 988
 989		BUG_ON(sock_owned_by_user(sk));
 990
 991		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
 992			sk_backlog_rcv(sk, skb1);
 993			NET_INC_STATS_BH(sock_net(sk),
 994					 LINUX_MIB_TCPPREQUEUEDROPPED);
 995		}
 996
 997		tp->ucopy.memory = 0;
 998	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
 999		wake_up_interruptible_sync_poll(sk_sleep(sk),
1000					   POLLIN | POLLRDNORM | POLLRDBAND);
1001		if (!inet_csk_ack_scheduled(sk))
1002			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1003						  (3 * tcp_rto_min(sk)) / 4,
1004						  TCP_RTO_MAX);
1005	}
1006	return true;
1007}
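/* Usage sketch (illustrative, condensed from the IPv4 softirq receive
 * path): when no user task owns the socket, the packet is offered to the
 * prequeue first and processed directly only if that is refused.
 *
 *	if (!sock_owned_by_user(sk)) {
 *		if (!tcp_prequeue(sk, skb))
 *			ret = tcp_v4_do_rcv(sk, skb);
 *	}
 */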
1008
1009
1010#undef STATE_TRACE
1011
1012#ifdef STATE_TRACE
1013static const char *statename[] = {
1014	"Unused","Established","Syn Sent","Syn Recv",
1015	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
1016	"Close Wait","Last ACK","Listen","Closing"
1017};
1018#endif
1019extern void tcp_set_state(struct sock *sk, int state);
1020
1021extern void tcp_done(struct sock *sk);
1022
1023static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
1024{
1025	rx_opt->dsack = 0;
1026	rx_opt->num_sacks = 0;
1027}
1028
1029/* Determine a window scaling and initial window to offer. */
1030extern void tcp_select_initial_window(int __space, __u32 mss,
1031				      __u32 *rcv_wnd, __u32 *window_clamp,
1032				      int wscale_ok, __u8 *rcv_wscale,
1033				      __u32 init_rcv_wnd);
1034
1035static inline int tcp_win_from_space(int space)
1036{
1037	return sysctl_tcp_adv_win_scale <= 0 ?
1038		(space >> (-sysctl_tcp_adv_win_scale)) :
1039		space - (space >> sysctl_tcp_adv_win_scale);
1040}
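/* Worked example (illustrative): with the historical default of
 * sysctl_tcp_adv_win_scale = 2 and space = 65536 bytes,
 * tcp_win_from_space() returns 65536 - (65536 >> 2) = 49152 bytes,
 * i.e. one quarter of the buffer is reserved for metadata overhead.
 */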
1041
1042/* Note: caller must be prepared to deal with negative returns */
1043static inline int tcp_space(const struct sock *sk)
1044{
1045	return tcp_win_from_space(sk->sk_rcvbuf -
1046				  atomic_read(&sk->sk_rmem_alloc));
1047}
1048
1049static inline int tcp_full_space(const struct sock *sk)
1050{
1051	return tcp_win_from_space(sk->sk_rcvbuf);
1052}
1053
1054static inline void tcp_openreq_init(struct request_sock *req,
1055				    struct tcp_options_received *rx_opt,
1056				    struct sk_buff *skb)
1057{
1058	struct inet_request_sock *ireq = inet_rsk(req);
1059
1060	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
1061	req->cookie_ts = 0;
1062	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
1063	req->mss = rx_opt->mss_clamp;
1064	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
1065	ireq->tstamp_ok = rx_opt->tstamp_ok;
1066	ireq->sack_ok = rx_opt->sack_ok;
1067	ireq->snd_wscale = rx_opt->snd_wscale;
1068	ireq->wscale_ok = rx_opt->wscale_ok;
1069	ireq->acked = 0;
1070	ireq->ecn_ok = 0;
1071	ireq->rmt_port = tcp_hdr(skb)->source;
1072	ireq->loc_port = tcp_hdr(skb)->dest;
1073}
1074
1075extern void tcp_enter_memory_pressure(struct sock *sk);
1076
1077static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1078{
1079	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
1080}
1081
1082static inline int keepalive_time_when(const struct tcp_sock *tp)
1083{
1084	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
1085}
1086
1087static inline int keepalive_probes(const struct tcp_sock *tp)
1088{
1089	return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
1090}
1091
1092static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1093{
1094	const struct inet_connection_sock *icsk = &tp->inet_conn;
1095
1096	return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
1097			  tcp_time_stamp - tp->rcv_tstamp);
1098}
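/* Usage sketch (illustrative, condensed from the keepalive timer): the
 * helpers above drive the probing state machine roughly as follows.
 *
 *	u32 elapsed = keepalive_time_elapsed(tp);
 *
 *	if (elapsed >= keepalive_time_when(tp))
 *		... send up to keepalive_probes(tp) probes, spaced
 *		    keepalive_intvl_when(tp) apart ...
 *	else
 *		... re-arm the timer for keepalive_time_when(tp) - elapsed ...
 */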
1099
1100static inline int tcp_fin_time(const struct sock *sk)
1101{
1102	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
1103	const int rto = inet_csk(sk)->icsk_rto;
1104
1105	if (fin_timeout < (rto << 2) - (rto >> 1))
1106		fin_timeout = (rto << 2) - (rto >> 1);
1107
1108	return fin_timeout;
1109}
1110
1111static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1112				  int paws_win)
1113{
1114	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1115		return true;
1116	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
1117		return true;
1118	/*
1119	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
1120	 * while subsequent TCP messages carry valid values. Ignore a 0 value,
1121	 * or else a 'negative' tsval might forbid us from accepting their packets.
1122	 */
1123	if (!rx_opt->ts_recent)
1124		return true;
1125	return false;
1126}
1127
1128static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1129				   int rst)
1130{
1131	if (tcp_paws_check(rx_opt, 0))
1132		return false;
1133
1134	/* RST segments are not recommended to carry a timestamp,
1135	   and, if they do, it is recommended to ignore PAWS because
1136	   "their cleanup function should take precedence over timestamps."
1137	   Certainly, this is a mistake. It is necessary to understand the
1138	   reasons for this constraint before relaxing it: if the peer reboots,
1139	   its clock may go out-of-sync and half-open connections will not be reset.
1140	   Actually, the problem would not exist if all
1141	   the implementations followed the draft about maintaining clocks
1142	   across reboots. Linux-2.2 DOES NOT!
1143
1144	   However, we can relax time bounds for RST segments to MSL.
1145	 */
1146	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
1147		return false;
1148	return true;
1149}
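/* Usage sketch (illustrative, condensed from TIME-WAIT segment
 * processing): a timestamped segment that fails the check above is
 * discarded as a PAWS reject.
 *
 *	if (tmp_opt.saw_tstamp && tcp_paws_reject(&tmp_opt, th->rst))
 *		... drop the segment (and possibly send a dup ACK) ...
 */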
1150
1151static inline void tcp_mib_init(struct net *net)
1152{
1153	/* See RFC 2012 */
1154	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
1155	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1156	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1157	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
1158}
1159
1160/* from STCP */
1161static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1162{
1163	tp->lost_skb_hint = NULL;
1164	tp->scoreboard_skb_hint = NULL;
1165}
1166
1167static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1168{
1169	tcp_clear_retrans_hints_partial(tp);
1170	tp->retransmit_skb_hint = NULL;
1171}
1172
1173/* MD5 Signature */
1174struct crypto_hash;
1175
1176union tcp_md5_addr {
1177	struct in_addr  a4;
1178#if IS_ENABLED(CONFIG_IPV6)
1179	struct in6_addr	a6;
1180#endif
1181};
1182
1183/* - key database */
1184struct tcp_md5sig_key {
1185	struct hlist_node	node;
1186	u8			keylen;
1187	u8			family; /* AF_INET or AF_INET6 */
1188	union tcp_md5_addr	addr;
1189	u8			key[TCP_MD5SIG_MAXKEYLEN];
1190	struct rcu_head		rcu;
1191};
1192
1193/* - sock block */
1194struct tcp_md5sig_info {
1195	struct hlist_head	head;
1196	struct rcu_head		rcu;
1197};
1198
1199/* - pseudo header */
1200struct tcp4_pseudohdr {
1201	__be32		saddr;
1202	__be32		daddr;
1203	__u8		pad;
1204	__u8		protocol;
1205	__be16		len;
1206};
1207
1208struct tcp6_pseudohdr {
1209	struct in6_addr	saddr;
1210	struct in6_addr daddr;
1211	__be32		len;
1212	__be32		protocol;	/* including padding */
1213};
1214
1215union tcp_md5sum_block {
1216	struct tcp4_pseudohdr ip4;
1217#if IS_ENABLED(CONFIG_IPV6)
1218	struct tcp6_pseudohdr ip6;
1219#endif
1220};
1221
1222/* - pool: digest algorithm, hash description and scratch buffer */
1223struct tcp_md5sig_pool {
1224	struct hash_desc	md5_desc;
1225	union tcp_md5sum_block	md5_blk;
1226};
1227
1228/* - functions */
1229extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1230			       const struct sock *sk,
1231			       const struct request_sock *req,
1232			       const struct sk_buff *skb);
1233extern int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1234			  int family, const u8 *newkey,
1235			  u8 newkeylen, gfp_t gfp);
1236extern int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1237			  int family);
1238extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
1239					 struct sock *addr_sk);
1240
1241#ifdef CONFIG_TCP_MD5SIG
1242extern struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
1243			const union tcp_md5_addr *addr, int family);
1244#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
1245#else
1246static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
1247					 const union tcp_md5_addr *addr,
1248					 int family)
1249{
1250	return NULL;
1251}
1252#define tcp_twsk_md5_key(twsk)	NULL
1253#endif
1254
1255extern struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *);
1256extern void tcp_free_md5sig_pool(void);
1257
1258extern struct tcp_md5sig_pool	*tcp_get_md5sig_pool(void);
1259extern void tcp_put_md5sig_pool(void);
1260
1261extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
1262extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
1263				 unsigned int header_len);
1264extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1265			    const struct tcp_md5sig_key *key);
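/* Usage sketch (illustrative, condensed from tcp_v4_md5_hash_skb()): a
 * signature is computed by combining the primitives above around the
 * per-cpu pool.
 *
 *	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
 *
 *	if (!hp)
 *		return 1;
 *	if (crypto_hash_init(&hp->md5_desc) ||
 *	    tcp_md5_hash_header(hp, th) ||
 *	    tcp_md5_hash_skb_data(hp, skb, th->doff << 2) ||
 *	    tcp_md5_hash_key(hp, key) ||
 *	    crypto_hash_final(&hp->md5_desc, md5_hash))
 *		... bail out and clear the hash ...
 *	tcp_put_md5sig_pool();
 */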
1266
1267/* write queue abstraction */
1268static inline void tcp_write_queue_purge(struct sock *sk)
1269{
1270	struct sk_buff *skb;
1271
1272	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
1273		sk_wmem_free_skb(sk, skb);
1274	sk_mem_reclaim(sk);
1275	tcp_clear_all_retrans_hints(tcp_sk(sk));
1276}
1277
1278static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
1279{
1280	return skb_peek(&sk->sk_write_queue);
1281}
1282
1283static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1284{
1285	return skb_peek_tail(&sk->sk_write_queue);
1286}
1287
1288static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
1289						   const struct sk_buff *skb)
1290{
1291	return skb_queue_next(&sk->sk_write_queue, skb);
1292}
1293
1294static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
1295						   const struct sk_buff *skb)
1296{
1297	return skb_queue_prev(&sk->sk_write_queue, skb);
1298}
1299
1300#define tcp_for_write_queue(skb, sk)					\
1301	skb_queue_walk(&(sk)->sk_write_queue, skb)
1302
1303#define tcp_for_write_queue_from(skb, sk)				\
1304	skb_queue_walk_from(&(sk)->sk_write_queue, skb)
1305
1306#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
1307	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
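/* Usage sketch (illustrative): walking only the already-transmitted part
 * of the write queue, the common pattern in the loss detection code.
 *
 *	struct sk_buff *skb;
 *
 *	tcp_for_write_queue(skb, sk) {
 *		if (skb == tcp_send_head(sk))
 *			break;		(unsent data starts here)
 *		... inspect TCP_SKB_CB(skb)->seq and ->sacked ...
 *	}
 */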
1308
1309static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1310{
1311	return sk->sk_send_head;
1312}
1313
1314static inline bool tcp_skb_is_last(const struct sock *sk,
1315				   const struct sk_buff *skb)
1316{
1317	return skb_queue_is_last(&sk->sk_write_queue, skb);
1318}
1319
1320static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
1321{
1322	if (tcp_skb_is_last(sk, skb))
1323		sk->sk_send_head = NULL;
1324	else
1325		sk->sk_send_head = tcp_write_queue_next(sk, skb);
1326}
1327
1328static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
1329{
1330	if (sk->sk_send_head == skb_unlinked)
1331		sk->sk_send_head = NULL;
1332}
1333
1334static inline void tcp_init_send_head(struct sock *sk)
1335{
1336	sk->sk_send_head = NULL;
1337}
1338
1339static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1340{
1341	__skb_queue_tail(&sk->sk_write_queue, skb);
1342}
1343
1344static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1345{
1346	__tcp_add_write_queue_tail(sk, skb);
1347
1348	/* Queue it, remembering where we must start sending. */
1349	if (sk->sk_send_head == NULL) {
1350		sk->sk_send_head = skb;
1351
1352		if (tcp_sk(sk)->highest_sack == NULL)
1353			tcp_sk(sk)->highest_sack = skb;
1354	}
1355}
1356
1357static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
1358{
1359	__skb_queue_head(&sk->sk_write_queue, skb);
1360}
1361
1362/* Insert buff after skb on the write queue of sk.  */
1363static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
1364						struct sk_buff *buff,
1365						struct sock *sk)
1366{
1367	__skb_queue_after(&sk->sk_write_queue, skb, buff);
1368}
1369
1370/* Insert new before skb on the write queue of sk.  */
1371static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1372						  struct sk_buff *skb,
1373						  struct sock *sk)
1374{
1375	__skb_queue_before(&sk->sk_write_queue, skb, new);
1376
1377	if (sk->sk_send_head == skb)
1378		sk->sk_send_head = new;
1379}
1380
1381static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1382{
1383	__skb_unlink(skb, &sk->sk_write_queue);
1384}
1385
1386static inline bool tcp_write_queue_empty(struct sock *sk)
1387{
1388	return skb_queue_empty(&sk->sk_write_queue);
1389}
1390
1391static inline void tcp_push_pending_frames(struct sock *sk)
1392{
1393	if (tcp_send_head(sk)) {
1394		struct tcp_sock *tp = tcp_sk(sk);
1395
1396		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
1397	}
1398}
1399
1400/* Start sequence of the skb just after the highest skb with SACKed
1401 * bit, valid only if sacked_out > 0 or when the caller has otherwise
1402 * ensured validity.
1403 */
1404static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1405{
1406	if (!tp->sacked_out)
1407		return tp->snd_una;
1408
1409	if (tp->highest_sack == NULL)
1410		return tp->snd_nxt;
1411
1412	return TCP_SKB_CB(tp->highest_sack)->seq;
1413}
1414
1415static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
1416{
1417	tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
1418						tcp_write_queue_next(sk, skb);
1419}
1420
1421static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
1422{
1423	return tcp_sk(sk)->highest_sack;
1424}
1425
1426static inline void tcp_highest_sack_reset(struct sock *sk)
1427{
1428	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
1429}
1430
1431/* Called when old skb is about to be deleted (to be combined with new skb) */
1432static inline void tcp_highest_sack_combine(struct sock *sk,
1433					    struct sk_buff *old,
1434					    struct sk_buff *new)
1435{
1436	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
1437		tcp_sk(sk)->highest_sack = new;
1438}
1439
1440/* Determines whether this is a thin stream (which may suffer from
1441 * increased latency). Used to trigger latency-reducing mechanisms.
1442 */
1443static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
1444{
1445	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
1446}
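/* Worked example (illustrative): a flow with packets_out = 3 that has
 * already left initial slow start counts as thin, so latency-reducing
 * mechanisms (e.g. thin linear timeouts) may kick in; a new bulk flow
 * still in initial slow start never does, however few packets it has
 * in flight so far.
 */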
1447
1448/* /proc */
1449enum tcp_seq_states {
1450	TCP_SEQ_STATE_LISTENING,
1451	TCP_SEQ_STATE_OPENREQ,
1452	TCP_SEQ_STATE_ESTABLISHED,
1453	TCP_SEQ_STATE_TIME_WAIT,
1454};
1455
1456int tcp_seq_open(struct inode *inode, struct file *file);
1457
1458struct tcp_seq_afinfo {
1459	char				*name;
1460	sa_family_t			family;
1461	const struct file_operations	*seq_fops;
1462	struct seq_operations		seq_ops;
1463};
1464
1465struct tcp_iter_state {
1466	struct seq_net_private	p;
1467	sa_family_t		family;
1468	enum tcp_seq_states	state;
1469	struct sock		*syn_wait_sk;
1470	int			bucket, offset, sbucket, num, uid;
1471	loff_t			last_pos;
1472};
1473
1474extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
1475extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
1476
1477extern struct request_sock_ops tcp_request_sock_ops;
1478extern struct request_sock_ops tcp6_request_sock_ops;
1479
1480extern void tcp_v4_destroy_sock(struct sock *sk);
1481
1482extern int tcp_v4_gso_send_check(struct sk_buff *skb);
1483extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
1484				       netdev_features_t features);
1485extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
1486					struct sk_buff *skb);
1487extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
1488					 struct sk_buff *skb);
1489extern int tcp_gro_complete(struct sk_buff *skb);
1490extern int tcp4_gro_complete(struct sk_buff *skb);
1491
1492#ifdef CONFIG_PROC_FS
1493extern int tcp4_proc_init(void);
1494extern void tcp4_proc_exit(void);
1495#endif
1496
1497/* TCP af-specific functions */
1498struct tcp_sock_af_ops {
1499#ifdef CONFIG_TCP_MD5SIG
1500	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
1501						struct sock *addr_sk);
1502	int			(*calc_md5_hash) (char *location,
1503						  struct tcp_md5sig_key *md5,
1504						  const struct sock *sk,
1505						  const struct request_sock *req,
1506						  const struct sk_buff *skb);
1507	int			(*md5_parse) (struct sock *sk,
1508					      char __user *optval,
1509					      int optlen);
1510#endif
1511};
1512
1513struct tcp_request_sock_ops {
1514#ifdef CONFIG_TCP_MD5SIG
1515	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
1516						struct request_sock *req);
1517	int			(*calc_md5_hash) (char *location,
1518						  struct tcp_md5sig_key *md5,
1519						  const struct sock *sk,
1520						  const struct request_sock *req,
1521						  const struct sk_buff *skb);
1522#endif
1523};
1524
1525/* Using SHA1 for now, define some constants.
1526 */
1527#define COOKIE_DIGEST_WORDS (SHA_DIGEST_WORDS)
1528#define COOKIE_MESSAGE_WORDS (SHA_MESSAGE_BYTES / 4)
1529#define COOKIE_WORKSPACE_WORDS (COOKIE_DIGEST_WORDS + COOKIE_MESSAGE_WORDS)
1530
1531extern int tcp_cookie_generator(u32 *bakery);
1532
1533/**
1534 *	struct tcp_cookie_values - each socket needs extra space for the
1535 *	cookies, together with (optional) space for any SYN data.
1536 *
1537 *	A tcp_sock contains a pointer to the current value, and this is
1538 *	cloned to the tcp_timewait_sock.
1539 *
1540 * @cookie_pair:	variable data from the option exchange.
1541 *
1542 * @cookie_desired:	user specified tcpct_cookie_desired.  Zero
1543 *			indicates default (sysctl_tcp_cookie_size).
1544 *			After cookie sent, remembers size of cookie.
1545 *			Range 0, TCP_COOKIE_MIN to TCP_COOKIE_MAX.
1546 *
1547 * @s_data_desired:	user specified tcpct_s_data_desired.  When the
1548 *			constant payload is specified (@s_data_constant),
1549 *			holds its length instead.
1550 *			Range 0 to TCP_MSS_DESIRED.
1551 *
1552 * @s_data_payload:	constant data that is to be included in the
1553 *			payload of SYN or SYNACK segments when the
1554 *			cookie option is present.
1555 */
1556struct tcp_cookie_values {
1557	struct kref	kref;
1558	u8		cookie_pair[TCP_COOKIE_PAIR_SIZE];
1559	u8		cookie_pair_size;
1560	u8		cookie_desired;
1561	u16		s_data_desired:11,
1562			s_data_constant:1,
1563			s_data_in:1,
1564			s_data_out:1,
1565			s_data_unused:2;
1566	u8		s_data_payload[0];
1567};
1568
1569static inline void tcp_cookie_values_release(struct kref *kref)
1570{
1571	kfree(container_of(kref, struct tcp_cookie_values, kref));
1572}
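/* Usage sketch (illustrative): the values are reference counted; the
 * last user drops them with kref_put() and the release function above.
 *
 *	if (tp->cookie_values)
 *		kref_put(&tp->cookie_values->kref, tcp_cookie_values_release);
 */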
1573
1574/* The length of constant payload data.  Note that s_data_desired is
1575 * overloaded, depending on s_data_constant: either the length of constant
1576 * data (returned here) or the limit on variable data.
1577 */
1578static inline int tcp_s_data_size(const struct tcp_sock *tp)
1579{
1580	return (tp->cookie_values != NULL && tp->cookie_values->s_data_constant)
1581		? tp->cookie_values->s_data_desired
1582		: 0;
1583}
1584
1585/**
1586 *	struct tcp_extend_values - tcp_ipv?.c to tcp_output.c workspace.
1587 *
1588 *	As tcp_request_sock has already been extended in other places, the
1589 *	only remaining method is to pass stack values along as function
1590 *	parameters.  These parameters are not needed after sending SYNACK.
1591 *
1592 * @cookie_bakery:	cryptographic secret and message workspace.
1593 *
1594 * @cookie_plus:	bytes in authenticator/cookie option, copied from
1595 *			struct tcp_options_received (above).
1596 */
1597struct tcp_extend_values {
1598	struct request_values		rv;
1599	u32				cookie_bakery[COOKIE_WORKSPACE_WORDS];
1600	u8				cookie_plus:6,
1601					cookie_out_never:1,
1602					cookie_in_always:1;
1603};
1604
1605static inline struct tcp_extend_values *tcp_xv(struct request_values *rvp)
1606{
1607	return (struct tcp_extend_values *)rvp;
1608}
1609
1610extern void tcp_v4_init(void);
1611extern void tcp_init(void);
1612
1613#endif	/* _TCP_H */