   1/* SPDX-License-Identifier: GPL-2.0-or-later */
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		Definitions for the TCP module.
   8 *
   9 * Version:	@(#)tcp.h	1.0.5	05/23/93
  10 *
  11 * Authors:	Ross Biro
  12 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  13 */
  14#ifndef _TCP_H
  15#define _TCP_H
  16
  17#define FASTRETRANS_DEBUG 1
  18
  19#include <linux/list.h>
  20#include <linux/tcp.h>
  21#include <linux/bug.h>
  22#include <linux/slab.h>
  23#include <linux/cache.h>
  24#include <linux/percpu.h>
  25#include <linux/skbuff.h>
  26#include <linux/kref.h>
  27#include <linux/ktime.h>
  28#include <linux/indirect_call_wrapper.h>
  29
  30#include <net/inet_connection_sock.h>
  31#include <net/inet_timewait_sock.h>
  32#include <net/inet_hashtables.h>
  33#include <net/checksum.h>
  34#include <net/request_sock.h>
  35#include <net/sock_reuseport.h>
  36#include <net/sock.h>
  37#include <net/snmp.h>
  38#include <net/ip.h>
  39#include <net/tcp_states.h>
  40#include <net/inet_ecn.h>
  41#include <net/dst.h>
  42#include <net/mptcp.h>
  43
  44#include <linux/seq_file.h>
  45#include <linux/memcontrol.h>
  46#include <linux/bpf-cgroup.h>
  47#include <linux/siphash.h>
  48
  49extern struct inet_hashinfo tcp_hashinfo;
  50
  51extern struct percpu_counter tcp_orphan_count;
  52void tcp_time_wait(struct sock *sk, int state, int timeo);
  53
  54#define MAX_TCP_HEADER	L1_CACHE_ALIGN(128 + MAX_HEADER)
  55#define MAX_TCP_OPTION_SPACE 40
  56#define TCP_MIN_SND_MSS		48
  57#define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
  58
  59/*
  60 * Never offer a window over 32767 without using window scaling. Some
  61 * poor stacks do signed 16bit maths!
  62 */
  63#define MAX_TCP_WINDOW		32767U
  64
  65/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
  66#define TCP_MIN_MSS		88U
  67
  68/* The initial MTU to use for probing */
  69#define TCP_BASE_MSS		1024
  70
  71/* Probing interval, defaults to 10 minutes as per RFC4821 */
  72#define TCP_PROBE_INTERVAL	600
  73
  74/* MTU probing stops once the probe size search range is narrower than this (bytes) */
  75#define TCP_PROBE_THRESHOLD	8
  76
  77/* After receiving this number of duplicate ACKs, fast retransmit starts. */
  78#define TCP_FASTRETRANS_THRESH 3
  79
  80/* Maximal number of ACKs sent quickly to accelerate slow-start. */
  81#define TCP_MAX_QUICKACKS	16U
  82
  83/* Maximal window scale (shift count) according to RFC1323 */
  84#define TCP_MAX_WSCALE		14U
  85
  86/* urg_data states */
  87#define TCP_URG_VALID	0x0100
  88#define TCP_URG_NOTYET	0x0200
  89#define TCP_URG_READ	0x0400
  90
  91#define TCP_RETR1	3	/*
  92				 * This is how many retries it does before it
  93				 * tries to figure out if the gateway is
  94				 * down. Minimal RFC value is 3; it corresponds
  95				 * to ~3sec-8min depending on RTO.
  96				 */
  97
  98#define TCP_RETR2	15	/*
  99				 * This should take at least
 100				 * 90 minutes to time out.
 101				 * RFC1122 says that the limit is 100 sec.
 102				 * 15 is ~13-30min depending on RTO.
 103				 */
 104
 105#define TCP_SYN_RETRIES	 6	/* This is how many retries are done
 106				 * when actively opening a connection.
 107				 * RFC1122 says the minimum retry MUST
 108				 * be at least 180secs.  Nevertheless
 109				 * this value corresponds to
 110				 * 63secs of retransmission with the
 111				 * current initial RTO.
 112				 */
 113
 114#define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
 115				 * when passively opening a connection.
 116				 * This corresponds to 31secs of
 117				 * retransmission with the current
 118				 * initial RTO.
 119				 */
 120
 121#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
 122				  * state, about 60 seconds	*/
 123#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
 124                                 /* BSD style FIN_WAIT2 deadlock breaker.
 125				  * It used to be 3min, new value is 60sec,
 126				  * to combine FIN-WAIT-2 timeout with
 127				  * TIME-WAIT timer.
 128				  */
 129#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */
 130
 131#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
 132#if HZ >= 100
 133#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
 134#define TCP_ATO_MIN	((unsigned)(HZ/25))
 135#else
 136#define TCP_DELACK_MIN	4U
 137#define TCP_ATO_MIN	4U
 138#endif
 139#define TCP_RTO_MAX	((unsigned)(120*HZ))
 140#define TCP_RTO_MIN	((unsigned)(HZ/5))
 141#define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */
 142#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
 143#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
 144						 * used as a fallback RTO for the
 145						 * initial data transmission if no
 146						 * valid RTT sample has been acquired,
 147						 * most likely due to retrans in 3WHS.
 148						 */
 149
 150#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
 151					                 * for local resources.
 152					                 */
 153#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
 154#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
 155#define TCP_KEEPALIVE_INTVL	(75*HZ)
 156
 157#define MAX_TCP_KEEPIDLE	32767
 158#define MAX_TCP_KEEPINTVL	32767
 159#define MAX_TCP_KEEPCNT		127
 160#define MAX_TCP_SYNCNT		127
 161
 162#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
 163
 164#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
 165#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
 166					 * after this time. It should be equal
 167					 * to (or greater than) TCP_TIMEWAIT_LEN
 168					 * to provide reliability equal to that
 169					 * provided by the timewait state.
 170					 */
 171#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
 172					 * timestamps. It must be less than
 173					 * minimal timewait lifetime.
 174					 */
 175/*
 176 *	TCP option
 177 */
 178
 179#define TCPOPT_NOP		1	/* Padding */
 180#define TCPOPT_EOL		0	/* End of options */
 181#define TCPOPT_MSS		2	/* Segment size negotiating */
 182#define TCPOPT_WINDOW		3	/* Window scaling */
 183#define TCPOPT_SACK_PERM        4       /* SACK Permitted */
 184#define TCPOPT_SACK             5       /* SACK Block */
 185#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
 186#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
 187#define TCPOPT_MPTCP		30	/* Multipath TCP (RFC6824) */
 188#define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
 189#define TCPOPT_EXP		254	/* Experimental */
 190/* Magic number that follows the option kind and length when sharing the TCP
 191 * experimental option space. See draft-ietf-tcpm-experimental-options-00.txt
 192 */
 193#define TCPOPT_FASTOPEN_MAGIC	0xF989
 194#define TCPOPT_SMC_MAGIC	0xE2D4C3D9
 195
 196/*
 197 *     TCP option lengths
 198 */
 199
 200#define TCPOLEN_MSS            4
 201#define TCPOLEN_WINDOW         3
 202#define TCPOLEN_SACK_PERM      2
 203#define TCPOLEN_TIMESTAMP      10
 204#define TCPOLEN_MD5SIG         18
 205#define TCPOLEN_FASTOPEN_BASE  2
 206#define TCPOLEN_EXP_FASTOPEN_BASE  4
 207#define TCPOLEN_EXP_SMC_BASE   6
 208
 209/* But this is what stacks really send out. */
 210#define TCPOLEN_TSTAMP_ALIGNED		12
 211#define TCPOLEN_WSCALE_ALIGNED		4
 212#define TCPOLEN_SACKPERM_ALIGNED	4
 213#define TCPOLEN_SACK_BASE		2
 214#define TCPOLEN_SACK_BASE_ALIGNED	4
 215#define TCPOLEN_SACK_PERBLOCK		8
 216#define TCPOLEN_MD5SIG_ALIGNED		20
 217#define TCPOLEN_MSS_ALIGNED		4
 218#define TCPOLEN_EXP_SMC_BASE_ALIGNED	8
 219
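/* Worked example (illustrative sketch, not used by the stack): option space
 * consumed by a hypothetical SYN carrying MSS, window scale, SACK-permitted
 * and timestamp options, each padded to 32-bit alignment as above.
 */
static inline unsigned int tcp_example_syn_option_space(void)
{
	unsigned int used = TCPOLEN_MSS_ALIGNED +	/*  4 bytes */
			    TCPOLEN_WSCALE_ALIGNED +	/*  4 bytes */
			    TCPOLEN_SACKPERM_ALIGNED +	/*  4 bytes */
			    TCPOLEN_TSTAMP_ALIGNED;	/* 12 bytes */

	/* 24 bytes used; 16 of MAX_TCP_OPTION_SPACE (40) remain. */
	return MAX_TCP_OPTION_SPACE - used;
}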
 220/* Flags in tp->nonagle */
 221#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
 222#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
 223#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */
 224
 225/* TCP thin-stream limits */
 226#define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */
 227
 228/* TCP initial congestion window as per rfc6928 */
 229#define TCP_INIT_CWND		10
 230
 231/* Bit Flags for sysctl_tcp_fastopen */
 232#define	TFO_CLIENT_ENABLE	1
 233#define	TFO_SERVER_ENABLE	2
 234#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */
 235
 236/* Accept SYN data w/o any cookie option */
 237#define	TFO_SERVER_COOKIE_NOT_REQD	0x200
 238
 239/* Force enable TFO on all listeners, i.e., not requiring the
 240 * TCP_FASTOPEN socket option.
 241 */
 242#define	TFO_SERVER_WO_SOCKOPT1	0x400
 243
 244
 245/* sysctl variables for tcp */
 246extern int sysctl_tcp_max_orphans;
 247extern long sysctl_tcp_mem[3];
 248
 249#define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
 250#define TCP_RACK_STATIC_REO_WND  0x2 /* Use static RACK reo wnd */
 251#define TCP_RACK_NO_DUPTHRESH    0x4 /* Do not use DUPACK threshold in RACK */
 252
 253extern atomic_long_t tcp_memory_allocated;
 254extern struct percpu_counter tcp_sockets_allocated;
 255extern unsigned long tcp_memory_pressure;
 256
 257/* optimized version of sk_under_memory_pressure() for TCP sockets */
 258static inline bool tcp_under_memory_pressure(const struct sock *sk)
 259{
 260	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 261	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 262		return true;
 263
 264	return READ_ONCE(tcp_memory_pressure);
 265}
 266/*
 267 * The next routines deal with comparing 32 bit unsigned ints
 268 * and worry about wraparound (automatic with unsigned arithmetic).
 269 */
 270
 271static inline bool before(__u32 seq1, __u32 seq2)
 272{
 273        return (__s32)(seq1-seq2) < 0;
 274}
 275#define after(seq2, seq1) 	before(seq1, seq2)
 276
 277/* is s2<=s1<=s3 ? */
 278static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
 279{
 280	return seq3 - seq2 >= seq1 - seq2;
 281}
 282
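/* Illustrative sketch (not part of the original header): how the helpers
 * above behave across a 32-bit sequence number wraparound. The values are
 * made up for the example.
 */
static inline void tcp_seq_cmp_example(void)
{
	__u32 a = 0xfffffff0U;	/* just below the wrap point */
	__u32 b = 0x00000010U;	/* just past the wrap point  */

	/* (__s32)(a - b) is negative, so "a" is considered earlier. */
	WARN_ON(!before(a, b));
	WARN_ON(!after(b, a));
	/* 0 lies between a and b modulo 2^32. */
	WARN_ON(!between(0, a, b));
}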
 283static inline bool tcp_out_of_memory(struct sock *sk)
 284{
 285	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
 286	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
 287		return true;
 288	return false;
 289}
 290
 291void sk_forced_mem_schedule(struct sock *sk, int size);
 292
 293static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
 294{
 295	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
 296	int orphans = percpu_counter_read_positive(ocp);
 297
 298	if (orphans << shift > sysctl_tcp_max_orphans) {
 299		orphans = percpu_counter_sum_positive(ocp);
 300		if (orphans << shift > sysctl_tcp_max_orphans)
 301			return true;
 302	}
 303	return false;
 304}
 305
 306bool tcp_check_oom(struct sock *sk, int shift);
 307
 308
 309extern struct proto tcp_prot;
 310
 311#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
 312#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
 313#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
 314#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
 315
 316void tcp_tasklet_init(void);
 317
 318int tcp_v4_err(struct sk_buff *skb, u32);
 319
 320void tcp_shutdown(struct sock *sk, int how);
 321
 322int tcp_v4_early_demux(struct sk_buff *skb);
 323int tcp_v4_rcv(struct sk_buff *skb);
 324
 325int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
 326int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 327int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
 328int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
 329		 int flags);
 330int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
 331			size_t size, int flags);
 332ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
 333		 size_t size, int flags);
 334int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
 335void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
 336	      int size_goal);
 337void tcp_release_cb(struct sock *sk);
 338void tcp_wfree(struct sk_buff *skb);
 339void tcp_write_timer_handler(struct sock *sk);
 340void tcp_delack_timer_handler(struct sock *sk);
 341int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 342int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
 343void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
 344void tcp_rcv_space_adjust(struct sock *sk);
 345int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
 346void tcp_twsk_destructor(struct sock *sk);
 347ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
 348			struct pipe_inode_info *pipe, size_t len,
 349			unsigned int flags);
 350
 351void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
 352static inline void tcp_dec_quickack_mode(struct sock *sk,
 353					 const unsigned int pkts)
 354{
 355	struct inet_connection_sock *icsk = inet_csk(sk);
 356
 357	if (icsk->icsk_ack.quick) {
 358		if (pkts >= icsk->icsk_ack.quick) {
 359			icsk->icsk_ack.quick = 0;
 360			/* Leaving quickack mode we deflate ATO. */
 361			icsk->icsk_ack.ato   = TCP_ATO_MIN;
 362		} else
 363			icsk->icsk_ack.quick -= pkts;
 364	}
 365}
 366
 367#define	TCP_ECN_OK		1
 368#define	TCP_ECN_QUEUE_CWR	2
 369#define	TCP_ECN_DEMAND_CWR	4
 370#define	TCP_ECN_SEEN		8
 371
 372enum tcp_tw_status {
 373	TCP_TW_SUCCESS = 0,
 374	TCP_TW_RST = 1,
 375	TCP_TW_ACK = 2,
 376	TCP_TW_SYN = 3
 377};
 378
 379
 380enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
 381					      struct sk_buff *skb,
 382					      const struct tcphdr *th);
 383struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 384			   struct request_sock *req, bool fastopen,
 385			   bool *lost_race);
 386int tcp_child_process(struct sock *parent, struct sock *child,
 387		      struct sk_buff *skb);
 388void tcp_enter_loss(struct sock *sk);
 389void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
 390void tcp_clear_retrans(struct tcp_sock *tp);
 391void tcp_update_metrics(struct sock *sk);
 392void tcp_init_metrics(struct sock *sk);
 393void tcp_metrics_init(void);
 394bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
 395void tcp_close(struct sock *sk, long timeout);
 396void tcp_init_sock(struct sock *sk);
 397void tcp_init_transfer(struct sock *sk, int bpf_op);
 398__poll_t tcp_poll(struct file *file, struct socket *sock,
 399		      struct poll_table_struct *wait);
 400int tcp_getsockopt(struct sock *sk, int level, int optname,
 401		   char __user *optval, int __user *optlen);
 402int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
 403		   unsigned int optlen);
 404void tcp_set_keepalive(struct sock *sk, int val);
 405void tcp_syn_ack_timeout(const struct request_sock *req);
 406int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 407		int flags, int *addr_len);
 408int tcp_set_rcvlowat(struct sock *sk, int val);
 409void tcp_data_ready(struct sock *sk);
 410#ifdef CONFIG_MMU
 411int tcp_mmap(struct file *file, struct socket *sock,
 412	     struct vm_area_struct *vma);
 413#endif
 414void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
 415		       struct tcp_options_received *opt_rx,
 416		       int estab, struct tcp_fastopen_cookie *foc);
 417const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 418
 419/*
 420 *	BPF SKB-less helpers
 421 */
 422u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
 423			 struct tcphdr *th, u32 *cookie);
 424u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
 425			 struct tcphdr *th, u32 *cookie);
 426u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
 427			  const struct tcp_request_sock_ops *af_ops,
 428			  struct sock *sk, struct tcphdr *th);
 429/*
 430 *	TCP v4 functions exported for the inet6 API
 431 */
 432
 433void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 434void tcp_v4_mtu_reduced(struct sock *sk);
 435void tcp_req_err(struct sock *sk, u32 seq, bool abort);
 436void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
 437int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 438struct sock *tcp_create_openreq_child(const struct sock *sk,
 439				      struct request_sock *req,
 440				      struct sk_buff *skb);
 441void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
 442struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 443				  struct request_sock *req,
 444				  struct dst_entry *dst,
 445				  struct request_sock *req_unhash,
 446				  bool *own_req);
 447int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 448int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 449int tcp_connect(struct sock *sk);
 450enum tcp_synack_type {
 451	TCP_SYNACK_NORMAL,
 452	TCP_SYNACK_FASTOPEN,
 453	TCP_SYNACK_COOKIE,
 454};
 455struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 456				struct request_sock *req,
 457				struct tcp_fastopen_cookie *foc,
 458				enum tcp_synack_type synack_type);
 459int tcp_disconnect(struct sock *sk, int flags);
 460
 461void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
 462int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
 463void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
 464
 465/* From syncookies.c */
 466struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
 467				 struct request_sock *req,
 468				 struct dst_entry *dst, u32 tsoff);
 469int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
 470		      u32 cookie);
 471struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
 472struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
 473					    struct sock *sk, struct sk_buff *skb);
 474#ifdef CONFIG_SYN_COOKIES
 475
 476/* Syncookies use a monotonic timer which increments every 60 seconds.
 477 * This counter is used both as a hash input and partially encoded into
 478 * the cookie value.  A cookie is only validated further if the delta
 479 * between the current counter value and the encoded one is less than this,
 480 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 481 * the counter advances immediately after a cookie is generated).
 482 */
 483#define MAX_SYNCOOKIE_AGE	2
 484#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
 485#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
 486
 487/* syncookies: remember time of last synqueue overflow
 488 * But do not dirty this field too often (once per second is enough)
 489 * It is racy as we do not hold a lock, but race is very minor.
 490 */
 491static inline void tcp_synq_overflow(const struct sock *sk)
 492{
 493	unsigned int last_overflow;
 494	unsigned int now = jiffies;
 495
 496	if (sk->sk_reuseport) {
 497		struct sock_reuseport *reuse;
 498
 499		reuse = rcu_dereference(sk->sk_reuseport_cb);
 500		if (likely(reuse)) {
 501			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
 502			if (!time_between32(now, last_overflow,
 503					    last_overflow + HZ))
 504				WRITE_ONCE(reuse->synq_overflow_ts, now);
 505			return;
 506		}
 507	}
 508
 509	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
 510	if (!time_between32(now, last_overflow, last_overflow + HZ))
 511		WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now);
 512}
 513
 514/* syncookies: no recent synqueue overflow on this listening socket? */
 515static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
 516{
 517	unsigned int last_overflow;
 518	unsigned int now = jiffies;
 519
 520	if (sk->sk_reuseport) {
 521		struct sock_reuseport *reuse;
 522
 523		reuse = rcu_dereference(sk->sk_reuseport_cb);
 524		if (likely(reuse)) {
 525			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
 526			return !time_between32(now, last_overflow - HZ,
 527					       last_overflow +
 528					       TCP_SYNCOOKIE_VALID);
 529		}
 530	}
 531
 532	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
 533
 534	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
 535	 * then we're under synflood. However, we have to use
 536	 * 'last_overflow - HZ' as lower bound. That's because a concurrent
 537	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
 538	 * jiffies but before we store .ts_recent_stamp into last_overflow,
 539	 * which could lead to rejecting a valid syncookie.
 540	 */
 541	return !time_between32(now, last_overflow - HZ,
 542			       last_overflow + TCP_SYNCOOKIE_VALID);
 543}
 544
 545static inline u32 tcp_cookie_time(void)
 546{
 547	u64 val = get_jiffies_64();
 548
 549	do_div(val, TCP_SYNCOOKIE_PERIOD);
 550	return val;
 551}
 552
 553u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
 554			      u16 *mssp);
 555__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
 556u64 cookie_init_timestamp(struct request_sock *req, u64 now);
 557bool cookie_timestamp_decode(const struct net *net,
 558			     struct tcp_options_received *opt);
 559bool cookie_ecn_ok(const struct tcp_options_received *opt,
 560		   const struct net *net, const struct dst_entry *dst);
 561
 562/* From net/ipv6/syncookies.c */
 563int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
 564		      u32 cookie);
 565struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
 566
 567u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
 568			      const struct tcphdr *th, u16 *mssp);
 569__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
 570#endif
 571/* tcp_output.c */
 572
 573void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
 574			       int nonagle);
 575int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
 576int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
 577void tcp_retransmit_timer(struct sock *sk);
 578void tcp_xmit_retransmit_queue(struct sock *);
 579void tcp_simple_retransmit(struct sock *);
 580void tcp_enter_recovery(struct sock *sk, bool ece_ack);
 581int tcp_trim_head(struct sock *, struct sk_buff *, u32);
 582enum tcp_queue {
 583	TCP_FRAG_IN_WRITE_QUEUE,
 584	TCP_FRAG_IN_RTX_QUEUE,
 585};
 586int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
 587		 struct sk_buff *skb, u32 len,
 588		 unsigned int mss_now, gfp_t gfp);
 589
 590void tcp_send_probe0(struct sock *);
 591void tcp_send_partial(struct sock *);
 592int tcp_write_wakeup(struct sock *, int mib);
 593void tcp_send_fin(struct sock *sk);
 594void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 595int tcp_send_synack(struct sock *);
 596void tcp_push_one(struct sock *, unsigned int mss_now);
 597void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
 598void tcp_send_ack(struct sock *sk);
 599void tcp_send_delayed_ack(struct sock *sk);
 600void tcp_send_loss_probe(struct sock *sk);
 601bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
 602void tcp_skb_collapse_tstamp(struct sk_buff *skb,
 603			     const struct sk_buff *next_skb);
 604
 605/* tcp_input.c */
 606void tcp_rearm_rto(struct sock *sk);
 607void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
 608void tcp_reset(struct sock *sk);
 609void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
 610void tcp_fin(struct sock *sk);
 611
 612/* tcp_timer.c */
 613void tcp_init_xmit_timers(struct sock *);
 614static inline void tcp_clear_xmit_timers(struct sock *sk)
 615{
 616	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
 617		__sock_put(sk);
 618
 619	if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
 620		__sock_put(sk);
 621
 622	inet_csk_clear_xmit_timers(sk);
 623}
 624
 625unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
 626unsigned int tcp_current_mss(struct sock *sk);
 627
 628/* Bound MSS / TSO packet size with half of the window */
 629static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
 630{
 631	int cutoff;
 632
 633	/* When peer uses tiny windows, there is no use in packetizing
 634	 * to sub-MSS pieces for the sake of SWS or making sure there
 635	 * are enough packets in the pipe for fast recovery.
 636	 *
 637	 * On the other hand, for extremely large MSS devices, handling
 638	 * smaller than MSS windows in this way does make sense.
 639	 */
 640	if (tp->max_window > TCP_MSS_DEFAULT)
 641		cutoff = (tp->max_window >> 1);
 642	else
 643		cutoff = tp->max_window;
 644
 645	if (cutoff && pktsize > cutoff)
 646		return max_t(int, cutoff, 68U - tp->tcp_header_len);
 647	else
 648		return pktsize;
 649}
 650
 651/* tcp.c */
 652void tcp_get_info(struct sock *, struct tcp_info *);
 653
 654/* Read 'sendfile()'-style from a TCP socket */
 655int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 656		  sk_read_actor_t recv_actor);
 657
 658void tcp_initialize_rcv_mss(struct sock *sk);
 659
 660int tcp_mtu_to_mss(struct sock *sk, int pmtu);
 661int tcp_mss_to_mtu(struct sock *sk, int mss);
 662void tcp_mtup_init(struct sock *sk);
 663
 664static inline void tcp_bound_rto(const struct sock *sk)
 665{
 666	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
 667		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
 668}
 669
 670static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
 671{
 672	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
 673}
 674
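/* Worked example (illustrative): tp->srtt_us stores 8 * SRTT in usec, so a
 * smoothed RTT of 100 ms (srtt_us == 800000) with rttvar_us == 50000 yields
 * (800000 >> 3) + 50000 = 150000 us, i.e. an RTO of 150 ms in jiffies.
 */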
 675static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
 676{
 677	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
 678			       ntohl(TCP_FLAG_ACK) |
 679			       snd_wnd);
 680}
 681
 682static inline void tcp_fast_path_on(struct tcp_sock *tp)
 683{
 684	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
 685}
 686
 687static inline void tcp_fast_path_check(struct sock *sk)
 688{
 689	struct tcp_sock *tp = tcp_sk(sk);
 690
 691	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
 692	    tp->rcv_wnd &&
 693	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
 694	    !tp->urg_data)
 695		tcp_fast_path_on(tp);
 696}
 697
 698/* Compute the actual rto_min value */
 699static inline u32 tcp_rto_min(struct sock *sk)
 700{
 701	const struct dst_entry *dst = __sk_dst_get(sk);
 702	u32 rto_min = TCP_RTO_MIN;
 703
 704	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
 705		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
 706	return rto_min;
 707}
 708
 709static inline u32 tcp_rto_min_us(struct sock *sk)
 710{
 711	return jiffies_to_usecs(tcp_rto_min(sk));
 712}
 713
 714static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
 715{
 716	return dst_metric_locked(dst, RTAX_CC_ALGO);
 717}
 718
 719/* Minimum RTT in usec. ~0 means not available. */
 720static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
 721{
 722	return minmax_get(&tp->rtt_min);
 723}
 724
 725/* Compute the actual receive window we are currently advertising.
 726 * Rcv_nxt can be after the window if our peer pushes more data
 727 * than the offered window.
 728 */
 729static inline u32 tcp_receive_window(const struct tcp_sock *tp)
 730{
 731	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
 732
 733	if (win < 0)
 734		win = 0;
 735	return (u32) win;
 736}
 737
 738/* Choose a new window, without checks for shrinking, and without
 739 * scaling applied to the result.  The caller does these things
 740 * if necessary.  This is a "raw" window selection.
 741 */
 742u32 __tcp_select_window(struct sock *sk);
 743
 744void tcp_send_window_probe(struct sock *sk);
 745
 746/* TCP uses 32bit jiffies to save some space.
 747 * Note that this is different from tcp_time_stamp, which
 748 * historically was the same until linux-4.13.
 749 */
 750#define tcp_jiffies32 ((u32)jiffies)
 751
 752/*
 753 * Deliver a 32bit value for the TCP timestamp option (RFC 7323).
 754 * It is no longer tied to jiffies, but to a 1 ms clock.
 755 * Note: double check if you want to use tcp_jiffies32 instead of this.
 756 */
 757#define TCP_TS_HZ	1000
 758
 759static inline u64 tcp_clock_ns(void)
 760{
 761	return ktime_get_ns();
 762}
 763
 764static inline u64 tcp_clock_us(void)
 765{
 766	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
 767}
 768
 769/* This should only be used in contexts where tp->tcp_mstamp is up to date */
 770static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
 771{
 772	return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
 773}
 774
 775/* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
 776static inline u32 tcp_ns_to_ts(u64 ns)
 777{
 778	return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
 779}
 780
 781/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
 782static inline u32 tcp_time_stamp_raw(void)
 783{
 784	return tcp_ns_to_ts(tcp_clock_ns());
 785}
 786
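/* Illustrative sketch (not part of the original header): with TCP_TS_HZ at
 * 1000 the TSval clock ticks in milliseconds, so two seconds expressed in
 * nanoseconds map to 2000 timestamp units.
 */
static inline u32 tcp_ns_to_ts_example(void)
{
	return tcp_ns_to_ts(2 * NSEC_PER_SEC);	/* == 2000 while TCP_TS_HZ == 1000 */
}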
 787void tcp_mstamp_refresh(struct tcp_sock *tp);
 788
 789static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
 790{
 791	return max_t(s64, t1 - t0, 0);
 792}
 793
 794static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
 795{
 796	return tcp_ns_to_ts(skb->skb_mstamp_ns);
 797}
 798
 799/* provide the departure time in us unit */
 800static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
 801{
 802	return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
 803}
 804
 805
 806#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
 807
 808#define TCPHDR_FIN 0x01
 809#define TCPHDR_SYN 0x02
 810#define TCPHDR_RST 0x04
 811#define TCPHDR_PSH 0x08
 812#define TCPHDR_ACK 0x10
 813#define TCPHDR_URG 0x20
 814#define TCPHDR_ECE 0x40
 815#define TCPHDR_CWR 0x80
 816
 817#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
 818
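/* Illustrative sketch (not part of the original header): testing header flag
 * bits through tcp_flag_byte(). Assumes a valid struct tcphdr.
 */
static inline bool tcp_hdr_is_synack_example(const struct tcphdr *th)
{
	return (tcp_flag_byte(th) & (TCPHDR_SYN | TCPHDR_ACK)) ==
	       (TCPHDR_SYN | TCPHDR_ACK);
}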
 819/* This is what the send packet queuing engine uses to pass
 820 * TCP per-packet control information to the transmission code.
 821 * We also store the host-order sequence numbers in here.
 822 * This is 44 bytes if IPV6 is enabled.
 823 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 824 */
 825struct tcp_skb_cb {
 826	__u32		seq;		/* Starting sequence number	*/
 827	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
 828	union {
 829		/* Note : tcp_tw_isn is used in input path only
 830		 *	  (isn chosen by tcp_timewait_state_process())
 831		 *
 832		 * 	  tcp_gso_segs/size are used in write queue only,
 833		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
 834		 */
 835		__u32		tcp_tw_isn;
 836		struct {
 837			u16	tcp_gso_segs;
 838			u16	tcp_gso_size;
 839		};
 840	};
 841	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/
 842
 843	__u8		sacked;		/* State flags for SACK.	*/
 844#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
 845#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
 846#define TCPCB_LOST		0x04	/* SKB is lost			*/
 847#define TCPCB_TAGBITS		0x07	/* All tag bits			*/
 848#define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp_ns)	*/
 849#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
 850#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
 851				TCPCB_REPAIRED)
 852
 853	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
 854	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
 855			eor:1,		/* Is skb MSG_EOR marked? */
 856			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
 857			unused:5;
 858	__u32		ack_seq;	/* Sequence number ACK'd	*/
 859	union {
 860		struct {
 861			/* There is space for up to 24 bytes */
 862			__u32 in_flight:30,/* Bytes in flight at transmit */
 863			      is_app_limited:1, /* cwnd not fully used? */
 864			      unused:1;
 865			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
 866			__u32 delivered;
 867			/* start of send pipeline phase */
 868			u64 first_tx_mstamp;
 869			/* when we reached the "delivered" count */
 870			u64 delivered_mstamp;
 871		} tx;   /* only used for outgoing skbs */
 872		union {
 873			struct inet_skb_parm	h4;
 874#if IS_ENABLED(CONFIG_IPV6)
 875			struct inet6_skb_parm	h6;
 876#endif
 877		} header;	/* For incoming skbs */
 878		struct {
 879			__u32 flags;
 880			struct sock *sk_redir;
 881			void *data_end;
 882		} bpf;
 883	};
 884};
 885
 886#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
 887
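/* Illustrative sketch (not part of the original header): per-packet control
 * information stored in skb->cb[] is reached through TCP_SKB_CB(); e.g. the
 * sequence range covered by an skb (SYN/FIN included) is end_seq - seq.
 */
static inline u32 tcp_skb_seq_len_example(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
}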
 888static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
 889{
 890	TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
 891}
 892
 893static inline bool tcp_skb_bpf_ingress(const struct sk_buff *skb)
 894{
 895	return TCP_SKB_CB(skb)->bpf.flags & BPF_F_INGRESS;
 896}
 897
 898static inline struct sock *tcp_skb_bpf_redirect_fetch(struct sk_buff *skb)
 899{
 900	return TCP_SKB_CB(skb)->bpf.sk_redir;
 901}
 902
 903static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb)
 904{
 905	TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
 906}
 907
 908extern const struct inet_connection_sock_af_ops ipv4_specific;
 909
 910#if IS_ENABLED(CONFIG_IPV6)
 911/* This is the variant of inet6_iif() that must be used by TCP,
 912 * as TCP moves IP6CB into a different location in skb->cb[]
 913 */
 914static inline int tcp_v6_iif(const struct sk_buff *skb)
 915{
 916	return TCP_SKB_CB(skb)->header.h6.iif;
 917}
 918
 919static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
 920{
 921	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
 922
 923	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
 924}
 925
 926/* TCP_SKB_CB reference means this can not be used from early demux */
 927static inline int tcp_v6_sdif(const struct sk_buff *skb)
 928{
 929#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
 930	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
 931		return TCP_SKB_CB(skb)->header.h6.iif;
 932#endif
 933	return 0;
 934}
 935
 936extern const struct inet_connection_sock_af_ops ipv6_specific;
 937
 938INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
 939INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
 940INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb));
 941
 942#endif
 943
 944static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
 945{
 946#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
 947	if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
 948	    skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
 949		return true;
 950#endif
 951	return false;
 952}
 953
 954/* TCP_SKB_CB reference means this can not be used from early demux */
 955static inline int tcp_v4_sdif(struct sk_buff *skb)
 956{
 957#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
 958	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
 959		return TCP_SKB_CB(skb)->header.h4.iif;
 960#endif
 961	return 0;
 962}
 963
 964/* Due to TSO, an SKB can be composed of multiple actual
 965 * packets.  To keep these tracked properly, we use this.
 966 */
 967static inline int tcp_skb_pcount(const struct sk_buff *skb)
 968{
 969	return TCP_SKB_CB(skb)->tcp_gso_segs;
 970}
 971
 972static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
 973{
 974	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
 975}
 976
 977static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
 978{
 979	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
 980}
 981
 982/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
 983static inline int tcp_skb_mss(const struct sk_buff *skb)
 984{
 985	return TCP_SKB_CB(skb)->tcp_gso_size;
 986}
 987
 988static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
 989{
 990	return likely(!TCP_SKB_CB(skb)->eor);
 991}
 992
 993static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
 994					const struct sk_buff *from)
 995{
 996	return likely(tcp_skb_can_collapse_to(to) &&
 997		      mptcp_skb_can_collapse(to, from));
 998}
 999
1000/* Events passed to congestion control interface */
1001enum tcp_ca_event {
1002	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
1003	CA_EVENT_CWND_RESTART,	/* congestion window restart */
1004	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
1005	CA_EVENT_LOSS,		/* loss timeout */
1006	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
1007	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
1008};
1009
1010/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
1011enum tcp_ca_ack_event_flags {
1012	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
1013	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
1014	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
1015};
1016
1017/*
1018 * Interface for adding new TCP congestion control handlers
1019 */
1020#define TCP_CA_NAME_MAX	16
1021#define TCP_CA_MAX	128
1022#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)
1023
1024#define TCP_CA_UNSPEC	0
1025
1026/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
1027#define TCP_CONG_NON_RESTRICTED 0x1
1028/* Requires ECN/ECT set on all packets */
1029#define TCP_CONG_NEEDS_ECN	0x2
1030#define TCP_CONG_MASK	(TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)
1031
1032union tcp_cc_info;
1033
1034struct ack_sample {
1035	u32 pkts_acked;
1036	s32 rtt_us;
1037	u32 in_flight;
1038};
1039
1040/* A rate sample measures the number of (original/retransmitted) data
1041 * packets delivered "delivered" over an interval of time "interval_us".
1042 * The tcp_rate.c code fills in the rate sample, and congestion
1043 * control modules that define a cong_control function to run at the end
 1044 * of ACK processing can optionally choose to consult this sample when
1045 * setting cwnd and pacing rate.
1046 * A sample is invalid if "delivered" or "interval_us" is negative.
1047 */
1048struct rate_sample {
1049	u64  prior_mstamp; /* starting timestamp for interval */
1050	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
1051	s32  delivered;		/* number of packets delivered over interval */
1052	long interval_us;	/* time for tp->delivered to incr "delivered" */
1053	u32 snd_interval_us;	/* snd interval for delivered packets */
1054	u32 rcv_interval_us;	/* rcv interval for delivered packets */
1055	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
1056	int  losses;		/* number of packets marked lost upon ACK */
1057	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
1058	u32  prior_in_flight;	/* in flight before this ACK */
1059	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
1060	bool is_retrans;	/* is sample from retransmission? */
1061	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
1062};
1063
1064struct tcp_congestion_ops {
1065	struct list_head	list;
1066	u32 key;
1067	u32 flags;
1068
1069	/* initialize private data (optional) */
1070	void (*init)(struct sock *sk);
1071	/* cleanup private data  (optional) */
1072	void (*release)(struct sock *sk);
1073
1074	/* return slow start threshold (required) */
1075	u32 (*ssthresh)(struct sock *sk);
1076	/* do new cwnd calculation (required) */
1077	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
1078	/* call before changing ca_state (optional) */
1079	void (*set_state)(struct sock *sk, u8 new_state);
1080	/* call when cwnd event occurs (optional) */
1081	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
1082	/* call when ack arrives (optional) */
1083	void (*in_ack_event)(struct sock *sk, u32 flags);
1084	/* new value of cwnd after loss (required) */
1085	u32  (*undo_cwnd)(struct sock *sk);
1086	/* hook for packet ack accounting (optional) */
1087	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
1088	/* override sysctl_tcp_min_tso_segs */
1089	u32 (*min_tso_segs)(struct sock *sk);
1090	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
1091	u32 (*sndbuf_expand)(struct sock *sk);
1092	/* call when packets are delivered to update cwnd and pacing rate,
1093	 * after all the ca_state processing. (optional)
1094	 */
1095	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
1096	/* get info for inet_diag (optional) */
1097	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
1098			   union tcp_cc_info *info);
1099
1100	char 		name[TCP_CA_NAME_MAX];
1101	struct module 	*owner;
1102};
1103
1104int tcp_register_congestion_control(struct tcp_congestion_ops *type);
1105void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
1106
1107void tcp_assign_congestion_control(struct sock *sk);
1108void tcp_init_congestion_control(struct sock *sk);
1109void tcp_cleanup_congestion_control(struct sock *sk);
1110int tcp_set_default_congestion_control(struct net *net, const char *name);
1111void tcp_get_default_congestion_control(struct net *net, char *name);
1112void tcp_get_available_congestion_control(char *buf, size_t len);
1113void tcp_get_allowed_congestion_control(char *buf, size_t len);
1114int tcp_set_allowed_congestion_control(char *allowed);
1115int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
1116			       bool reinit, bool cap_net_admin);
1117u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
1118void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
1119
1120u32 tcp_reno_ssthresh(struct sock *sk);
1121u32 tcp_reno_undo_cwnd(struct sock *sk);
1122void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
1123extern struct tcp_congestion_ops tcp_reno;
1124
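/* Minimal congestion-control sketch (illustrative only, assuming it lives in
 * its own module): a hypothetical "example_reno" reusing the Reno helpers
 * declared above, showing the mandatory hooks (ssthresh, cong_avoid or
 * cong_control, and undo_cwnd). A real module would pass this to
 * tcp_register_congestion_control() from its init function.
 */
#if 0
static struct tcp_congestion_ops tcp_example_reno __read_mostly = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "example_reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
};
#endif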
1125struct tcp_congestion_ops *tcp_ca_find(const char *name);
1126struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
1127u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
1128#ifdef CONFIG_INET
1129char *tcp_ca_get_name_by_key(u32 key, char *buffer);
1130#else
1131static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
1132{
1133	return NULL;
1134}
1135#endif
1136
1137static inline bool tcp_ca_needs_ecn(const struct sock *sk)
1138{
1139	const struct inet_connection_sock *icsk = inet_csk(sk);
1140
1141	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
1142}
1143
1144static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
1145{
1146	struct inet_connection_sock *icsk = inet_csk(sk);
1147
1148	if (icsk->icsk_ca_ops->set_state)
1149		icsk->icsk_ca_ops->set_state(sk, ca_state);
1150	icsk->icsk_ca_state = ca_state;
1151}
1152
1153static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
1154{
1155	const struct inet_connection_sock *icsk = inet_csk(sk);
1156
1157	if (icsk->icsk_ca_ops->cwnd_event)
1158		icsk->icsk_ca_ops->cwnd_event(sk, event);
1159}
1160
1161/* From tcp_rate.c */
1162void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1163void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1164			    struct rate_sample *rs);
1165void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1166		  bool is_sack_reneg, struct rate_sample *rs);
1167void tcp_rate_check_app_limited(struct sock *sk);
1168
 1169/* These functions determine how the current flow behaves with respect to SACK
1170 * handling. SACK is negotiated with the peer, and therefore it can vary
1171 * between different flows.
1172 *
1173 * tcp_is_sack - SACK enabled
1174 * tcp_is_reno - No SACK
1175 */
1176static inline int tcp_is_sack(const struct tcp_sock *tp)
1177{
1178	return likely(tp->rx_opt.sack_ok);
1179}
1180
1181static inline bool tcp_is_reno(const struct tcp_sock *tp)
1182{
1183	return !tcp_is_sack(tp);
1184}
1185
1186static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
1187{
1188	return tp->sacked_out + tp->lost_out;
1189}
1190
1191/* This determines how many packets are "in the network" to the best
1192 * of our knowledge.  In many cases it is conservative, but where
1193 * detailed information is available from the receiver (via SACK
1194 * blocks etc.) we can make more aggressive calculations.
1195 *
1196 * Use this for decisions involving congestion control, use just
1197 * tp->packets_out to determine if the send queue is empty or not.
1198 *
1199 * Read this equation as:
1200 *
1201 *	"Packets sent once on transmission queue" MINUS
1202 *	"Packets left network, but not honestly ACKed yet" PLUS
1203 *	"Packets fast retransmitted"
1204 */
1205static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
1206{
1207	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
1208}
1209
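/* Worked example (illustrative): with packets_out = 10, sacked_out = 2,
 * lost_out = 1 and retrans_out = 1, the estimate is 10 - (2 + 1) + 1 = 8
 * packets still in the network.
 */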
1210#define TCP_INFINITE_SSTHRESH	0x7fffffff
1211
1212static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
1213{
1214	return tp->snd_cwnd < tp->snd_ssthresh;
1215}
1216
1217static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
1218{
1219	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
1220}
1221
1222static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
1223{
1224	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
1225	       (1 << inet_csk(sk)->icsk_ca_state);
1226}
1227
1228/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1229 * The exception is cwnd reduction phase, when cwnd is decreasing towards
1230 * ssthresh.
1231 */
1232static inline __u32 tcp_current_ssthresh(const struct sock *sk)
1233{
1234	const struct tcp_sock *tp = tcp_sk(sk);
1235
1236	if (tcp_in_cwnd_reduction(sk))
1237		return tp->snd_ssthresh;
1238	else
1239		return max(tp->snd_ssthresh,
1240			   ((tp->snd_cwnd >> 1) +
1241			    (tp->snd_cwnd >> 2)));
1242}
1243
1244/* Use define here intentionally to get WARN_ON location shown at the caller */
1245#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
1246
1247void tcp_enter_cwr(struct sock *sk);
1248__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
1249
1250/* The maximum number of MSS of available cwnd for which TSO defers
1251 * sending if not using sysctl_tcp_tso_win_divisor.
1252 */
1253static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
1254{
1255	return 3;
1256}
1257
1258/* Returns end sequence number of the receiver's advertised window */
1259static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
1260{
1261	return tp->snd_una + tp->snd_wnd;
1262}
1263
1264/* We follow the spirit of RFC2861 to validate cwnd but implement a more
1265 * flexible approach. The RFC suggests cwnd should not be raised unless
1266 * it was fully used previously. And that's exactly what we do in
1267 * congestion avoidance mode. But in slow start we allow cwnd to grow
1268 * as long as the application has used half the cwnd.
1269 * Example :
1270 *    cwnd is 10 (IW10), but application sends 9 frames.
1271 *    We allow cwnd to reach 18 when all frames are ACKed.
1272 * This check is safe because it's as aggressive as slow start which already
1273 * risks 100% overshoot. The advantage is that we discourage application to
1274 * either send more filler packets or data to artificially blow up the cwnd
1275 * usage, and allow application-limited process to probe bw more aggressively.
1276 */
1277static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1278{
1279	const struct tcp_sock *tp = tcp_sk(sk);
1280
1281	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
1282	if (tcp_in_slow_start(tp))
1283		return tp->snd_cwnd < 2 * tp->max_packets_out;
1284
1285	return tp->is_cwnd_limited;
1286}
1287
1288/* BBR congestion control needs pacing.
1289 * Same remark for SO_MAX_PACING_RATE.
 1290 * The sch_fq packet scheduler handles pacing efficiently,
 1291 * but it is not always installed/used.
 1292 * Return true if the TCP stack should pace packets itself.
1293 */
1294static inline bool tcp_needs_internal_pacing(const struct sock *sk)
1295{
1296	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
1297}
1298
 1299/* Estimates in how many jiffies the next packet for this flow can be sent.
1300 * Scheduling a retransmit timer too early would be silly.
1301 */
1302static inline unsigned long tcp_pacing_delay(const struct sock *sk)
1303{
1304	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;
1305
1306	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
1307}
1308
1309static inline void tcp_reset_xmit_timer(struct sock *sk,
1310					const int what,
1311					unsigned long when,
1312					const unsigned long max_when)
1313{
1314	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
1315				  max_when);
1316}
1317
 1318/* Something is really bad: we could not queue an additional packet
 1319 * because the qdisc is full, the receiver sent a 0 window, or we are paced.
1320 * We do not want to add fuel to the fire, or abort too early,
1321 * so make sure the timer we arm now is at least 200ms in the future,
1322 * regardless of current icsk_rto value (as it could be ~2ms)
1323 */
1324static inline unsigned long tcp_probe0_base(const struct sock *sk)
1325{
1326	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
1327}
1328
1329/* Variant of inet_csk_rto_backoff() used for zero window probes */
1330static inline unsigned long tcp_probe0_when(const struct sock *sk,
1331					    unsigned long max_when)
1332{
1333	u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;
1334
1335	return (unsigned long)min_t(u64, when, max_when);
1336}
1337
1338static inline void tcp_check_probe_timer(struct sock *sk)
1339{
1340	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
1341		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
1342				     tcp_probe0_base(sk), TCP_RTO_MAX);
1343}
1344
1345static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
1346{
1347	tp->snd_wl1 = seq;
1348}
1349
1350static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
1351{
1352	tp->snd_wl1 = seq;
1353}
1354
1355/*
1356 * Calculate(/check) TCP checksum
1357 */
1358static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1359				   __be32 daddr, __wsum base)
1360{
1361	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
1362}
1363
1364static inline bool tcp_checksum_complete(struct sk_buff *skb)
1365{
1366	return !skb_csum_unnecessary(skb) &&
1367		__skb_checksum_complete(skb);
1368}
1369
1370bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
1371int tcp_filter(struct sock *sk, struct sk_buff *skb);
1372void tcp_set_state(struct sock *sk, int state);
1373void tcp_done(struct sock *sk);
1374int tcp_abort(struct sock *sk, int err);
1375
1376static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
1377{
1378	rx_opt->dsack = 0;
1379	rx_opt->num_sacks = 0;
1380}
1381
1382void tcp_cwnd_restart(struct sock *sk, s32 delta);
1383
1384static inline void tcp_slow_start_after_idle_check(struct sock *sk)
1385{
1386	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1387	struct tcp_sock *tp = tcp_sk(sk);
1388	s32 delta;
1389
1390	if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out ||
1391	    ca_ops->cong_control)
1392		return;
1393	delta = tcp_jiffies32 - tp->lsndtime;
1394	if (delta > inet_csk(sk)->icsk_rto)
1395		tcp_cwnd_restart(sk, delta);
1396}
1397
1398/* Determine a window scaling and initial window to offer. */
1399void tcp_select_initial_window(const struct sock *sk, int __space,
1400			       __u32 mss, __u32 *rcv_wnd,
1401			       __u32 *window_clamp, int wscale_ok,
1402			       __u8 *rcv_wscale, __u32 init_rcv_wnd);
1403
1404static inline int tcp_win_from_space(const struct sock *sk, int space)
1405{
1406	int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;
1407
1408	return tcp_adv_win_scale <= 0 ?
1409		(space>>(-tcp_adv_win_scale)) :
1410		space - (space>>tcp_adv_win_scale);
1411}
1412
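/* Worked example (illustrative, assuming the common tcp_adv_win_scale of 1):
 * tcp_win_from_space() then reserves half of the space for skb overhead, so
 * 131072 bytes of buffer yield 131072 - (131072 >> 1) = 65536 bytes of window.
 */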
1413/* Note: caller must be prepared to deal with negative returns */
1414static inline int tcp_space(const struct sock *sk)
1415{
1416	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
1417				  READ_ONCE(sk->sk_backlog.len) -
1418				  atomic_read(&sk->sk_rmem_alloc));
1419}
1420
1421static inline int tcp_full_space(const struct sock *sk)
1422{
1423	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
1424}
1425
1426/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
 1427 * If 87.5% (7/8) of the space has been consumed, we want to override the
 1428 * SO_RCVLOWAT constraint, since we are receiving skbs with too small a
 1429 * len/truesize ratio.
1430 */
1431static inline bool tcp_rmem_pressure(const struct sock *sk)
1432{
1433	int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1434	int threshold = rcvbuf - (rcvbuf >> 3);
1435
1436	return atomic_read(&sk->sk_rmem_alloc) > threshold;
1437}
1438
1439extern void tcp_openreq_init_rwin(struct request_sock *req,
1440				  const struct sock *sk_listener,
1441				  const struct dst_entry *dst);
1442
1443void tcp_enter_memory_pressure(struct sock *sk);
1444void tcp_leave_memory_pressure(struct sock *sk);
1445
1446static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1447{
1448	struct net *net = sock_net((struct sock *)tp);
1449
1450	return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
1451}
1452
1453static inline int keepalive_time_when(const struct tcp_sock *tp)
1454{
1455	struct net *net = sock_net((struct sock *)tp);
1456
1457	return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
1458}
1459
1460static inline int keepalive_probes(const struct tcp_sock *tp)
1461{
1462	struct net *net = sock_net((struct sock *)tp);
1463
1464	return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
1465}
1466
1467static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1468{
1469	const struct inet_connection_sock *icsk = &tp->inet_conn;
1470
1471	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
1472			  tcp_jiffies32 - tp->rcv_tstamp);
1473}
1474
1475static inline int tcp_fin_time(const struct sock *sk)
1476{
1477	int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
1478	const int rto = inet_csk(sk)->icsk_rto;
1479
1480	if (fin_timeout < (rto << 2) - (rto >> 1))
1481		fin_timeout = (rto << 2) - (rto >> 1);
1482
1483	return fin_timeout;
1484}
1485
1486static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1487				  int paws_win)
1488{
1489	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1490		return true;
1491	if (unlikely(!time_before32(ktime_get_seconds(),
1492				    rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
1493		return true;
1494	/*
1495	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
 1496	 * while the following tcp messages have valid values. Ignore the 0 value,
 1497	 * or else a 'negative' tsval might forbid us from accepting their packets.
1498	 */
1499	if (!rx_opt->ts_recent)
1500		return true;
1501	return false;
1502}
1503
1504static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1505				   int rst)
1506{
1507	if (tcp_paws_check(rx_opt, 0))
1508		return false;
1509
 1510	/* RST segments are not recommended to carry a timestamp,
 1511	   and, if they do, it is recommended to ignore PAWS because
 1512	   "their cleanup function should take precedence over timestamps."
 1513	   Certainly, this is a mistake. It is necessary to understand the reasons
 1514	   for this constraint before relaxing it: if the peer reboots, its clock may go
 1515	   out of sync and half-open connections will not be reset.
 1516	   Actually, the problem would not exist if all
 1517	   the implementations followed the draft about maintaining the clock
 1518	   across reboots. Linux-2.2 DOES NOT!
1519
1520	   However, we can relax time bounds for RST segments to MSL.
1521	 */
1522	if (rst && !time_before32(ktime_get_seconds(),
1523				  rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
1524		return false;
1525	return true;
1526}
1527
1528bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
1529			  int mib_idx, u32 *last_oow_ack_time);
1530
1531static inline void tcp_mib_init(struct net *net)
1532{
1533	/* See RFC 2012 */
1534	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
1535	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1536	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1537	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
1538}
1539
1540/* from STCP */
1541static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1542{
1543	tp->lost_skb_hint = NULL;
1544}
1545
1546static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1547{
1548	tcp_clear_retrans_hints_partial(tp);
1549	tp->retransmit_skb_hint = NULL;
1550}
1551
1552union tcp_md5_addr {
1553	struct in_addr  a4;
1554#if IS_ENABLED(CONFIG_IPV6)
1555	struct in6_addr	a6;
1556#endif
1557};
1558
1559/* - key database */
1560struct tcp_md5sig_key {
1561	struct hlist_node	node;
1562	u8			keylen;
1563	u8			family; /* AF_INET or AF_INET6 */
1564	u8			prefixlen;
1565	union tcp_md5_addr	addr;
1566	int			l3index; /* set if key added with L3 scope */
1567	u8			key[TCP_MD5SIG_MAXKEYLEN];
1568	struct rcu_head		rcu;
1569};
1570
1571/* - sock block */
1572struct tcp_md5sig_info {
1573	struct hlist_head	head;
1574	struct rcu_head		rcu;
1575};
1576
1577/* - pseudo header */
1578struct tcp4_pseudohdr {
1579	__be32		saddr;
1580	__be32		daddr;
1581	__u8		pad;
1582	__u8		protocol;
1583	__be16		len;
1584};
1585
1586struct tcp6_pseudohdr {
1587	struct in6_addr	saddr;
1588	struct in6_addr daddr;
1589	__be32		len;
1590	__be32		protocol;	/* including padding */
1591};
1592
1593union tcp_md5sum_block {
1594	struct tcp4_pseudohdr ip4;
1595#if IS_ENABLED(CONFIG_IPV6)
1596	struct tcp6_pseudohdr ip6;
1597#endif
1598};
1599
1600/* - pool: digest algorithm, hash description and scratch buffer */
1601struct tcp_md5sig_pool {
1602	struct ahash_request	*md5_req;
1603	void			*scratch;
1604};
1605
1606/* - functions */
1607int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1608			const struct sock *sk, const struct sk_buff *skb);
1609int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1610		   int family, u8 prefixlen, int l3index,
1611		   const u8 *newkey, u8 newkeylen, gfp_t gfp);
1612int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1613		   int family, u8 prefixlen, int l3index);
1614struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1615					 const struct sock *addr_sk);
1616
1617#ifdef CONFIG_TCP_MD5SIG
1618#include <linux/jump_label.h>
1619extern struct static_key_false tcp_md5_needed;
1620struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1621					   const union tcp_md5_addr *addr,
1622					   int family);
1623static inline struct tcp_md5sig_key *
1624tcp_md5_do_lookup(const struct sock *sk, int l3index,
1625		  const union tcp_md5_addr *addr, int family)
1626{
1627	if (!static_branch_unlikely(&tcp_md5_needed))
1628		return NULL;
1629	return __tcp_md5_do_lookup(sk, l3index, addr, family);
1630}
1631
1632#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
1633#else
1634static inline struct tcp_md5sig_key *
1635tcp_md5_do_lookup(const struct sock *sk, int l3index,
1636		  const union tcp_md5_addr *addr, int family)
1637{
1638	return NULL;
1639}
1640#define tcp_twsk_md5_key(twsk)	NULL
1641#endif
1642
1643bool tcp_alloc_md5sig_pool(void);
1644
1645struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
1646static inline void tcp_put_md5sig_pool(void)
1647{
1648	local_bh_enable();
1649}
1650
1651int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
1652			  unsigned int header_len);
1653int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1654		     const struct tcp_md5sig_key *key);
1655
1656/* From tcp_fastopen.c */
1657void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1658			    struct tcp_fastopen_cookie *cookie);
1659void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1660			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
1661			    u16 try_exp);
1662struct tcp_fastopen_request {
1663	/* Fast Open cookie. Size 0 means a cookie request */
1664	struct tcp_fastopen_cookie	cookie;
1665	struct msghdr			*data;  /* data in MSG_FASTOPEN */
1666	size_t				size;
1667	int				copied;	/* queued in tcp_connect() */
1668	struct ubuf_info		*uarg;
1669};
1670void tcp_free_fastopen_req(struct tcp_sock *tp);
1671void tcp_fastopen_destroy_cipher(struct sock *sk);
1672void tcp_fastopen_ctx_destroy(struct net *net);
1673int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
1674			      void *primary_key, void *backup_key);
1675int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
1676			    u64 *key);
1677void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
1678struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1679			      struct request_sock *req,
1680			      struct tcp_fastopen_cookie *foc,
1681			      const struct dst_entry *dst);
1682void tcp_fastopen_init_key_once(struct net *net);
1683bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
1684			     struct tcp_fastopen_cookie *cookie);
1685bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
1686#define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
1687#define TCP_FASTOPEN_KEY_MAX 2
1688#define TCP_FASTOPEN_KEY_BUF_LENGTH \
1689	(TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)
1690
1691/* Fastopen key context */
1692struct tcp_fastopen_context {
1693	siphash_key_t	key[TCP_FASTOPEN_KEY_MAX];
1694	int		num;
1695	struct rcu_head	rcu;
1696};
1697
1698extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
1699void tcp_fastopen_active_disable(struct sock *sk);
1700bool tcp_fastopen_active_should_disable(struct sock *sk);
1701void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
1702void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
1703
1704/* Caller needs to wrap with rcu_read_(un)lock() */
1705static inline
1706struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
1707{
1708	struct tcp_fastopen_context *ctx;
1709
1710	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
1711	if (!ctx)
1712		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
1713	return ctx;
1714}
1715
1716static inline
1717bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
1718			       const struct tcp_fastopen_cookie *orig)
1719{
1720	if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
1721	    orig->len == foc->len &&
1722	    !memcmp(orig->val, foc->val, foc->len))
1723		return true;
1724	return false;
1725}
1726
1727static inline
1728int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
1729{
1730	return ctx->num;
1731}
1732
1733/* Latencies incurred by various limits for a sender. They are
1734 * chronograph-like stats that are mutually exclusive.
1735 */
1736enum tcp_chrono {
1737	TCP_CHRONO_UNSPEC,
1738	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
1739	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
1740	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
1741	__TCP_CHRONO_MAX,
1742};
1743
1744void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
1745void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
1746
1747/* This helper is needed, because skb->tcp_tsorted_anchor uses
1748 * the same memory storage as skb->destructor/_skb_refdst
1749 */
1750static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
1751{
1752	skb->destructor = NULL;
1753	skb->_skb_refdst = 0UL;
1754}
1755
1756#define tcp_skb_tsorted_save(skb) {		\
1757	unsigned long _save = skb->_skb_refdst;	\
1758	skb->_skb_refdst = 0UL;
1759
1760#define tcp_skb_tsorted_restore(skb)		\
1761	skb->_skb_refdst = _save;		\
1762}
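/* A minimal usage sketch for the two macros above (the transmit helper name
 * is hypothetical): they open and close a C block and must be paired in the
 * same scope, e.g.
 *
 *	tcp_skb_tsorted_save(oskb) {
 *		err = queue_xmit_helper(sk, skb);
 *	} tcp_skb_tsorted_restore(oskb);
 *
 * oskb->_skb_refdst, which currently aliases the tcp_tsorted_anchor bits, is
 * parked in the local _save for the duration of the block and then restored.
 */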
1763
1764void tcp_write_queue_purge(struct sock *sk);
1765
1766static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
1767{
1768	return skb_rb_first(&sk->tcp_rtx_queue);
1769}
1770
1771static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
1772{
1773	return skb_rb_last(&sk->tcp_rtx_queue);
1774}
1775
1776static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
1777{
1778	return skb_peek(&sk->sk_write_queue);
1779}
1780
1781static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1782{
1783	return skb_peek_tail(&sk->sk_write_queue);
1784}
1785
1786#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
1787	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1788
1789static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1790{
1791	return skb_peek(&sk->sk_write_queue);
1792}
1793
1794static inline bool tcp_skb_is_last(const struct sock *sk,
1795				   const struct sk_buff *skb)
1796{
1797	return skb_queue_is_last(&sk->sk_write_queue, skb);
1798}
1799
1800/**
1801 * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue
1802 * @sk: socket
1803 *
1804 * Since the write queue can contain a temporarily empty skb,
1805 * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
1806 */
1807static inline bool tcp_write_queue_empty(const struct sock *sk)
1808{
1809	const struct tcp_sock *tp = tcp_sk(sk);
1810
1811	return tp->write_seq == tp->snd_nxt;
1812}
1813
1814static inline bool tcp_rtx_queue_empty(const struct sock *sk)
1815{
1816	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
1817}
1818
1819static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
1820{
1821	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
1822}
1823
1824static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1825{
1826	__skb_queue_tail(&sk->sk_write_queue, skb);
1827
1828	/* Queue it, remembering where we must start sending. */
1829	if (sk->sk_write_queue.next == skb)
1830		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
1831}
1832
1833/* Insert new before skb on the write queue of sk.  */
1834static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1835						  struct sk_buff *skb,
1836						  struct sock *sk)
1837{
1838	__skb_queue_before(&sk->sk_write_queue, skb, new);
1839}
1840
1841static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1842{
1843	tcp_skb_tsorted_anchor_cleanup(skb);
1844	__skb_unlink(skb, &sk->sk_write_queue);
1845}
1846
1847void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);
1848
1849static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
1850{
1851	tcp_skb_tsorted_anchor_cleanup(skb);
1852	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
1853}
1854
1855static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
1856{
1857	list_del(&skb->tcp_tsorted_anchor);
1858	tcp_rtx_queue_unlink(skb, sk);
1859	sk_wmem_free_skb(sk, skb);
1860}
1861
1862static inline void tcp_push_pending_frames(struct sock *sk)
1863{
1864	if (tcp_send_head(sk)) {
1865		struct tcp_sock *tp = tcp_sk(sk);
1866
1867		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
1868	}
1869}
1870
1871/* Start sequence of the skb just after the highest skb with SACKed
1872 * bit, valid only if sacked_out > 0 or when the caller has itself
1873 * ensured validity.
1874 */
1875static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1876{
1877	if (!tp->sacked_out)
1878		return tp->snd_una;
1879
1880	if (tp->highest_sack == NULL)
1881		return tp->snd_nxt;
1882
1883	return TCP_SKB_CB(tp->highest_sack)->seq;
1884}
1885
1886static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
1887{
1888	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
1889}
1890
1891static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
1892{
1893	return tcp_sk(sk)->highest_sack;
1894}
1895
1896static inline void tcp_highest_sack_reset(struct sock *sk)
1897{
1898	tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
1899}
1900
1901/* Called when old skb is about to be deleted and replaced by new skb */
1902static inline void tcp_highest_sack_replace(struct sock *sk,
1903					    struct sk_buff *old,
1904					    struct sk_buff *new)
1905{
1906	if (old == tcp_highest_sack(sk))
1907		tcp_sk(sk)->highest_sack = new;
1908}
1909
1910/* This helper checks if socket has IP_TRANSPARENT set */
1911static inline bool inet_sk_transparent(const struct sock *sk)
1912{
1913	switch (sk->sk_state) {
1914	case TCP_TIME_WAIT:
1915		return inet_twsk(sk)->tw_transparent;
1916	case TCP_NEW_SYN_RECV:
1917		return inet_rsk(inet_reqsk(sk))->no_srccheck;
1918	}
1919	return inet_sk(sk)->transparent;
1920}
1921
1922/* Determines whether this is a thin stream (which may suffer from
1923 * increased latency). Used to trigger latency-reducing mechanisms.
1924 */
1925static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
1926{
1927	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
1928}
1929
1930/* /proc */
1931enum tcp_seq_states {
1932	TCP_SEQ_STATE_LISTENING,
1933	TCP_SEQ_STATE_ESTABLISHED,
1934};
1935
1936void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
1937void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
1938void tcp_seq_stop(struct seq_file *seq, void *v);
1939
1940struct tcp_seq_afinfo {
1941	sa_family_t			family;
1942};
1943
1944struct tcp_iter_state {
1945	struct seq_net_private	p;
1946	enum tcp_seq_states	state;
1947	struct sock		*syn_wait_sk;
1948	struct tcp_seq_afinfo	*bpf_seq_afinfo;
1949	int			bucket, offset, sbucket, num;
1950	loff_t			last_pos;
1951};
1952
 
 
 
1953extern struct request_sock_ops tcp_request_sock_ops;
1954extern struct request_sock_ops tcp6_request_sock_ops;
1955
1956void tcp_v4_destroy_sock(struct sock *sk);
1957
1958struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
1959				netdev_features_t features);
1960struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
1961INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
1962INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
1963INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
1964INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
1965int tcp_gro_complete(struct sk_buff *skb);
1966
1967void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
1968
1969static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
1970{
1971	struct net *net = sock_net((struct sock *)tp);
1972	return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
1973}
1974
1975/* @wake is one when sk_stream_write_space() calls us.
1976 * This sends EPOLLOUT only if notsent_bytes is below half the limit.
1977 * This mimics the strategy used in sock_def_write_space().
1978 */
1979static inline bool tcp_stream_memory_free(const struct sock *sk, int wake)
1980{
1981	const struct tcp_sock *tp = tcp_sk(sk);
1982	u32 notsent_bytes = READ_ONCE(tp->write_seq) -
1983			    READ_ONCE(tp->snd_nxt);
1984
1985	return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
1986}
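/* Worked example: with tcp_notsent_lowat() == 128 KB and @wake == 1, the
 * shift doubles notsent_bytes, so the test passes (and EPOLLOUT can be
 * signalled) only once fewer than 64 KB remain unsent; with @wake == 0 the
 * plain 128 KB limit applies.
 */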
1987
1988#ifdef CONFIG_PROC_FS
1989int tcp4_proc_init(void);
1990void tcp4_proc_exit(void);
1991#endif
1992
1993int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
1994int tcp_conn_request(struct request_sock_ops *rsk_ops,
1995		     const struct tcp_request_sock_ops *af_ops,
1996		     struct sock *sk, struct sk_buff *skb);
1997
1998/* TCP af-specific functions */
1999struct tcp_sock_af_ops {
2000#ifdef CONFIG_TCP_MD5SIG
2001	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
2002						const struct sock *addr_sk);
2003	int		(*calc_md5_hash)(char *location,
2004					 const struct tcp_md5sig_key *md5,
2005					 const struct sock *sk,
2006					 const struct sk_buff *skb);
2007	int		(*md5_parse)(struct sock *sk,
2008				     int optname,
2009				     sockptr_t optval,
2010				     int optlen);
2011#endif
2012};
2013
2014struct tcp_request_sock_ops {
2015	u16 mss_clamp;
2016#ifdef CONFIG_TCP_MD5SIG
2017	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
2018						 const struct sock *addr_sk);
2019	int		(*calc_md5_hash) (char *location,
2020					  const struct tcp_md5sig_key *md5,
2021					  const struct sock *sk,
2022					  const struct sk_buff *skb);
2023#endif
2024	void (*init_req)(struct request_sock *req,
2025			 const struct sock *sk_listener,
2026			 struct sk_buff *skb);
2027#ifdef CONFIG_SYN_COOKIES
2028	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
2029				 __u16 *mss);
2030#endif
2031	struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
2032				       const struct request_sock *req);
2033	u32 (*init_seq)(const struct sk_buff *skb);
2034	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
2035	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
2036			   struct flowi *fl, struct request_sock *req,
2037			   struct tcp_fastopen_cookie *foc,
2038			   enum tcp_synack_type synack_type);
2039};
2040
2041extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
2042#if IS_ENABLED(CONFIG_IPV6)
2043extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
2044#endif
2045
2046#ifdef CONFIG_SYN_COOKIES
2047static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2048					 const struct sock *sk, struct sk_buff *skb,
2049					 __u16 *mss)
2050{
2051	tcp_synq_overflow(sk);
2052	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
2053	return ops->cookie_init_seq(skb, mss);
2054}
2055#else
2056static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2057					 const struct sock *sk, struct sk_buff *skb,
2058					 __u16 *mss)
2059{
2060	return 0;
2061}
2062#endif
2063
2064int tcpv4_offload_init(void);
2065
2066void tcp_v4_init(void);
2067void tcp_init(void);
2068
2069/* tcp_recovery.c */
2070void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
2071void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
2072extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
2073				u32 reo_wnd);
2074extern void tcp_rack_mark_lost(struct sock *sk);
2075extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
2076			     u64 xmit_time);
2077extern void tcp_rack_reo_timeout(struct sock *sk);
2078extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);
2079
2080/* At how many usecs into the future should the RTO fire? */
2081static inline s64 tcp_rto_delta_us(const struct sock *sk)
2082{
2083	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
2084	u32 rto = inet_csk(sk)->icsk_rto;
2085	u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
2086
2087	return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
2088}
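/* Reading the result: it is the RTO deadline of the earliest unacked skb
 * minus the current tcp_mstamp, so a negative value means the retransmit
 * timer is already overdue and should fire immediately.
 */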
2089
2090/*
2091 * Save and compile the IPv4 options and return a pointer to the copy
2092 */
2093static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
2094							 struct sk_buff *skb)
2095{
2096	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
2097	struct ip_options_rcu *dopt = NULL;
2098
2099	if (opt->optlen) {
2100		int opt_size = sizeof(*dopt) + opt->optlen;
2101
2102		dopt = kmalloc(opt_size, GFP_ATOMIC);
2103		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
2104			kfree(dopt);
2105			dopt = NULL;
2106		}
2107	}
2108	return dopt;
2109}
2110
2111/* locally generated TCP pure ACKs have skb->truesize == 2
2112 * (check tcp_send_ack() in net/ipv4/tcp_output.c)
2113 * This is much faster than dissecting the packet to find out.
2114 * (Think of GRE encapsulations, IPv4, IPv6, ...)
2115 */
2116static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
2117{
2118	return skb->truesize == 2;
2119}
2120
2121static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
2122{
2123	skb->truesize = 2;
2124}
2125
2126static inline int tcp_inq(struct sock *sk)
2127{
2128	struct tcp_sock *tp = tcp_sk(sk);
2129	int answ;
2130
2131	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
2132		answ = 0;
2133	} else if (sock_flag(sk, SOCK_URGINLINE) ||
2134		   !tp->urg_data ||
2135		   before(tp->urg_seq, tp->copied_seq) ||
2136		   !before(tp->urg_seq, tp->rcv_nxt)) {
2137
2138		answ = tp->rcv_nxt - tp->copied_seq;
2139
2140		/* Subtract 1, if FIN was received */
2141		if (answ && sock_flag(sk, SOCK_DONE))
2142			answ--;
2143	} else {
2144		answ = tp->urg_seq - tp->copied_seq;
2145	}
2146
2147	return answ;
2148}
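/* Note: this is the SIOCINQ/TCP_INQ-style count of bytes received but not yet
 * read (rcv_nxt - copied_seq), minus one for a received FIN, and truncated at
 * the urgent byte when valid urgent data is pending and SOCK_URGINLINE is not
 * set.
 */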
2149
2150int tcp_peek_len(struct socket *sock);
2151
2152static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
2153{
2154	u16 segs_in;
2155
2156	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
2157	tp->segs_in += segs_in;
2158	if (skb->len > tcp_hdrlen(skb))
2159		tp->data_segs_in += segs_in;
2160}
2161
2162/*
2163 * TCP listen path runs lockless.
2164 * We forced "struct sock" to be const qualified to make sure
2165 * we don't modify one of its fields by mistake.
2166 * Here, we increment sk_drops which is an atomic_t, so we can safely
2167 * make sock writable again.
2168 */
2169static inline void tcp_listendrop(const struct sock *sk)
2170{
2171	atomic_inc(&((struct sock *)sk)->sk_drops);
2172	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
2173}
2174
2175enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
2176
2177/*
2178 * Interface for adding Upper Level Protocols over TCP
2179 */
2180
2181#define TCP_ULP_NAME_MAX	16
2182#define TCP_ULP_MAX		128
2183#define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)
2184
2185struct tcp_ulp_ops {
2186	struct list_head	list;
2187
2188	/* initialize ulp */
2189	int (*init)(struct sock *sk);
2190	/* update ulp */
2191	void (*update)(struct sock *sk, struct proto *p,
2192		       void (*write_space)(struct sock *sk));
2193	/* cleanup ulp */
2194	void (*release)(struct sock *sk);
2195	/* diagnostic */
2196	int (*get_info)(const struct sock *sk, struct sk_buff *skb);
2197	size_t (*get_info_size)(const struct sock *sk);
2198	/* clone ulp */
2199	void (*clone)(const struct request_sock *req, struct sock *newsk,
2200		      const gfp_t priority);
2201
2202	char		name[TCP_ULP_NAME_MAX];
2203	struct module	*owner;
2204};
2205int tcp_register_ulp(struct tcp_ulp_ops *type);
2206void tcp_unregister_ulp(struct tcp_ulp_ops *type);
2207int tcp_set_ulp(struct sock *sk, const char *name);
2208void tcp_get_available_ulp(char *buf, size_t len);
2209void tcp_cleanup_ulp(struct sock *sk);
2210void tcp_update_ulp(struct sock *sk, struct proto *p,
2211		    void (*write_space)(struct sock *sk));
2212
2213#define MODULE_ALIAS_TCP_ULP(name)				\
2214	__MODULE_INFO(alias, alias_userspace, name);		\
2215	__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
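/* A minimal registration sketch for the ULP interface declared above
 * ("demo" and demo_init are hypothetical names, following the pattern used
 * by in-tree users such as net/tls):
 *
 *	static struct tcp_ulp_ops demo_ulp_ops __read_mostly = {
 *		.name	= "demo",
 *		.owner	= THIS_MODULE,
 *		.init	= demo_init,
 *	};
 *
 *	tcp_register_ulp(&demo_ulp_ops);	// typically from module_init()
 *
 * Userspace then selects it per socket with
 * setsockopt(fd, SOL_TCP, TCP_ULP, "demo", sizeof("demo")).
 */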
2216
2217struct sk_msg;
2218struct sk_psock;
2219
2220#ifdef CONFIG_BPF_STREAM_PARSER
2221struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
2222void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
2223#else
2224static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
2225{
2226}
2227#endif /* CONFIG_BPF_STREAM_PARSER */
2228
2229#ifdef CONFIG_NET_SOCK_MSG
2230int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes,
2231			  int flags);
2232int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
2233		      struct msghdr *msg, int len, int flags);
2234#endif /* CONFIG_NET_SOCK_MSG */
2235
2236/* Call BPF_SOCK_OPS program that returns an int. If the return value
2237 * is < 0, then the BPF op failed (for example if the loaded BPF
2238 * program does not support the chosen operation or there is no BPF
2239 * program loaded).
2240 */
2241#ifdef CONFIG_BPF
2242static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2243{
2244	struct bpf_sock_ops_kern sock_ops;
2245	int ret;
2246
2247	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
2248	if (sk_fullsock(sk)) {
2249		sock_ops.is_fullsock = 1;
2250		sock_owned_by_me(sk);
2251	}
2252
2253	sock_ops.sk = sk;
2254	sock_ops.op = op;
2255	if (nargs > 0)
2256		memcpy(sock_ops.args, args, nargs * sizeof(*args));
2257
2258	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
2259	if (ret == 0)
2260		ret = sock_ops.reply;
2261	else
2262		ret = -1;
2263	return ret;
2264}
2265
2266static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2267{
2268	u32 args[2] = {arg1, arg2};
2269
2270	return tcp_call_bpf(sk, op, 2, args);
2271}
2272
2273static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2274				    u32 arg3)
2275{
2276	u32 args[3] = {arg1, arg2, arg3};
2277
2278	return tcp_call_bpf(sk, op, 3, args);
2279}
2280
2281#else
2282static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2283{
2284	return -EPERM;
2285}
2286
2287static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2288{
2289	return -EPERM;
2290}
2291
2292static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2293				    u32 arg3)
2294{
2295	return -EPERM;
2296}
2297
2298#endif
2299
2300static inline u32 tcp_timeout_init(struct sock *sk)
2301{
2302	int timeout;
2303
2304	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
2305
2306	if (timeout <= 0)
2307		timeout = TCP_TIMEOUT_INIT;
2308	return timeout;
2309}
2310
2311static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
2312{
2313	int rwnd;
2314
2315	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
2316
2317	if (rwnd < 0)
2318		rwnd = 0;
2319	return rwnd;
2320}
2321
2322static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
2323{
2324	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
2325}
2326
2327static inline void tcp_bpf_rtt(struct sock *sk)
2328{
2329	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
2330		tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, NULL);
2331}
2332
2333#if IS_ENABLED(CONFIG_SMC)
2334extern struct static_key_false tcp_have_smc;
2335#endif
2336
2337#if IS_ENABLED(CONFIG_TLS_DEVICE)
2338void clean_acked_data_enable(struct inet_connection_sock *icsk,
2339			     void (*cad)(struct sock *sk, u32 ack_seq));
2340void clean_acked_data_disable(struct inet_connection_sock *icsk);
2341void clean_acked_data_flush(void);
2342#endif
2343
2344DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
2345static inline void tcp_add_tx_delay(struct sk_buff *skb,
2346				    const struct tcp_sock *tp)
2347{
2348	if (static_branch_unlikely(&tcp_tx_delay_enabled))
2349		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
2350}
2351
2352/* Compute Earliest Departure Time for some control packets
2353 * like ACK or RST for TIME_WAIT or non-ESTABLISHED sockets.
2354 */
2355static inline u64 tcp_transmit_time(const struct sock *sk)
2356{
2357	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
2358		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
2359			tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;
2360
2361		return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
2362	}
2363	return 0;
2364}
2365
2366#endif	/* _TCP_H */
v4.17
 
   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		Definitions for the TCP module.
   7 *
   8 * Version:	@(#)tcp.h	1.0.5	05/23/93
   9 *
  10 * Authors:	Ross Biro
  11 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12 *
  13 *		This program is free software; you can redistribute it and/or
  14 *		modify it under the terms of the GNU General Public License
  15 *		as published by the Free Software Foundation; either version
  16 *		2 of the License, or (at your option) any later version.
  17 */
  18#ifndef _TCP_H
  19#define _TCP_H
  20
  21#define FASTRETRANS_DEBUG 1
  22
  23#include <linux/list.h>
  24#include <linux/tcp.h>
  25#include <linux/bug.h>
  26#include <linux/slab.h>
  27#include <linux/cache.h>
  28#include <linux/percpu.h>
  29#include <linux/skbuff.h>
  30#include <linux/cryptohash.h>
  31#include <linux/kref.h>
  32#include <linux/ktime.h>
 
  33
  34#include <net/inet_connection_sock.h>
  35#include <net/inet_timewait_sock.h>
  36#include <net/inet_hashtables.h>
  37#include <net/checksum.h>
  38#include <net/request_sock.h>
 
  39#include <net/sock.h>
  40#include <net/snmp.h>
  41#include <net/ip.h>
  42#include <net/tcp_states.h>
  43#include <net/inet_ecn.h>
  44#include <net/dst.h>
 
  45
  46#include <linux/seq_file.h>
  47#include <linux/memcontrol.h>
  48#include <linux/bpf-cgroup.h>
 
  49
  50extern struct inet_hashinfo tcp_hashinfo;
  51
  52extern struct percpu_counter tcp_orphan_count;
  53void tcp_time_wait(struct sock *sk, int state, int timeo);
  54
  55#define MAX_TCP_HEADER	(128 + MAX_HEADER)
  56#define MAX_TCP_OPTION_SPACE 40
 
 
  57
  58/*
  59 * Never offer a window over 32767 without using window scaling. Some
  60 * poor stacks do signed 16bit maths!
  61 */
  62#define MAX_TCP_WINDOW		32767U
  63
  64/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
  65#define TCP_MIN_MSS		88U
  66
  67/* The least MTU to use for probing */
  68#define TCP_BASE_MSS		1024
  69
  70/* probing interval, default to 10 minutes as per RFC4821 */
  71#define TCP_PROBE_INTERVAL	600
  72
  73/* Specify interval when tcp mtu probing will stop */
  74#define TCP_PROBE_THRESHOLD	8
  75
  76/* After receiving this amount of duplicate ACKs fast retransmit starts. */
  77#define TCP_FASTRETRANS_THRESH 3
  78
  79/* Maximal number of ACKs sent quickly to accelerate slow-start. */
  80#define TCP_MAX_QUICKACKS	16U
  81
  82/* Maximal number of window scale according to RFC1323 */
  83#define TCP_MAX_WSCALE		14U
  84
  85/* urg_data states */
  86#define TCP_URG_VALID	0x0100
  87#define TCP_URG_NOTYET	0x0200
  88#define TCP_URG_READ	0x0400
  89
  90#define TCP_RETR1	3	/*
  91				 * This is how many retries it does before it
  92				 * tries to figure out if the gateway is
  93				 * down. Minimal RFC value is 3; it corresponds
  94				 * to ~3sec-8min depending on RTO.
  95				 */
  96
  97#define TCP_RETR2	15	/*
  98				 * This should take at least
  99				 * 90 minutes to time out.
 100				 * RFC1122 says that the limit is 100 sec.
 101				 * 15 is ~13-30min depending on RTO.
 102				 */
 103
 104#define TCP_SYN_RETRIES	 6	/* This is how many retries are done
 105				 * when active opening a connection.
 106				 * RFC1122 says the minimum retry MUST
 107				 * be at least 180secs.  Nevertheless
 108				 * this value is corresponding to
 109				 * 63secs of retransmission with the
 110				 * current initial RTO.
 111				 */
 112
 113#define TCP_SYNACK_RETRIES 5	/* This is how may retries are done
 114				 * when passive opening a connection.
 115				 * This is corresponding to 31secs of
 116				 * retransmission with the current
 117				 * initial RTO.
 118				 */
 119
 120#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
 121				  * state, about 60 seconds	*/
 122#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
 123                                 /* BSD style FIN_WAIT2 deadlock breaker.
 124				  * It used to be 3min, new value is 60sec,
 125				  * to combine FIN-WAIT-2 timeout with
 126				  * TIME-WAIT timer.
 127				  */
 
 128
 129#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
 130#if HZ >= 100
 131#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
 132#define TCP_ATO_MIN	((unsigned)(HZ/25))
 133#else
 134#define TCP_DELACK_MIN	4U
 135#define TCP_ATO_MIN	4U
 136#endif
 137#define TCP_RTO_MAX	((unsigned)(120*HZ))
 138#define TCP_RTO_MIN	((unsigned)(HZ/5))
 139#define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */
 140#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
 141#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
 142						 * used as a fallback RTO for the
 143						 * initial data transmission if no
 144						 * valid RTT sample has been acquired,
 145						 * most likely due to retrans in 3WHS.
 146						 */
 147
 148#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
 149					                 * for local resources.
 150					                 */
 151#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
 152#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
 153#define TCP_KEEPALIVE_INTVL	(75*HZ)
 154
 155#define MAX_TCP_KEEPIDLE	32767
 156#define MAX_TCP_KEEPINTVL	32767
 157#define MAX_TCP_KEEPCNT		127
 158#define MAX_TCP_SYNCNT		127
 159
 160#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
 161
 162#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
 163#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
 164					 * after this time. It should be equal
 165					 * (or greater than) TCP_TIMEWAIT_LEN
 166					 * to provide reliability equal to one
 167					 * provided by timewait state.
 168					 */
 169#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
 170					 * timestamps. It must be less than
 171					 * minimal timewait lifetime.
 172					 */
 173/*
 174 *	TCP option
 175 */
 176
 177#define TCPOPT_NOP		1	/* Padding */
 178#define TCPOPT_EOL		0	/* End of options */
 179#define TCPOPT_MSS		2	/* Segment size negotiating */
 180#define TCPOPT_WINDOW		3	/* Window scaling */
 181#define TCPOPT_SACK_PERM        4       /* SACK Permitted */
 182#define TCPOPT_SACK             5       /* SACK Block */
 183#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
 184#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
 
 185#define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
 186#define TCPOPT_EXP		254	/* Experimental */
 187/* Magic number to be after the option value for sharing TCP
 188 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 189 */
 190#define TCPOPT_FASTOPEN_MAGIC	0xF989
 191#define TCPOPT_SMC_MAGIC	0xE2D4C3D9
 192
 193/*
 194 *     TCP option lengths
 195 */
 196
 197#define TCPOLEN_MSS            4
 198#define TCPOLEN_WINDOW         3
 199#define TCPOLEN_SACK_PERM      2
 200#define TCPOLEN_TIMESTAMP      10
 201#define TCPOLEN_MD5SIG         18
 202#define TCPOLEN_FASTOPEN_BASE  2
 203#define TCPOLEN_EXP_FASTOPEN_BASE  4
 204#define TCPOLEN_EXP_SMC_BASE   6
 205
 206/* But this is what stacks really send out. */
 207#define TCPOLEN_TSTAMP_ALIGNED		12
 208#define TCPOLEN_WSCALE_ALIGNED		4
 209#define TCPOLEN_SACKPERM_ALIGNED	4
 210#define TCPOLEN_SACK_BASE		2
 211#define TCPOLEN_SACK_BASE_ALIGNED	4
 212#define TCPOLEN_SACK_PERBLOCK		8
 213#define TCPOLEN_MD5SIG_ALIGNED		20
 214#define TCPOLEN_MSS_ALIGNED		4
 215#define TCPOLEN_EXP_SMC_BASE_ALIGNED	8
 216
 217/* Flags in tp->nonagle */
 218#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
 219#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
 220#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */
 221
 222/* TCP thin-stream limits */
 223#define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */
 224
 225/* TCP initial congestion window as per rfc6928 */
 226#define TCP_INIT_CWND		10
 227
 228/* Bit Flags for sysctl_tcp_fastopen */
 229#define	TFO_CLIENT_ENABLE	1
 230#define	TFO_SERVER_ENABLE	2
 231#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */
 232
 233/* Accept SYN data w/o any cookie option */
 234#define	TFO_SERVER_COOKIE_NOT_REQD	0x200
 235
 236/* Force enable TFO on all listeners, i.e., not requiring the
 237 * TCP_FASTOPEN socket option.
 238 */
 239#define	TFO_SERVER_WO_SOCKOPT1	0x400
 240
 241
 242/* sysctl variables for tcp */
 243extern int sysctl_tcp_max_orphans;
 244extern long sysctl_tcp_mem[3];
 245
 246#define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
 247#define TCP_RACK_STATIC_REO_WND  0x2 /* Use static RACK reo wnd */
 
 248
 249extern atomic_long_t tcp_memory_allocated;
 250extern struct percpu_counter tcp_sockets_allocated;
 251extern unsigned long tcp_memory_pressure;
 252
 253/* optimized version of sk_under_memory_pressure() for TCP sockets */
 254static inline bool tcp_under_memory_pressure(const struct sock *sk)
 255{
 256	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 257	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 258		return true;
 259
 260	return tcp_memory_pressure;
 261}
 262/*
 263 * The next routines deal with comparing 32 bit unsigned ints
 264 * and worry about wraparound (automatic with unsigned arithmetic).
 265 */
 266
 267static inline bool before(__u32 seq1, __u32 seq2)
 268{
 269        return (__s32)(seq1-seq2) < 0;
 270}
 271#define after(seq2, seq1) 	before(seq1, seq2)
 272
 273/* is s2<=s1<=s3 ? */
 274static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
 275{
 276	return seq3 - seq2 >= seq1 - seq2;
 277}
 278
 279static inline bool tcp_out_of_memory(struct sock *sk)
 280{
 281	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
 282	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
 283		return true;
 284	return false;
 285}
 286
 287void sk_forced_mem_schedule(struct sock *sk, int size);
 288
 289static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
 290{
 291	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
 292	int orphans = percpu_counter_read_positive(ocp);
 293
 294	if (orphans << shift > sysctl_tcp_max_orphans) {
 295		orphans = percpu_counter_sum_positive(ocp);
 296		if (orphans << shift > sysctl_tcp_max_orphans)
 297			return true;
 298	}
 299	return false;
 300}
 301
 302bool tcp_check_oom(struct sock *sk, int shift);
 303
 304
 305extern struct proto tcp_prot;
 306
 307#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
 308#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
 309#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
 310#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
 311
 312void tcp_tasklet_init(void);
 313
 314void tcp_v4_err(struct sk_buff *skb, u32);
 315
 316void tcp_shutdown(struct sock *sk, int how);
 317
 318int tcp_v4_early_demux(struct sk_buff *skb);
 319int tcp_v4_rcv(struct sk_buff *skb);
 320
 321int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
 322int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 323int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
 324int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
 325		 int flags);
 326int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
 327			size_t size, int flags);
 328ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
 329		 size_t size, int flags);
 
 
 
 330void tcp_release_cb(struct sock *sk);
 331void tcp_wfree(struct sk_buff *skb);
 332void tcp_write_timer_handler(struct sock *sk);
 333void tcp_delack_timer_handler(struct sock *sk);
 334int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 335int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
 336void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 337			 const struct tcphdr *th);
 338void tcp_rcv_space_adjust(struct sock *sk);
 339int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
 340void tcp_twsk_destructor(struct sock *sk);
 341ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
 342			struct pipe_inode_info *pipe, size_t len,
 343			unsigned int flags);
 344
 
 345static inline void tcp_dec_quickack_mode(struct sock *sk,
 346					 const unsigned int pkts)
 347{
 348	struct inet_connection_sock *icsk = inet_csk(sk);
 349
 350	if (icsk->icsk_ack.quick) {
 351		if (pkts >= icsk->icsk_ack.quick) {
 352			icsk->icsk_ack.quick = 0;
 353			/* Leaving quickack mode we deflate ATO. */
 354			icsk->icsk_ack.ato   = TCP_ATO_MIN;
 355		} else
 356			icsk->icsk_ack.quick -= pkts;
 357	}
 358}
 359
 360#define	TCP_ECN_OK		1
 361#define	TCP_ECN_QUEUE_CWR	2
 362#define	TCP_ECN_DEMAND_CWR	4
 363#define	TCP_ECN_SEEN		8
 364
 365enum tcp_tw_status {
 366	TCP_TW_SUCCESS = 0,
 367	TCP_TW_RST = 1,
 368	TCP_TW_ACK = 2,
 369	TCP_TW_SYN = 3
 370};
 371
 372
 373enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
 374					      struct sk_buff *skb,
 375					      const struct tcphdr *th);
 376struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 377			   struct request_sock *req, bool fastopen,
 378			   bool *lost_race);
 379int tcp_child_process(struct sock *parent, struct sock *child,
 380		      struct sk_buff *skb);
 381void tcp_enter_loss(struct sock *sk);
 382void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
 383void tcp_clear_retrans(struct tcp_sock *tp);
 384void tcp_update_metrics(struct sock *sk);
 385void tcp_init_metrics(struct sock *sk);
 386void tcp_metrics_init(void);
 387bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
 388void tcp_close(struct sock *sk, long timeout);
 389void tcp_init_sock(struct sock *sk);
 390void tcp_init_transfer(struct sock *sk, int bpf_op);
 391__poll_t tcp_poll(struct file *file, struct socket *sock,
 392		      struct poll_table_struct *wait);
 393int tcp_getsockopt(struct sock *sk, int level, int optname,
 394		   char __user *optval, int __user *optlen);
 395int tcp_setsockopt(struct sock *sk, int level, int optname,
 396		   char __user *optval, unsigned int optlen);
 397int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
 398			  char __user *optval, int __user *optlen);
 399int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
 400			  char __user *optval, unsigned int optlen);
 401void tcp_set_keepalive(struct sock *sk, int val);
 402void tcp_syn_ack_timeout(const struct request_sock *req);
 403int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 404		int flags, int *addr_len);
 
 
 
 
 
 
 405void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
 406		       struct tcp_options_received *opt_rx,
 407		       int estab, struct tcp_fastopen_cookie *foc);
 408const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 409
 410/*
 
 
 
 
 
 
 
 
 
 
 411 *	TCP v4 functions exported for the inet6 API
 412 */
 413
 414void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 415void tcp_v4_mtu_reduced(struct sock *sk);
 416void tcp_req_err(struct sock *sk, u32 seq, bool abort);
 
 417int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 418struct sock *tcp_create_openreq_child(const struct sock *sk,
 419				      struct request_sock *req,
 420				      struct sk_buff *skb);
 421void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
 422struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 423				  struct request_sock *req,
 424				  struct dst_entry *dst,
 425				  struct request_sock *req_unhash,
 426				  bool *own_req);
 427int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 428int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 429int tcp_connect(struct sock *sk);
 430enum tcp_synack_type {
 431	TCP_SYNACK_NORMAL,
 432	TCP_SYNACK_FASTOPEN,
 433	TCP_SYNACK_COOKIE,
 434};
 435struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 436				struct request_sock *req,
 437				struct tcp_fastopen_cookie *foc,
 438				enum tcp_synack_type synack_type);
 439int tcp_disconnect(struct sock *sk, int flags);
 440
 441void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
 442int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
 443void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
 444
 445/* From syncookies.c */
 446struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
 447				 struct request_sock *req,
 448				 struct dst_entry *dst, u32 tsoff);
 449int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
 450		      u32 cookie);
 451struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
 
 
 452#ifdef CONFIG_SYN_COOKIES
 453
 454/* Syncookies use a monotonic timer which increments every 60 seconds.
 455 * This counter is used both as a hash input and partially encoded into
 456 * the cookie value.  A cookie is only validated further if the delta
 457 * between the current counter value and the encoded one is less than this,
 458 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 459 * the counter advances immediately after a cookie is generated).
 460 */
 461#define MAX_SYNCOOKIE_AGE	2
 462#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
 463#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
 464
 465/* syncookies: remember time of last synqueue overflow
 466 * But do not dirty this field too often (once per second is enough)
 467 * It is racy as we do not hold a lock, but race is very minor.
 468 */
 469static inline void tcp_synq_overflow(const struct sock *sk)
 470{
 471	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
 472	unsigned long now = jiffies;
 
 
 
 473
 474	if (time_after(now, last_overflow + HZ))
 475		tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
 
 
 
 
 
 
 
 
 
 
 
 476}
 477
 478/* syncookies: no recent synqueue overflow on this listening socket? */
 479static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
 480{
 481	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
 
 
 
 
 482
 483	return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 484}
 485
 486static inline u32 tcp_cookie_time(void)
 487{
 488	u64 val = get_jiffies_64();
 489
 490	do_div(val, TCP_SYNCOOKIE_PERIOD);
 491	return val;
 492}
 493
 494u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
 495			      u16 *mssp);
 496__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
 497u64 cookie_init_timestamp(struct request_sock *req);
 498bool cookie_timestamp_decode(const struct net *net,
 499			     struct tcp_options_received *opt);
 500bool cookie_ecn_ok(const struct tcp_options_received *opt,
 501		   const struct net *net, const struct dst_entry *dst);
 502
 503/* From net/ipv6/syncookies.c */
 504int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
 505		      u32 cookie);
 506struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
 507
 508u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
 509			      const struct tcphdr *th, u16 *mssp);
 510__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
 511#endif
 512/* tcp_output.c */
 513
 514void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
 515			       int nonagle);
 516int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
 517int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
 518void tcp_retransmit_timer(struct sock *sk);
 519void tcp_xmit_retransmit_queue(struct sock *);
 520void tcp_simple_retransmit(struct sock *);
 521void tcp_enter_recovery(struct sock *sk, bool ece_ack);
 522int tcp_trim_head(struct sock *, struct sk_buff *, u32);
 523enum tcp_queue {
 524	TCP_FRAG_IN_WRITE_QUEUE,
 525	TCP_FRAG_IN_RTX_QUEUE,
 526};
 527int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
 528		 struct sk_buff *skb, u32 len,
 529		 unsigned int mss_now, gfp_t gfp);
 530
 531void tcp_send_probe0(struct sock *);
 532void tcp_send_partial(struct sock *);
 533int tcp_write_wakeup(struct sock *, int mib);
 534void tcp_send_fin(struct sock *sk);
 535void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 536int tcp_send_synack(struct sock *);
 537void tcp_push_one(struct sock *, unsigned int mss_now);
 
 538void tcp_send_ack(struct sock *sk);
 539void tcp_send_delayed_ack(struct sock *sk);
 540void tcp_send_loss_probe(struct sock *sk);
 541bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
 542void tcp_skb_collapse_tstamp(struct sk_buff *skb,
 543			     const struct sk_buff *next_skb);
 544
 545/* tcp_input.c */
 546void tcp_rearm_rto(struct sock *sk);
 547void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
 548void tcp_reset(struct sock *sk);
 549void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
 550void tcp_fin(struct sock *sk);
 551
 552/* tcp_timer.c */
 553void tcp_init_xmit_timers(struct sock *);
 554static inline void tcp_clear_xmit_timers(struct sock *sk)
 555{
 556	hrtimer_cancel(&tcp_sk(sk)->pacing_timer);
 
 
 
 
 
 557	inet_csk_clear_xmit_timers(sk);
 558}
 559
 560unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
 561unsigned int tcp_current_mss(struct sock *sk);
 562
 563/* Bound MSS / TSO packet size with the half of the window */
 564static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
 565{
 566	int cutoff;
 567
 568	/* When peer uses tiny windows, there is no use in packetizing
 569	 * to sub-MSS pieces for the sake of SWS or making sure there
 570	 * are enough packets in the pipe for fast recovery.
 571	 *
 572	 * On the other hand, for extremely large MSS devices, handling
 573	 * smaller than MSS windows in this way does make sense.
 574	 */
 575	if (tp->max_window > TCP_MSS_DEFAULT)
 576		cutoff = (tp->max_window >> 1);
 577	else
 578		cutoff = tp->max_window;
 579
 580	if (cutoff && pktsize > cutoff)
 581		return max_t(int, cutoff, 68U - tp->tcp_header_len);
 582	else
 583		return pktsize;
 584}
 585
 586/* tcp.c */
 587void tcp_get_info(struct sock *, struct tcp_info *);
 588
 589/* Read 'sendfile()'-style from a TCP socket */
 590int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 591		  sk_read_actor_t recv_actor);
 592
 593void tcp_initialize_rcv_mss(struct sock *sk);
 594
 595int tcp_mtu_to_mss(struct sock *sk, int pmtu);
 596int tcp_mss_to_mtu(struct sock *sk, int mss);
 597void tcp_mtup_init(struct sock *sk);
 598void tcp_init_buffer_space(struct sock *sk);
 599
 600static inline void tcp_bound_rto(const struct sock *sk)
 601{
 602	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
 603		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
 604}
 605
 606static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
 607{
 608	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
 609}
 610
 611static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
 612{
 613	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
 614			       ntohl(TCP_FLAG_ACK) |
 615			       snd_wnd);
 616}
 617
 618static inline void tcp_fast_path_on(struct tcp_sock *tp)
 619{
 620	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
 621}
 622
 623static inline void tcp_fast_path_check(struct sock *sk)
 624{
 625	struct tcp_sock *tp = tcp_sk(sk);
 626
 627	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
 628	    tp->rcv_wnd &&
 629	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
 630	    !tp->urg_data)
 631		tcp_fast_path_on(tp);
 632}
 633
 634/* Compute the actual rto_min value */
 635static inline u32 tcp_rto_min(struct sock *sk)
 636{
 637	const struct dst_entry *dst = __sk_dst_get(sk);
 638	u32 rto_min = TCP_RTO_MIN;
 639
 640	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
 641		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
 642	return rto_min;
 643}
 644
 645static inline u32 tcp_rto_min_us(struct sock *sk)
 646{
 647	return jiffies_to_usecs(tcp_rto_min(sk));
 648}
 649
 650static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
 651{
 652	return dst_metric_locked(dst, RTAX_CC_ALGO);
 653}
 654
 655/* Minimum RTT in usec. ~0 means not available. */
 656static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
 657{
 658	return minmax_get(&tp->rtt_min);
 659}
 660
 661/* Compute the actual receive window we are currently advertising.
 662 * Rcv_nxt can be after the window if our peer push more data
 663 * than the offered window.
 664 */
 665static inline u32 tcp_receive_window(const struct tcp_sock *tp)
 666{
 667	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
 668
 669	if (win < 0)
 670		win = 0;
 671	return (u32) win;
 672}
 673
 674/* Choose a new window, without checks for shrinking, and without
 675 * scaling applied to the result.  The caller does these things
 676 * if necessary.  This is a "raw" window selection.
 677 */
 678u32 __tcp_select_window(struct sock *sk);
 679
 680void tcp_send_window_probe(struct sock *sk);
 681
 682/* TCP uses 32bit jiffies to save some space.
 683 * Note that this is different from tcp_time_stamp, which
 684 * historically has been the same until linux-4.13.
 685 */
 686#define tcp_jiffies32 ((u32)jiffies)
 687
 688/*
 689 * Deliver a 32bit value for TCP timestamp option (RFC 7323)
 690 * It is no longer tied to jiffies, but to 1 ms clock.
 691 * Note: double check if you want to use tcp_jiffies32 instead of this.
 692 */
 693#define TCP_TS_HZ	1000
 694
 695static inline u64 tcp_clock_ns(void)
 696{
 697	return local_clock();
 698}
 699
 700static inline u64 tcp_clock_us(void)
 701{
 702	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
 703}
 704
 705/* This should only be used in contexts where tp->tcp_mstamp is up to date */
 706static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
 707{
 708	return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
 709}
 710
 
 
 
 
 
 
 711/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
 712static inline u32 tcp_time_stamp_raw(void)
 713{
 714	return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ);
 715}
 716
 
 717
 718/* Refresh 1us clock of a TCP socket,
 719 * ensuring monotically increasing values.
 720 */
 721static inline void tcp_mstamp_refresh(struct tcp_sock *tp)
 722{
 723	u64 val = tcp_clock_us();
 724
 725	if (val > tp->tcp_mstamp)
 726		tp->tcp_mstamp = val;
 727}
 728
 729static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
 730{
 731	return max_t(s64, t1 - t0, 0);
 732}
 733
 734static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
 
 735{
 736	return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ);
 737}
 738
 739
 740#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
 741
 742#define TCPHDR_FIN 0x01
 743#define TCPHDR_SYN 0x02
 744#define TCPHDR_RST 0x04
 745#define TCPHDR_PSH 0x08
 746#define TCPHDR_ACK 0x10
 747#define TCPHDR_URG 0x20
 748#define TCPHDR_ECE 0x40
 749#define TCPHDR_CWR 0x80
 750
 751#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
 752
 753/* This is what the send packet queuing engine uses to pass
 754 * TCP per-packet control information to the transmission code.
 755 * We also store the host-order sequence numbers in here too.
 756 * This is 44 bytes if IPV6 is enabled.
 757 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 758 */
 759struct tcp_skb_cb {
 760	__u32		seq;		/* Starting sequence number	*/
 761	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
 762	union {
 763		/* Note : tcp_tw_isn is used in input path only
 764		 *	  (isn chosen by tcp_timewait_state_process())
 765		 *
 766		 * 	  tcp_gso_segs/size are used in write queue only,
 767		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
 768		 */
 769		__u32		tcp_tw_isn;
 770		struct {
 771			u16	tcp_gso_segs;
 772			u16	tcp_gso_size;
 773		};
 774	};
 775	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/
 776
 777	__u8		sacked;		/* State flags for SACK.	*/
 778#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
 779#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
 780#define TCPCB_LOST		0x04	/* SKB is lost			*/
 781#define TCPCB_TAGBITS		0x07	/* All tag bits			*/
 782#define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp)	*/
 783#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
 784#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
 785				TCPCB_REPAIRED)
 786
 787	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
 788	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
 789			eor:1,		/* Is skb MSG_EOR marked? */
 790			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
 791			unused:5;
 792	__u32		ack_seq;	/* Sequence number ACK'd	*/
 793	union {
 794		struct {
 795			/* There is space for up to 24 bytes */
 796			__u32 in_flight:30,/* Bytes in flight at transmit */
 797			      is_app_limited:1, /* cwnd not fully used? */
 798			      unused:1;
 799			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
 800			__u32 delivered;
 801			/* start of send pipeline phase */
 802			u64 first_tx_mstamp;
 803			/* when we reached the "delivered" count */
 804			u64 delivered_mstamp;
 805		} tx;   /* only used for outgoing skbs */
 806		union {
 807			struct inet_skb_parm	h4;
 808#if IS_ENABLED(CONFIG_IPV6)
 809			struct inet6_skb_parm	h6;
 810#endif
 811		} header;	/* For incoming skbs */
 812		struct {
 813			__u32 key;
 814			__u32 flags;
 815			struct bpf_map *map;
 816			void *data_end;
 817		} bpf;
 818	};
 819};
 820
 821#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
 822
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 823
 824#if IS_ENABLED(CONFIG_IPV6)
 825/* This is the variant of inet6_iif() that must be used by TCP,
 826 * as TCP moves IP6CB into a different location in skb->cb[]
 827 */
 828static inline int tcp_v6_iif(const struct sk_buff *skb)
 829{
 
 
 
 
 
 830	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
 831
 832	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
 833}
 834
 835/* TCP_SKB_CB reference means this can not be used from early demux */
 836static inline int tcp_v6_sdif(const struct sk_buff *skb)
 837{
 838#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
 839	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
 840		return TCP_SKB_CB(skb)->header.h6.iif;
 841#endif
 842	return 0;
 843}
 
 
 
 
 
 
 
 844#endif
 845
 846static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
 847{
 848#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
 849	if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
 850	    skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
 851		return true;
 852#endif
 853	return false;
 854}
 855
 856/* TCP_SKB_CB reference means this can not be used from early demux */
 857static inline int tcp_v4_sdif(struct sk_buff *skb)
 858{
 859#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
 860	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
 861		return TCP_SKB_CB(skb)->header.h4.iif;
 862#endif
 863	return 0;
 864}
 865
 866/* Due to TSO, an SKB can be composed of multiple actual
 867 * packets.  To keep these tracked properly, we use this.
 868 */
 869static inline int tcp_skb_pcount(const struct sk_buff *skb)
 870{
 871	return TCP_SKB_CB(skb)->tcp_gso_segs;
 872}
 873
 874static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
 875{
 876	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
 877}
 878
 879static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
 880{
 881	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
 882}
 883
 884/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
 885static inline int tcp_skb_mss(const struct sk_buff *skb)
 886{
 887	return TCP_SKB_CB(skb)->tcp_gso_size;
 888}
 889
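/* Illustrative sketch (hypothetical helper): with TSO/GSO a write-queue
 * skb covers tcp_skb_pcount() wire packets of at most tcp_skb_mss() bytes
 * each, so pcount * mss is an upper bound on its payload.  As noted above,
 * tcp_skb_mss() is only meaningful when tcp_skb_pcount() > 1.
 */
static inline u32 tcp_skb_payload_upper_bound(const struct sk_buff *skb)
{
	return (u32)tcp_skb_pcount(skb) * tcp_skb_mss(skb);
}
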
 890static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
 891{
 892	return likely(!TCP_SKB_CB(skb)->eor);
 893}
 894
 895/* Events passed to congestion control interface */
 896enum tcp_ca_event {
 897	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
 898	CA_EVENT_CWND_RESTART,	/* congestion window restart */
 899	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
 900	CA_EVENT_LOSS,		/* loss timeout */
 901	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
 902	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
 903	CA_EVENT_DELAYED_ACK,	/* Delayed ack is sent */
 904	CA_EVENT_NON_DELAYED_ACK,
 905};
 906
 907/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
 908enum tcp_ca_ack_event_flags {
 909	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
 910	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
 911	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
 912};
 913
 914/*
 915 * Interface for adding new TCP congestion control handlers
 916 */
 917#define TCP_CA_NAME_MAX	16
 918#define TCP_CA_MAX	128
 919#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)
 920
 921#define TCP_CA_UNSPEC	0
 922
 923/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
 924#define TCP_CONG_NON_RESTRICTED 0x1
 925/* Requires ECN/ECT set on all packets */
 926#define TCP_CONG_NEEDS_ECN	0x2
 
 927
 928union tcp_cc_info;
 929
 930struct ack_sample {
 931	u32 pkts_acked;
 932	s32 rtt_us;
 933	u32 in_flight;
 934};
 935
 936/* A rate sample measures the number of (original/retransmitted) data
 937 * packets delivered "delivered" over an interval of time "interval_us".
 938 * The tcp_rate.c code fills in the rate sample, and congestion
 939 * control modules that define a cong_control function to run at the end
 940 * of ACK processing can optionally choose to consult this sample when
 941 * setting cwnd and pacing rate.
 942 * A sample is invalid if "delivered" or "interval_us" is negative.
 943 */
 944struct rate_sample {
 945	u64  prior_mstamp; /* starting timestamp for interval */
 946	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
 947	s32  delivered;		/* number of packets delivered over interval */
 948	long interval_us;	/* time for tp->delivered to incr "delivered" */
 
 
 949	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
 950	int  losses;		/* number of packets marked lost upon ACK */
 951	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
 952	u32  prior_in_flight;	/* in flight before this ACK */
 953	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
 954	bool is_retrans;	/* is sample from retransmission? */
 955	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
 956};
 957
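/* Sketch of how a rate-based cong_control hook could turn a rate_sample
 * into a delivery-rate estimate in packets per second.  Illustrative only;
 * assumes <linux/math64.h> for div64_u64() and is not how any in-tree
 * module is structured.
 */
static inline u64 tcp_rs_delivery_rate_pps(const struct rate_sample *rs)
{
	if (rs->delivered <= 0 || rs->interval_us <= 0)
		return 0;	/* invalid sample, see the comment above */

	return div64_u64((u64)rs->delivered * USEC_PER_SEC,
			 (u64)rs->interval_us);
}
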
 958struct tcp_congestion_ops {
 959	struct list_head	list;
 960	u32 key;
 961	u32 flags;
 962
 963	/* initialize private data (optional) */
 964	void (*init)(struct sock *sk);
 965	/* cleanup private data  (optional) */
 966	void (*release)(struct sock *sk);
 967
 968	/* return slow start threshold (required) */
 969	u32 (*ssthresh)(struct sock *sk);
 970	/* do new cwnd calculation (required) */
 971	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
 972	/* call before changing ca_state (optional) */
 973	void (*set_state)(struct sock *sk, u8 new_state);
 974	/* call when cwnd event occurs (optional) */
 975	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
 976	/* call when ack arrives (optional) */
 977	void (*in_ack_event)(struct sock *sk, u32 flags);
 978	/* new value of cwnd after loss (required) */
 979	u32  (*undo_cwnd)(struct sock *sk);
 980	/* hook for packet ack accounting (optional) */
 981	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
 982	/* override sysctl_tcp_min_tso_segs */
 983	u32 (*min_tso_segs)(struct sock *sk);
 984	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
 985	u32 (*sndbuf_expand)(struct sock *sk);
 986	/* call when packets are delivered to update cwnd and pacing rate,
 987	 * after all the ca_state processing. (optional)
 988	 */
 989	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
 990	/* get info for inet_diag (optional) */
 991	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
 992			   union tcp_cc_info *info);
 993
 994	char 		name[TCP_CA_NAME_MAX];
 995	struct module 	*owner;
 996};
 997
 998int tcp_register_congestion_control(struct tcp_congestion_ops *type);
 999void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
1000
1001void tcp_assign_congestion_control(struct sock *sk);
1002void tcp_init_congestion_control(struct sock *sk);
1003void tcp_cleanup_congestion_control(struct sock *sk);
1004int tcp_set_default_congestion_control(struct net *net, const char *name);
1005void tcp_get_default_congestion_control(struct net *net, char *name);
1006void tcp_get_available_congestion_control(char *buf, size_t len);
1007void tcp_get_allowed_congestion_control(char *buf, size_t len);
1008int tcp_set_allowed_congestion_control(char *allowed);
1009int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit);
 
1010u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
1011void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
1012
1013u32 tcp_reno_ssthresh(struct sock *sk);
1014u32 tcp_reno_undo_cwnd(struct sock *sk);
1015void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
1016extern struct tcp_congestion_ops tcp_reno;
1017
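/* Minimal sketch of a congestion control module built on this interface,
 * reusing the Reno helpers declared above.  "tcp_example" is a hypothetical
 * name; a real module lives in its own file and needs <linux/module.h>.
 */
static struct tcp_congestion_ops tcp_example __read_mostly = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "example",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
};

static int __init tcp_example_register(void)
{
	return tcp_register_congestion_control(&tcp_example);
}

static void __exit tcp_example_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_example);
}

module_init(tcp_example_register);
module_exit(tcp_example_unregister);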
 
1018struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
1019u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
1020#ifdef CONFIG_INET
1021char *tcp_ca_get_name_by_key(u32 key, char *buffer);
1022#else
1023static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
1024{
1025	return NULL;
1026}
1027#endif
1028
1029static inline bool tcp_ca_needs_ecn(const struct sock *sk)
1030{
1031	const struct inet_connection_sock *icsk = inet_csk(sk);
1032
1033	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
1034}
1035
1036static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
1037{
1038	struct inet_connection_sock *icsk = inet_csk(sk);
1039
1040	if (icsk->icsk_ca_ops->set_state)
1041		icsk->icsk_ca_ops->set_state(sk, ca_state);
1042	icsk->icsk_ca_state = ca_state;
1043}
1044
1045static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
1046{
1047	const struct inet_connection_sock *icsk = inet_csk(sk);
1048
1049	if (icsk->icsk_ca_ops->cwnd_event)
1050		icsk->icsk_ca_ops->cwnd_event(sk, event);
1051}
1052
1053/* From tcp_rate.c */
1054void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1055void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1056			    struct rate_sample *rs);
1057void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1058		  bool is_sack_reneg, struct rate_sample *rs);
1059void tcp_rate_check_app_limited(struct sock *sk);
1060
1061/* These functions determine how the current flow behaves with respect to SACK
1062 * handling. SACK is negotiated with the peer, and therefore it can vary
1063 * between different flows.
1064 *
1065 * tcp_is_sack - SACK enabled
1066 * tcp_is_reno - No SACK
1067 */
1068static inline int tcp_is_sack(const struct tcp_sock *tp)
1069{
1070	return tp->rx_opt.sack_ok;
1071}
1072
1073static inline bool tcp_is_reno(const struct tcp_sock *tp)
1074{
1075	return !tcp_is_sack(tp);
1076}
1077
1078static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
1079{
1080	return tp->sacked_out + tp->lost_out;
1081}
1082
1083/* This determines how many packets are "in the network" to the best
1084 * of our knowledge.  In many cases it is conservative, but where
1085 * detailed information is available from the receiver (via SACK
1086 * blocks etc.) we can make more aggressive calculations.
1087 *
1088 * Use this for decisions involving congestion control; use just
1089 * tp->packets_out to determine whether the send queue is empty.
1090 *
1091 * Read this equation as:
1092 *
1093 *	"Packets sent once on transmission queue" MINUS
1094 *	"Packets that left the network but are not honestly ACKed yet" PLUS
1095 *	"Packets fast retransmitted"
1096 */
1097static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
1098{
1099	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
1100}
1101
1102#define TCP_INFINITE_SSTHRESH	0x7fffffff
1103
1104static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
1105{
1106	return tp->snd_cwnd < tp->snd_ssthresh;
1107}
1108
1109static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
1110{
1111	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
1112}
1113
1114static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
1115{
1116	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
1117	       (1 << inet_csk(sk)->icsk_ca_state);
1118}
1119
1120/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1121 * The exception is the cwnd reduction phase, when cwnd is decreasing towards
1122 * ssthresh.
1123 */
1124static inline __u32 tcp_current_ssthresh(const struct sock *sk)
1125{
1126	const struct tcp_sock *tp = tcp_sk(sk);
1127
1128	if (tcp_in_cwnd_reduction(sk))
1129		return tp->snd_ssthresh;
1130	else
1131		return max(tp->snd_ssthresh,
1132			   ((tp->snd_cwnd >> 1) +
1133			    (tp->snd_cwnd >> 2)));
1134}
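
/* Worked example (illustrative): (snd_cwnd >> 1) + (snd_cwnd >> 2) is 3/4
 * of cwnd, so for snd_cwnd = 16 the helper above returns
 * max(snd_ssthresh, 12).
 */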
1135
1136/* Use a define here intentionally so the WARN_ON location is shown at the caller */
1137#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
1138
1139void tcp_enter_cwr(struct sock *sk);
1140__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
1141
1142/* The maximum number of MSS of available cwnd for which TSO defers
1143 * sending if not using sysctl_tcp_tso_win_divisor.
1144 */
1145static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
1146{
1147	return 3;
1148}
1149
1150/* Returns end sequence number of the receiver's advertised window */
1151static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
1152{
1153	return tp->snd_una + tp->snd_wnd;
1154}
1155
1156/* We follow the spirit of RFC2861 to validate cwnd but implement a more
1157 * flexible approach. The RFC suggests cwnd should not be raised unless
1158 * it was fully used previously. And that's exactly what we do in
1159 * congestion avoidance mode. But in slow start we allow cwnd to grow
1160 * as long as the application has used half the cwnd.
1161 * Example :
1162 *    cwnd is 10 (IW10), but application sends 9 frames.
1163 *    We allow cwnd to reach 18 when all frames are ACKed.
1164 * This check is safe because it's as aggressive as slow start which already
1165 * risks 100% overshoot. The advantage is that we discourage the application
1166 * from sending extra filler packets or data just to artificially blow up
1167 * cwnd usage, and allow an application-limited process to probe bandwidth more aggressively.
1168 */
1169static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1170{
1171	const struct tcp_sock *tp = tcp_sk(sk);
1172
1173	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
1174	if (tcp_in_slow_start(tp))
1175		return tp->snd_cwnd < 2 * tp->max_packets_out;
1176
1177	return tp->is_cwnd_limited;
1178}
1179
1180/* Something is really bad: we could not queue an additional packet
1181 * because the qdisc is full or the receiver sent a zero window.
1182 * We do not want to add fuel to the fire, or abort too early,
1183 * so make sure the timer we arm now is at least 200ms in the future,
1184 * regardless of current icsk_rto value (as it could be ~2ms)
1185 */
1186static inline unsigned long tcp_probe0_base(const struct sock *sk)
1187{
1188	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
1189}
1190
1191/* Variant of inet_csk_rto_backoff() used for zero window probes */
1192static inline unsigned long tcp_probe0_when(const struct sock *sk,
1193					    unsigned long max_when)
1194{
1195	u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;
1196
1197	return (unsigned long)min_t(u64, when, max_when);
1198}
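
/* Worked example (illustrative): with icsk_rto around 2ms and
 * icsk_backoff = 3, tcp_probe0_base() clamps the base to TCP_RTO_MIN
 * (200ms) and tcp_probe0_when() arms the probe 200ms << 3 = 1.6s in the
 * future, capped at max_when.
 */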
1199
1200static inline void tcp_check_probe_timer(struct sock *sk)
1201{
1202	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
1203		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
1204					  tcp_probe0_base(sk), TCP_RTO_MAX);
1205}
1206
1207static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
1208{
1209	tp->snd_wl1 = seq;
1210}
1211
1212static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
1213{
1214	tp->snd_wl1 = seq;
1215}
1216
1217/*
1218 * Calculate(/check) TCP checksum
1219 */
1220static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1221				   __be32 daddr, __wsum base)
1222{
1223	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
1224}
1225
1226static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
1227{
1228	return __skb_checksum_complete(skb);
1229}
1230
1231static inline bool tcp_checksum_complete(struct sk_buff *skb)
1232{
1233	return !skb_csum_unnecessary(skb) &&
1234		__tcp_checksum_complete(skb);
1235}
1236
1237bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
1238int tcp_filter(struct sock *sk, struct sk_buff *skb);
1239
1240#undef STATE_TRACE
1241
1242#ifdef STATE_TRACE
1243static const char *statename[] = {
1244	"Unused", "Established", "Syn Sent", "Syn Recv",
1245	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
1246	"Close Wait", "Last ACK", "Listen", "Closing"
1247};
1248#endif
1249void tcp_set_state(struct sock *sk, int state);
1250
1251void tcp_done(struct sock *sk);
1252
1253int tcp_abort(struct sock *sk, int err);
1254
1255static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
1256{
1257	rx_opt->dsack = 0;
1258	rx_opt->num_sacks = 0;
1259}
1260
1261u32 tcp_default_init_rwnd(u32 mss);
1262void tcp_cwnd_restart(struct sock *sk, s32 delta);
1263
1264static inline void tcp_slow_start_after_idle_check(struct sock *sk)
1265{
1266	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1267	struct tcp_sock *tp = tcp_sk(sk);
1268	s32 delta;
1269
1270	if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out ||
1271	    ca_ops->cong_control)
1272		return;
1273	delta = tcp_jiffies32 - tp->lsndtime;
1274	if (delta > inet_csk(sk)->icsk_rto)
1275		tcp_cwnd_restart(sk, delta);
1276}
1277
1278/* Determine a window scaling and initial window to offer. */
1279void tcp_select_initial_window(const struct sock *sk, int __space,
1280			       __u32 mss, __u32 *rcv_wnd,
1281			       __u32 *window_clamp, int wscale_ok,
1282			       __u8 *rcv_wscale, __u32 init_rcv_wnd);
1283
1284static inline int tcp_win_from_space(const struct sock *sk, int space)
1285{
1286	int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;
1287
1288	return tcp_adv_win_scale <= 0 ?
1289		(space>>(-tcp_adv_win_scale)) :
1290		space - (space>>tcp_adv_win_scale);
1291}
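
/* Worked example (illustrative): with tcp_adv_win_scale = 1 half of the
 * buffer is advertised as window (space - space/2), with 2 three quarters
 * (space - space/4), and with -2 only one quarter (space >> 2).
 */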
1292
1293/* Note: caller must be prepared to deal with negative returns */
1294static inline int tcp_space(const struct sock *sk)
1295{
1296	return tcp_win_from_space(sk, sk->sk_rcvbuf -
1297				  atomic_read(&sk->sk_rmem_alloc));
1298}
1299
1300static inline int tcp_full_space(const struct sock *sk)
1301{
1302	return tcp_win_from_space(sk, sk->sk_rcvbuf);
1303}
1304
1305extern void tcp_openreq_init_rwin(struct request_sock *req,
1306				  const struct sock *sk_listener,
1307				  const struct dst_entry *dst);
1308
1309void tcp_enter_memory_pressure(struct sock *sk);
1310void tcp_leave_memory_pressure(struct sock *sk);
1311
1312static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1313{
1314	struct net *net = sock_net((struct sock *)tp);
1315
1316	return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
1317}
1318
1319static inline int keepalive_time_when(const struct tcp_sock *tp)
1320{
1321	struct net *net = sock_net((struct sock *)tp);
1322
1323	return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
1324}
1325
1326static inline int keepalive_probes(const struct tcp_sock *tp)
1327{
1328	struct net *net = sock_net((struct sock *)tp);
1329
1330	return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
1331}
1332
1333static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1334{
1335	const struct inet_connection_sock *icsk = &tp->inet_conn;
1336
1337	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
1338			  tcp_jiffies32 - tp->rcv_tstamp);
1339}
1340
1341static inline int tcp_fin_time(const struct sock *sk)
1342{
1343	int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
1344	const int rto = inet_csk(sk)->icsk_rto;
1345
1346	if (fin_timeout < (rto << 2) - (rto >> 1))
1347		fin_timeout = (rto << 2) - (rto >> 1);
1348
1349	return fin_timeout;
1350}
1351
1352static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1353				  int paws_win)
1354{
1355	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1356		return true;
1357	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
1358		return true;
1359	/*
1360	 * Some OSes send SYN and SYNACK messages with tsval=0 and tsecr=0,
1361	 * while the following TCP segments carry valid values. Ignore the 0 value,
1362	 * or else a 'negative' tsval might forbid us from accepting their packets.
1363	 */
1364	if (!rx_opt->ts_recent)
1365		return true;
1366	return false;
1367}
1368
1369static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1370				   int rst)
1371{
1372	if (tcp_paws_check(rx_opt, 0))
1373		return false;
1374
1375	/* RST segments are not recommended to carry a timestamp,
1376	   and, if they do, it is recommended to ignore PAWS because
1377	   "their cleanup function should take precedence over timestamps."
1378	   That is certainly a mistake. It is necessary to understand the reasons
1379	   for this constraint before relaxing it: if the peer reboots, its clock may go
1380	   out of sync and half-open connections will not be reset.
1381	   Actually, the problem would not exist if all
1382	   implementations followed the draft about maintaining clocks
1383	   across reboots. Linux-2.2 DOES NOT!
1384
1385	   However, we can relax time bounds for RST segments to MSL.
1386	 */
1387	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
1388		return false;
1389	return true;
1390}
1391
1392bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
1393			  int mib_idx, u32 *last_oow_ack_time);
1394
1395static inline void tcp_mib_init(struct net *net)
1396{
1397	/* See RFC 2012 */
1398	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
1399	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1400	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1401	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
1402}
1403
1404/* from STCP */
1405static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1406{
1407	tp->lost_skb_hint = NULL;
1408}
1409
1410static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1411{
1412	tcp_clear_retrans_hints_partial(tp);
1413	tp->retransmit_skb_hint = NULL;
1414}
1415
1416union tcp_md5_addr {
1417	struct in_addr  a4;
1418#if IS_ENABLED(CONFIG_IPV6)
1419	struct in6_addr	a6;
1420#endif
1421};
1422
1423/* - key database */
1424struct tcp_md5sig_key {
1425	struct hlist_node	node;
1426	u8			keylen;
1427	u8			family; /* AF_INET or AF_INET6 */
 
1428	union tcp_md5_addr	addr;
1429	u8			prefixlen;
1430	u8			key[TCP_MD5SIG_MAXKEYLEN];
1431	struct rcu_head		rcu;
1432};
1433
1434/* - sock block */
1435struct tcp_md5sig_info {
1436	struct hlist_head	head;
1437	struct rcu_head		rcu;
1438};
1439
1440/* - pseudo header */
1441struct tcp4_pseudohdr {
1442	__be32		saddr;
1443	__be32		daddr;
1444	__u8		pad;
1445	__u8		protocol;
1446	__be16		len;
1447};
1448
1449struct tcp6_pseudohdr {
1450	struct in6_addr	saddr;
1451	struct in6_addr daddr;
1452	__be32		len;
1453	__be32		protocol;	/* including padding */
1454};
1455
1456union tcp_md5sum_block {
1457	struct tcp4_pseudohdr ip4;
1458#if IS_ENABLED(CONFIG_IPV6)
1459	struct tcp6_pseudohdr ip6;
1460#endif
1461};
1462
1463/* - pool: digest algorithm, hash description and scratch buffer */
1464struct tcp_md5sig_pool {
1465	struct ahash_request	*md5_req;
1466	void			*scratch;
1467};
1468
1469/* - functions */
1470int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1471			const struct sock *sk, const struct sk_buff *skb);
1472int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1473		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
1474		   gfp_t gfp);
1475int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1476		   int family, u8 prefixlen);
1477struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1478					 const struct sock *addr_sk);
1479
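/* Sketch (hypothetical helper, illustrative only): installing an IPv4 MD5
 * key on a socket via tcp_md5_do_add(), much as the TCP_MD5SIG setsockopt
 * path does.  keylen must not exceed TCP_MD5SIG_MAXKEYLEN; prefixlen 32
 * matches the peer address exactly.
 */
static inline int tcp_md5_example_add_v4(struct sock *sk, __be32 peer,
					 const u8 *key, u8 keylen)
{
	union tcp_md5_addr addr = { .a4.s_addr = peer };

	return tcp_md5_do_add(sk, &addr, AF_INET, 32, key, keylen,
			      GFP_KERNEL);
}
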
1480#ifdef CONFIG_TCP_MD5SIG
1481struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
1482					 const union tcp_md5_addr *addr,
1483					 int family);
1484#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
1485#else
1486static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
1487					 const union tcp_md5_addr *addr,
1488					 int family)
1489{
1490	return NULL;
1491}
1492#define tcp_twsk_md5_key(twsk)	NULL
1493#endif
1494
1495bool tcp_alloc_md5sig_pool(void);
1496
1497struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
1498static inline void tcp_put_md5sig_pool(void)
1499{
1500	local_bh_enable();
1501}
1502
1503int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
1504			  unsigned int header_len);
1505int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1506		     const struct tcp_md5sig_key *key);
1507
1508/* From tcp_fastopen.c */
1509void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1510			    struct tcp_fastopen_cookie *cookie);
1511void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1512			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
1513			    u16 try_exp);
1514struct tcp_fastopen_request {
1515	/* Fast Open cookie. Size 0 means a cookie request */
1516	struct tcp_fastopen_cookie	cookie;
1517	struct msghdr			*data;  /* data in MSG_FASTOPEN */
1518	size_t				size;
1519	int				copied;	/* queued in tcp_connect() */
 
1520};
1521void tcp_free_fastopen_req(struct tcp_sock *tp);
1522void tcp_fastopen_destroy_cipher(struct sock *sk);
1523void tcp_fastopen_ctx_destroy(struct net *net);
1524int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
1525			      void *key, unsigned int len);
 
 
1526void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
1527struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1528			      struct request_sock *req,
1529			      struct tcp_fastopen_cookie *foc,
1530			      const struct dst_entry *dst);
1531void tcp_fastopen_init_key_once(struct net *net);
1532bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
1533			     struct tcp_fastopen_cookie *cookie);
1534bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
1535#define TCP_FASTOPEN_KEY_LENGTH 16
 
 
 
1536
1537/* Fastopen key context */
1538struct tcp_fastopen_context {
1539	struct crypto_cipher	*tfm;
1540	__u8			key[TCP_FASTOPEN_KEY_LENGTH];
1541	struct rcu_head		rcu;
1542};
1543
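/* Sketch (hypothetical helper, illustrative only): generating and
 * installing a server Fast Open key, roughly what the TCP_FASTOPEN_KEY
 * socket option path does.  Assumes <linux/random.h> for
 * get_random_bytes().
 */
static inline int tcp_fastopen_example_set_key(struct sock *sk)
{
	u8 key[TCP_FASTOPEN_KEY_LENGTH];

	get_random_bytes(key, sizeof(key));
	return tcp_fastopen_reset_cipher(sock_net(sk), sk, key, sizeof(key));
}
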
1544extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
1545void tcp_fastopen_active_disable(struct sock *sk);
1546bool tcp_fastopen_active_should_disable(struct sock *sk);
1547void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
1548void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
1549
1550/* Latencies incurred by various limits for a sender. They are
1551 * chronograph-like stats that are mutually exclusive.
1552 */
1553enum tcp_chrono {
1554	TCP_CHRONO_UNSPEC,
1555	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
1556	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
1557	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
1558	__TCP_CHRONO_MAX,
1559};
1560
1561void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
1562void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
1563
1564/* This helper is needed because skb->tcp_tsorted_anchor uses
1565 * the same memory storage as skb->destructor/_skb_refdst
1566 */
1567static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
1568{
1569	skb->destructor = NULL;
1570	skb->_skb_refdst = 0UL;
1571}
1572
1573#define tcp_skb_tsorted_save(skb) {		\
1574	unsigned long _save = skb->_skb_refdst;	\
1575	skb->_skb_refdst = 0UL;
1576
1577#define tcp_skb_tsorted_restore(skb)		\
1578	skb->_skb_refdst = _save;		\
1579}
1580
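/* Usage sketch (illustrative): the save/restore pair above brackets code
 * that must not observe the tcp_tsorted_anchor bits overlaid on
 * skb->_skb_refdst.  The outer block is opened by tcp_skb_tsorted_save()
 * and closed by tcp_skb_tsorted_restore().
 */
static inline void tcp_skb_tsorted_usage_example(struct sk_buff *skb)
{
	tcp_skb_tsorted_save(skb) {
		/* skb->_skb_refdst is temporarily zeroed in this block */
	} tcp_skb_tsorted_restore(skb);
}
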
1581void tcp_write_queue_purge(struct sock *sk);
1582
1583static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
1584{
1585	return skb_rb_first(&sk->tcp_rtx_queue);
1586}
1587
1588static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
1589{
1590	return skb_peek(&sk->sk_write_queue);
1591}
1592
1593static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1594{
1595	return skb_peek_tail(&sk->sk_write_queue);
1596}
1597
1598#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
1599	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1600
1601static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1602{
1603	return skb_peek(&sk->sk_write_queue);
1604}
1605
1606static inline bool tcp_skb_is_last(const struct sock *sk,
1607				   const struct sk_buff *skb)
1608{
1609	return skb_queue_is_last(&sk->sk_write_queue, skb);
1610}
1611
1612static inline bool tcp_write_queue_empty(const struct sock *sk)
1613{
1614	return skb_queue_empty(&sk->sk_write_queue);
1615}
1616
1617static inline bool tcp_rtx_queue_empty(const struct sock *sk)
1618{
1619	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
1620}
1621
1622static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
1623{
1624	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
1625}
1626
1627static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
1628{
1629	if (tcp_write_queue_empty(sk))
1630		tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
1631}
1632
1633static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1634{
1635	__skb_queue_tail(&sk->sk_write_queue, skb);
1636}
1637
1638static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1639{
1640	__tcp_add_write_queue_tail(sk, skb);
1641
1642	/* Queue it, remembering where we must start sending. */
1643	if (sk->sk_write_queue.next == skb)
1644		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
1645}
1646
1647/* Insert new before skb on the write queue of sk.  */
1648static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1649						  struct sk_buff *skb,
1650						  struct sock *sk)
1651{
1652	__skb_queue_before(&sk->sk_write_queue, skb, new);
1653}
1654
1655static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1656{
1657	tcp_skb_tsorted_anchor_cleanup(skb);
1658	__skb_unlink(skb, &sk->sk_write_queue);
1659}
1660
1661void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);
1662
1663static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
1664{
1665	tcp_skb_tsorted_anchor_cleanup(skb);
1666	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
1667}
1668
1669static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
1670{
1671	list_del(&skb->tcp_tsorted_anchor);
1672	tcp_rtx_queue_unlink(skb, sk);
1673	sk_wmem_free_skb(sk, skb);
1674}
1675
1676static inline void tcp_push_pending_frames(struct sock *sk)
1677{
1678	if (tcp_send_head(sk)) {
1679		struct tcp_sock *tp = tcp_sk(sk);
1680
1681		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
1682	}
1683}
1684
1685/* Start sequence of the skb just after the highest skb with the SACKed
1686 * bit set; valid only if sacked_out > 0 or when the caller has ensured
1687 * validity itself.
1688 */
1689static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1690{
1691	if (!tp->sacked_out)
1692		return tp->snd_una;
1693
1694	if (tp->highest_sack == NULL)
1695		return tp->snd_nxt;
1696
1697	return TCP_SKB_CB(tp->highest_sack)->seq;
1698}
1699
1700static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
1701{
1702	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
1703}
1704
1705static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
1706{
1707	return tcp_sk(sk)->highest_sack;
1708}
1709
1710static inline void tcp_highest_sack_reset(struct sock *sk)
1711{
1712	tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
1713}
1714
1715/* Called when old skb is about to be deleted and replaced by new skb */
1716static inline void tcp_highest_sack_replace(struct sock *sk,
1717					    struct sk_buff *old,
1718					    struct sk_buff *new)
1719{
1720	if (old == tcp_highest_sack(sk))
1721		tcp_sk(sk)->highest_sack = new;
1722}
1723
1724/* This helper checks if socket has IP_TRANSPARENT set */
1725static inline bool inet_sk_transparent(const struct sock *sk)
1726{
1727	switch (sk->sk_state) {
1728	case TCP_TIME_WAIT:
1729		return inet_twsk(sk)->tw_transparent;
1730	case TCP_NEW_SYN_RECV:
1731		return inet_rsk(inet_reqsk(sk))->no_srccheck;
1732	}
1733	return inet_sk(sk)->transparent;
1734}
1735
1736/* Determines whether this is a thin stream (which may suffer from
1737 * increased latency). Used to trigger latency-reducing mechanisms.
1738 */
1739static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
1740{
1741	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
1742}
1743
1744/* /proc */
1745enum tcp_seq_states {
1746	TCP_SEQ_STATE_LISTENING,
1747	TCP_SEQ_STATE_ESTABLISHED,
1748};
1749
1750int tcp_seq_open(struct inode *inode, struct file *file);
 
 
1751
1752struct tcp_seq_afinfo {
1753	char				*name;
1754	sa_family_t			family;
1755	const struct file_operations	*seq_fops;
1756	struct seq_operations		seq_ops;
1757};
1758
1759struct tcp_iter_state {
1760	struct seq_net_private	p;
1761	sa_family_t		family;
1762	enum tcp_seq_states	state;
1763	struct sock		*syn_wait_sk;
 
1764	int			bucket, offset, sbucket, num;
1765	loff_t			last_pos;
1766};
1767
1768int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
1769void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
1770
1771extern struct request_sock_ops tcp_request_sock_ops;
1772extern struct request_sock_ops tcp6_request_sock_ops;
1773
1774void tcp_v4_destroy_sock(struct sock *sk);
1775
1776struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
1777				netdev_features_t features);
1778struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
 
 
 
 
1779int tcp_gro_complete(struct sk_buff *skb);
1780
1781void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
1782
1783static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
1784{
1785	struct net *net = sock_net((struct sock *)tp);
1786	return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
1787}
1788
1789static inline bool tcp_stream_memory_free(const struct sock *sk)
1790{
1791	const struct tcp_sock *tp = tcp_sk(sk);
1792	u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
 
1793
1794	return notsent_bytes < tcp_notsent_lowat(tp);
1795}
1796
1797#ifdef CONFIG_PROC_FS
1798int tcp4_proc_init(void);
1799void tcp4_proc_exit(void);
1800#endif
1801
1802int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
1803int tcp_conn_request(struct request_sock_ops *rsk_ops,
1804		     const struct tcp_request_sock_ops *af_ops,
1805		     struct sock *sk, struct sk_buff *skb);
1806
1807/* TCP af-specific functions */
1808struct tcp_sock_af_ops {
1809#ifdef CONFIG_TCP_MD5SIG
1810	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
1811						const struct sock *addr_sk);
1812	int		(*calc_md5_hash)(char *location,
1813					 const struct tcp_md5sig_key *md5,
1814					 const struct sock *sk,
1815					 const struct sk_buff *skb);
1816	int		(*md5_parse)(struct sock *sk,
1817				     int optname,
1818				     char __user *optval,
1819				     int optlen);
1820#endif
1821};
1822
1823struct tcp_request_sock_ops {
1824	u16 mss_clamp;
1825#ifdef CONFIG_TCP_MD5SIG
1826	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
1827						 const struct sock *addr_sk);
1828	int		(*calc_md5_hash) (char *location,
1829					  const struct tcp_md5sig_key *md5,
1830					  const struct sock *sk,
1831					  const struct sk_buff *skb);
1832#endif
1833	void (*init_req)(struct request_sock *req,
1834			 const struct sock *sk_listener,
1835			 struct sk_buff *skb);
1836#ifdef CONFIG_SYN_COOKIES
1837	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
1838				 __u16 *mss);
1839#endif
1840	struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
1841				       const struct request_sock *req);
1842	u32 (*init_seq)(const struct sk_buff *skb);
1843	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
1844	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
1845			   struct flowi *fl, struct request_sock *req,
1846			   struct tcp_fastopen_cookie *foc,
1847			   enum tcp_synack_type synack_type);
1848};
1849
1850#ifdef CONFIG_SYN_COOKIES
1851static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
1852					 const struct sock *sk, struct sk_buff *skb,
1853					 __u16 *mss)
1854{
1855	tcp_synq_overflow(sk);
1856	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
1857	return ops->cookie_init_seq(skb, mss);
1858}
1859#else
1860static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
1861					 const struct sock *sk, struct sk_buff *skb,
1862					 __u16 *mss)
1863{
1864	return 0;
1865}
1866#endif
1867
1868int tcpv4_offload_init(void);
1869
1870void tcp_v4_init(void);
1871void tcp_init(void);
1872
1873/* tcp_recovery.c */
1874extern void tcp_rack_mark_lost(struct sock *sk);
1875extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
1876			     u64 xmit_time);
1877extern void tcp_rack_reo_timeout(struct sock *sk);
1878extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);
1879
1880/* At how many usecs into the future should the RTO fire? */
1881static inline s64 tcp_rto_delta_us(const struct sock *sk)
1882{
1883	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
1884	u32 rto = inet_csk(sk)->icsk_rto;
1885	u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);
1886
1887	return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
1888}
1889
1890/*
1891 * Save and compile IPv4 options and return a pointer to them
1892 */
1893static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
1894							 struct sk_buff *skb)
1895{
1896	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
1897	struct ip_options_rcu *dopt = NULL;
1898
1899	if (opt->optlen) {
1900		int opt_size = sizeof(*dopt) + opt->optlen;
1901
1902		dopt = kmalloc(opt_size, GFP_ATOMIC);
1903		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
1904			kfree(dopt);
1905			dopt = NULL;
1906		}
1907	}
1908	return dopt;
1909}
1910
1911/* locally generated TCP pure ACKs have skb->truesize == 2
1912 * (check tcp_send_ack() in net/ipv4/tcp_output.c )
1913 * This is much faster than dissecting the packet to find out.
1914 * (Think of GRE encapsulations, IPv4, IPv6, ...)
1915 */
1916static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
1917{
1918	return skb->truesize == 2;
1919}
1920
1921static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
1922{
1923	skb->truesize = 2;
1924}
1925
1926static inline int tcp_inq(struct sock *sk)
1927{
1928	struct tcp_sock *tp = tcp_sk(sk);
1929	int answ;
1930
1931	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
1932		answ = 0;
1933	} else if (sock_flag(sk, SOCK_URGINLINE) ||
1934		   !tp->urg_data ||
1935		   before(tp->urg_seq, tp->copied_seq) ||
1936		   !before(tp->urg_seq, tp->rcv_nxt)) {
1937
1938		answ = tp->rcv_nxt - tp->copied_seq;
1939
1940		/* Subtract 1, if FIN was received */
1941		if (answ && sock_flag(sk, SOCK_DONE))
1942			answ--;
1943	} else {
1944		answ = tp->urg_seq - tp->copied_seq;
1945	}
1946
1947	return answ;
1948}
1949
1950int tcp_peek_len(struct socket *sock);
1951
1952static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
1953{
1954	u16 segs_in;
1955
1956	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1957	tp->segs_in += segs_in;
1958	if (skb->len > tcp_hdrlen(skb))
1959		tp->data_segs_in += segs_in;
1960}
1961
1962/*
1963 * TCP listen path runs lockless.
1964 * We forced "struct sock" to be const qualified to make sure
1965 * we don't modify one of its fields by mistake.
1966 * Here, we increment sk_drops which is an atomic_t, so we can safely
1967 * make sock writable again.
1968 */
1969static inline void tcp_listendrop(const struct sock *sk)
1970{
1971	atomic_inc(&((struct sock *)sk)->sk_drops);
1972	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
1973}
1974
1975enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
1976
1977/*
1978 * Interface for adding Upper Level Protocols over TCP
1979 */
1980
1981#define TCP_ULP_NAME_MAX	16
1982#define TCP_ULP_MAX		128
1983#define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)
1984
1985enum {
1986	TCP_ULP_TLS,
1987	TCP_ULP_BPF,
1988};
1989
1990struct tcp_ulp_ops {
1991	struct list_head	list;
1992
1993	/* initialize ulp */
1994	int (*init)(struct sock *sk);
1995	/* cleanup ulp */
1996	void (*release)(struct sock *sk);
1997
1998	int		uid;
1999	char		name[TCP_ULP_NAME_MAX];
2000	bool		user_visible;
2001	struct module	*owner;
2002};
2003int tcp_register_ulp(struct tcp_ulp_ops *type);
2004void tcp_unregister_ulp(struct tcp_ulp_ops *type);
2005int tcp_set_ulp(struct sock *sk, const char *name);
2006int tcp_set_ulp_id(struct sock *sk, const int ulp);
2007void tcp_get_available_ulp(char *buf, size_t len);
2008void tcp_cleanup_ulp(struct sock *sk);
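
/* Minimal sketch of wiring an Upper Level Protocol into this interface.
 * "example" is a hypothetical name; a real ULP also needs its own TCP_ULP_*
 * id in the enum above and lives in its own module.  After registration via
 * tcp_register_ulp(), tcp_set_ulp(sk, "example") (or the TCP_ULP socket
 * option) attaches it to a socket.
 */
static int example_ulp_init(struct sock *sk)
{
	/* a real ULP typically swaps in its own sk->sk_prot here */
	return 0;
}

static void example_ulp_release(struct sock *sk)
{
}

static struct tcp_ulp_ops example_ulp_ops __read_mostly = {
	.name		= "example",
	.user_visible	= true,
	.owner		= THIS_MODULE,
	.init		= example_ulp_init,
	.release	= example_ulp_release,
};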
2009
2010/* Call a BPF_SOCK_OPS program that returns an int. If the return value
2011 * is < 0, then the BPF op failed (for example if the loaded BPF
2012 * program does not support the chosen operation or there is no BPF
2013 * program loaded).
2014 */
2015#ifdef CONFIG_BPF
2016static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2017{
2018	struct bpf_sock_ops_kern sock_ops;
2019	int ret;
2020
2021	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
2022	if (sk_fullsock(sk)) {
2023		sock_ops.is_fullsock = 1;
2024		sock_owned_by_me(sk);
2025	}
2026
2027	sock_ops.sk = sk;
2028	sock_ops.op = op;
2029	if (nargs > 0)
2030		memcpy(sock_ops.args, args, nargs * sizeof(*args));
2031
2032	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
2033	if (ret == 0)
2034		ret = sock_ops.reply;
2035	else
2036		ret = -1;
2037	return ret;
2038}
2039
2040static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2041{
2042	u32 args[2] = {arg1, arg2};
2043
2044	return tcp_call_bpf(sk, op, 2, args);
2045}
2046
2047static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2048				    u32 arg3)
2049{
2050	u32 args[3] = {arg1, arg2, arg3};
2051
2052	return tcp_call_bpf(sk, op, 3, args);
2053}
2054
2055#else
2056static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2057{
2058	return -EPERM;
2059}
2060
2061static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2062{
2063	return -EPERM;
2064}
2065
2066static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2067				    u32 arg3)
2068{
2069	return -EPERM;
2070}
2071
2072#endif
2073
2074static inline u32 tcp_timeout_init(struct sock *sk)
2075{
2076	int timeout;
2077
2078	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
2079
2080	if (timeout <= 0)
2081		timeout = TCP_TIMEOUT_INIT;
2082	return timeout;
2083}
2084
2085static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
2086{
2087	int rwnd;
2088
2089	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
2090
2091	if (rwnd < 0)
2092		rwnd = 0;
2093	return rwnd;
2094}
2095
2096static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
2097{
2098	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
2099}
2100
2101#if IS_ENABLED(CONFIG_SMC)
2102extern struct static_key_false tcp_have_smc;
2103#endif
2104#endif	/* _TCP_H */