Linux Audio

Check our new training course

Loading...
v6.13.7
   1/* SPDX-License-Identifier: GPL-2.0-or-later */
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		Definitions for the TCP module.
   8 *
   9 * Version:	@(#)tcp.h	1.0.5	05/23/93
  10 *
  11 * Authors:	Ross Biro
  12 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 
 
 
 
 
  13 */
  14#ifndef _TCP_H
  15#define _TCP_H
  16
  17#define FASTRETRANS_DEBUG 1
  18
  19#include <linux/list.h>
  20#include <linux/tcp.h>
  21#include <linux/bug.h>
  22#include <linux/slab.h>
  23#include <linux/cache.h>
  24#include <linux/percpu.h>
  25#include <linux/skbuff.h>
 
  26#include <linux/kref.h>
  27#include <linux/ktime.h>
  28#include <linux/indirect_call_wrapper.h>
  29
  30#include <net/inet_connection_sock.h>
  31#include <net/inet_timewait_sock.h>
  32#include <net/inet_hashtables.h>
  33#include <net/checksum.h>
  34#include <net/request_sock.h>
  35#include <net/sock_reuseport.h>
  36#include <net/sock.h>
  37#include <net/snmp.h>
  38#include <net/ip.h>
  39#include <net/tcp_states.h>
  40#include <net/tcp_ao.h>
  41#include <net/inet_ecn.h>
  42#include <net/dst.h>
  43#include <net/mptcp.h>
  44#include <net/xfrm.h>
  45
  46#include <linux/seq_file.h>
  47#include <linux/memcontrol.h>
  48#include <linux/bpf-cgroup.h>
  49#include <linux/siphash.h>
  50
  51extern struct inet_hashinfo tcp_hashinfo;
  52
  53DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
  54int tcp_orphan_count_sum(void);
  55
  56DECLARE_PER_CPU(u32, tcp_tw_isn);
  57
  58void tcp_time_wait(struct sock *sk, int state, int timeo);
  59
  60#define MAX_TCP_HEADER	L1_CACHE_ALIGN(128 + MAX_HEADER)
  61#define MAX_TCP_OPTION_SPACE 40
  62#define TCP_MIN_SND_MSS		48
  63#define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
  64
  65/*
  66 * Never offer a window over 32767 without using window scaling. Some
  67 * poor stacks do signed 16bit maths!
  68 */
  69#define MAX_TCP_WINDOW		32767U
  70
  71/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
  72#define TCP_MIN_MSS		88U
  73
  74/* The initial MTU to use for probing */
  75#define TCP_BASE_MSS		1024
  76
  77/* probing interval, default to 10 minutes as per RFC4821 */
  78#define TCP_PROBE_INTERVAL	600
  79
  80/* Specify interval when tcp mtu probing will stop */
  81#define TCP_PROBE_THRESHOLD	8
  82
  83/* After receiving this amount of duplicate ACKs fast retransmit starts. */
  84#define TCP_FASTRETRANS_THRESH 3
  85
  86/* Maximal number of ACKs sent quickly to accelerate slow-start. */
  87#define TCP_MAX_QUICKACKS	16U
  88
  89/* Maximal number of window scale according to RFC1323 */
  90#define TCP_MAX_WSCALE		14U
  91
  92/* urg_data states */
  93#define TCP_URG_VALID	0x0100
  94#define TCP_URG_NOTYET	0x0200
  95#define TCP_URG_READ	0x0400
  96
  97#define TCP_RETR1	3	/*
  98				 * This is how many retries it does before it
  99				 * tries to figure out if the gateway is
 100				 * down. Minimal RFC value is 3; it corresponds
 101				 * to ~3sec-8min depending on RTO.
 102				 */
 103
 104#define TCP_RETR2	15	/*
 105				 * This should take at least
 106				 * 90 minutes to time out.
 107				 * RFC1122 says that the limit is 100 sec.
 108				 * 15 is ~13-30min depending on RTO.
 109				 */
 110
 111#define TCP_SYN_RETRIES	 6	/* This is how many retries are done
 112				 * when active opening a connection.
 113				 * RFC1122 says the minimum retry MUST
 114				 * be at least 180secs.  Nevertheless
 115				 * this value is corresponding to
 116				 * 63secs of retransmission with the
 117				 * current initial RTO.
 118				 */
 119
 120#define TCP_SYNACK_RETRIES 5	/* This is how may retries are done
 121				 * when passive opening a connection.
 122				 * This is corresponding to 31secs of
 123				 * retransmission with the current
 124				 * initial RTO.
 125				 */
 126
 127#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
 128				  * state, about 60 seconds	*/
 129#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
 130                                 /* BSD style FIN_WAIT2 deadlock breaker.
 131				  * It used to be 3min, new value is 60sec,
 132				  * to combine FIN-WAIT-2 timeout with
 133				  * TIME-WAIT timer.
 134				  */
 135#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */
 136
 137#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
 138static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);
 139
 140#if HZ >= 100
 141#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
 142#define TCP_ATO_MIN	((unsigned)(HZ/25))
 143#else
 144#define TCP_DELACK_MIN	4U
 145#define TCP_ATO_MIN	4U
 146#endif
 147#define TCP_RTO_MAX	((unsigned)(120*HZ))
 148#define TCP_RTO_MIN	((unsigned)(HZ/5))
 149#define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */
 150
 151#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */
 152
 153#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
 154#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
 155						 * used as a fallback RTO for the
 156						 * initial data transmission if no
 157						 * valid RTT sample has been acquired,
 158						 * most likely due to retrans in 3WHS.
 159						 */
 160
 161#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
 162					                 * for local resources.
 163					                 */
 164#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
 165#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
 166#define TCP_KEEPALIVE_INTVL	(75*HZ)
 167
 168#define MAX_TCP_KEEPIDLE	32767
 169#define MAX_TCP_KEEPINTVL	32767
 170#define MAX_TCP_KEEPCNT		127
 171#define MAX_TCP_SYNCNT		127
 172
 173/* Ensure that TCP PAWS checks are relaxed after ~2147 seconds
 174 * to avoid overflows. This assumes a clock smaller than 1 Mhz.
 175 * Default clock is 1 Khz, tcp_usec_ts uses 1 Mhz.
 176 */
 177#define TCP_PAWS_WRAP (INT_MAX / USEC_PER_SEC)
 178
 
 179#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
 180					 * after this time. It should be equal
 181					 * (or greater than) TCP_TIMEWAIT_LEN
 182					 * to provide reliability equal to one
 183					 * provided by timewait state.
 184					 */
 185#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
 186					 * timestamps. It must be less than
 187					 * minimal timewait lifetime.
 188					 */
 189/*
 190 *	TCP option
 191 */
 192
 193#define TCPOPT_NOP		1	/* Padding */
 194#define TCPOPT_EOL		0	/* End of options */
 195#define TCPOPT_MSS		2	/* Segment size negotiating */
 196#define TCPOPT_WINDOW		3	/* Window scaling */
 197#define TCPOPT_SACK_PERM        4       /* SACK Permitted */
 198#define TCPOPT_SACK             5       /* SACK Block */
 199#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
 200#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
 201#define TCPOPT_AO		29	/* Authentication Option (RFC5925) */
 202#define TCPOPT_MPTCP		30	/* Multipath TCP (RFC6824) */
 203#define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
 204#define TCPOPT_EXP		254	/* Experimental */
 205/* Magic number to be after the option value for sharing TCP
 206 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 207 */
 208#define TCPOPT_FASTOPEN_MAGIC	0xF989
 209#define TCPOPT_SMC_MAGIC	0xE2D4C3D9
 210
 211/*
 212 *     TCP option lengths
 213 */
 214
 215#define TCPOLEN_MSS            4
 216#define TCPOLEN_WINDOW         3
 217#define TCPOLEN_SACK_PERM      2
 218#define TCPOLEN_TIMESTAMP      10
 219#define TCPOLEN_MD5SIG         18
 220#define TCPOLEN_FASTOPEN_BASE  2
 221#define TCPOLEN_EXP_FASTOPEN_BASE  4
 222#define TCPOLEN_EXP_SMC_BASE   6
 223
 224/* But this is what stacks really send out. */
 225#define TCPOLEN_TSTAMP_ALIGNED		12
 226#define TCPOLEN_WSCALE_ALIGNED		4
 227#define TCPOLEN_SACKPERM_ALIGNED	4
 228#define TCPOLEN_SACK_BASE		2
 229#define TCPOLEN_SACK_BASE_ALIGNED	4
 230#define TCPOLEN_SACK_PERBLOCK		8
 231#define TCPOLEN_MD5SIG_ALIGNED		20
 232#define TCPOLEN_MSS_ALIGNED		4
 233#define TCPOLEN_EXP_SMC_BASE_ALIGNED	8
 234
 235/* Flags in tp->nonagle */
 236#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
 237#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
 238#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */
 239
 240/* TCP thin-stream limits */
 241#define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */
 242
 243/* TCP initial congestion window as per rfc6928 */
 244#define TCP_INIT_CWND		10
 245
 246/* Bit Flags for sysctl_tcp_fastopen */
 247#define	TFO_CLIENT_ENABLE	1
 248#define	TFO_SERVER_ENABLE	2
 249#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */
 250
 251/* Accept SYN data w/o any cookie option */
 252#define	TFO_SERVER_COOKIE_NOT_REQD	0x200
 253
 254/* Force enable TFO on all listeners, i.e., not requiring the
 255 * TCP_FASTOPEN socket option.
 256 */
 257#define	TFO_SERVER_WO_SOCKOPT1	0x400
 258
 259
 260/* sysctl variables for tcp */
 261extern int sysctl_tcp_max_orphans;
 262extern long sysctl_tcp_mem[3];
 263
 264#define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
 265#define TCP_RACK_STATIC_REO_WND  0x2 /* Use static RACK reo wnd */
 266#define TCP_RACK_NO_DUPTHRESH    0x4 /* Do not use DUPACK threshold in RACK */
 267
 268extern atomic_long_t tcp_memory_allocated;
 269DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
 270
 271extern struct percpu_counter tcp_sockets_allocated;
 272extern unsigned long tcp_memory_pressure;
 273
 274/* optimized version of sk_under_memory_pressure() for TCP sockets */
 275static inline bool tcp_under_memory_pressure(const struct sock *sk)
 276{
 277	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 278	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 279		return true;
 280
 281	return READ_ONCE(tcp_memory_pressure);
 282}
 283/*
 284 * The next routines deal with comparing 32 bit unsigned ints
 285 * and worry about wraparound (automatic with unsigned arithmetic).
 286 */
 287
 288static inline bool before(__u32 seq1, __u32 seq2)
 289{
 290        return (__s32)(seq1-seq2) < 0;
 291}
 292#define after(seq2, seq1) 	before(seq1, seq2)
 293
 294/* is s2<=s1<=s3 ? */
 295static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
 296{
 297	return seq3 - seq2 >= seq1 - seq2;
 298}
 299
 300static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
 301{
 302	sk_wmem_queued_add(sk, -skb->truesize);
 303	if (!skb_zcopy_pure(skb))
 304		sk_mem_uncharge(sk, skb->truesize);
 305	else
 306		sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
 307	__kfree_skb(skb);
 308}
 309
 310void sk_forced_mem_schedule(struct sock *sk, int size);
 311
 312bool tcp_check_oom(const struct sock *sk, int shift);
 
 
 
 
 
 
 
 
 
 
 
 
 
 313
 314
 315extern struct proto tcp_prot;
 316
 317#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
 318#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
 319#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
 320#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
 321
 322void tcp_tasklet_init(void);
 323
 324int tcp_v4_err(struct sk_buff *skb, u32);
 325
 326void tcp_shutdown(struct sock *sk, int how);
 327
 328int tcp_v4_early_demux(struct sk_buff *skb);
 329int tcp_v4_rcv(struct sk_buff *skb);
 330
 331void tcp_remove_empty_skb(struct sock *sk);
 332int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 333int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
 334int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
 335			 size_t size, struct ubuf_info *uarg);
 336void tcp_splice_eof(struct socket *sock);
 337int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
 338int tcp_wmem_schedule(struct sock *sk, int copy);
 339void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
 340	      int size_goal);
 341void tcp_release_cb(struct sock *sk);
 342void tcp_wfree(struct sk_buff *skb);
 343void tcp_write_timer_handler(struct sock *sk);
 344void tcp_delack_timer_handler(struct sock *sk);
 345int tcp_ioctl(struct sock *sk, int cmd, int *karg);
 346enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
 347void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
 
 348void tcp_rcv_space_adjust(struct sock *sk);
 349int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
 350void tcp_twsk_destructor(struct sock *sk);
 351void tcp_twsk_purge(struct list_head *net_exit_list);
 352ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
 353			struct pipe_inode_info *pipe, size_t len,
 354			unsigned int flags);
 355struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
 356				     bool force_schedule);
 357
 358static inline void tcp_dec_quickack_mode(struct sock *sk)
 
 359{
 360	struct inet_connection_sock *icsk = inet_csk(sk);
 361
 362	if (icsk->icsk_ack.quick) {
 363		/* How many ACKs S/ACKing new data have we sent? */
 364		const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;
 365
 366		if (pkts >= icsk->icsk_ack.quick) {
 367			icsk->icsk_ack.quick = 0;
 368			/* Leaving quickack mode we deflate ATO. */
 369			icsk->icsk_ack.ato   = TCP_ATO_MIN;
 370		} else
 371			icsk->icsk_ack.quick -= pkts;
 372	}
 373}
 374
 375#define	TCP_ECN_OK		1
 376#define	TCP_ECN_QUEUE_CWR	2
 377#define	TCP_ECN_DEMAND_CWR	4
 378#define	TCP_ECN_SEEN		8
 379
 380enum tcp_tw_status {
 381	TCP_TW_SUCCESS = 0,
 382	TCP_TW_RST = 1,
 383	TCP_TW_ACK = 2,
 384	TCP_TW_SYN = 3
 385};
 386
 387
 388enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
 389					      struct sk_buff *skb,
 390					      const struct tcphdr *th,
 391					      u32 *tw_isn);
 392struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 393			   struct request_sock *req, bool fastopen,
 394			   bool *lost_race);
 395enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
 396				       struct sk_buff *skb);
 397void tcp_enter_loss(struct sock *sk);
 398void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
 399void tcp_clear_retrans(struct tcp_sock *tp);
 400void tcp_update_metrics(struct sock *sk);
 401void tcp_init_metrics(struct sock *sk);
 402void tcp_metrics_init(void);
 403bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
 404void __tcp_close(struct sock *sk, long timeout);
 405void tcp_close(struct sock *sk, long timeout);
 406void tcp_init_sock(struct sock *sk);
 407void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
 408__poll_t tcp_poll(struct file *file, struct socket *sock,
 409		      struct poll_table_struct *wait);
 410int do_tcp_getsockopt(struct sock *sk, int level,
 411		      int optname, sockptr_t optval, sockptr_t optlen);
 412int tcp_getsockopt(struct sock *sk, int level, int optname,
 413		   char __user *optval, int __user *optlen);
 414bool tcp_bpf_bypass_getsockopt(int level, int optname);
 415int do_tcp_setsockopt(struct sock *sk, int level, int optname,
 416		      sockptr_t optval, unsigned int optlen);
 417int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
 418		   unsigned int optlen);
 
 419void tcp_set_keepalive(struct sock *sk, int val);
 420void tcp_syn_ack_timeout(const struct request_sock *req);
 421int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 422		int flags, int *addr_len);
 423int tcp_set_rcvlowat(struct sock *sk, int val);
 424int tcp_set_window_clamp(struct sock *sk, int val);
 425void tcp_update_recv_tstamps(struct sk_buff *skb,
 426			     struct scm_timestamping_internal *tss);
 427void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
 428			struct scm_timestamping_internal *tss);
 429void tcp_data_ready(struct sock *sk);
 430#ifdef CONFIG_MMU
 431int tcp_mmap(struct file *file, struct socket *sock,
 432	     struct vm_area_struct *vma);
 433#endif
 434void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
 435		       struct tcp_options_received *opt_rx,
 436		       int estab, struct tcp_fastopen_cookie *foc);
 
 437
 438/*
 439 *	BPF SKB-less helpers
 440 */
 441u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
 442			 struct tcphdr *th, u32 *cookie);
 443u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
 444			 struct tcphdr *th, u32 *cookie);
 445u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss);
 446u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
 447			  const struct tcp_request_sock_ops *af_ops,
 448			  struct sock *sk, struct tcphdr *th);
 449/*
 450 *	TCP v4 functions exported for the inet6 API
 451 */
 452
 453void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 454void tcp_v4_mtu_reduced(struct sock *sk);
 455void tcp_req_err(struct sock *sk, u32 seq, bool abort);
 456void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
 457int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 458struct sock *tcp_create_openreq_child(const struct sock *sk,
 459				      struct request_sock *req,
 460				      struct sk_buff *skb);
 461void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
 462struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 463				  struct request_sock *req,
 464				  struct dst_entry *dst,
 465				  struct request_sock *req_unhash,
 466				  bool *own_req);
 467int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 468int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 469int tcp_connect(struct sock *sk);
 470enum tcp_synack_type {
 471	TCP_SYNACK_NORMAL,
 472	TCP_SYNACK_FASTOPEN,
 473	TCP_SYNACK_COOKIE,
 474};
 475struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 476				struct request_sock *req,
 477				struct tcp_fastopen_cookie *foc,
 478				enum tcp_synack_type synack_type,
 479				struct sk_buff *syn_skb);
 480int tcp_disconnect(struct sock *sk, int flags);
 481
 482void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
 483int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
 484void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
 485
 486/* From syncookies.c */
 487struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
 488				 struct request_sock *req,
 489				 struct dst_entry *dst);
 490int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th);
 
 491struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
 492struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
 493					    struct sock *sk, struct sk_buff *skb,
 494					    struct tcp_options_received *tcp_opt,
 495					    int mss, u32 tsoff);
 496
 497#if IS_ENABLED(CONFIG_BPF)
 498struct bpf_tcp_req_attrs {
 499	u32 rcv_tsval;
 500	u32 rcv_tsecr;
 501	u16 mss;
 502	u8 rcv_wscale;
 503	u8 snd_wscale;
 504	u8 ecn_ok;
 505	u8 wscale_ok;
 506	u8 sack_ok;
 507	u8 tstamp_ok;
 508	u8 usec_ts_ok;
 509	u8 reserved[3];
 510};
 511#endif
 512
 513#ifdef CONFIG_SYN_COOKIES
 514
 515/* Syncookies use a monotonic timer which increments every 60 seconds.
 516 * This counter is used both as a hash input and partially encoded into
 517 * the cookie value.  A cookie is only validated further if the delta
 518 * between the current counter value and the encoded one is less than this,
 519 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 520 * the counter advances immediately after a cookie is generated).
 521 */
 522#define MAX_SYNCOOKIE_AGE	2
 523#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
 524#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
 525
 526/* syncookies: remember time of last synqueue overflow
 527 * But do not dirty this field too often (once per second is enough)
 528 * It is racy as we do not hold a lock, but race is very minor.
 529 */
 530static inline void tcp_synq_overflow(const struct sock *sk)
 531{
 532	unsigned int last_overflow;
 533	unsigned int now = jiffies;
 534
 535	if (sk->sk_reuseport) {
 536		struct sock_reuseport *reuse;
 537
 538		reuse = rcu_dereference(sk->sk_reuseport_cb);
 539		if (likely(reuse)) {
 540			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
 541			if (!time_between32(now, last_overflow,
 542					    last_overflow + HZ))
 543				WRITE_ONCE(reuse->synq_overflow_ts, now);
 544			return;
 545		}
 546	}
 547
 548	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
 549	if (!time_between32(now, last_overflow, last_overflow + HZ))
 550		WRITE_ONCE(tcp_sk_rw(sk)->rx_opt.ts_recent_stamp, now);
 551}
 552
 553/* syncookies: no recent synqueue overflow on this listening socket? */
 554static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
 555{
 556	unsigned int last_overflow;
 557	unsigned int now = jiffies;
 558
 559	if (sk->sk_reuseport) {
 560		struct sock_reuseport *reuse;
 561
 562		reuse = rcu_dereference(sk->sk_reuseport_cb);
 563		if (likely(reuse)) {
 564			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
 565			return !time_between32(now, last_overflow - HZ,
 566					       last_overflow +
 567					       TCP_SYNCOOKIE_VALID);
 568		}
 569	}
 570
 571	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
 572
 573	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
 574	 * then we're under synflood. However, we have to use
 575	 * 'last_overflow - HZ' as lower bound. That's because a concurrent
 576	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
 577	 * jiffies but before we store .ts_recent_stamp into last_overflow,
 578	 * which could lead to rejecting a valid syncookie.
 579	 */
 580	return !time_between32(now, last_overflow - HZ,
 581			       last_overflow + TCP_SYNCOOKIE_VALID);
 582}
 583
 584static inline u32 tcp_cookie_time(void)
 585{
 586	u64 val = get_jiffies_64();
 587
 588	do_div(val, TCP_SYNCOOKIE_PERIOD);
 589	return val;
 590}
 591
 592/* Convert one nsec 64bit timestamp to ts (ms or usec resolution) */
 593static inline u64 tcp_ns_to_ts(bool usec_ts, u64 val)
 594{
 595	if (usec_ts)
 596		return div_u64(val, NSEC_PER_USEC);
 597
 598	return div_u64(val, NSEC_PER_MSEC);
 599}
 600
 601u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
 602			      u16 *mssp);
 603__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
 604u64 cookie_init_timestamp(struct request_sock *req, u64 now);
 605bool cookie_timestamp_decode(const struct net *net,
 606			     struct tcp_options_received *opt);
 607
 608static inline bool cookie_ecn_ok(const struct net *net, const struct dst_entry *dst)
 609{
 610	return READ_ONCE(net->ipv4.sysctl_tcp_ecn) ||
 611		dst_feature(dst, RTAX_FEATURE_ECN);
 612}
 613
 614#if IS_ENABLED(CONFIG_BPF)
 615static inline bool cookie_bpf_ok(struct sk_buff *skb)
 616{
 617	return skb->sk;
 618}
 619
 620struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb);
 621#else
 622static inline bool cookie_bpf_ok(struct sk_buff *skb)
 623{
 624	return false;
 625}
 626
 627static inline struct request_sock *cookie_bpf_check(struct net *net, struct sock *sk,
 628						    struct sk_buff *skb)
 629{
 630	return NULL;
 631}
 632#endif
 633
 634/* From net/ipv6/syncookies.c */
 635int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th);
 
 636struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
 637
 638u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
 639			      const struct tcphdr *th, u16 *mssp);
 640__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
 641#endif
 642/* tcp_output.c */
 643
 644void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
 645void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
 646void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
 647			       int nonagle);
 648int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
 649int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
 650void tcp_retransmit_timer(struct sock *sk);
 651void tcp_xmit_retransmit_queue(struct sock *);
 652void tcp_simple_retransmit(struct sock *);
 653void tcp_enter_recovery(struct sock *sk, bool ece_ack);
 654int tcp_trim_head(struct sock *, struct sk_buff *, u32);
 655enum tcp_queue {
 656	TCP_FRAG_IN_WRITE_QUEUE,
 657	TCP_FRAG_IN_RTX_QUEUE,
 658};
 659int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
 660		 struct sk_buff *skb, u32 len,
 661		 unsigned int mss_now, gfp_t gfp);
 662
 663void tcp_send_probe0(struct sock *);
 
 664int tcp_write_wakeup(struct sock *, int mib);
 665void tcp_send_fin(struct sock *sk);
 666void tcp_send_active_reset(struct sock *sk, gfp_t priority,
 667			   enum sk_rst_reason reason);
 668int tcp_send_synack(struct sock *);
 669void tcp_push_one(struct sock *, unsigned int mss_now);
 670void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
 671void tcp_send_ack(struct sock *sk);
 672void tcp_send_delayed_ack(struct sock *sk);
 673void tcp_send_loss_probe(struct sock *sk);
 674bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
 675void tcp_skb_collapse_tstamp(struct sk_buff *skb,
 676			     const struct sk_buff *next_skb);
 677
 678/* tcp_input.c */
 679void tcp_rearm_rto(struct sock *sk);
 680void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
 681void tcp_done_with_error(struct sock *sk, int err);
 682void tcp_reset(struct sock *sk, struct sk_buff *skb);
 683void tcp_fin(struct sock *sk);
 684void tcp_check_space(struct sock *sk);
 685void tcp_sack_compress_send_ack(struct sock *sk);
 686
 687static inline void tcp_cleanup_skb(struct sk_buff *skb)
 688{
 689	skb_dst_drop(skb);
 690	secpath_reset(skb);
 691}
 692
 693static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb)
 694{
 695	DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
 696	DEBUG_NET_WARN_ON_ONCE(secpath_exists(skb));
 697	__skb_queue_tail(&sk->sk_receive_queue, skb);
 698}
 699
 700/* tcp_timer.c */
 701void tcp_init_xmit_timers(struct sock *);
 702static inline void tcp_clear_xmit_timers(struct sock *sk)
 703{
 704	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
 705		__sock_put(sk);
 706
 707	if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
 708		__sock_put(sk);
 709
 710	inet_csk_clear_xmit_timers(sk);
 711}
 712
 713unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
 714unsigned int tcp_current_mss(struct sock *sk);
 715u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);
 716
 717/* Bound MSS / TSO packet size with the half of the window */
 718static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
 719{
 720	int cutoff;
 721
 722	/* When peer uses tiny windows, there is no use in packetizing
 723	 * to sub-MSS pieces for the sake of SWS or making sure there
 724	 * are enough packets in the pipe for fast recovery.
 725	 *
 726	 * On the other hand, for extremely large MSS devices, handling
 727	 * smaller than MSS windows in this way does make sense.
 728	 */
 729	if (tp->max_window > TCP_MSS_DEFAULT)
 730		cutoff = (tp->max_window >> 1);
 731	else
 732		cutoff = tp->max_window;
 733
 734	if (cutoff && pktsize > cutoff)
 735		return max_t(int, cutoff, 68U - tp->tcp_header_len);
 736	else
 737		return pktsize;
 738}
 739
 740/* tcp.c */
 741void tcp_get_info(struct sock *, struct tcp_info *);
 742
 743/* Read 'sendfile()'-style from a TCP socket */
 744int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 745		  sk_read_actor_t recv_actor);
 746int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
 747struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
 748void tcp_read_done(struct sock *sk, size_t len);
 749
 750void tcp_initialize_rcv_mss(struct sock *sk);
 751
 752int tcp_mtu_to_mss(struct sock *sk, int pmtu);
 753int tcp_mss_to_mtu(struct sock *sk, int mss);
 754void tcp_mtup_init(struct sock *sk);
 
 755
 756static inline void tcp_bound_rto(struct sock *sk)
 757{
 758	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
 759		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
 760}
 761
 762static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
 763{
 764	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
 765}
 766
 767static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
 768{
 769	/* mptcp hooks are only on the slow path */
 770	if (sk_is_mptcp((struct sock *)tp))
 771		return;
 772
 773	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
 774			       ntohl(TCP_FLAG_ACK) |
 775			       snd_wnd);
 776}
 777
 778static inline void tcp_fast_path_on(struct tcp_sock *tp)
 779{
 780	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
 781}
 782
 783static inline void tcp_fast_path_check(struct sock *sk)
 784{
 785	struct tcp_sock *tp = tcp_sk(sk);
 786
 787	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
 788	    tp->rcv_wnd &&
 789	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
 790	    !tp->urg_data)
 791		tcp_fast_path_on(tp);
 792}
 793
 794u32 tcp_delack_max(const struct sock *sk);
 795
 796/* Compute the actual rto_min value */
 797static inline u32 tcp_rto_min(const struct sock *sk)
 798{
 799	const struct dst_entry *dst = __sk_dst_get(sk);
 800	u32 rto_min = inet_csk(sk)->icsk_rto_min;
 801
 802	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
 803		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
 804	return rto_min;
 805}
 806
 807static inline u32 tcp_rto_min_us(const struct sock *sk)
 808{
 809	return jiffies_to_usecs(tcp_rto_min(sk));
 810}
 811
 812static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
 813{
 814	return dst_metric_locked(dst, RTAX_CC_ALGO);
 815}
 816
 817/* Minimum RTT in usec. ~0 means not available. */
 818static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
 819{
 820	return minmax_get(&tp->rtt_min);
 821}
 822
 823/* Compute the actual receive window we are currently advertising.
 824 * Rcv_nxt can be after the window if our peer push more data
 825 * than the offered window.
 826 */
 827static inline u32 tcp_receive_window(const struct tcp_sock *tp)
 828{
 829	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
 830
 831	if (win < 0)
 832		win = 0;
 833	return (u32) win;
 834}
 835
 836/* Choose a new window, without checks for shrinking, and without
 837 * scaling applied to the result.  The caller does these things
 838 * if necessary.  This is a "raw" window selection.
 839 */
 840u32 __tcp_select_window(struct sock *sk);
 841
 842void tcp_send_window_probe(struct sock *sk);
 843
 844/* TCP uses 32bit jiffies to save some space.
 845 * Note that this is different from tcp_time_stamp, which
 846 * historically has been the same until linux-4.13.
 847 */
 848#define tcp_jiffies32 ((u32)jiffies)
 849
 850/*
 851 * Deliver a 32bit value for TCP timestamp option (RFC 7323)
 852 * It is no longer tied to jiffies, but to 1 ms clock.
 853 * Note: double check if you want to use tcp_jiffies32 instead of this.
 854 */
 855#define TCP_TS_HZ	1000
 856
 857static inline u64 tcp_clock_ns(void)
 858{
 859	return ktime_get_ns();
 860}
 861
 862static inline u64 tcp_clock_us(void)
 863{
 864	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
 865}
 866
 867static inline u64 tcp_clock_ms(void)
 
 868{
 869	return div_u64(tcp_clock_ns(), NSEC_PER_MSEC);
 870}
 871
 872/* TCP Timestamp included in TS option (RFC 1323) can either use ms
 873 * or usec resolution. Each socket carries a flag to select one or other
 874 * resolution, as the route attribute could change anytime.
 875 * Each flow must stick to initial resolution.
 876 */
 877static inline u32 tcp_clock_ts(bool usec_ts)
 878{
 879	return usec_ts ? tcp_clock_us() : tcp_clock_ms();
 880}
 881
 882static inline u32 tcp_time_stamp_ms(const struct tcp_sock *tp)
 883{
 884	return div_u64(tp->tcp_mstamp, USEC_PER_MSEC);
 885}
 886
 887static inline u32 tcp_time_stamp_ts(const struct tcp_sock *tp)
 
 
 
 888{
 889	if (tp->tcp_usec_ts)
 890		return tp->tcp_mstamp;
 891	return tcp_time_stamp_ms(tp);
 892}
 893
 894void tcp_mstamp_refresh(struct tcp_sock *tp);
 
 
 895
 896static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
 897{
 898	return max_t(s64, t1 - t0, 0);
 899}
 900
 901/* provide the departure time in us unit */
 902static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
 903{
 904	return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
 905}
 906
 907/* Provide skb TSval in usec or ms unit */
 908static inline u32 tcp_skb_timestamp_ts(bool usec_ts, const struct sk_buff *skb)
 909{
 910	if (usec_ts)
 911		return tcp_skb_timestamp_us(skb);
 912
 913	return div_u64(skb->skb_mstamp_ns, NSEC_PER_MSEC);
 914}
 915
 916static inline u32 tcp_tw_tsval(const struct tcp_timewait_sock *tcptw)
 917{
 918	return tcp_clock_ts(tcptw->tw_sk.tw_usec_ts) + tcptw->tw_ts_offset;
 919}
 920
 921static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq)
 922{
 923	return tcp_clock_ts(treq->req_usec_ts) + treq->ts_off;
 924}
 925
 926#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
 927
 928#define TCPHDR_FIN 0x01
 929#define TCPHDR_SYN 0x02
 930#define TCPHDR_RST 0x04
 931#define TCPHDR_PSH 0x08
 932#define TCPHDR_ACK 0x10
 933#define TCPHDR_URG 0x20
 934#define TCPHDR_ECE 0x40
 935#define TCPHDR_CWR 0x80
 936
 937#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
 938
 939/* State flags for sacked in struct tcp_skb_cb */
 940enum tcp_skb_cb_sacked_flags {
 941	TCPCB_SACKED_ACKED	= (1 << 0),	/* SKB ACK'd by a SACK block	*/
 942	TCPCB_SACKED_RETRANS	= (1 << 1),	/* SKB retransmitted		*/
 943	TCPCB_LOST		= (1 << 2),	/* SKB is lost			*/
 944	TCPCB_TAGBITS		= (TCPCB_SACKED_ACKED | TCPCB_SACKED_RETRANS |
 945				   TCPCB_LOST),	/* All tag bits			*/
 946	TCPCB_REPAIRED		= (1 << 4),	/* SKB repaired (no skb_mstamp_ns)	*/
 947	TCPCB_EVER_RETRANS	= (1 << 7),	/* Ever retransmitted frame	*/
 948	TCPCB_RETRANS		= (TCPCB_SACKED_RETRANS | TCPCB_EVER_RETRANS |
 949				   TCPCB_REPAIRED),
 950};
 951
 952/* This is what the send packet queuing engine uses to pass
 953 * TCP per-packet control information to the transmission code.
 954 * We also store the host-order sequence numbers in here too.
 955 * This is 44 bytes if IPV6 is enabled.
 956 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 957 */
 958struct tcp_skb_cb {
 959	__u32		seq;		/* Starting sequence number	*/
 960	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
 961	union {
 962		/* Note :
 
 
 963		 * 	  tcp_gso_segs/size are used in write queue only,
 964		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
 965		 */
 
 966		struct {
 967			u16	tcp_gso_segs;
 968			u16	tcp_gso_size;
 969		};
 970	};
 971	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/
 972
 973	__u8		sacked;		/* State flags for SACK.	*/
 
 
 
 
 
 
 
 
 
 974	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
 975	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
 976			eor:1,		/* Is skb MSG_EOR marked? */
 977			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
 978			unused:5;
 979	__u32		ack_seq;	/* Sequence number ACK'd	*/
 980	union {
 981		struct {
 982#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
 983			/* There is space for up to 24 bytes */
 984			__u32 is_app_limited:1, /* cwnd not fully used? */
 985			      delivered_ce:20,
 986			      unused:11;
 987			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
 988			__u32 delivered;
 989			/* start of send pipeline phase */
 990			u64 first_tx_mstamp;
 991			/* when we reached the "delivered" count */
 992			u64 delivered_mstamp;
 993		} tx;   /* only used for outgoing skbs */
 994		union {
 995			struct inet_skb_parm	h4;
 996#if IS_ENABLED(CONFIG_IPV6)
 997			struct inet6_skb_parm	h6;
 998#endif
 999		} header;	/* For incoming skbs */
 
 
 
 
 
 
1000	};
1001};
1002
1003#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
1004
1005extern const struct inet_connection_sock_af_ops ipv4_specific;
1006
1007#if IS_ENABLED(CONFIG_IPV6)
1008/* This is the variant of inet6_iif() that must be used by TCP,
1009 * as TCP moves IP6CB into a different location in skb->cb[]
1010 */
1011static inline int tcp_v6_iif(const struct sk_buff *skb)
1012{
1013	return TCP_SKB_CB(skb)->header.h6.iif;
1014}
1015
1016static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
1017{
1018	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
1019
1020	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
1021}
1022
1023/* TCP_SKB_CB reference means this can not be used from early demux */
1024static inline int tcp_v6_sdif(const struct sk_buff *skb)
1025{
1026#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
1027	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
1028		return TCP_SKB_CB(skb)->header.h6.iif;
1029#endif
1030	return 0;
1031}
 
1032
1033extern const struct inet_connection_sock_af_ops ipv6_specific;
1034
1035INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
1036INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
1037void tcp_v6_early_demux(struct sk_buff *skb);
1038
1039#endif
 
 
1040
1041/* TCP_SKB_CB reference means this can not be used from early demux */
1042static inline int tcp_v4_sdif(struct sk_buff *skb)
1043{
1044#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
1045	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
1046		return TCP_SKB_CB(skb)->header.h4.iif;
1047#endif
1048	return 0;
1049}
1050
1051/* Due to TSO, an SKB can be composed of multiple actual
1052 * packets.  To keep these tracked properly, we use this.
1053 */
1054static inline int tcp_skb_pcount(const struct sk_buff *skb)
1055{
1056	return TCP_SKB_CB(skb)->tcp_gso_segs;
1057}
1058
1059static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
1060{
1061	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
1062}
1063
1064static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
1065{
1066	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
1067}
1068
1069/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
1070static inline int tcp_skb_mss(const struct sk_buff *skb)
1071{
1072	return TCP_SKB_CB(skb)->tcp_gso_size;
1073}
1074
1075static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
1076{
1077	return likely(!TCP_SKB_CB(skb)->eor);
1078}
1079
1080static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
1081					const struct sk_buff *from)
1082{
1083	/* skb_cmp_decrypted() not needed, use tcp_write_collapse_fence() */
1084	return likely(tcp_skb_can_collapse_to(to) &&
1085		      mptcp_skb_can_collapse(to, from) &&
1086		      skb_pure_zcopy_same(to, from) &&
1087		      skb_frags_readable(to) == skb_frags_readable(from));
1088}
1089
1090static inline bool tcp_skb_can_collapse_rx(const struct sk_buff *to,
1091					   const struct sk_buff *from)
1092{
1093	return likely(mptcp_skb_can_collapse(to, from) &&
1094		      !skb_cmp_decrypted(to, from));
1095}
1096
1097/* Events passed to congestion control interface */
1098enum tcp_ca_event {
1099	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
1100	CA_EVENT_CWND_RESTART,	/* congestion window restart */
1101	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
1102	CA_EVENT_LOSS,		/* loss timeout */
1103	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
1104	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
 
 
1105};
1106
1107/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
1108enum tcp_ca_ack_event_flags {
1109	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
1110	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
1111	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
1112};
1113
1114/*
1115 * Interface for adding new TCP congestion control handlers
1116 */
1117#define TCP_CA_NAME_MAX	16
1118#define TCP_CA_MAX	128
1119#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)
1120
1121#define TCP_CA_UNSPEC	0
1122
1123/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
1124#define TCP_CONG_NON_RESTRICTED 0x1
1125/* Requires ECN/ECT set on all packets */
1126#define TCP_CONG_NEEDS_ECN	0x2
1127#define TCP_CONG_MASK	(TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)
1128
1129union tcp_cc_info;
1130
1131struct ack_sample {
1132	u32 pkts_acked;
1133	s32 rtt_us;
1134	u32 in_flight;
1135};
1136
1137/* A rate sample measures the number of (original/retransmitted) data
1138 * packets delivered "delivered" over an interval of time "interval_us".
1139 * The tcp_rate.c code fills in the rate sample, and congestion
1140 * control modules that define a cong_control function to run at the end
1141 * of ACK processing can optionally chose to consult this sample when
1142 * setting cwnd and pacing rate.
1143 * A sample is invalid if "delivered" or "interval_us" is negative.
1144 */
1145struct rate_sample {
1146	u64  prior_mstamp; /* starting timestamp for interval */
1147	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
1148	u32  prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
1149	s32  delivered;		/* number of packets delivered over interval */
1150	s32  delivered_ce;	/* number of packets delivered w/ CE marks*/
1151	long interval_us;	/* time for tp->delivered to incr "delivered" */
1152	u32 snd_interval_us;	/* snd interval for delivered packets */
1153	u32 rcv_interval_us;	/* rcv interval for delivered packets */
1154	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
1155	int  losses;		/* number of packets marked lost upon ACK */
1156	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
1157	u32  prior_in_flight;	/* in flight before this ACK */
1158	u32  last_end_seq;	/* end_seq of most recently ACKed packet */
1159	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
1160	bool is_retrans;	/* is sample from retransmission? */
1161	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
1162};
1163
1164struct tcp_congestion_ops {
1165/* fast path fields are put first to fill one cache line */
 
 
 
 
 
 
 
1166
1167	/* return slow start threshold (required) */
1168	u32 (*ssthresh)(struct sock *sk);
1169
1170	/* do new cwnd calculation (required) */
1171	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
1172
1173	/* call before changing ca_state (optional) */
1174	void (*set_state)(struct sock *sk, u8 new_state);
1175
1176	/* call when cwnd event occurs (optional) */
1177	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
1178
1179	/* call when ack arrives (optional) */
1180	void (*in_ack_event)(struct sock *sk, u32 flags);
1181
 
1182	/* hook for packet ack accounting (optional) */
1183	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
1184
1185	/* override sysctl_tcp_min_tso_segs */
1186	u32 (*min_tso_segs)(struct sock *sk);
1187
 
1188	/* call when packets are delivered to update cwnd and pacing rate,
1189	 * after all the ca_state processing. (optional)
1190	 */
1191	void (*cong_control)(struct sock *sk, u32 ack, int flag, const struct rate_sample *rs);
1192
1193
1194	/* new value of cwnd after loss (required) */
1195	u32  (*undo_cwnd)(struct sock *sk);
1196	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
1197	u32 (*sndbuf_expand)(struct sock *sk);
1198
1199/* control/slow paths put last */
1200	/* get info for inet_diag (optional) */
1201	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
1202			   union tcp_cc_info *info);
1203
1204	char 			name[TCP_CA_NAME_MAX];
1205	struct module		*owner;
1206	struct list_head	list;
1207	u32			key;
1208	u32			flags;
1209
1210	/* initialize private data (optional) */
1211	void (*init)(struct sock *sk);
1212	/* cleanup private data  (optional) */
1213	void (*release)(struct sock *sk);
1214} ____cacheline_aligned_in_smp;
1215
1216int tcp_register_congestion_control(struct tcp_congestion_ops *type);
1217void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
1218int tcp_update_congestion_control(struct tcp_congestion_ops *type,
1219				  struct tcp_congestion_ops *old_type);
1220int tcp_validate_congestion_control(struct tcp_congestion_ops *ca);
1221
1222void tcp_assign_congestion_control(struct sock *sk);
1223void tcp_init_congestion_control(struct sock *sk);
1224void tcp_cleanup_congestion_control(struct sock *sk);
1225int tcp_set_default_congestion_control(struct net *net, const char *name);
1226void tcp_get_default_congestion_control(struct net *net, char *name);
1227void tcp_get_available_congestion_control(char *buf, size_t len);
1228void tcp_get_allowed_congestion_control(char *buf, size_t len);
1229int tcp_set_allowed_congestion_control(char *allowed);
1230int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
1231			       bool cap_net_admin);
1232u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
1233void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
1234
1235u32 tcp_reno_ssthresh(struct sock *sk);
1236u32 tcp_reno_undo_cwnd(struct sock *sk);
1237void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
1238extern struct tcp_congestion_ops tcp_reno;
1239
1240struct tcp_congestion_ops *tcp_ca_find(const char *name);
1241struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
1242u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
1243#ifdef CONFIG_INET
1244char *tcp_ca_get_name_by_key(u32 key, char *buffer);
1245#else
1246static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
1247{
1248	return NULL;
1249}
1250#endif
1251
1252static inline bool tcp_ca_needs_ecn(const struct sock *sk)
1253{
1254	const struct inet_connection_sock *icsk = inet_csk(sk);
1255
1256	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
1257}
1258
 
 
 
 
 
 
 
 
 
1259static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
1260{
1261	const struct inet_connection_sock *icsk = inet_csk(sk);
1262
1263	if (icsk->icsk_ca_ops->cwnd_event)
1264		icsk->icsk_ca_ops->cwnd_event(sk, event);
1265}
1266
1267/* From tcp_cong.c */
1268void tcp_set_ca_state(struct sock *sk, const u8 ca_state);
1269
1270/* From tcp_rate.c */
1271void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1272void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1273			    struct rate_sample *rs);
1274void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1275		  bool is_sack_reneg, struct rate_sample *rs);
1276void tcp_rate_check_app_limited(struct sock *sk);
1277
1278static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
1279{
1280	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
1281}
1282
1283/* These functions determine how the current flow behaves in respect of SACK
1284 * handling. SACK is negotiated with the peer, and therefore it can vary
1285 * between different flows.
1286 *
1287 * tcp_is_sack - SACK enabled
1288 * tcp_is_reno - No SACK
1289 */
1290static inline int tcp_is_sack(const struct tcp_sock *tp)
1291{
1292	return likely(tp->rx_opt.sack_ok);
1293}
1294
1295static inline bool tcp_is_reno(const struct tcp_sock *tp)
1296{
1297	return !tcp_is_sack(tp);
1298}
1299
1300static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
1301{
1302	return tp->sacked_out + tp->lost_out;
1303}
1304
1305/* This determines how many packets are "in the network" to the best
1306 * of our knowledge.  In many cases it is conservative, but where
1307 * detailed information is available from the receiver (via SACK
1308 * blocks etc.) we can make more aggressive calculations.
1309 *
1310 * Use this for decisions involving congestion control, use just
1311 * tp->packets_out to determine if the send queue is empty or not.
1312 *
1313 * Read this equation as:
1314 *
1315 *	"Packets sent once on transmission queue" MINUS
1316 *	"Packets left network, but not honestly ACKed yet" PLUS
1317 *	"Packets fast retransmitted"
1318 */
1319static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
1320{
1321	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
1322}
1323
1324#define TCP_INFINITE_SSTHRESH	0x7fffffff
1325
1326static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
1327{
1328	return tp->snd_cwnd;
1329}
1330
1331static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
1332{
1333	WARN_ON_ONCE((int)val <= 0);
1334	tp->snd_cwnd = val;
1335}
1336
1337static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
1338{
1339	return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
1340}
1341
1342static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
1343{
1344	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
1345}
1346
1347static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
1348{
1349	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
1350	       (1 << inet_csk(sk)->icsk_ca_state);
1351}
1352
1353/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1354 * The exception is cwnd reduction phase, when cwnd is decreasing towards
1355 * ssthresh.
1356 */
1357static inline __u32 tcp_current_ssthresh(const struct sock *sk)
1358{
1359	const struct tcp_sock *tp = tcp_sk(sk);
1360
1361	if (tcp_in_cwnd_reduction(sk))
1362		return tp->snd_ssthresh;
1363	else
1364		return max(tp->snd_ssthresh,
1365			   ((tcp_snd_cwnd(tp) >> 1) +
1366			    (tcp_snd_cwnd(tp) >> 2)));
1367}
1368
1369/* Use define here intentionally to get WARN_ON location shown at the caller */
1370#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
1371
1372void tcp_enter_cwr(struct sock *sk);
1373__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
1374
1375/* The maximum number of MSS of available cwnd for which TSO defers
1376 * sending if not using sysctl_tcp_tso_win_divisor.
1377 */
1378static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
1379{
1380	return 3;
1381}
1382
1383/* Returns end sequence number of the receiver's advertised window */
1384static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
1385{
1386	return tp->snd_una + tp->snd_wnd;
1387}
1388
1389/* We follow the spirit of RFC2861 to validate cwnd but implement a more
1390 * flexible approach. The RFC suggests cwnd should not be raised unless
1391 * it was fully used previously. And that's exactly what we do in
1392 * congestion avoidance mode. But in slow start we allow cwnd to grow
1393 * as long as the application has used half the cwnd.
1394 * Example :
1395 *    cwnd is 10 (IW10), but application sends 9 frames.
1396 *    We allow cwnd to reach 18 when all frames are ACKed.
1397 * This check is safe because it's as aggressive as slow start which already
1398 * risks 100% overshoot. The advantage is that we discourage application to
1399 * either send more filler packets or data to artificially blow up the cwnd
1400 * usage, and allow application-limited process to probe bw more aggressively.
1401 */
1402static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1403{
1404	const struct tcp_sock *tp = tcp_sk(sk);
1405
1406	if (tp->is_cwnd_limited)
1407		return true;
1408
1409	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
1410	if (tcp_in_slow_start(tp))
1411		return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;
1412
1413	return false;
1414}
1415
1416/* BBR congestion control needs pacing.
1417 * Same remark for SO_MAX_PACING_RATE.
1418 * sch_fq packet scheduler is efficiently handling pacing,
1419 * but is not always installed/used.
1420 * Return true if TCP stack should pace packets itself.
1421 */
1422static inline bool tcp_needs_internal_pacing(const struct sock *sk)
1423{
1424	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
1425}
1426
1427/* Estimates in how many jiffies next packet for this flow can be sent.
1428 * Scheduling a retransmit timer too early would be silly.
1429 */
1430static inline unsigned long tcp_pacing_delay(const struct sock *sk)
1431{
1432	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;
1433
1434	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
1435}
1436
1437static inline void tcp_reset_xmit_timer(struct sock *sk,
1438					const int what,
1439					unsigned long when,
1440					const unsigned long max_when)
1441{
1442	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
1443				  max_when);
1444}
1445
1446/* Something is really bad, we could not queue an additional packet,
1447 * because qdisc is full or receiver sent a 0 window, or we are paced.
1448 * We do not want to add fuel to the fire, or abort too early,
1449 * so make sure the timer we arm now is at least 200ms in the future,
1450 * regardless of current icsk_rto value (as it could be ~2ms)
1451 */
1452static inline unsigned long tcp_probe0_base(const struct sock *sk)
1453{
1454	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
1455}
1456
1457/* Variant of inet_csk_rto_backoff() used for zero window probes */
1458static inline unsigned long tcp_probe0_when(const struct sock *sk,
1459					    unsigned long max_when)
1460{
1461	u8 backoff = min_t(u8, ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1,
1462			   inet_csk(sk)->icsk_backoff);
1463	u64 when = (u64)tcp_probe0_base(sk) << backoff;
1464
1465	return (unsigned long)min_t(u64, when, max_when);
1466}
1467
1468static inline void tcp_check_probe_timer(struct sock *sk)
1469{
1470	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
1471		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
1472				     tcp_probe0_base(sk), TCP_RTO_MAX);
1473}
1474
1475static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
1476{
1477	tp->snd_wl1 = seq;
1478}
1479
1480static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
1481{
1482	tp->snd_wl1 = seq;
1483}
1484
1485/*
1486 * Calculate(/check) TCP checksum
1487 */
1488static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1489				   __be32 daddr, __wsum base)
1490{
1491	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
 
 
 
 
 
1492}
1493
1494static inline bool tcp_checksum_complete(struct sk_buff *skb)
1495{
1496	return !skb_csum_unnecessary(skb) &&
1497		__skb_checksum_complete(skb);
1498}
1499
1500bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
1501		     enum skb_drop_reason *reason);
1502
 
1503
1504int tcp_filter(struct sock *sk, struct sk_buff *skb);
 
 
 
 
 
 
1505void tcp_set_state(struct sock *sk, int state);
 
1506void tcp_done(struct sock *sk);
 
1507int tcp_abort(struct sock *sk, int err);
1508
1509static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
1510{
1511	rx_opt->dsack = 0;
1512	rx_opt->num_sacks = 0;
1513}
1514
 
1515void tcp_cwnd_restart(struct sock *sk, s32 delta);
1516
1517static inline void tcp_slow_start_after_idle_check(struct sock *sk)
1518{
1519	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1520	struct tcp_sock *tp = tcp_sk(sk);
1521	s32 delta;
1522
1523	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
1524	    tp->packets_out || ca_ops->cong_control)
1525		return;
1526	delta = tcp_jiffies32 - tp->lsndtime;
1527	if (delta > inet_csk(sk)->icsk_rto)
1528		tcp_cwnd_restart(sk, delta);
1529}
1530
1531/* Determine a window scaling and initial window to offer. */
1532void tcp_select_initial_window(const struct sock *sk, int __space,
1533			       __u32 mss, __u32 *rcv_wnd,
1534			       __u32 *window_clamp, int wscale_ok,
1535			       __u8 *rcv_wscale, __u32 init_rcv_wnd);
1536
1537static inline int __tcp_win_from_space(u8 scaling_ratio, int space)
1538{
1539	s64 scaled_space = (s64)space * scaling_ratio;
1540
1541	return scaled_space >> TCP_RMEM_TO_WIN_SCALE;
1542}
1543
1544static inline int tcp_win_from_space(const struct sock *sk, int space)
1545{
1546	return __tcp_win_from_space(tcp_sk(sk)->scaling_ratio, space);
1547}
1548
1549/* inverse of __tcp_win_from_space() */
1550static inline int __tcp_space_from_win(u8 scaling_ratio, int win)
1551{
1552	u64 val = (u64)win << TCP_RMEM_TO_WIN_SCALE;
1553
1554	do_div(val, scaling_ratio);
1555	return val;
1556}
1557
1558static inline int tcp_space_from_win(const struct sock *sk, int win)
1559{
1560	return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win);
1561}
1562
1563/* Assume a 50% default for skb->len/skb->truesize ratio.
1564 * This may be adjusted later in tcp_measure_rcv_mss().
1565 */
1566#define TCP_DEFAULT_SCALING_RATIO (1 << (TCP_RMEM_TO_WIN_SCALE - 1))
1567
1568static inline void tcp_scaling_ratio_init(struct sock *sk)
1569{
1570	tcp_sk(sk)->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
1571}
1572
1573/* Note: caller must be prepared to deal with negative returns */
1574static inline int tcp_space(const struct sock *sk)
1575{
1576	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
1577				  READ_ONCE(sk->sk_backlog.len) -
1578				  atomic_read(&sk->sk_rmem_alloc));
1579}
1580
1581static inline int tcp_full_space(const struct sock *sk)
1582{
1583	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
1584}
1585
1586static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh)
1587{
1588	int unused_mem = sk_unused_reserved_mem(sk);
1589	struct tcp_sock *tp = tcp_sk(sk);
1590
1591	tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh);
1592	if (unused_mem)
1593		tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
1594					 tcp_win_from_space(sk, unused_mem));
1595}
1596
1597static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
1598{
1599	__tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);
1600}
1601
1602void tcp_cleanup_rbuf(struct sock *sk, int copied);
1603void __tcp_cleanup_rbuf(struct sock *sk, int copied);
1604
1605
1606/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
1607 * If 87.5 % (7/8) of the space has been consumed, we want to override
1608 * SO_RCVLOWAT constraint, since we are receiving skbs with too small
1609 * len/truesize ratio.
1610 */
1611static inline bool tcp_rmem_pressure(const struct sock *sk)
1612{
1613	int rcvbuf, threshold;
1614
1615	if (tcp_under_memory_pressure(sk))
1616		return true;
1617
1618	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1619	threshold = rcvbuf - (rcvbuf >> 3);
1620
1621	return atomic_read(&sk->sk_rmem_alloc) > threshold;
1622}
1623
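/* Decide whether poll()/epoll should report the socket as readable:
 * enough bytes are queued to meet @target, we are under receive memory
 * pressure, or the receive window has shrunk to at most one rcv_mss.
 */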
1624static inline bool tcp_epollin_ready(const struct sock *sk, int target)
1625{
1626	const struct tcp_sock *tp = tcp_sk(sk);
1627	int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
1628
1629	if (avail <= 0)
1630		return false;
1631
1632	return (avail >= target) || tcp_rmem_pressure(sk) ||
1633	       (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
1634}
1635
1636extern void tcp_openreq_init_rwin(struct request_sock *req,
1637				  const struct sock *sk_listener,
1638				  const struct dst_entry *dst);
1639
1640void tcp_enter_memory_pressure(struct sock *sk);
1641void tcp_leave_memory_pressure(struct sock *sk);
1642
1643static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1644{
1645	struct net *net = sock_net((struct sock *)tp);
1646	int val;
1647
1648	/* Paired with WRITE_ONCE() in tcp_sock_set_keepintvl()
1649	 * and do_tcp_setsockopt().
1650	 */
1651	val = READ_ONCE(tp->keepalive_intvl);
1652
1653	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
1654}
1655
1656static inline int keepalive_time_when(const struct tcp_sock *tp)
1657{
1658	struct net *net = sock_net((struct sock *)tp);
1659	int val;
1660
1661	/* Paired with WRITE_ONCE() in tcp_sock_set_keepidle_locked() */
1662	val = READ_ONCE(tp->keepalive_time);
1663
1664	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
1665}
1666
1667static inline int keepalive_probes(const struct tcp_sock *tp)
1668{
1669	struct net *net = sock_net((struct sock *)tp);
1670	int val;
1671
1672	/* Paired with WRITE_ONCE() in tcp_sock_set_keepcnt()
1673	 * and do_tcp_setsockopt().
1674	 */
1675	val = READ_ONCE(tp->keepalive_probes);
1676
1677	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
1678}
1679
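/* Jiffies elapsed since we last heard from the peer, used to decide when
 * the next keepalive probe is due.
 */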
1680static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1681{
1682	const struct inet_connection_sock *icsk = &tp->inet_conn;
1683
1684	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
1685			  tcp_jiffies32 - tp->rcv_tstamp);
1686}
1687
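/* FIN_WAIT2 timeout: linger2 if set, else the tcp_fin_timeout sysctl,
 * but never less than 3.5 * RTO.
 */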
1688static inline int tcp_fin_time(const struct sock *sk)
1689{
1690	int fin_timeout = tcp_sk(sk)->linger2 ? :
1691		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
1692	const int rto = inet_csk(sk)->icsk_rto;
1693
1694	if (fin_timeout < (rto << 2) - (rto >> 1))
1695		fin_timeout = (rto << 2) - (rto >> 1);
1696
1697	return fin_timeout;
1698}
1699
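/* PAWS (RFC 7323) acceptance test: a segment passes if its tsval is not
 * more than @paws_win behind ts_recent, if ts_recent itself is stale
 * (older than TCP_PAWS_WRAP), or if the peer never sent a valid timestamp.
 */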
1700static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1701				  int paws_win)
1702{
1703	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1704		return true;
1705	if (unlikely(!time_before32(ktime_get_seconds(),
1706				    rx_opt->ts_recent_stamp + TCP_PAWS_WRAP)))
1707		return true;
1708	/*
1709	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
1710	 * while the following TCP messages carry valid values. Ignore a 0 value,
1711	 * or else a 'negative' tsval might prevent us from accepting their packets.
1712	 */
1713	if (!rx_opt->ts_recent)
1714		return true;
1715	return false;
1716}
1717
1718static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1719				   int rst)
1720{
1721	if (tcp_paws_check(rx_opt, 0))
1722		return false;
1723
1724	/* RST segments are not recommended to carry a timestamp,
1725	   and, if they do, it is recommended to ignore PAWS because
1726	   "their cleanup function should take precedence over timestamps."
1727	   Certainly, that is a mistake. It is necessary to understand the reasons
1728	   for this constraint before relaxing it: if the peer reboots, its clock may go
1729	   out of sync and half-open connections will not be reset.
1730	   Actually, the problem would not exist if all
1731	   the implementations followed the draft about maintaining the clock
1732	   across reboots. Linux-2.2 DOES NOT!
1733
1734	   However, we can relax the time bounds for RST segments to MSL.
1735	 */
1736	if (rst && !time_before32(ktime_get_seconds(),
1737				  rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
1738		return false;
1739	return true;
1740}
1741
1742bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
1743			  int mib_idx, u32 *last_oow_ack_time);
1744
1745static inline void tcp_mib_init(struct net *net)
1746{
1747	/* See RFC 2012 */
1748	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
1749	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1750	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1751	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
1752}
1753
1754/* from STCP */
1755static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1756{
1757	tp->lost_skb_hint = NULL;
1758}
1759
1760static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1761{
1762	tcp_clear_retrans_hints_partial(tp);
1763	tp->retransmit_skb_hint = NULL;
1764}
1765
1766#define tcp_md5_addr tcp_ao_addr
1767
1768/* - key database */
1769struct tcp_md5sig_key {
1770	struct hlist_node	node;
1771	u8			keylen;
1772	u8			family; /* AF_INET or AF_INET6 */
1773	u8			prefixlen;
1774	u8			flags;
1775	union tcp_md5_addr	addr;
1776	int			l3index; /* set if key added with L3 scope */
1777	u8			key[TCP_MD5SIG_MAXKEYLEN];
1778	struct rcu_head		rcu;
1779};
1780
1781/* - sock block */
1782struct tcp_md5sig_info {
1783	struct hlist_head	head;
1784	struct rcu_head		rcu;
1785};
1786
1787/* - pseudo header */
1788struct tcp4_pseudohdr {
1789	__be32		saddr;
1790	__be32		daddr;
1791	__u8		pad;
1792	__u8		protocol;
1793	__be16		len;
1794};
1795
1796struct tcp6_pseudohdr {
1797	struct in6_addr	saddr;
1798	struct in6_addr daddr;
1799	__be32		len;
1800	__be32		protocol;	/* including padding */
1801};
1802
1803union tcp_md5sum_block {
1804	struct tcp4_pseudohdr ip4;
1805#if IS_ENABLED(CONFIG_IPV6)
1806	struct tcp6_pseudohdr ip6;
1807#endif
1808};
1809
1810/*
1811 * struct tcp_sigpool - per-CPU pool of ahash_requests
1812 * @scratch: per-CPU temporary area, that can be used between
1813 *	     tcp_sigpool_start() and tcp_sigpool_end() to perform
1814 *	     crypto request
1815 * @req: pre-allocated ahash request
1816 */
1817struct tcp_sigpool {
1818	void *scratch;
1819	struct ahash_request *req;
1820};
1821
1822int tcp_sigpool_alloc_ahash(const char *alg, size_t scratch_size);
1823void tcp_sigpool_get(unsigned int id);
1824void tcp_sigpool_release(unsigned int id);
1825int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp,
1826			      const struct sk_buff *skb,
1827			      unsigned int header_len);
1828
1829/**
1830 * tcp_sigpool_start - disable bh and start using tcp_sigpool_ahash
1831 * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
1832 * @c: returned tcp_sigpool for usage (uninitialized on failure)
1833 *
1834 * Returns 0 on success, error otherwise.
1835 */
1836int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c);
1837/**
1838 * tcp_sigpool_end - enable bh and stop using tcp_sigpool
1839 * @c: tcp_sigpool context that was returned by tcp_sigpool_start()
1840 */
1841void tcp_sigpool_end(struct tcp_sigpool *c);
1842size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len);
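/*
 * Typical usage sketch (assuming @id came from tcp_sigpool_alloc_ahash()):
 *
 *	struct tcp_sigpool hp;
 *
 *	if (!tcp_sigpool_start(id, &hp)) {
 *		tcp_sigpool_hash_skb_data(&hp, skb, header_len);
 *		... finish and read the digest via hp.req ...
 *		tcp_sigpool_end(&hp);
 *	}
 */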
1843/* - functions */
1844int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1845			const struct sock *sk, const struct sk_buff *skb);
1846int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1847		   int family, u8 prefixlen, int l3index, u8 flags,
1848		   const u8 *newkey, u8 newkeylen);
1849int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
1850		     int family, u8 prefixlen, int l3index,
1851		     struct tcp_md5sig_key *key);
1852
1853int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1854		   int family, u8 prefixlen, int l3index, u8 flags);
1855void tcp_clear_md5_list(struct sock *sk);
1856struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1857					 const struct sock *addr_sk);
1858
1859#ifdef CONFIG_TCP_MD5SIG
1860struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1861					   const union tcp_md5_addr *addr,
1862					   int family, bool any_l3index);
1863static inline struct tcp_md5sig_key *
1864tcp_md5_do_lookup(const struct sock *sk, int l3index,
1865		  const union tcp_md5_addr *addr, int family)
1866{
1867	if (!static_branch_unlikely(&tcp_md5_needed.key))
1868		return NULL;
1869	return __tcp_md5_do_lookup(sk, l3index, addr, family, false);
1870}
1871
1872static inline struct tcp_md5sig_key *
1873tcp_md5_do_lookup_any_l3index(const struct sock *sk,
1874			      const union tcp_md5_addr *addr, int family)
1875{
1876	if (!static_branch_unlikely(&tcp_md5_needed.key))
1877		return NULL;
1878	return __tcp_md5_do_lookup(sk, 0, addr, family, true);
1879}
1880
1881#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
1882#else
1883static inline struct tcp_md5sig_key *
1884tcp_md5_do_lookup(const struct sock *sk, int l3index,
1885		  const union tcp_md5_addr *addr, int family)
1886{
1887	return NULL;
1888}
1889
1890static inline struct tcp_md5sig_key *
1891tcp_md5_do_lookup_any_l3index(const struct sock *sk,
1892			      const union tcp_md5_addr *addr, int family)
1893{
1894	return NULL;
1895}
1896
1897#define tcp_twsk_md5_key(twsk)	NULL
1898#endif
1899
1900int tcp_md5_alloc_sigpool(void);
1901void tcp_md5_release_sigpool(void);
1902void tcp_md5_add_sigpool(void);
1903extern int tcp_md5_sigpool_id;
1904
1905int tcp_md5_hash_key(struct tcp_sigpool *hp,
1906		     const struct tcp_md5sig_key *key);
1907
1908/* From tcp_fastopen.c */
1909void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1910			    struct tcp_fastopen_cookie *cookie);
1911void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1912			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
1913			    u16 try_exp);
1914struct tcp_fastopen_request {
1915	/* Fast Open cookie. Size 0 means a cookie request */
1916	struct tcp_fastopen_cookie	cookie;
1917	struct msghdr			*data;  /* data in MSG_FASTOPEN */
1918	size_t				size;
1919	int				copied;	/* queued in tcp_connect() */
1920	struct ubuf_info		*uarg;
1921};
1922void tcp_free_fastopen_req(struct tcp_sock *tp);
1923void tcp_fastopen_destroy_cipher(struct sock *sk);
1924void tcp_fastopen_ctx_destroy(struct net *net);
1925int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
1926			      void *primary_key, void *backup_key);
1927int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
1928			    u64 *key);
1929void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
1930struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1931			      struct request_sock *req,
1932			      struct tcp_fastopen_cookie *foc,
1933			      const struct dst_entry *dst);
1934void tcp_fastopen_init_key_once(struct net *net);
1935bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
1936			     struct tcp_fastopen_cookie *cookie);
1937bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
1938#define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
1939#define TCP_FASTOPEN_KEY_MAX 2
1940#define TCP_FASTOPEN_KEY_BUF_LENGTH \
1941	(TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)
1942
1943/* Fastopen key context */
1944struct tcp_fastopen_context {
1945	siphash_key_t	key[TCP_FASTOPEN_KEY_MAX];
1946	int		num;
1947	struct rcu_head	rcu;
1948};
1949
1950void tcp_fastopen_active_disable(struct sock *sk);
1951bool tcp_fastopen_active_should_disable(struct sock *sk);
1952void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
1953void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
1954
1955/* Caller needs to wrap with rcu_read_(un)lock() */
1956static inline
1957struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
1958{
1959	struct tcp_fastopen_context *ctx;
1960
1961	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
1962	if (!ctx)
1963		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
1964	return ctx;
1965}
1966
1967static inline
1968bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
1969			       const struct tcp_fastopen_cookie *orig)
1970{
1971	if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
1972	    orig->len == foc->len &&
1973	    !memcmp(orig->val, foc->val, foc->len))
1974		return true;
1975	return false;
1976}
1977
1978static inline
1979int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
1980{
1981	return ctx->num;
1982}
1983
1984/* Latencies incurred by various limits for a sender. They are
1985 * chronograph-like stats that are mutually exclusive.
1986 */
1987enum tcp_chrono {
1988	TCP_CHRONO_UNSPEC,
1989	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
1990	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
1991	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
1992	__TCP_CHRONO_MAX,
1993};
1994
1995void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
1996void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
1997
1998/* This helper is needed, because skb->tcp_tsorted_anchor uses
1999 * the same memory storage as skb->destructor/_skb_refdst
2000 */
2001static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
2002{
2003	skb->destructor = NULL;
2004	skb->_skb_refdst = 0UL;
2005}
2006
2007#define tcp_skb_tsorted_save(skb) {		\
2008	unsigned long _save = skb->_skb_refdst;	\
2009	skb->_skb_refdst = 0UL;
2010
2011#define tcp_skb_tsorted_restore(skb)		\
2012	skb->_skb_refdst = _save;		\
2013}
2014
2015void tcp_write_queue_purge(struct sock *sk);
2016
2017static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
2018{
2019	return skb_rb_first(&sk->tcp_rtx_queue);
2020}
2021
2022static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
2023{
2024	return skb_rb_last(&sk->tcp_rtx_queue);
2025}
2026
2027static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
2028{
2029	return skb_peek_tail(&sk->sk_write_queue);
2030}
2031
2032#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
2033	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
2034
2035static inline struct sk_buff *tcp_send_head(const struct sock *sk)
2036{
2037	return skb_peek(&sk->sk_write_queue);
2038}
2039
2040static inline bool tcp_skb_is_last(const struct sock *sk,
2041				   const struct sk_buff *skb)
2042{
2043	return skb_queue_is_last(&sk->sk_write_queue, skb);
2044}
2045
2046/**
2047 * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue
2048 * @sk: socket
2049 *
2050 * Since the write queue can have a temporary empty skb in it,
2051 * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
2052 */
2053static inline bool tcp_write_queue_empty(const struct sock *sk)
2054{
2055	const struct tcp_sock *tp = tcp_sk(sk);
2056
2057	return tp->write_seq == tp->snd_nxt;
2058}
2059
2060static inline bool tcp_rtx_queue_empty(const struct sock *sk)
2061{
2062	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
2063}
2064
2065static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
2066{
2067	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
2068}
2069
2070static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
2071{
2072	__skb_queue_tail(&sk->sk_write_queue, skb);
2073
2074	/* Queue it, remembering where we must start sending. */
2075	if (sk->sk_write_queue.next == skb)
2076		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
2077}
2078
2079/* Insert new before skb on the write queue of sk.  */
2080static inline void tcp_insert_write_queue_before(struct sk_buff *new,
2081						  struct sk_buff *skb,
2082						  struct sock *sk)
2083{
2084	__skb_queue_before(&sk->sk_write_queue, skb, new);
2085}
2086
2087static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
2088{
2089	tcp_skb_tsorted_anchor_cleanup(skb);
2090	__skb_unlink(skb, &sk->sk_write_queue);
2091}
2092
2093void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);
2094
2095static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
2096{
2097	tcp_skb_tsorted_anchor_cleanup(skb);
2098	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
2099}
2100
2101static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
2102{
2103	list_del(&skb->tcp_tsorted_anchor);
2104	tcp_rtx_queue_unlink(skb, sk);
2105	tcp_wmem_free_skb(sk, skb);
2106}
2107
2108static inline void tcp_write_collapse_fence(struct sock *sk)
2109{
2110	struct sk_buff *skb = tcp_write_queue_tail(sk);
2111
2112	if (skb)
2113		TCP_SKB_CB(skb)->eor = 1;
2114}
2115
2116static inline void tcp_push_pending_frames(struct sock *sk)
2117{
2118	if (tcp_send_head(sk)) {
2119		struct tcp_sock *tp = tcp_sk(sk);
2120
2121		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
2122	}
2123}
2124
2125/* Start sequence of the skb just after the highest skb with SACKed
2126 * bit, valid only if sacked_out > 0 or when the caller has ensured
2127 * validity by itself.
2128 */
2129static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
2130{
2131	if (!tp->sacked_out)
2132		return tp->snd_una;
2133
2134	if (tp->highest_sack == NULL)
2135		return tp->snd_nxt;
2136
2137	return TCP_SKB_CB(tp->highest_sack)->seq;
2138}
2139
2140static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
2141{
2142	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
2143}
2144
2145static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
2146{
2147	return tcp_sk(sk)->highest_sack;
2148}
2149
2150static inline void tcp_highest_sack_reset(struct sock *sk)
2151{
2152	tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
2153}
2154
2155/* Called when old skb is about to be deleted and replaced by new skb */
2156static inline void tcp_highest_sack_replace(struct sock *sk,
2157					    struct sk_buff *old,
2158					    struct sk_buff *new)
2159{
2160	if (old == tcp_highest_sack(sk))
2161		tcp_sk(sk)->highest_sack = new;
2162}
2163
2164/* This helper checks if socket has IP_TRANSPARENT set */
2165static inline bool inet_sk_transparent(const struct sock *sk)
2166{
2167	switch (sk->sk_state) {
2168	case TCP_TIME_WAIT:
2169		return inet_twsk(sk)->tw_transparent;
2170	case TCP_NEW_SYN_RECV:
2171		return inet_rsk(inet_reqsk(sk))->no_srccheck;
2172	}
2173	return inet_test_bit(TRANSPARENT, sk);
2174}
2175
2176/* Determines whether this is a thin stream (which may suffer from
2177 * increased latency). Used to trigger latency-reducing mechanisms.
2178 */
2179static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
2180{
2181	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
2182}
2183
2184/* /proc */
2185enum tcp_seq_states {
2186	TCP_SEQ_STATE_LISTENING,
2187	TCP_SEQ_STATE_ESTABLISHED,
2188};
2189
2190void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
2191void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
2192void tcp_seq_stop(struct seq_file *seq, void *v);
2193
2194struct tcp_seq_afinfo {
2195	sa_family_t			family;
2196};
2197
2198struct tcp_iter_state {
2199	struct seq_net_private	p;
2200	enum tcp_seq_states	state;
2201	struct sock		*syn_wait_sk;
2202	int			bucket, offset, sbucket, num;
2203	loff_t			last_pos;
2204};
2205
2206extern struct request_sock_ops tcp_request_sock_ops;
2207extern struct request_sock_ops tcp6_request_sock_ops;
2208
2209void tcp_v4_destroy_sock(struct sock *sk);
2210
2211struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
2212				netdev_features_t features);
2213struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb);
2214struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th);
2215struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
2216				struct tcphdr *th);
2217INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
2218INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
2219INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
2220INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
2221#ifdef CONFIG_INET
2222void tcp_gro_complete(struct sk_buff *skb);
2223#else
2224static inline void tcp_gro_complete(struct sk_buff *skb) { }
2225#endif
2226
2227void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
2228
2229static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
2230{
2231	struct net *net = sock_net((struct sock *)tp);
2232	u32 val;
2233
2234	val = READ_ONCE(tp->notsent_lowat);
2235
2236	return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
2237}
2238
2239bool tcp_stream_memory_free(const struct sock *sk, int wake);
2240
2241#ifdef CONFIG_PROC_FS
2242int tcp4_proc_init(void);
2243void tcp4_proc_exit(void);
2244#endif
2245
2246int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
2247int tcp_conn_request(struct request_sock_ops *rsk_ops,
2248		     const struct tcp_request_sock_ops *af_ops,
2249		     struct sock *sk, struct sk_buff *skb);
2250
2251/* TCP af-specific functions */
2252struct tcp_sock_af_ops {
2253#ifdef CONFIG_TCP_MD5SIG
2254	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
2255						const struct sock *addr_sk);
2256	int		(*calc_md5_hash)(char *location,
2257					 const struct tcp_md5sig_key *md5,
2258					 const struct sock *sk,
2259					 const struct sk_buff *skb);
2260	int		(*md5_parse)(struct sock *sk,
2261				     int optname,
2262				     sockptr_t optval,
2263				     int optlen);
2264#endif
2265#ifdef CONFIG_TCP_AO
2266	int (*ao_parse)(struct sock *sk, int optname, sockptr_t optval, int optlen);
2267	struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
2268					struct sock *addr_sk,
2269					int sndid, int rcvid);
2270	int (*ao_calc_key_sk)(struct tcp_ao_key *mkt, u8 *key,
2271			      const struct sock *sk,
2272			      __be32 sisn, __be32 disn, bool send);
2273	int (*calc_ao_hash)(char *location, struct tcp_ao_key *ao,
2274			    const struct sock *sk, const struct sk_buff *skb,
2275			    const u8 *tkey, int hash_offset, u32 sne);
2276#endif
2277};
2278
2279struct tcp_request_sock_ops {
2280	u16 mss_clamp;
2281#ifdef CONFIG_TCP_MD5SIG
2282	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
2283						 const struct sock *addr_sk);
2284	int		(*calc_md5_hash) (char *location,
2285					  const struct tcp_md5sig_key *md5,
2286					  const struct sock *sk,
2287					  const struct sk_buff *skb);
2288#endif
2289#ifdef CONFIG_TCP_AO
2290	struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
2291					struct request_sock *req,
2292					int sndid, int rcvid);
2293	int (*ao_calc_key)(struct tcp_ao_key *mkt, u8 *key, struct request_sock *sk);
2294	int (*ao_synack_hash)(char *ao_hash, struct tcp_ao_key *mkt,
2295			      struct request_sock *req, const struct sk_buff *skb,
2296			      int hash_offset, u32 sne);
2297#endif
2298#ifdef CONFIG_SYN_COOKIES
2299	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
2300				 __u16 *mss);
2301#endif
2302	struct dst_entry *(*route_req)(const struct sock *sk,
2303				       struct sk_buff *skb,
2304				       struct flowi *fl,
2305				       struct request_sock *req,
2306				       u32 tw_isn);
2307	u32 (*init_seq)(const struct sk_buff *skb);
2308	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
2309	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
2310			   struct flowi *fl, struct request_sock *req,
2311			   struct tcp_fastopen_cookie *foc,
2312			   enum tcp_synack_type synack_type,
2313			   struct sk_buff *syn_skb);
2314};
2315
2316extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
2317#if IS_ENABLED(CONFIG_IPV6)
2318extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
2319#endif
2320
2321#ifdef CONFIG_SYN_COOKIES
2322static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2323					 const struct sock *sk, struct sk_buff *skb,
2324					 __u16 *mss)
2325{
2326	tcp_synq_overflow(sk);
2327	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
2328	return ops->cookie_init_seq(skb, mss);
2329}
2330#else
2331static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2332					 const struct sock *sk, struct sk_buff *skb,
2333					 __u16 *mss)
2334{
2335	return 0;
2336}
2337#endif
2338
2339struct tcp_key {
2340	union {
2341		struct {
2342			struct tcp_ao_key *ao_key;
2343			char *traffic_key;
2344			u32 sne;
2345			u8 rcv_next;
2346		};
2347		struct tcp_md5sig_key *md5_key;
2348	};
2349	enum {
2350		TCP_KEY_NONE = 0,
2351		TCP_KEY_MD5,
2352		TCP_KEY_AO,
2353	} type;
2354};
2355
2356static inline void tcp_get_current_key(const struct sock *sk,
2357				       struct tcp_key *out)
2358{
2359#if defined(CONFIG_TCP_AO) || defined(CONFIG_TCP_MD5SIG)
2360	const struct tcp_sock *tp = tcp_sk(sk);
2361#endif
2362
2363#ifdef CONFIG_TCP_AO
2364	if (static_branch_unlikely(&tcp_ao_needed.key)) {
2365		struct tcp_ao_info *ao;
2366
2367		ao = rcu_dereference_protected(tp->ao_info,
2368					       lockdep_sock_is_held(sk));
2369		if (ao) {
2370			out->ao_key = READ_ONCE(ao->current_key);
2371			out->type = TCP_KEY_AO;
2372			return;
2373		}
2374	}
2375#endif
2376#ifdef CONFIG_TCP_MD5SIG
2377	if (static_branch_unlikely(&tcp_md5_needed.key) &&
2378	    rcu_access_pointer(tp->md5sig_info)) {
2379		out->md5_key = tp->af_specific->md5_lookup(sk, sk);
2380		if (out->md5_key) {
2381			out->type = TCP_KEY_MD5;
2382			return;
2383		}
2384	}
2385#endif
2386	out->type = TCP_KEY_NONE;
2387}
2388
2389static inline bool tcp_key_is_md5(const struct tcp_key *key)
2390{
2391	if (static_branch_tcp_md5())
2392		return key->type == TCP_KEY_MD5;
2393	return false;
2394}
2395
2396static inline bool tcp_key_is_ao(const struct tcp_key *key)
2397{
2398	if (static_branch_tcp_ao())
2399		return key->type == TCP_KEY_AO;
2400	return false;
2401}
2402
2403int tcpv4_offload_init(void);
2404
2405void tcp_v4_init(void);
2406void tcp_init(void);
2407
2408/* tcp_recovery.c */
2409void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
2410void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
2411extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
2412				u32 reo_wnd);
2413extern bool tcp_rack_mark_lost(struct sock *sk);
2414extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
2415			     u64 xmit_time);
2416extern void tcp_rack_reo_timeout(struct sock *sk);
2417extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);
2418
2419/* tcp_plb.c */
2420
2421/*
2422 * Scaling factor for fractions in PLB. For example, tcp_plb_update_state
2423 * expects cong_ratio which represents fraction of traffic that experienced
2424 * congestion over a single RTT. In order to avoid floating point operations,
2425 * this fraction should be mapped to (1 << TCP_PLB_SCALE) and passed in.
2426 */
2427#define TCP_PLB_SCALE 8
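/* Example: a cong_ratio of 50% over the last RTT is passed in as
 * (1 << TCP_PLB_SCALE) / 2 == 128.
 */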
2428
2429/* State for PLB (Protective Load Balancing) for a single TCP connection. */
2430struct tcp_plb_state {
2431	u8	consec_cong_rounds:5, /* consecutive congested rounds */
2432		unused:3;
2433	u32	pause_until; /* jiffies32 when PLB can resume rerouting */
2434};
2435
2436static inline void tcp_plb_init(const struct sock *sk,
2437				struct tcp_plb_state *plb)
2438{
2439	plb->consec_cong_rounds = 0;
2440	plb->pause_until = 0;
2441}
2442void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
2443			  const int cong_ratio);
2444void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb);
2445void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb);
2446
2447static inline void tcp_warn_once(const struct sock *sk, bool cond, const char *str)
2448{
2449	WARN_ONCE(cond,
2450		  "%scwnd:%u out:%u sacked:%u lost:%u retrans:%u tlp_high_seq:%u sk_state:%u ca_state:%u advmss:%u mss_cache:%u pmtu:%u\n",
2451		  str,
2452		  tcp_snd_cwnd(tcp_sk(sk)),
2453		  tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out,
2454		  tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out,
2455		  tcp_sk(sk)->tlp_high_seq, sk->sk_state,
2456		  inet_csk(sk)->icsk_ca_state,
2457		  tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache,
2458		  inet_csk(sk)->icsk_pmtu_cookie);
2459}
2460
2461/* At how many usecs into the future should the RTO fire? */
2462static inline s64 tcp_rto_delta_us(const struct sock *sk)
2463{
2464	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
2465	u32 rto = inet_csk(sk)->icsk_rto;
2466
2467	if (likely(skb)) {
2468		u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
2469
2470		return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
2471	} else {
2472		tcp_warn_once(sk, 1, "rtx queue empty: ");
2473		return jiffies_to_usecs(rto);
2474	}
2475
2476}
2477
2478/*
2479 * Save and compile IPv4 options, return a pointer to it
2480 */
2481static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
2482							 struct sk_buff *skb)
2483{
2484	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
2485	struct ip_options_rcu *dopt = NULL;
2486
2487	if (opt->optlen) {
2488		int opt_size = sizeof(*dopt) + opt->optlen;
2489
2490		dopt = kmalloc(opt_size, GFP_ATOMIC);
2491		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
2492			kfree(dopt);
2493			dopt = NULL;
2494		}
2495	}
2496	return dopt;
2497}
2498
2499/* locally generated TCP pure ACKs have skb->truesize == 2
2500 * (check tcp_send_ack() in net/ipv4/tcp_output.c )
2501 * This is much faster than dissecting the packet to find out.
2502 * (Think of GRE encapsulations, IPv4, IPv6, ...)
2503 */
2504static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
2505{
2506	return skb->truesize == 2;
2507}
2508
2509static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
2510{
2511	skb->truesize = 2;
2512}
2513
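/* Bytes available for reading (FIONREAD / SIOCINQ), taking urgent data
 * and a received FIN into account.
 */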
2514static inline int tcp_inq(struct sock *sk)
2515{
2516	struct tcp_sock *tp = tcp_sk(sk);
2517	int answ;
2518
2519	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
2520		answ = 0;
2521	} else if (sock_flag(sk, SOCK_URGINLINE) ||
2522		   !tp->urg_data ||
2523		   before(tp->urg_seq, tp->copied_seq) ||
2524		   !before(tp->urg_seq, tp->rcv_nxt)) {
2525
2526		answ = tp->rcv_nxt - tp->copied_seq;
2527
2528		/* Subtract 1, if FIN was received */
2529		if (answ && sock_flag(sk, SOCK_DONE))
2530			answ--;
2531	} else {
2532		answ = tp->urg_seq - tp->copied_seq;
2533	}
2534
2535	return answ;
2536}
2537
2538int tcp_peek_len(struct socket *sock);
2539
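/* Account an incoming (possibly GRO-aggregated) skb in the segs_in /
 * data_segs_in counters exported via tcp_get_info().
 */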
2540static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
2541{
2542	u16 segs_in;
2543
2544	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
2545
2546	/* We update these fields while other threads might
2547	 * read them from tcp_get_info()
2548	 */
2549	WRITE_ONCE(tp->segs_in, tp->segs_in + segs_in);
2550	if (skb->len > tcp_hdrlen(skb))
2551		WRITE_ONCE(tp->data_segs_in, tp->data_segs_in + segs_in);
2552}
2553
2554/*
2555 * TCP listen path runs lockless.
2556 * We forced "struct sock" to be const qualified to make sure
2557 * we don't modify one of its fields by mistake.
2558 * Here, we increment sk_drops which is an atomic_t, so we can safely
2559 * make sock writable again.
2560 */
2561static inline void tcp_listendrop(const struct sock *sk)
2562{
2563	atomic_inc(&((struct sock *)sk)->sk_drops);
2564	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
2565}
2566
2567enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
2568
2569/*
2570 * Interface for adding Upper Level Protocols over TCP
2571 */
2572
2573#define TCP_ULP_NAME_MAX	16
2574#define TCP_ULP_MAX		128
2575#define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)
2576
2577struct tcp_ulp_ops {
2578	struct list_head	list;
2579
2580	/* initialize ulp */
2581	int (*init)(struct sock *sk);
2582	/* update ulp */
2583	void (*update)(struct sock *sk, struct proto *p,
2584		       void (*write_space)(struct sock *sk));
2585	/* cleanup ulp */
2586	void (*release)(struct sock *sk);
2587	/* diagnostic */
2588	int (*get_info)(struct sock *sk, struct sk_buff *skb);
2589	size_t (*get_info_size)(const struct sock *sk);
2590	/* clone ulp */
2591	void (*clone)(const struct request_sock *req, struct sock *newsk,
2592		      const gfp_t priority);
2593
2594	char		name[TCP_ULP_NAME_MAX];
2595	struct module	*owner;
2596};
2597int tcp_register_ulp(struct tcp_ulp_ops *type);
2598void tcp_unregister_ulp(struct tcp_ulp_ops *type);
2599int tcp_set_ulp(struct sock *sk, const char *name);
2600void tcp_get_available_ulp(char *buf, size_t len);
2601void tcp_cleanup_ulp(struct sock *sk);
2602void tcp_update_ulp(struct sock *sk, struct proto *p,
2603		    void (*write_space)(struct sock *sk));
2604
2605#define MODULE_ALIAS_TCP_ULP(name)				\
2606	__MODULE_INFO(alias, alias_userspace, name);		\
2607	__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
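/*
 * Minimal sketch (hypothetical "demo" ULP, not part of this header) of how a
 * module is expected to use this interface: fill in a tcp_ulp_ops, register
 * it at module init and unregister at exit.  A socket then selects it with
 * setsockopt(fd, SOL_TCP, TCP_ULP, "demo", sizeof("demo")).
 *
 *	static int demo_ulp_init(struct sock *sk)
 *	{
 *		return 0;	// typically swap in new sk->sk_prot callbacks here
 *	}
 *
 *	static void demo_ulp_release(struct sock *sk)
 *	{
 *	}
 *
 *	static struct tcp_ulp_ops demo_ulp_ops = {
 *		.name		= "demo",
 *		.owner		= THIS_MODULE,
 *		.init		= demo_ulp_init,
 *		.release	= demo_ulp_release,
 *	};
 *
 *	MODULE_ALIAS_TCP_ULP("demo");
 *	// module_init: tcp_register_ulp(&demo_ulp_ops);
 *	// module_exit: tcp_unregister_ulp(&demo_ulp_ops);
 */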
2608
2609#ifdef CONFIG_NET_SOCK_MSG
2610struct sk_msg;
2611struct sk_psock;
2612
2613#ifdef CONFIG_BPF_SYSCALL
2614int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
2615void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
2616#endif /* CONFIG_BPF_SYSCALL */
2617
2618#ifdef CONFIG_INET
2619void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
2620#else
2621static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
2622{
2623}
2624#endif
2625
2626int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
2627			  struct sk_msg *msg, u32 bytes, int flags);
2628#endif /* CONFIG_NET_SOCK_MSG */
2629
2630#if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
2631static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
2632{
2633}
2634#endif
2635
2636#ifdef CONFIG_CGROUP_BPF
2637static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2638				      struct sk_buff *skb,
2639				      unsigned int end_offset)
2640{
2641	skops->skb = skb;
2642	skops->skb_data_end = skb->data + end_offset;
2643}
2644#else
2645static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2646				      struct sk_buff *skb,
2647				      unsigned int end_offset)
2648{
2649}
2650#endif
2651
2652/* Call BPF_SOCK_OPS program that returns an int. If the return value
2653 * is < 0, then the BPF op failed (for example if the loaded BPF
2654 * program does not support the chosen operation or there is no BPF
2655 * program loaded).
2656 */
2657#ifdef CONFIG_BPF
2658static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2659{
2660	struct bpf_sock_ops_kern sock_ops;
2661	int ret;
2662
2663	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
2664	if (sk_fullsock(sk)) {
2665		sock_ops.is_fullsock = 1;
2666		sock_owned_by_me(sk);
2667	}
2668
2669	sock_ops.sk = sk;
2670	sock_ops.op = op;
2671	if (nargs > 0)
2672		memcpy(sock_ops.args, args, nargs * sizeof(*args));
2673
2674	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
2675	if (ret == 0)
2676		ret = sock_ops.reply;
2677	else
2678		ret = -1;
2679	return ret;
2680}
2681
2682static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2683{
2684	u32 args[2] = {arg1, arg2};
2685
2686	return tcp_call_bpf(sk, op, 2, args);
2687}
2688
2689static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2690				    u32 arg3)
2691{
2692	u32 args[3] = {arg1, arg2, arg3};
2693
2694	return tcp_call_bpf(sk, op, 3, args);
2695}
2696
2697#else
2698static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2699{
2700	return -EPERM;
2701}
2702
2703static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2704{
2705	return -EPERM;
2706}
2707
2708static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2709				    u32 arg3)
2710{
2711	return -EPERM;
2712}
2713
2714#endif
2715
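/* Initial RTO: taken from a BPF_SOCK_OPS_TIMEOUT_INIT program if one is
 * attached and returns a positive value, else TCP_TIMEOUT_INIT, and always
 * clamped to TCP_RTO_MAX.
 */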
2716static inline u32 tcp_timeout_init(struct sock *sk)
2717{
2718	int timeout;
2719
2720	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
2721
2722	if (timeout <= 0)
2723		timeout = TCP_TIMEOUT_INIT;
2724	return min_t(int, timeout, TCP_RTO_MAX);
2725}
2726
2727static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
2728{
2729	int rwnd;
2730
2731	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
2732
2733	if (rwnd < 0)
2734		rwnd = 0;
2735	return rwnd;
2736}
2737
2738static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
2739{
2740	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
2741}
2742
2743static inline void tcp_bpf_rtt(struct sock *sk, long mrtt, u32 srtt)
2744{
2745	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
2746		tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_RTT_CB, mrtt, srtt);
2747}
2748
2749#if IS_ENABLED(CONFIG_SMC)
2750extern struct static_key_false tcp_have_smc;
2751#endif
2752
2753#if IS_ENABLED(CONFIG_TLS_DEVICE)
2754void clean_acked_data_enable(struct inet_connection_sock *icsk,
2755			     void (*cad)(struct sock *sk, u32 ack_seq));
2756void clean_acked_data_disable(struct inet_connection_sock *icsk);
2757void clean_acked_data_flush(void);
2758#endif
2759
2760DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
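/* If the tcp_tx_delay static key is enabled, push the skb's departure
 * time out by the per-socket tcp_tx_delay (in usec).
 */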
2761static inline void tcp_add_tx_delay(struct sk_buff *skb,
2762				    const struct tcp_sock *tp)
2763{
2764	if (static_branch_unlikely(&tcp_tx_delay_enabled))
2765		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
2766}
2767
2768/* Compute Earliest Departure Time for some control packets
2769 * like ACK or RST for TIME_WAIT or non-ESTABLISHED sockets.
2770 */
2771static inline u64 tcp_transmit_time(const struct sock *sk)
2772{
2773	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
2774		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
2775			tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;
2776
2777		return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
2778	}
2779	return 0;
2780}
2781
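/* Parse the TCP-MD5 and TCP-AO options from @th; on success returns 0 and
 * reports them via the supplied pointers (set to NULL if an option is absent).
 */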
2782static inline int tcp_parse_auth_options(const struct tcphdr *th,
2783		const u8 **md5_hash, const struct tcp_ao_hdr **aoh)
2784{
2785	const u8 *md5_tmp, *ao_tmp;
2786	int ret;
2787
2788	ret = tcp_do_parse_auth_options(th, &md5_tmp, &ao_tmp);
2789	if (ret)
2790		return ret;
2791
2792	if (md5_hash)
2793		*md5_hash = md5_tmp;
2794
2795	if (aoh) {
2796		if (!ao_tmp)
2797			*aoh = NULL;
2798		else
2799			*aoh = (struct tcp_ao_hdr *)(ao_tmp - 2);
2800	}
2801
2802	return 0;
2803}
2804
2805static inline bool tcp_ao_required(struct sock *sk, const void *saddr,
2806				   int family, int l3index, bool stat_inc)
2807{
2808#ifdef CONFIG_TCP_AO
2809	struct tcp_ao_info *ao_info;
2810	struct tcp_ao_key *ao_key;
2811
2812	if (!static_branch_unlikely(&tcp_ao_needed.key))
2813		return false;
2814
2815	ao_info = rcu_dereference_check(tcp_sk(sk)->ao_info,
2816					lockdep_sock_is_held(sk));
2817	if (!ao_info)
2818		return false;
2819
2820	ao_key = tcp_ao_do_lookup(sk, l3index, saddr, family, -1, -1);
2821	if (ao_info->ao_required || ao_key) {
2822		if (stat_inc) {
2823			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOREQUIRED);
2824			atomic64_inc(&ao_info->counters.ao_required);
2825		}
2826		return true;
2827	}
2828#endif
2829	return false;
2830}
2831
2832enum skb_drop_reason tcp_inbound_hash(struct sock *sk,
2833		const struct request_sock *req, const struct sk_buff *skb,
2834		const void *saddr, const void *daddr,
2835		int family, int dif, int sdif);
2836
2837#endif	/* _TCP_H */
v4.17
 
   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		Definitions for the TCP module.
   7 *
   8 * Version:	@(#)tcp.h	1.0.5	05/23/93
   9 *
  10 * Authors:	Ross Biro
  11 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12 *
  13 *		This program is free software; you can redistribute it and/or
  14 *		modify it under the terms of the GNU General Public License
  15 *		as published by the Free Software Foundation; either version
  16 *		2 of the License, or (at your option) any later version.
  17 */
  18#ifndef _TCP_H
  19#define _TCP_H
  20
  21#define FASTRETRANS_DEBUG 1
  22
  23#include <linux/list.h>
  24#include <linux/tcp.h>
  25#include <linux/bug.h>
  26#include <linux/slab.h>
  27#include <linux/cache.h>
  28#include <linux/percpu.h>
  29#include <linux/skbuff.h>
  30#include <linux/cryptohash.h>
  31#include <linux/kref.h>
  32#include <linux/ktime.h>
 
  33
  34#include <net/inet_connection_sock.h>
  35#include <net/inet_timewait_sock.h>
  36#include <net/inet_hashtables.h>
  37#include <net/checksum.h>
  38#include <net/request_sock.h>
 
  39#include <net/sock.h>
  40#include <net/snmp.h>
  41#include <net/ip.h>
  42#include <net/tcp_states.h>
 
  43#include <net/inet_ecn.h>
  44#include <net/dst.h>
 
 
  45
  46#include <linux/seq_file.h>
  47#include <linux/memcontrol.h>
  48#include <linux/bpf-cgroup.h>
 
  49
  50extern struct inet_hashinfo tcp_hashinfo;
  51
  52extern struct percpu_counter tcp_orphan_count;
 
 
 
 
  53void tcp_time_wait(struct sock *sk, int state, int timeo);
  54
  55#define MAX_TCP_HEADER	(128 + MAX_HEADER)
  56#define MAX_TCP_OPTION_SPACE 40
 
 
  57
  58/*
  59 * Never offer a window over 32767 without using window scaling. Some
  60 * poor stacks do signed 16bit maths!
  61 */
  62#define MAX_TCP_WINDOW		32767U
  63
  64/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
  65#define TCP_MIN_MSS		88U
  66
  67/* The least MTU to use for probing */
  68#define TCP_BASE_MSS		1024
  69
  70/* probing interval, default to 10 minutes as per RFC4821 */
  71#define TCP_PROBE_INTERVAL	600
  72
  73/* Specify interval when tcp mtu probing will stop */
  74#define TCP_PROBE_THRESHOLD	8
  75
  76/* After receiving this amount of duplicate ACKs fast retransmit starts. */
  77#define TCP_FASTRETRANS_THRESH 3
  78
  79/* Maximal number of ACKs sent quickly to accelerate slow-start. */
  80#define TCP_MAX_QUICKACKS	16U
  81
  82/* Maximal number of window scale according to RFC1323 */
  83#define TCP_MAX_WSCALE		14U
  84
  85/* urg_data states */
  86#define TCP_URG_VALID	0x0100
  87#define TCP_URG_NOTYET	0x0200
  88#define TCP_URG_READ	0x0400
  89
  90#define TCP_RETR1	3	/*
  91				 * This is how many retries it does before it
  92				 * tries to figure out if the gateway is
  93				 * down. Minimal RFC value is 3; it corresponds
  94				 * to ~3sec-8min depending on RTO.
  95				 */
  96
  97#define TCP_RETR2	15	/*
  98				 * This should take at least
  99				 * 90 minutes to time out.
 100				 * RFC1122 says that the limit is 100 sec.
 101				 * 15 is ~13-30min depending on RTO.
 102				 */
 103
 104#define TCP_SYN_RETRIES	 6	/* This is how many retries are done
 105				 * when active opening a connection.
 106				 * RFC1122 says the minimum retry MUST
 107				 * be at least 180secs.  Nevertheless
 108				 * this value is corresponding to
 109				 * 63secs of retransmission with the
 110				 * current initial RTO.
 111				 */
 112
 113#define TCP_SYNACK_RETRIES 5	/* This is how may retries are done
 114				 * when passive opening a connection.
 115				 * This is corresponding to 31secs of
 116				 * retransmission with the current
 117				 * initial RTO.
 118				 */
 119
 120#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
 121				  * state, about 60 seconds	*/
 122#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
 123                                 /* BSD style FIN_WAIT2 deadlock breaker.
 124				  * It used to be 3min, new value is 60sec,
 125				  * to combine FIN-WAIT-2 timeout with
 126				  * TIME-WAIT timer.
 127				  */
 
 128
 129#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
 
 
 130#if HZ >= 100
 131#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
 132#define TCP_ATO_MIN	((unsigned)(HZ/25))
 133#else
 134#define TCP_DELACK_MIN	4U
 135#define TCP_ATO_MIN	4U
 136#endif
 137#define TCP_RTO_MAX	((unsigned)(120*HZ))
 138#define TCP_RTO_MIN	((unsigned)(HZ/5))
 139#define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */
 
 
 
 140#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
 141#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
 142						 * used as a fallback RTO for the
 143						 * initial data transmission if no
 144						 * valid RTT sample has been acquired,
 145						 * most likely due to retrans in 3WHS.
 146						 */
 147
 148#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
 149					                 * for local resources.
 150					                 */
 151#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
 152#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
 153#define TCP_KEEPALIVE_INTVL	(75*HZ)
 154
 155#define MAX_TCP_KEEPIDLE	32767
 156#define MAX_TCP_KEEPINTVL	32767
 157#define MAX_TCP_KEEPCNT		127
 158#define MAX_TCP_SYNCNT		127
 159
 160#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
 
 
 
 
 161
 162#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
 163#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
 164					 * after this time. It should be equal
 165					 * (or greater than) TCP_TIMEWAIT_LEN
 166					 * to provide reliability equal to one
 167					 * provided by timewait state.
 168					 */
 169#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
 170					 * timestamps. It must be less than
 171					 * minimal timewait lifetime.
 172					 */
 173/*
 174 *	TCP option
 175 */
 176
 177#define TCPOPT_NOP		1	/* Padding */
 178#define TCPOPT_EOL		0	/* End of options */
 179#define TCPOPT_MSS		2	/* Segment size negotiating */
 180#define TCPOPT_WINDOW		3	/* Window scaling */
 181#define TCPOPT_SACK_PERM        4       /* SACK Permitted */
 182#define TCPOPT_SACK             5       /* SACK Block */
 183#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
 184#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
 
 
 185#define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
 186#define TCPOPT_EXP		254	/* Experimental */
 187/* Magic number to be after the option value for sharing TCP
 188 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 189 */
 190#define TCPOPT_FASTOPEN_MAGIC	0xF989
 191#define TCPOPT_SMC_MAGIC	0xE2D4C3D9
 192
 193/*
 194 *     TCP option lengths
 195 */
 196
 197#define TCPOLEN_MSS            4
 198#define TCPOLEN_WINDOW         3
 199#define TCPOLEN_SACK_PERM      2
 200#define TCPOLEN_TIMESTAMP      10
 201#define TCPOLEN_MD5SIG         18
 202#define TCPOLEN_FASTOPEN_BASE  2
 203#define TCPOLEN_EXP_FASTOPEN_BASE  4
 204#define TCPOLEN_EXP_SMC_BASE   6
 205
 206/* But this is what stacks really send out. */
 207#define TCPOLEN_TSTAMP_ALIGNED		12
 208#define TCPOLEN_WSCALE_ALIGNED		4
 209#define TCPOLEN_SACKPERM_ALIGNED	4
 210#define TCPOLEN_SACK_BASE		2
 211#define TCPOLEN_SACK_BASE_ALIGNED	4
 212#define TCPOLEN_SACK_PERBLOCK		8
 213#define TCPOLEN_MD5SIG_ALIGNED		20
 214#define TCPOLEN_MSS_ALIGNED		4
 215#define TCPOLEN_EXP_SMC_BASE_ALIGNED	8
 216
 217/* Flags in tp->nonagle */
 218#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
 219#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
 220#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */
 221
 222/* TCP thin-stream limits */
 223#define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */
 224
 225/* TCP initial congestion window as per rfc6928 */
 226#define TCP_INIT_CWND		10
 227
 228/* Bit Flags for sysctl_tcp_fastopen */
 229#define	TFO_CLIENT_ENABLE	1
 230#define	TFO_SERVER_ENABLE	2
 231#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */
 232
 233/* Accept SYN data w/o any cookie option */
 234#define	TFO_SERVER_COOKIE_NOT_REQD	0x200
 235
 236/* Force enable TFO on all listeners, i.e., not requiring the
 237 * TCP_FASTOPEN socket option.
 238 */
 239#define	TFO_SERVER_WO_SOCKOPT1	0x400
 240
 241
 242/* sysctl variables for tcp */
 243extern int sysctl_tcp_max_orphans;
 244extern long sysctl_tcp_mem[3];
 245
 246#define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
 247#define TCP_RACK_STATIC_REO_WND  0x2 /* Use static RACK reo wnd */
 
 248
 249extern atomic_long_t tcp_memory_allocated;
 
 
 250extern struct percpu_counter tcp_sockets_allocated;
 251extern unsigned long tcp_memory_pressure;
 252
 253/* optimized version of sk_under_memory_pressure() for TCP sockets */
 254static inline bool tcp_under_memory_pressure(const struct sock *sk)
 255{
 256	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 257	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 258		return true;
 259
 260	return tcp_memory_pressure;
 261}
 262/*
 263 * The next routines deal with comparing 32 bit unsigned ints
 264 * and worry about wraparound (automatic with unsigned arithmetic).
 265 */
 266
 267static inline bool before(__u32 seq1, __u32 seq2)
 268{
 269        return (__s32)(seq1-seq2) < 0;
 270}
 271#define after(seq2, seq1) 	before(seq1, seq2)
 272
 273/* is s2<=s1<=s3 ? */
 274static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
 275{
 276	return seq3 - seq2 >= seq1 - seq2;
 277}
 278
 279static inline bool tcp_out_of_memory(struct sock *sk)
 280{
 281	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
 282	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
 283		return true;
 284	return false;
 
 
 285}
 286
 287void sk_forced_mem_schedule(struct sock *sk, int size);
 288
 289static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
 290{
 291	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
 292	int orphans = percpu_counter_read_positive(ocp);
 293
 294	if (orphans << shift > sysctl_tcp_max_orphans) {
 295		orphans = percpu_counter_sum_positive(ocp);
 296		if (orphans << shift > sysctl_tcp_max_orphans)
 297			return true;
 298	}
 299	return false;
 300}
 301
 302bool tcp_check_oom(struct sock *sk, int shift);
 303
 304
 305extern struct proto tcp_prot;
 306
 307#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
 308#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
 309#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
 310#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
 311
 312void tcp_tasklet_init(void);
 313
 314void tcp_v4_err(struct sk_buff *skb, u32);
 315
 316void tcp_shutdown(struct sock *sk, int how);
 317
 318int tcp_v4_early_demux(struct sk_buff *skb);
 319int tcp_v4_rcv(struct sk_buff *skb);
 320
 321int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
 322int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 323int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
 324int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
 325		 int flags);
 326int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
 327			size_t size, int flags);
 328ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
 329		 size_t size, int flags);
 
 330void tcp_release_cb(struct sock *sk);
 331void tcp_wfree(struct sk_buff *skb);
 332void tcp_write_timer_handler(struct sock *sk);
 333void tcp_delack_timer_handler(struct sock *sk);
 334int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 335int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
 336void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 337			 const struct tcphdr *th);
 338void tcp_rcv_space_adjust(struct sock *sk);
 339int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
 340void tcp_twsk_destructor(struct sock *sk);
 
 341ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
 342			struct pipe_inode_info *pipe, size_t len,
 343			unsigned int flags);
 
 
 344
 345static inline void tcp_dec_quickack_mode(struct sock *sk,
 346					 const unsigned int pkts)
 347{
 348	struct inet_connection_sock *icsk = inet_csk(sk);
 349
 350	if (icsk->icsk_ack.quick) {
 
 
 
 351		if (pkts >= icsk->icsk_ack.quick) {
 352			icsk->icsk_ack.quick = 0;
 353			/* Leaving quickack mode we deflate ATO. */
 354			icsk->icsk_ack.ato   = TCP_ATO_MIN;
 355		} else
 356			icsk->icsk_ack.quick -= pkts;
 357	}
 358}
 359
 360#define	TCP_ECN_OK		1
 361#define	TCP_ECN_QUEUE_CWR	2
 362#define	TCP_ECN_DEMAND_CWR	4
 363#define	TCP_ECN_SEEN		8
 364
 365enum tcp_tw_status {
 366	TCP_TW_SUCCESS = 0,
 367	TCP_TW_RST = 1,
 368	TCP_TW_ACK = 2,
 369	TCP_TW_SYN = 3
 370};
 371
 372
 373enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
 374					      struct sk_buff *skb,
 375					      const struct tcphdr *th);
 
 376struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 377			   struct request_sock *req, bool fastopen,
 378			   bool *lost_race);
 379int tcp_child_process(struct sock *parent, struct sock *child,
 380		      struct sk_buff *skb);
 381void tcp_enter_loss(struct sock *sk);
 382void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
 383void tcp_clear_retrans(struct tcp_sock *tp);
 384void tcp_update_metrics(struct sock *sk);
 385void tcp_init_metrics(struct sock *sk);
 386void tcp_metrics_init(void);
 387bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
 
 388void tcp_close(struct sock *sk, long timeout);
 389void tcp_init_sock(struct sock *sk);
 390void tcp_init_transfer(struct sock *sk, int bpf_op);
 391__poll_t tcp_poll(struct file *file, struct socket *sock,
 392		      struct poll_table_struct *wait);
 
 
 393int tcp_getsockopt(struct sock *sk, int level, int optname,
 394		   char __user *optval, int __user *optlen);
 395int tcp_setsockopt(struct sock *sk, int level, int optname,
 396		   char __user *optval, unsigned int optlen);
 397int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
 398			  char __user *optval, int __user *optlen);
 399int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
 400			  char __user *optval, unsigned int optlen);
 401void tcp_set_keepalive(struct sock *sk, int val);
 402void tcp_syn_ack_timeout(const struct request_sock *req);
 403int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 404		int flags, int *addr_len);
 
 
 
 
 
 
 
 
 
 
 
 405void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
 406		       struct tcp_options_received *opt_rx,
 407		       int estab, struct tcp_fastopen_cookie *foc);
 408const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 409
 410/*
 
 
 
 
 
 
 
 
 
 
 
 411 *	TCP v4 functions exported for the inet6 API
 412 */
 413
 414void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 415void tcp_v4_mtu_reduced(struct sock *sk);
 416void tcp_req_err(struct sock *sk, u32 seq, bool abort);
 
 417int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 418struct sock *tcp_create_openreq_child(const struct sock *sk,
 419				      struct request_sock *req,
 420				      struct sk_buff *skb);
 421void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
 422struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 423				  struct request_sock *req,
 424				  struct dst_entry *dst,
 425				  struct request_sock *req_unhash,
 426				  bool *own_req);
 427int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 428int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 429int tcp_connect(struct sock *sk);
 430enum tcp_synack_type {
 431	TCP_SYNACK_NORMAL,
 432	TCP_SYNACK_FASTOPEN,
 433	TCP_SYNACK_COOKIE,
 434};
 435struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 436				struct request_sock *req,
 437				struct tcp_fastopen_cookie *foc,
 438				enum tcp_synack_type synack_type);
 
 439int tcp_disconnect(struct sock *sk, int flags);
 440
 441void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
 442int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
 443void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
 444
 445/* From syncookies.c */
 446struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
 447				 struct request_sock *req,
 448				 struct dst_entry *dst, u32 tsoff);
 449int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
 450		      u32 cookie);
 451struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
 452#ifdef CONFIG_SYN_COOKIES
 453
 454/* Syncookies use a monotonic timer which increments every 60 seconds.
 455 * This counter is used both as a hash input and partially encoded into
 456 * the cookie value.  A cookie is only validated further if the delta
 457 * between the current counter value and the encoded one is less than this,
  458 * i.e. a sent cookie is valid for at most 2*60 seconds (or less if
 459 * the counter advances immediately after a cookie is generated).
 460 */
 461#define MAX_SYNCOOKIE_AGE	2
 462#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
 463#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
 464
 465/* syncookies: remember time of last synqueue overflow
  466 * But do not dirty this field too often (once per second is enough).
  467 * It is racy as we do not hold a lock, but the race is very minor.
 468 */
 469static inline void tcp_synq_overflow(const struct sock *sk)
 470{
 471	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
 472	unsigned long now = jiffies;
 473
 474	if (time_after(now, last_overflow + HZ))
 475		tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
 476}
 477
 478/* syncookies: no recent synqueue overflow on this listening socket? */
 479static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
 480{
 481	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
 482
 483	return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
 484}
 485
 486static inline u32 tcp_cookie_time(void)
 487{
 488	u64 val = get_jiffies_64();
 489
 490	do_div(val, TCP_SYNCOOKIE_PERIOD);
 491	return val;
 492}
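/* Illustrative sketch (not part of this header): how a counter value
 * decoded from a received cookie relates to the MAX_SYNCOOKIE_AGE limit
 * above. The helper name and its "cookie_count" parameter are
 * hypothetical; the real validation lives in net/ipv4/syncookies.c.
 */
#if 0
static inline bool cookie_count_is_recent(u32 cookie_count)
{
	u32 age = tcp_cookie_time() - cookie_count;

	/* With MAX_SYNCOOKIE_AGE == 2 and a 60 second period, a cookie
	 * is accepted for at most 2 * 60 seconds after being minted.
	 */
	return age < MAX_SYNCOOKIE_AGE;
}
#endif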
 493
 494u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
 495			      u16 *mssp);
 496__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
 497u64 cookie_init_timestamp(struct request_sock *req);
 498bool cookie_timestamp_decode(const struct net *net,
 499			     struct tcp_options_received *opt);
 500bool cookie_ecn_ok(const struct tcp_options_received *opt,
 501		   const struct net *net, const struct dst_entry *dst);
 502
 503/* From net/ipv6/syncookies.c */
 504int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
 505		      u32 cookie);
 506struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
 507
 508u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
 509			      const struct tcphdr *th, u16 *mssp);
 510__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
 511#endif
 512/* tcp_output.c */
 513
 
 
 514void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
 515			       int nonagle);
 516int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
 517int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
 518void tcp_retransmit_timer(struct sock *sk);
 519void tcp_xmit_retransmit_queue(struct sock *);
 520void tcp_simple_retransmit(struct sock *);
 521void tcp_enter_recovery(struct sock *sk, bool ece_ack);
 522int tcp_trim_head(struct sock *, struct sk_buff *, u32);
 523enum tcp_queue {
 524	TCP_FRAG_IN_WRITE_QUEUE,
 525	TCP_FRAG_IN_RTX_QUEUE,
 526};
 527int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
 528		 struct sk_buff *skb, u32 len,
 529		 unsigned int mss_now, gfp_t gfp);
 530
 531void tcp_send_probe0(struct sock *);
 532void tcp_send_partial(struct sock *);
 533int tcp_write_wakeup(struct sock *, int mib);
 534void tcp_send_fin(struct sock *sk);
 535void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 
 536int tcp_send_synack(struct sock *);
 537void tcp_push_one(struct sock *, unsigned int mss_now);
 
 538void tcp_send_ack(struct sock *sk);
 539void tcp_send_delayed_ack(struct sock *sk);
 540void tcp_send_loss_probe(struct sock *sk);
 541bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
 542void tcp_skb_collapse_tstamp(struct sk_buff *skb,
 543			     const struct sk_buff *next_skb);
 544
 545/* tcp_input.c */
 546void tcp_rearm_rto(struct sock *sk);
 547void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
 548void tcp_reset(struct sock *sk);
 549void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
 550void tcp_fin(struct sock *sk);
 551
 552/* tcp_timer.c */
 553void tcp_init_xmit_timers(struct sock *);
 554static inline void tcp_clear_xmit_timers(struct sock *sk)
 555{
 556	hrtimer_cancel(&tcp_sk(sk)->pacing_timer);
 557	inet_csk_clear_xmit_timers(sk);
 558}
 559
 560unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
 561unsigned int tcp_current_mss(struct sock *sk);
 
 562
 563/* Bound MSS / TSO packet size with the half of the window */
 564static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
 565{
 566	int cutoff;
 567
 568	/* When peer uses tiny windows, there is no use in packetizing
 569	 * to sub-MSS pieces for the sake of SWS or making sure there
 570	 * are enough packets in the pipe for fast recovery.
 571	 *
 572	 * On the other hand, for extremely large MSS devices, handling
 573	 * smaller than MSS windows in this way does make sense.
 574	 */
 575	if (tp->max_window > TCP_MSS_DEFAULT)
 576		cutoff = (tp->max_window >> 1);
 577	else
 578		cutoff = tp->max_window;
 579
 580	if (cutoff && pktsize > cutoff)
 581		return max_t(int, cutoff, 68U - tp->tcp_header_len);
 582	else
 583		return pktsize;
 584}
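/* Worked example (illustrative numbers only): with max_window == 64K the
 * cutoff above is 32K, so a 1460 byte pktsize passes through unchanged;
 * with a tiny max_window of 512 bytes (<= TCP_MSS_DEFAULT) the cutoff is
 * the full 512 and that same 1460 byte pktsize is clamped to 512.
 */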
 585
 586/* tcp.c */
 587void tcp_get_info(struct sock *, struct tcp_info *);
 588
 589/* Read 'sendfile()'-style from a TCP socket */
 590int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 591		  sk_read_actor_t recv_actor);
 
 
 
 592
 593void tcp_initialize_rcv_mss(struct sock *sk);
 594
 595int tcp_mtu_to_mss(struct sock *sk, int pmtu);
 596int tcp_mss_to_mtu(struct sock *sk, int mss);
 597void tcp_mtup_init(struct sock *sk);
 598void tcp_init_buffer_space(struct sock *sk);
 599
 600static inline void tcp_bound_rto(const struct sock *sk)
 601{
 602	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
 603		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
 604}
 605
 606static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
 607{
 608	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
 609}
 610
 611static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
 612{
 613	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
 614			       ntohl(TCP_FLAG_ACK) |
 615			       snd_wnd);
 616}
 617
 618static inline void tcp_fast_path_on(struct tcp_sock *tp)
 619{
 620	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
 621}
 622
 623static inline void tcp_fast_path_check(struct sock *sk)
 624{
 625	struct tcp_sock *tp = tcp_sk(sk);
 626
 627	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
 628	    tp->rcv_wnd &&
 629	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
 630	    !tp->urg_data)
 631		tcp_fast_path_on(tp);
 632}
 633
 
 
 634/* Compute the actual rto_min value */
 635static inline u32 tcp_rto_min(struct sock *sk)
 636{
 637	const struct dst_entry *dst = __sk_dst_get(sk);
 638	u32 rto_min = TCP_RTO_MIN;
 639
 640	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
 641		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
 642	return rto_min;
 643}
 644
 645static inline u32 tcp_rto_min_us(struct sock *sk)
 646{
 647	return jiffies_to_usecs(tcp_rto_min(sk));
 648}
 649
 650static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
 651{
 652	return dst_metric_locked(dst, RTAX_CC_ALGO);
 653}
 654
 655/* Minimum RTT in usec. ~0 means not available. */
 656static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
 657{
 658	return minmax_get(&tp->rtt_min);
 659}
 660
 661/* Compute the actual receive window we are currently advertising.
  662 * Rcv_nxt can be after the window if our peer pushes more data
 663 * than the offered window.
 664 */
 665static inline u32 tcp_receive_window(const struct tcp_sock *tp)
 666{
 667	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
 668
 669	if (win < 0)
 670		win = 0;
 671	return (u32) win;
 672}
 673
 674/* Choose a new window, without checks for shrinking, and without
 675 * scaling applied to the result.  The caller does these things
 676 * if necessary.  This is a "raw" window selection.
 677 */
 678u32 __tcp_select_window(struct sock *sk);
 679
 680void tcp_send_window_probe(struct sock *sk);
 681
 682/* TCP uses 32bit jiffies to save some space.
 683 * Note that this is different from tcp_time_stamp, which
  684 * was historically the same until linux-4.13.
 685 */
 686#define tcp_jiffies32 ((u32)jiffies)
 687
 688/*
 689 * Deliver a 32bit value for TCP timestamp option (RFC 7323)
  690 * It is no longer tied to jiffies, but to a 1 ms clock.
 691 * Note: double check if you want to use tcp_jiffies32 instead of this.
 692 */
 693#define TCP_TS_HZ	1000
 694
 695static inline u64 tcp_clock_ns(void)
 696{
 697	return local_clock();
 698}
 699
 700static inline u64 tcp_clock_us(void)
 701{
 702	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
 703}
 704
 705/* This should only be used in contexts where tp->tcp_mstamp is up to date */
 706static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
 707{
 708	return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
 709}
 710
 711/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
 712static inline u32 tcp_time_stamp_raw(void)
 713{
 714	return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ);
 715}
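/* Illustrative sketch (hypothetical helper, not in this header): with
 * TCP_TS_HZ == 1000 a timestamp tick is 1 ms, so converting microseconds
 * from tcp_clock_us() into timestamp ticks is a single divide by
 * USEC_PER_SEC / TCP_TS_HZ == 1000, just like the helpers above.
 */
#if 0
static inline u32 tcp_usecs_to_ts(u64 us)
{
	return div_u64(us, USEC_PER_SEC / TCP_TS_HZ);
}
#endif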
 716
 717
 718/* Refresh 1us clock of a TCP socket,
  719 * ensuring monotonically increasing values.
 720 */
 721static inline void tcp_mstamp_refresh(struct tcp_sock *tp)
 722{
 723	u64 val = tcp_clock_us();
 724
 725	if (val > tp->tcp_mstamp)
 726		tp->tcp_mstamp = val;
 727}
 728
 729static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
 730{
 731	return max_t(s64, t1 - t0, 0);
 732}
 733
 734static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
 735{
 736	return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ);
 737}
 738
 739
 740#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
 741
 742#define TCPHDR_FIN 0x01
 743#define TCPHDR_SYN 0x02
 744#define TCPHDR_RST 0x04
 745#define TCPHDR_PSH 0x08
 746#define TCPHDR_ACK 0x10
 747#define TCPHDR_URG 0x20
 748#define TCPHDR_ECE 0x40
 749#define TCPHDR_CWR 0x80
 750
 751#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
 752
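/* Illustrative sketch (hypothetical helper): testing the bits above on a
 * raw header via tcp_flag_byte(). An ECN-setup SYN carries SYN|ECE|CWR.
 */
#if 0
static inline bool tcp_hdr_is_ecn_syn(struct tcphdr *th)
{
	return (tcp_flag_byte(th) & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN;
}
#endif
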
 753/* This is what the send packet queuing engine uses to pass
 754 * TCP per-packet control information to the transmission code.
  755 * We also store the host-order sequence numbers in here.
 756 * This is 44 bytes if IPV6 is enabled.
 757 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 758 */
 759struct tcp_skb_cb {
 760	__u32		seq;		/* Starting sequence number	*/
 761	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
 762	union {
 763		/* Note : tcp_tw_isn is used in input path only
 764		 *	  (isn chosen by tcp_timewait_state_process())
 765		 *
 766		 * 	  tcp_gso_segs/size are used in write queue only,
 767		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
 768		 */
 769		__u32		tcp_tw_isn;
 770		struct {
 771			u16	tcp_gso_segs;
 772			u16	tcp_gso_size;
 773		};
 774	};
 775	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/
 776
 777	__u8		sacked;		/* State flags for SACK.	*/
 778#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
 779#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
 780#define TCPCB_LOST		0x04	/* SKB is lost			*/
 781#define TCPCB_TAGBITS		0x07	/* All tag bits			*/
 782#define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp)	*/
 783#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
 784#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
 785				TCPCB_REPAIRED)
 786
 787	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
 788	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
 789			eor:1,		/* Is skb MSG_EOR marked? */
 790			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
 791			unused:5;
 792	__u32		ack_seq;	/* Sequence number ACK'd	*/
 793	union {
 794		struct {
 
 795			/* There is space for up to 24 bytes */
 796			__u32 in_flight:30,/* Bytes in flight at transmit */
 797			      is_app_limited:1, /* cwnd not fully used? */
 798			      unused:1;
 799			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
 800			__u32 delivered;
 801			/* start of send pipeline phase */
 802			u64 first_tx_mstamp;
 803			/* when we reached the "delivered" count */
 804			u64 delivered_mstamp;
 805		} tx;   /* only used for outgoing skbs */
 806		union {
 807			struct inet_skb_parm	h4;
 808#if IS_ENABLED(CONFIG_IPV6)
 809			struct inet6_skb_parm	h6;
 810#endif
 811		} header;	/* For incoming skbs */
 812		struct {
 813			__u32 key;
 814			__u32 flags;
 815			struct bpf_map *map;
 816			void *data_end;
 817		} bpf;
 818	};
 819};
 820
 821#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
 822
 
 823
 824#if IS_ENABLED(CONFIG_IPV6)
 825/* This is the variant of inet6_iif() that must be used by TCP,
 826 * as TCP moves IP6CB into a different location in skb->cb[]
 827 */
 828static inline int tcp_v6_iif(const struct sk_buff *skb)
 829{
 830	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
 831
 832	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
 833}
 834
 835/* TCP_SKB_CB reference means this can not be used from early demux */
 836static inline int tcp_v6_sdif(const struct sk_buff *skb)
 837{
 838#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
 839	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
 840		return TCP_SKB_CB(skb)->header.h6.iif;
 841#endif
 842	return 0;
 843}
 844#endif
 845
 846static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
 847{
 848#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
 849	if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
 850	    skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
 851		return true;
 852#endif
 853	return false;
 854}
 855
 856/* TCP_SKB_CB reference means this can not be used from early demux */
 857static inline int tcp_v4_sdif(struct sk_buff *skb)
 858{
 859#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
 860	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
 861		return TCP_SKB_CB(skb)->header.h4.iif;
 862#endif
 863	return 0;
 864}
 865
 866/* Due to TSO, an SKB can be composed of multiple actual
 867 * packets.  To keep these tracked properly, we use this.
 868 */
 869static inline int tcp_skb_pcount(const struct sk_buff *skb)
 870{
 871	return TCP_SKB_CB(skb)->tcp_gso_segs;
 872}
 873
 874static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
 875{
 876	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
 877}
 878
 879static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
 880{
 881	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
 882}
 883
 884/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
 885static inline int tcp_skb_mss(const struct sk_buff *skb)
 886{
 887	return TCP_SKB_CB(skb)->tcp_gso_size;
 888}
 889
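/* Illustrative sketch (hypothetical helper): how pcount and mss relate
 * for a TSO skb. Only meaningful when tcp_skb_pcount() > 1 (see the
 * comment above tcp_skb_mss()); the last segment may be shorter, so this
 * is an upper bound on the payload carried.
 */
#if 0
static inline u32 tcp_skb_payload_upper_bound(const struct sk_buff *skb)
{
	return tcp_skb_pcount(skb) * tcp_skb_mss(skb);
}
#endif
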
 890static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
 891{
 892	return likely(!TCP_SKB_CB(skb)->eor);
 893}
 894
 895/* Events passed to congestion control interface */
 896enum tcp_ca_event {
 897	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
 898	CA_EVENT_CWND_RESTART,	/* congestion window restart */
 899	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
 900	CA_EVENT_LOSS,		/* loss timeout */
 901	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
 902	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
 903	CA_EVENT_DELAYED_ACK,	/* Delayed ack is sent */
 904	CA_EVENT_NON_DELAYED_ACK,
 905};
 906
 907/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
 908enum tcp_ca_ack_event_flags {
 909	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
 910	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
 911	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
 912};
 913
 914/*
 915 * Interface for adding new TCP congestion control handlers
 916 */
 917#define TCP_CA_NAME_MAX	16
 918#define TCP_CA_MAX	128
 919#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)
 920
 921#define TCP_CA_UNSPEC	0
 922
 923/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
 924#define TCP_CONG_NON_RESTRICTED 0x1
 925/* Requires ECN/ECT set on all packets */
 926#define TCP_CONG_NEEDS_ECN	0x2
 
 927
 928union tcp_cc_info;
 929
 930struct ack_sample {
 931	u32 pkts_acked;
 932	s32 rtt_us;
 933	u32 in_flight;
 934};
 935
 936/* A rate sample measures the number of (original/retransmitted) data
 937 * packets delivered "delivered" over an interval of time "interval_us".
 938 * The tcp_rate.c code fills in the rate sample, and congestion
 939 * control modules that define a cong_control function to run at the end
 940 * of ACK processing can optionally chose to consult this sample when
 941 * setting cwnd and pacing rate.
 942 * A sample is invalid if "delivered" or "interval_us" is negative.
 943 */
 944struct rate_sample {
 945	u64  prior_mstamp; /* starting timestamp for interval */
 946	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
 
 947	s32  delivered;		/* number of packets delivered over interval */
 
 948	long interval_us;	/* time for tp->delivered to incr "delivered" */
 
 
 949	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
 950	int  losses;		/* number of packets marked lost upon ACK */
 951	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
 952	u32  prior_in_flight;	/* in flight before this ACK */
 
 953	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
 954	bool is_retrans;	/* is sample from retransmission? */
 955	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
 956};
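/* Illustrative sketch (hypothetical callback, not a real module): a
 * cong_control hook that skips invalid samples as described above before
 * using them.
 */
#if 0
static void demo_cong_control(struct sock *sk, const struct rate_sample *rs)
{
	if (rs->delivered < 0 || rs->interval_us <= 0)
		return;	/* invalid sample, e.g. nothing newly delivered */

	/* rs->delivered packets were (S)ACKed over rs->interval_us usec;
	 * a real module would turn this into a delivery rate estimate and
	 * adjust cwnd and the pacing rate here.
	 */
}
#endif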
 957
 958struct tcp_congestion_ops {
 959	struct list_head	list;
 960	u32 key;
 961	u32 flags;
 962
 963	/* initialize private data (optional) */
 964	void (*init)(struct sock *sk);
 965	/* cleanup private data  (optional) */
 966	void (*release)(struct sock *sk);
 967
 968	/* return slow start threshold (required) */
 969	u32 (*ssthresh)(struct sock *sk);
 
 970	/* do new cwnd calculation (required) */
 971	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
 
 972	/* call before changing ca_state (optional) */
 973	void (*set_state)(struct sock *sk, u8 new_state);
 
 974	/* call when cwnd event occurs (optional) */
 975	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
 
 976	/* call when ack arrives (optional) */
 977	void (*in_ack_event)(struct sock *sk, u32 flags);
 978	/* new value of cwnd after loss (required) */
 979	u32  (*undo_cwnd)(struct sock *sk);
 980	/* hook for packet ack accounting (optional) */
 981	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
 
 982	/* override sysctl_tcp_min_tso_segs */
 983	u32 (*min_tso_segs)(struct sock *sk);
 984	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
 985	u32 (*sndbuf_expand)(struct sock *sk);
 986	/* call when packets are delivered to update cwnd and pacing rate,
 987	 * after all the ca_state processing. (optional)
 988	 */
 989	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
 990	/* get info for inet_diag (optional) */
 991	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
 992			   union tcp_cc_info *info);
 993
 994	char 		name[TCP_CA_NAME_MAX];
 995	struct module 	*owner;
 996};
 997
 998int tcp_register_congestion_control(struct tcp_congestion_ops *type);
 999void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
 
 
 
1000
1001void tcp_assign_congestion_control(struct sock *sk);
1002void tcp_init_congestion_control(struct sock *sk);
1003void tcp_cleanup_congestion_control(struct sock *sk);
1004int tcp_set_default_congestion_control(struct net *net, const char *name);
1005void tcp_get_default_congestion_control(struct net *net, char *name);
1006void tcp_get_available_congestion_control(char *buf, size_t len);
1007void tcp_get_allowed_congestion_control(char *buf, size_t len);
1008int tcp_set_allowed_congestion_control(char *allowed);
1009int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit);
 
1010u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
1011void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
1012
1013u32 tcp_reno_ssthresh(struct sock *sk);
1014u32 tcp_reno_undo_cwnd(struct sock *sk);
1015void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
1016extern struct tcp_congestion_ops tcp_reno;
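/* Illustrative sketch (not a real kernel module): the minimal shape of a
 * congestion control that reuses the Reno helpers above. The name
 * "dummy_reno" is made up; only ssthresh, cong_avoid and undo_cwnd are
 * required callbacks.
 */
#if 0
static struct tcp_congestion_ops tcp_dummy_reno __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.name		= "dummy_reno",
	.owner		= THIS_MODULE,
};

static int __init tcp_dummy_reno_register(void)
{
	return tcp_register_congestion_control(&tcp_dummy_reno);
}

static void __exit tcp_dummy_reno_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_dummy_reno);
}

module_init(tcp_dummy_reno_register);
module_exit(tcp_dummy_reno_unregister);
#endif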
1017
 
1018struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
1019u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
1020#ifdef CONFIG_INET
1021char *tcp_ca_get_name_by_key(u32 key, char *buffer);
1022#else
1023static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
1024{
1025	return NULL;
1026}
1027#endif
1028
1029static inline bool tcp_ca_needs_ecn(const struct sock *sk)
1030{
1031	const struct inet_connection_sock *icsk = inet_csk(sk);
1032
1033	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
1034}
1035
1036static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
1037{
1038	struct inet_connection_sock *icsk = inet_csk(sk);
1039
1040	if (icsk->icsk_ca_ops->set_state)
1041		icsk->icsk_ca_ops->set_state(sk, ca_state);
1042	icsk->icsk_ca_state = ca_state;
1043}
1044
1045static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
1046{
1047	const struct inet_connection_sock *icsk = inet_csk(sk);
1048
1049	if (icsk->icsk_ca_ops->cwnd_event)
1050		icsk->icsk_ca_ops->cwnd_event(sk, event);
1051}
1052
 
 
 
1053/* From tcp_rate.c */
1054void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1055void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1056			    struct rate_sample *rs);
1057void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1058		  bool is_sack_reneg, struct rate_sample *rs);
1059void tcp_rate_check_app_limited(struct sock *sk);
1060
1061/* These functions determine how the current flow behaves in respect of SACK
1062 * handling. SACK is negotiated with the peer, and therefore it can vary
1063 * between different flows.
1064 *
1065 * tcp_is_sack - SACK enabled
1066 * tcp_is_reno - No SACK
1067 */
1068static inline int tcp_is_sack(const struct tcp_sock *tp)
1069{
1070	return tp->rx_opt.sack_ok;
1071}
1072
1073static inline bool tcp_is_reno(const struct tcp_sock *tp)
1074{
1075	return !tcp_is_sack(tp);
1076}
1077
1078static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
1079{
1080	return tp->sacked_out + tp->lost_out;
1081}
1082
1083/* This determines how many packets are "in the network" to the best
1084 * of our knowledge.  In many cases it is conservative, but where
1085 * detailed information is available from the receiver (via SACK
1086 * blocks etc.) we can make more aggressive calculations.
1087 *
 1088 * Use this for decisions involving congestion control; use just
1089 * tp->packets_out to determine if the send queue is empty or not.
1090 *
1091 * Read this equation as:
1092 *
1093 *	"Packets sent once on transmission queue" MINUS
1094 *	"Packets left network, but not honestly ACKed yet" PLUS
1095 *	"Packets fast retransmitted"
1096 */
1097static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
1098{
1099	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
1100}
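/* Worked example (illustrative numbers only): with packets_out == 10,
 * sacked_out == 3, lost_out == 2 and retrans_out == 1 this evaluates to
 * 10 - (3 + 2) + 1 == 6 packets still presumed to be in the network.
 */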
1101
1102#define TCP_INFINITE_SSTHRESH	0x7fffffff
1103
1104static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
1105{
1106	return tp->snd_cwnd < tp->snd_ssthresh;
1107}
1108
1109static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
1110{
1111	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
1112}
1113
1114static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
1115{
1116	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
1117	       (1 << inet_csk(sk)->icsk_ca_state);
1118}
1119
1120/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1121 * The exception is cwnd reduction phase, when cwnd is decreasing towards
1122 * ssthresh.
1123 */
1124static inline __u32 tcp_current_ssthresh(const struct sock *sk)
1125{
1126	const struct tcp_sock *tp = tcp_sk(sk);
1127
1128	if (tcp_in_cwnd_reduction(sk))
1129		return tp->snd_ssthresh;
1130	else
1131		return max(tp->snd_ssthresh,
1132			   ((tp->snd_cwnd >> 1) +
1133			    (tp->snd_cwnd >> 2)));
1134}
1135
1136/* Use define here intentionally to get WARN_ON location shown at the caller */
1137#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
1138
1139void tcp_enter_cwr(struct sock *sk);
1140__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
1141
1142/* The maximum number of MSS of available cwnd for which TSO defers
1143 * sending if not using sysctl_tcp_tso_win_divisor.
1144 */
1145static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
1146{
1147	return 3;
1148}
1149
1150/* Returns end sequence number of the receiver's advertised window */
1151static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
1152{
1153	return tp->snd_una + tp->snd_wnd;
1154}
1155
1156/* We follow the spirit of RFC2861 to validate cwnd but implement a more
1157 * flexible approach. The RFC suggests cwnd should not be raised unless
1158 * it was fully used previously. And that's exactly what we do in
1159 * congestion avoidance mode. But in slow start we allow cwnd to grow
1160 * as long as the application has used half the cwnd.
1161 * Example :
1162 *    cwnd is 10 (IW10), but application sends 9 frames.
1163 *    We allow cwnd to reach 18 when all frames are ACKed.
1164 * This check is safe because it's as aggressive as slow start which already
 1165 * risks 100% overshoot. The advantage is that we discourage applications
 1166 * from sending filler packets or data just to artificially blow up the cwnd
 1167 * usage, and allow an application-limited process to probe bw more aggressively.
1168 */
1169static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1170{
1171	const struct tcp_sock *tp = tcp_sk(sk);
1172
1173	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
1174	if (tcp_in_slow_start(tp))
1175		return tp->snd_cwnd < 2 * tp->max_packets_out;
1176
1177	return tp->is_cwnd_limited;
1178}
1179
 1180/* Something is really bad: we could not queue an additional packet,
 1181 * because the qdisc is full or the receiver sent a 0 window.
1182 * We do not want to add fuel to the fire, or abort too early,
1183 * so make sure the timer we arm now is at least 200ms in the future,
1184 * regardless of current icsk_rto value (as it could be ~2ms)
1185 */
1186static inline unsigned long tcp_probe0_base(const struct sock *sk)
1187{
1188	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
1189}
1190
1191/* Variant of inet_csk_rto_backoff() used for zero window probes */
1192static inline unsigned long tcp_probe0_when(const struct sock *sk,
1193					    unsigned long max_when)
1194{
1195	u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;
 
 
1196
1197	return (unsigned long)min_t(u64, when, max_when);
1198}
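/* Worked example (illustrative numbers only): with a 200 ms base and
 * icsk_backoff == 3, "when" is 200 ms << 3 == 1.6 s before being clamped
 * to max_when by the min_t() above.
 */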
1199
1200static inline void tcp_check_probe_timer(struct sock *sk)
1201{
1202	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
1203		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
1204					  tcp_probe0_base(sk), TCP_RTO_MAX);
1205}
1206
1207static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
1208{
1209	tp->snd_wl1 = seq;
1210}
1211
1212static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
1213{
1214	tp->snd_wl1 = seq;
1215}
1216
1217/*
1218 * Calculate(/check) TCP checksum
1219 */
1220static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1221				   __be32 daddr, __wsum base)
1222{
1223	return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
1224}
1225
1226static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
1227{
1228	return __skb_checksum_complete(skb);
1229}
1230
1231static inline bool tcp_checksum_complete(struct sk_buff *skb)
1232{
1233	return !skb_csum_unnecessary(skb) &&
1234		__tcp_checksum_complete(skb);
1235}
1236
1237bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
1238int tcp_filter(struct sock *sk, struct sk_buff *skb);
1239
1240#undef STATE_TRACE
1241
1242#ifdef STATE_TRACE
1243static const char *statename[]={
1244	"Unused","Established","Syn Sent","Syn Recv",
1245	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
1246	"Close Wait","Last ACK","Listen","Closing"
1247};
1248#endif
1249void tcp_set_state(struct sock *sk, int state);
1250
1251void tcp_done(struct sock *sk);
1252
1253int tcp_abort(struct sock *sk, int err);
1254
1255static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
1256{
1257	rx_opt->dsack = 0;
1258	rx_opt->num_sacks = 0;
1259}
1260
1261u32 tcp_default_init_rwnd(u32 mss);
1262void tcp_cwnd_restart(struct sock *sk, s32 delta);
1263
1264static inline void tcp_slow_start_after_idle_check(struct sock *sk)
1265{
1266	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1267	struct tcp_sock *tp = tcp_sk(sk);
1268	s32 delta;
1269
1270	if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out ||
1271	    ca_ops->cong_control)
1272		return;
1273	delta = tcp_jiffies32 - tp->lsndtime;
1274	if (delta > inet_csk(sk)->icsk_rto)
1275		tcp_cwnd_restart(sk, delta);
1276}
1277
1278/* Determine a window scaling and initial window to offer. */
1279void tcp_select_initial_window(const struct sock *sk, int __space,
1280			       __u32 mss, __u32 *rcv_wnd,
1281			       __u32 *window_clamp, int wscale_ok,
1282			       __u8 *rcv_wscale, __u32 init_rcv_wnd);
1283
1284static inline int tcp_win_from_space(const struct sock *sk, int space)
1285{
1286	int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;
1287
1288	return tcp_adv_win_scale <= 0 ?
1289		(space>>(-tcp_adv_win_scale)) :
1290		space - (space>>tcp_adv_win_scale);
1291}
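/* Worked example (illustrative numbers only): with space == 64K,
 * tcp_adv_win_scale == 1 yields 64K - 32K == 32K of usable window, while
 * tcp_adv_win_scale == -2 yields 64K >> 2 == 16K; the rest of the buffer
 * is left as overhead/application slack.
 */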
1292
1293/* Note: caller must be prepared to deal with negative returns */
1294static inline int tcp_space(const struct sock *sk)
1295{
1296	return tcp_win_from_space(sk, sk->sk_rcvbuf -
1297				  atomic_read(&sk->sk_rmem_alloc));
1298}
1299
1300static inline int tcp_full_space(const struct sock *sk)
1301{
1302	return tcp_win_from_space(sk, sk->sk_rcvbuf);
1303}
1304
1305extern void tcp_openreq_init_rwin(struct request_sock *req,
1306				  const struct sock *sk_listener,
1307				  const struct dst_entry *dst);
1308
1309void tcp_enter_memory_pressure(struct sock *sk);
1310void tcp_leave_memory_pressure(struct sock *sk);
1311
1312static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1313{
1314	struct net *net = sock_net((struct sock *)tp);
1315
1316	return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
1317}
1318
1319static inline int keepalive_time_when(const struct tcp_sock *tp)
1320{
1321	struct net *net = sock_net((struct sock *)tp);
1322
1323	return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
1324}
1325
1326static inline int keepalive_probes(const struct tcp_sock *tp)
1327{
1328	struct net *net = sock_net((struct sock *)tp);
1329
1330	return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
1331}
1332
1333static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1334{
1335	const struct inet_connection_sock *icsk = &tp->inet_conn;
1336
1337	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
1338			  tcp_jiffies32 - tp->rcv_tstamp);
1339}
1340
1341static inline int tcp_fin_time(const struct sock *sk)
1342{
1343	int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
 
1344	const int rto = inet_csk(sk)->icsk_rto;
1345
1346	if (fin_timeout < (rto << 2) - (rto >> 1))
1347		fin_timeout = (rto << 2) - (rto >> 1);
1348
1349	return fin_timeout;
1350}
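/* Illustrative arithmetic: (rto << 2) - (rto >> 1) == 3.5 * RTO, so the
 * FIN_WAIT_2 timeout returned above is never shorter than 3.5 RTOs.
 */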
1351
1352static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1353				  int paws_win)
1354{
1355	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1356		return true;
1357	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
1358		return true;
1359	/*
1360	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
1361	 * then following tcp messages have valid values. Ignore 0 value,
1362	 * or else 'negative' tsval might forbid us to accept their packets.
1363	 */
1364	if (!rx_opt->ts_recent)
1365		return true;
1366	return false;
1367}
1368
1369static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1370				   int rst)
1371{
1372	if (tcp_paws_check(rx_opt, 0))
1373		return false;
1374
 1375	/* RST segments are not recommended to carry a timestamp,
 1376	   and, if they do, it is recommended to ignore PAWS because
 1377	   "their cleanup function should take precedence over timestamps."
 1378	   Certainly, that is a mistake. It is necessary to understand the
 1379	   reasons for this constraint in order to relax it: if the peer
 1380	   reboots, its clock may go out-of-sync and half-open connections
 1381	   will not be reset. Actually, the problem would not exist if all
 1382	   implementations followed the draft about maintaining clocks
 1383	   across reboots. Linux-2.2 DOES NOT!
1384
1385	   However, we can relax time bounds for RST segments to MSL.
1386	 */
1387	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
1388		return false;
1389	return true;
1390}
1391
1392bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
1393			  int mib_idx, u32 *last_oow_ack_time);
1394
1395static inline void tcp_mib_init(struct net *net)
1396{
1397	/* See RFC 2012 */
1398	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
1399	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1400	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1401	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
1402}
1403
1404/* from STCP */
1405static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1406{
1407	tp->lost_skb_hint = NULL;
1408}
1409
1410static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1411{
1412	tcp_clear_retrans_hints_partial(tp);
1413	tp->retransmit_skb_hint = NULL;
1414}
1415
1416union tcp_md5_addr {
1417	struct in_addr  a4;
1418#if IS_ENABLED(CONFIG_IPV6)
1419	struct in6_addr	a6;
1420#endif
1421};
1422
1423/* - key database */
1424struct tcp_md5sig_key {
1425	struct hlist_node	node;
1426	u8			keylen;
1427	u8			family; /* AF_INET or AF_INET6 */
 
 
1428	union tcp_md5_addr	addr;
1429	u8			prefixlen;
1430	u8			key[TCP_MD5SIG_MAXKEYLEN];
1431	struct rcu_head		rcu;
1432};
1433
1434/* - sock block */
1435struct tcp_md5sig_info {
1436	struct hlist_head	head;
1437	struct rcu_head		rcu;
1438};
1439
1440/* - pseudo header */
1441struct tcp4_pseudohdr {
1442	__be32		saddr;
1443	__be32		daddr;
1444	__u8		pad;
1445	__u8		protocol;
1446	__be16		len;
1447};
1448
1449struct tcp6_pseudohdr {
1450	struct in6_addr	saddr;
1451	struct in6_addr daddr;
1452	__be32		len;
1453	__be32		protocol;	/* including padding */
1454};
1455
1456union tcp_md5sum_block {
1457	struct tcp4_pseudohdr ip4;
1458#if IS_ENABLED(CONFIG_IPV6)
1459	struct tcp6_pseudohdr ip6;
1460#endif
1461};
1462
1463/* - pool: digest algorithm, hash description and scratch buffer */
1464struct tcp_md5sig_pool {
1465	struct ahash_request	*md5_req;
1466	void			*scratch;
1467};
1468
1469/* - functions */
1470int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1471			const struct sock *sk, const struct sk_buff *skb);
1472int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1473		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
1474		   gfp_t gfp);
1475int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1476		   int family, u8 prefixlen);
 
1477struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1478					 const struct sock *addr_sk);
1479
1480#ifdef CONFIG_TCP_MD5SIG
1481struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
1482					 const union tcp_md5_addr *addr,
1483					 int family);
1484#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
1485#else
1486static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
1487					 const union tcp_md5_addr *addr,
1488					 int family)
1489{
1490	return NULL;
1491}
1492#define tcp_twsk_md5_key(twsk)	NULL
1493#endif
1494
1495bool tcp_alloc_md5sig_pool(void);
 
 
 
1496
1497struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
1498static inline void tcp_put_md5sig_pool(void)
1499{
1500	local_bh_enable();
1501}
1502
1503int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
1504			  unsigned int header_len);
1505int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1506		     const struct tcp_md5sig_key *key);
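/* Illustrative sketch (hypothetical helper, simplified from the real
 * callers): tcp_get_md5sig_pool() runs with BHs disabled and must always
 * be paired with tcp_put_md5sig_pool(); the real callers also seed the
 * pseudo header and finalize the digest.
 */
#if 0
static void demo_md5_hash(const struct tcp_md5sig_key *key,
			  const struct sk_buff *skb, unsigned int header_len)
{
	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();

	if (!hp)
		return;
	tcp_md5_hash_skb_data(hp, skb, header_len);
	tcp_md5_hash_key(hp, key);
	tcp_put_md5sig_pool();
}
#endif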
1507
1508/* From tcp_fastopen.c */
1509void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1510			    struct tcp_fastopen_cookie *cookie);
1511void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1512			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
1513			    u16 try_exp);
1514struct tcp_fastopen_request {
1515	/* Fast Open cookie. Size 0 means a cookie request */
1516	struct tcp_fastopen_cookie	cookie;
1517	struct msghdr			*data;  /* data in MSG_FASTOPEN */
1518	size_t				size;
1519	int				copied;	/* queued in tcp_connect() */
 
1520};
1521void tcp_free_fastopen_req(struct tcp_sock *tp);
1522void tcp_fastopen_destroy_cipher(struct sock *sk);
1523void tcp_fastopen_ctx_destroy(struct net *net);
1524int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
1525			      void *key, unsigned int len);
 
 
1526void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
1527struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1528			      struct request_sock *req,
1529			      struct tcp_fastopen_cookie *foc,
1530			      const struct dst_entry *dst);
1531void tcp_fastopen_init_key_once(struct net *net);
1532bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
1533			     struct tcp_fastopen_cookie *cookie);
1534bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
1535#define TCP_FASTOPEN_KEY_LENGTH 16
 
 
 
1536
1537/* Fastopen key context */
1538struct tcp_fastopen_context {
1539	struct crypto_cipher	*tfm;
1540	__u8			key[TCP_FASTOPEN_KEY_LENGTH];
1541	struct rcu_head		rcu;
1542};
1543
1544extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
1545void tcp_fastopen_active_disable(struct sock *sk);
1546bool tcp_fastopen_active_should_disable(struct sock *sk);
1547void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
1548void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
1549
1550/* Latencies incurred by various limits for a sender. They are
1551 * chronograph-like stats that are mutually exclusive.
1552 */
1553enum tcp_chrono {
1554	TCP_CHRONO_UNSPEC,
1555	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
1556	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
1557	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
1558	__TCP_CHRONO_MAX,
1559};
1560
1561void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
1562void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
1563
 1564/* This helper is needed because skb->tcp_tsorted_anchor uses
 1565 * the same memory storage as skb->destructor/_skb_refdst
1566 */
1567static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
1568{
1569	skb->destructor = NULL;
1570	skb->_skb_refdst = 0UL;
1571}
1572
1573#define tcp_skb_tsorted_save(skb) {		\
1574	unsigned long _save = skb->_skb_refdst;	\
1575	skb->_skb_refdst = 0UL;
1576
1577#define tcp_skb_tsorted_restore(skb)		\
1578	skb->_skb_refdst = _save;		\
1579}
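/* Illustrative usage (assumed pattern): the two macros above open and
 * close a compound block, so they must always be paired:
 *
 *	tcp_skb_tsorted_save(skb) {
 *		... work that must not trip over tcp_tsorted_anchor ...
 *	} tcp_skb_tsorted_restore(skb);
 */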
1580
1581void tcp_write_queue_purge(struct sock *sk);
1582
1583static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
1584{
1585	return skb_rb_first(&sk->tcp_rtx_queue);
1586}
1587
1588static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
1589{
1590	return skb_peek(&sk->sk_write_queue);
1591}
1592
1593static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1594{
1595	return skb_peek_tail(&sk->sk_write_queue);
1596}
1597
1598#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
1599	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1600
1601static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1602{
1603	return skb_peek(&sk->sk_write_queue);
1604}
1605
1606static inline bool tcp_skb_is_last(const struct sock *sk,
1607				   const struct sk_buff *skb)
1608{
1609	return skb_queue_is_last(&sk->sk_write_queue, skb);
1610}
1611
1612static inline bool tcp_write_queue_empty(const struct sock *sk)
1613{
1614	return skb_queue_empty(&sk->sk_write_queue);
1615}
1616
1617static inline bool tcp_rtx_queue_empty(const struct sock *sk)
1618{
1619	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
1620}
1621
1622static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
1623{
1624	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
1625}
1626
1627static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
1628{
1629	if (tcp_write_queue_empty(sk))
1630		tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
1631}
1632
1633static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1634{
1635	__skb_queue_tail(&sk->sk_write_queue, skb);
1636}
1637
1638static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1639{
1640	__tcp_add_write_queue_tail(sk, skb);
1641
1642	/* Queue it, remembering where we must start sending. */
1643	if (sk->sk_write_queue.next == skb)
1644		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
1645}
1646
1647/* Insert new before skb on the write queue of sk.  */
1648static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1649						  struct sk_buff *skb,
1650						  struct sock *sk)
1651{
1652	__skb_queue_before(&sk->sk_write_queue, skb, new);
1653}
1654
1655static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1656{
1657	tcp_skb_tsorted_anchor_cleanup(skb);
1658	__skb_unlink(skb, &sk->sk_write_queue);
1659}
1660
1661void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);
1662
1663static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
1664{
1665	tcp_skb_tsorted_anchor_cleanup(skb);
1666	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
1667}
1668
1669static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
1670{
1671	list_del(&skb->tcp_tsorted_anchor);
1672	tcp_rtx_queue_unlink(skb, sk);
1673	sk_wmem_free_skb(sk, skb);
1674}
1675
1676static inline void tcp_push_pending_frames(struct sock *sk)
1677{
1678	if (tcp_send_head(sk)) {
1679		struct tcp_sock *tp = tcp_sk(sk);
1680
1681		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
1682	}
1683}
1684
1685/* Start sequence of the skb just after the highest skb with SACKed
1686 * bit, valid only if sacked_out > 0 or when the caller has ensured
1687 * validity by itself.
1688 */
1689static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1690{
1691	if (!tp->sacked_out)
1692		return tp->snd_una;
1693
1694	if (tp->highest_sack == NULL)
1695		return tp->snd_nxt;
1696
1697	return TCP_SKB_CB(tp->highest_sack)->seq;
1698}
1699
1700static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
1701{
1702	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
1703}
1704
1705static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
1706{
1707	return tcp_sk(sk)->highest_sack;
1708}
1709
1710static inline void tcp_highest_sack_reset(struct sock *sk)
1711{
1712	tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
1713}
1714
1715/* Called when old skb is about to be deleted and replaced by new skb */
1716static inline void tcp_highest_sack_replace(struct sock *sk,
1717					    struct sk_buff *old,
1718					    struct sk_buff *new)
1719{
1720	if (old == tcp_highest_sack(sk))
1721		tcp_sk(sk)->highest_sack = new;
1722}
1723
1724/* This helper checks if socket has IP_TRANSPARENT set */
1725static inline bool inet_sk_transparent(const struct sock *sk)
1726{
1727	switch (sk->sk_state) {
1728	case TCP_TIME_WAIT:
1729		return inet_twsk(sk)->tw_transparent;
1730	case TCP_NEW_SYN_RECV:
1731		return inet_rsk(inet_reqsk(sk))->no_srccheck;
1732	}
1733	return inet_sk(sk)->transparent;
1734}
1735
1736/* Determines whether this is a thin stream (which may suffer from
1737 * increased latency). Used to trigger latency-reducing mechanisms.
1738 */
1739static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
1740{
1741	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
1742}
1743
1744/* /proc */
1745enum tcp_seq_states {
1746	TCP_SEQ_STATE_LISTENING,
1747	TCP_SEQ_STATE_ESTABLISHED,
1748};
1749
1750int tcp_seq_open(struct inode *inode, struct file *file);
 
 
1751
1752struct tcp_seq_afinfo {
1753	char				*name;
1754	sa_family_t			family;
1755	const struct file_operations	*seq_fops;
1756	struct seq_operations		seq_ops;
1757};
1758
1759struct tcp_iter_state {
1760	struct seq_net_private	p;
1761	sa_family_t		family;
1762	enum tcp_seq_states	state;
1763	struct sock		*syn_wait_sk;
1764	int			bucket, offset, sbucket, num;
1765	loff_t			last_pos;
1766};
1767
1768int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
1769void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
1770
1771extern struct request_sock_ops tcp_request_sock_ops;
1772extern struct request_sock_ops tcp6_request_sock_ops;
1773
1774void tcp_v4_destroy_sock(struct sock *sk);
1775
1776struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
1777				netdev_features_t features);
1778struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
1779int tcp_gro_complete(struct sk_buff *skb);
1780
1781void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
1782
1783static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
1784{
1785	struct net *net = sock_net((struct sock *)tp);
1786	return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
1787}
1788
1789static inline bool tcp_stream_memory_free(const struct sock *sk)
1790{
1791	const struct tcp_sock *tp = tcp_sk(sk);
1792	u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
1793
1794	return notsent_bytes < tcp_notsent_lowat(tp);
1795}
1796
 
 
1797#ifdef CONFIG_PROC_FS
1798int tcp4_proc_init(void);
1799void tcp4_proc_exit(void);
1800#endif
1801
1802int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
1803int tcp_conn_request(struct request_sock_ops *rsk_ops,
1804		     const struct tcp_request_sock_ops *af_ops,
1805		     struct sock *sk, struct sk_buff *skb);
1806
1807/* TCP af-specific functions */
1808struct tcp_sock_af_ops {
1809#ifdef CONFIG_TCP_MD5SIG
1810	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
1811						const struct sock *addr_sk);
1812	int		(*calc_md5_hash)(char *location,
1813					 const struct tcp_md5sig_key *md5,
1814					 const struct sock *sk,
1815					 const struct sk_buff *skb);
1816	int		(*md5_parse)(struct sock *sk,
1817				     int optname,
1818				     char __user *optval,
1819				     int optlen);
1820#endif
1821};
1822
1823struct tcp_request_sock_ops {
1824	u16 mss_clamp;
1825#ifdef CONFIG_TCP_MD5SIG
1826	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
1827						 const struct sock *addr_sk);
1828	int		(*calc_md5_hash) (char *location,
1829					  const struct tcp_md5sig_key *md5,
1830					  const struct sock *sk,
1831					  const struct sk_buff *skb);
1832#endif
1833	void (*init_req)(struct request_sock *req,
1834			 const struct sock *sk_listener,
1835			 struct sk_buff *skb);
1836#ifdef CONFIG_SYN_COOKIES
1837	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
1838				 __u16 *mss);
1839#endif
1840	struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
1841				       const struct request_sock *req);
1842	u32 (*init_seq)(const struct sk_buff *skb);
1843	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
1844	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
1845			   struct flowi *fl, struct request_sock *req,
1846			   struct tcp_fastopen_cookie *foc,
1847			   enum tcp_synack_type synack_type);
 
1848};
1849
1850#ifdef CONFIG_SYN_COOKIES
1851static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
1852					 const struct sock *sk, struct sk_buff *skb,
1853					 __u16 *mss)
1854{
1855	tcp_synq_overflow(sk);
1856	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
1857	return ops->cookie_init_seq(skb, mss);
1858}
1859#else
1860static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
1861					 const struct sock *sk, struct sk_buff *skb,
1862					 __u16 *mss)
1863{
1864	return 0;
1865}
1866#endif
1867
1868int tcpv4_offload_init(void);
1869
1870void tcp_v4_init(void);
1871void tcp_init(void);
1872
1873/* tcp_recovery.c */
1874extern void tcp_rack_mark_lost(struct sock *sk);
1875extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
1876			     u64 xmit_time);
1877extern void tcp_rack_reo_timeout(struct sock *sk);
1878extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);
1879
1880/* At how many usecs into the future should the RTO fire? */
1881static inline s64 tcp_rto_delta_us(const struct sock *sk)
1882{
1883	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
1884	u32 rto = inet_csk(sk)->icsk_rto;
1885	u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);
1886
1887	return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
1888}
1889
1890/*
1891 * Save and compile IPv4 options, return a pointer to it
1892 */
1893static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
1894							 struct sk_buff *skb)
1895{
1896	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
1897	struct ip_options_rcu *dopt = NULL;
1898
1899	if (opt->optlen) {
1900		int opt_size = sizeof(*dopt) + opt->optlen;
1901
1902		dopt = kmalloc(opt_size, GFP_ATOMIC);
1903		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
1904			kfree(dopt);
1905			dopt = NULL;
1906		}
1907	}
1908	return dopt;
1909}
1910
1911/* locally generated TCP pure ACKs have skb->truesize == 2
1912 * (check tcp_send_ack() in net/ipv4/tcp_output.c )
1913 * This is much faster than dissecting the packet to find out.
1914 * (Think of GRE encapsulations, IPv4, IPv6, ...)
1915 */
1916static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
1917{
1918	return skb->truesize == 2;
1919}
1920
1921static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
1922{
1923	skb->truesize = 2;
1924}
1925
1926static inline int tcp_inq(struct sock *sk)
1927{
1928	struct tcp_sock *tp = tcp_sk(sk);
1929	int answ;
1930
1931	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
1932		answ = 0;
1933	} else if (sock_flag(sk, SOCK_URGINLINE) ||
1934		   !tp->urg_data ||
1935		   before(tp->urg_seq, tp->copied_seq) ||
1936		   !before(tp->urg_seq, tp->rcv_nxt)) {
1937
1938		answ = tp->rcv_nxt - tp->copied_seq;
1939
1940		/* Subtract 1, if FIN was received */
1941		if (answ && sock_flag(sk, SOCK_DONE))
1942			answ--;
1943	} else {
1944		answ = tp->urg_seq - tp->copied_seq;
1945	}
1946
1947	return answ;
1948}
1949
1950int tcp_peek_len(struct socket *sock);
1951
1952static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
1953{
1954	u16 segs_in;
1955
1956	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1957	tp->segs_in += segs_in;
1958	if (skb->len > tcp_hdrlen(skb))
1959		tp->data_segs_in += segs_in;
1960}
1961
1962/*
1963 * TCP listen path runs lockless.
1964 * We forced "struct sock" to be const qualified to make sure
 1965 * we don't modify one of its fields by mistake.
1966 * Here, we increment sk_drops which is an atomic_t, so we can safely
1967 * make sock writable again.
1968 */
1969static inline void tcp_listendrop(const struct sock *sk)
1970{
1971	atomic_inc(&((struct sock *)sk)->sk_drops);
1972	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
1973}
1974
1975enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
1976
1977/*
1978 * Interface for adding Upper Level Protocols over TCP
1979 */
1980
1981#define TCP_ULP_NAME_MAX	16
1982#define TCP_ULP_MAX		128
1983#define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)
1984
1985enum {
1986	TCP_ULP_TLS,
1987	TCP_ULP_BPF,
1988};
1989
1990struct tcp_ulp_ops {
1991	struct list_head	list;
1992
1993	/* initialize ulp */
1994	int (*init)(struct sock *sk);
1995	/* cleanup ulp */
1996	void (*release)(struct sock *sk);
1997
1998	int		uid;
1999	char		name[TCP_ULP_NAME_MAX];
2000	bool		user_visible;
2001	struct module	*owner;
2002};
2003int tcp_register_ulp(struct tcp_ulp_ops *type);
2004void tcp_unregister_ulp(struct tcp_ulp_ops *type);
2005int tcp_set_ulp(struct sock *sk, const char *name);
2006int tcp_set_ulp_id(struct sock *sk, const int ulp);
2007void tcp_get_available_ulp(char *buf, size_t len);
2008void tcp_cleanup_ulp(struct sock *sk);
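/* Illustrative sketch (hypothetical ULP, not part of the kernel): the
 * minimal shape of an upper layer protocol. Real users are e.g. kTLS;
 * "demo_ulp" and its callbacks are made up, and a real ULP would also
 * need its own id in the enum above.
 */
#if 0
static int demo_ulp_init(struct sock *sk)
{
	return 0;	/* attach per-socket ULP state here */
}

static void demo_ulp_release(struct sock *sk)
{
	/* tear down per-socket ULP state here */
}

static struct tcp_ulp_ops demo_ulp_ops __read_mostly = {
	.name		= "demo_ulp",
	.user_visible	= true,
	.owner		= THIS_MODULE,
	.init		= demo_ulp_init,
	.release	= demo_ulp_release,
};

/* tcp_register_ulp(&demo_ulp_ops) would then make the ULP selectable via
 * tcp_set_ulp(sk, "demo_ulp"), i.e. setsockopt(TCP_ULP).
 */
#endif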
2009
2010/* Call BPF_SOCK_OPS program that returns an int. If the return value
2011 * is < 0, then the BPF op failed (for example if the loaded BPF
2012 * program does not support the chosen operation or there is no BPF
2013 * program loaded).
2014 */
2015#ifdef CONFIG_BPF
2016static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2017{
2018	struct bpf_sock_ops_kern sock_ops;
2019	int ret;
2020
2021	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
2022	if (sk_fullsock(sk)) {
2023		sock_ops.is_fullsock = 1;
2024		sock_owned_by_me(sk);
2025	}
2026
2027	sock_ops.sk = sk;
2028	sock_ops.op = op;
2029	if (nargs > 0)
2030		memcpy(sock_ops.args, args, nargs * sizeof(*args));
2031
2032	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
2033	if (ret == 0)
2034		ret = sock_ops.reply;
2035	else
2036		ret = -1;
2037	return ret;
2038}
2039
2040static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2041{
2042	u32 args[2] = {arg1, arg2};
2043
2044	return tcp_call_bpf(sk, op, 2, args);
2045}
2046
2047static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2048				    u32 arg3)
2049{
2050	u32 args[3] = {arg1, arg2, arg3};
2051
2052	return tcp_call_bpf(sk, op, 3, args);
2053}
2054
2055#else
2056static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2057{
2058	return -EPERM;
2059}
2060
2061static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2062{
2063	return -EPERM;
2064}
2065
2066static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2067				    u32 arg3)
2068{
2069	return -EPERM;
2070}
2071
2072#endif
2073
2074static inline u32 tcp_timeout_init(struct sock *sk)
2075{
2076	int timeout;
2077
2078	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
2079
2080	if (timeout <= 0)
2081		timeout = TCP_TIMEOUT_INIT;
2082	return timeout;
2083}
2084
2085static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
2086{
2087	int rwnd;
2088
2089	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
2090
2091	if (rwnd < 0)
2092		rwnd = 0;
2093	return rwnd;
2094}
2095
2096static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
2097{
2098	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
2099}
2100
2101#if IS_ENABLED(CONFIG_SMC)
2102extern struct static_key_false tcp_have_smc;
2103#endif
2104#endif	/* _TCP_H */