/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/indirect_call_wrapper.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock_reuseport.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/tcp_ao.h>
#include <net/inet_ecn.h>
#include <net/dst.h>
#include <net/mptcp.h>
#include <net/xfrm.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/bpf-cgroup.h>
#include <linux/siphash.h>

extern struct inet_hashinfo tcp_hashinfo;

DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
int tcp_orphan_count_sum(void);

DECLARE_PER_CPU(u32, tcp_tw_isn);

void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	L1_CACHE_ALIGN(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
#define TCP_MIN_SND_MSS		48
#define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16-bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The initial MTU to use for probing */
#define TCP_BASE_MSS		1024

/* probing interval, default to 10 minutes as per RFC 4821 */
#define TCP_PROBE_INTERVAL	600

/* Specify interval when tcp mtu probing will stop */
#define TCP_PROBE_THRESHOLD	8

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* Maximal number of window scale according to RFC 1323 */
#define TCP_MAX_WSCALE		14U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3 sec - 8 min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC 1122 says that the limit is 100 sec.
				 * 15 is ~13-30 min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 6	/* This is how many retries are done
				 * when actively opening a connection.
				 * RFC 1122 says the minimum retry MUST
				 * be at least 180 secs. Nevertheless
				 * this value corresponds to 63 secs
				 * of retransmission with the current
				 * initial RTO.
				 */

#define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
				 * when passively opening a connection.
				 * This corresponds to 31 secs of
				 * retransmission with the current
				 * initial RTO.
				 */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3 min, the new value is 60 sec,
				  * to combine the FIN-WAIT-2 timeout with the
				  * TIME-WAIT timer.
				  */
#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);

#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */

#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */

#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC 6298 2.1 initial RTO value */
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */
#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes */
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

/* Ensure that TCP PAWS checks are relaxed after ~2147 seconds
 * to avoid overflows. This assumes a clock no faster than 1 MHz.
 * The default clock is 1 kHz; tcp_usec_ts uses 1 MHz.
 */
#define TCP_PAWS_WRAP (INT_MAX / USEC_PER_SEC)

#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal to
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to the
					 * one provided by the timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than the
					 * minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC 2385) */
#define TCPOPT_AO		29	/* Authentication Option (RFC 5925) */
#define TCPOPT_MPTCP		30	/* Multipath TCP (RFC 6824) */
#define TCPOPT_FASTOPEN		34	/* Fast open (RFC 7413) */
#define TCPOPT_EXP		254	/* Experimental */
/* Magic number that follows the option value when sharing TCP
 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC	0xF989
#define TCPOPT_SMC_MAGIC	0xE2D4C3D9

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18
#define TCPOLEN_FASTOPEN_BASE	2
#define TCPOLEN_EXP_FASTOPEN_BASE	4
#define TCPOLEN_EXP_SMC_BASE	6

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
#define TCPOLEN_EXP_SMC_BASE_ALIGNED	8

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per RFC 6928 */
#define TCP_INIT_CWND		10

/* Bit Flags for sysctl_tcp_fastopen */
#define	TFO_CLIENT_ENABLE	1
#define	TFO_SERVER_ENABLE	2
#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define	TFO_SERVER_COOKIE_NOT_REQD	0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option.
 */
#define	TFO_SERVER_WO_SOCKOPT1	0x400


/* sysctl variables for tcp */
extern int sysctl_tcp_max_orphans;
extern long sysctl_tcp_mem[3];

#define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
#define TCP_RACK_STATIC_REO_WND  0x2 /* Use static RACK reo wnd */
#define TCP_RACK_NO_DUPTHRESH    0x4 /* Do not use DUPACK threshold in RACK */

extern atomic_long_t tcp_memory_allocated;
DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);

extern struct percpu_counter tcp_sockets_allocated;
extern unsigned long tcp_memory_pressure;

/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
		return true;

	return READ_ONCE(tcp_memory_pressure);
}
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)
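
/* Worked example (illustrative, not part of the original header): these
 * helpers rely on unsigned wraparound. With seq1 = 0xFFFFFFF0 and
 * seq2 = 0x00000010, seq1 - seq2 is 0xFFFFFFE0, which is negative when
 * read as an s32, so before(0xFFFFFFF0, 0x00000010) is true even though
 * seq1 > seq2 numerically: the sequence space has wrapped.
 */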

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}

static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sk_wmem_queued_add(sk, -skb->truesize);
	if (!skb_zcopy_pure(skb))
		sk_mem_uncharge(sk, skb->truesize);
	else
		sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
	__kfree_skb(skb);
}

void sk_forced_mem_schedule(struct sock *sk, int size);

bool tcp_check_oom(const struct sock *sk, int shift);


extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

void tcp_tasklet_init(void);

int tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

void tcp_remove_empty_skb(struct sock *sk);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
			 size_t size, struct ubuf_info *uarg);
void tcp_splice_eof(struct socket *sock);
int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
int tcp_wmem_schedule(struct sock *sk, int copy);
void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
	      int size_goal);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, int *karg);
enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
void tcp_twsk_purge(struct list_head *net_exit_list);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags);
struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
				     bool force_schedule);

static inline void tcp_dec_quickack_mode(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		/* How many ACKs S/ACKing new data have we sent? */
		const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;

		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4
#define	TCP_ECN_SEEN		8

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
					      struct sk_buff *skb,
					      const struct tcphdr *th,
					      u32 *tw_isn);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req, bool fastopen,
			   bool *lost_race);
enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
				       struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void __tcp_close(struct sock *sk, long timeout);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
__poll_t tcp_poll(struct file *file, struct socket *sock,
		  struct poll_table_struct *wait);
int do_tcp_getsockopt(struct sock *sk, int level,
		      int optname, sockptr_t optval, sockptr_t optlen);
int tcp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen);
bool tcp_bpf_bypass_getsockopt(int level, int optname);
int do_tcp_setsockopt(struct sock *sk, int level, int optname,
		      sockptr_t optval, unsigned int optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		   unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		int flags, int *addr_len);
int tcp_set_rcvlowat(struct sock *sk, int val);
int tcp_set_window_clamp(struct sock *sk, int val);
void tcp_update_recv_tstamps(struct sk_buff *skb,
			     struct scm_timestamping_internal *tss);
void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
			struct scm_timestamping_internal *tss);
void tcp_data_ready(struct sock *sk);
#ifdef CONFIG_MMU
int tcp_mmap(struct file *file, struct socket *sock,
	     struct vm_area_struct *vma);
#endif
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx,
		       int estab, struct tcp_fastopen_cookie *foc);

/*
 *	BPF SKB-less helpers
 */
u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss);
u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
			  const struct tcp_request_sock_ops *af_ops,
			  struct sock *sk, struct tcphdr *th);
/*
 *	TCP v4 functions exported for the inet6 API
 */

void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
enum tcp_synack_type {
	TCP_SYNACK_NORMAL,
	TCP_SYNACK_FASTOPEN,
	TCP_SYNACK_COOKIE,
};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
				struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				enum tcp_synack_type synack_type,
				struct sk_buff *syn_skb);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
				 struct dst_entry *dst);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
					    struct sock *sk, struct sk_buff *skb,
					    struct tcp_options_received *tcp_opt,
					    int mss, u32 tsoff);

#if IS_ENABLED(CONFIG_BPF)
struct bpf_tcp_req_attrs {
	u32 rcv_tsval;
	u32 rcv_tsecr;
	u16 mss;
	u8 rcv_wscale;
	u8 snd_wscale;
	u8 ecn_ok;
	u8 wscale_ok;
	u8 sack_ok;
	u8 tstamp_ok;
	u8 usec_ts_ok;
	u8 reserved[3];
};
#endif

#ifdef CONFIG_SYN_COOKIES

/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash input and partially encoded into
 * the cookie value. A cookie is only validated further if the delta
 * between the current counter value and the encoded one is less than this,
 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 * the counter advances immediately after a cookie is generated).
 */
#define MAX_SYNCOOKIE_AGE	2
#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
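
/* Worked example (illustrative): with HZ = 1000, TCP_SYNCOOKIE_PERIOD is
 * 60000 jiffies and TCP_SYNCOOKIE_VALID is 120000 jiffies. A cookie is
 * therefore honoured for at most two 60-second counter periods, and for
 * as little as one period if the counter increments right after the
 * cookie is minted.
 */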

/* syncookies: remember time of last synqueue overflow
 * But do not dirty this field too often (once per second is enough)
 * It is racy as we do not hold a lock, but race is very minor.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			if (!time_between32(now, last_overflow,
					    last_overflow + HZ))
				WRITE_ONCE(reuse->synq_overflow_ts, now);
			return;
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
	if (!time_between32(now, last_overflow, last_overflow + HZ))
		WRITE_ONCE(tcp_sk_rw(sk)->rx_opt.ts_recent_stamp, now);
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			return !time_between32(now, last_overflow - HZ,
					       last_overflow +
					       TCP_SYNCOOKIE_VALID);
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);

	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
	 * then we're under synflood. However, we have to use
	 * 'last_overflow - HZ' as lower bound. That's because a concurrent
	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
	 * jiffies but before we store .ts_recent_stamp into last_overflow,
	 * which could lead to rejecting a valid syncookie.
	 */
	return !time_between32(now, last_overflow - HZ,
			       last_overflow + TCP_SYNCOOKIE_VALID);
}

static inline u32 tcp_cookie_time(void)
{
	u64 val = get_jiffies_64();

	do_div(val, TCP_SYNCOOKIE_PERIOD);
	return val;
}

/* Convert one nsec 64bit timestamp to ts (ms or usec resolution) */
static inline u64 tcp_ns_to_ts(bool usec_ts, u64 val)
{
	if (usec_ts)
		return div_u64(val, NSEC_PER_USEC);

	return div_u64(val, NSEC_PER_MSEC);
}

u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
			     struct tcp_options_received *opt);

static inline bool cookie_ecn_ok(const struct net *net, const struct dst_entry *dst)
{
	return READ_ONCE(net->ipv4.sysctl_tcp_ecn) ||
		dst_feature(dst, RTAX_FEATURE_ECN);
}

#if IS_ENABLED(CONFIG_BPF)
static inline bool cookie_bpf_ok(struct sk_buff *skb)
{
	return skb->sk;
}

struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb);
#else
static inline bool cookie_bpf_ok(struct sk_buff *skb)
{
	return false;
}

static inline struct request_sock *cookie_bpf_check(struct net *net, struct sock *sk,
						    struct sk_buff *skb)
{
	return NULL;
}
#endif

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */

void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
enum tcp_queue {
	TCP_FRAG_IN_WRITE_QUEUE,
	TCP_FRAG_IN_RTX_QUEUE,
};
int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
		 struct sk_buff *skb, u32 len,
		 unsigned int mss_now, gfp_t gfp);

void tcp_send_probe0(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority,
			   enum sk_rst_reason reason);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
			     const struct sk_buff *next_skb);

/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_done_with_error(struct sock *sk, int err);
void tcp_reset(struct sock *sk, struct sk_buff *skb);
void tcp_fin(struct sock *sk);
void tcp_check_space(struct sock *sk);
void tcp_sack_compress_send_ack(struct sock *sk);

static inline void tcp_cleanup_skb(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	secpath_reset(skb);
}

static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb)
{
	DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
	DEBUG_NET_WARN_ON_ONCE(secpath_exists(skb));
	__skb_queue_tail(&sk->sk_receive_queue, skb);
}

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
		__sock_put(sk);

	if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
		__sock_put(sk);

	inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window > TCP_MSS_DEFAULT)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
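
/* Worked example (illustrative): if the peer advertised
 * max_window = 10000, above TCP_MSS_DEFAULT, cutoff is 5000 and a
 * pktsize of 8000 is bound to max(5000, 68 - tcp_header_len). With a
 * tiny-window peer advertising max_window = 300, cutoff is 300 and a
 * 1460-byte pktsize is bound to 300: no sub-window packetizing.
 */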

/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);
int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
void tcp_read_done(struct sock *sk, size_t len);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);

static inline void tcp_bound_rto(struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}
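
/* Worked example (illustrative): srtt_us holds 8x the smoothed RTT, per
 * the RFC 6298 fixed-point convention. For a smoothed RTT of 100 ms,
 * srtt_us = 800000, so (srtt_us >> 3) + rttvar_us with
 * rttvar_us = 50000 yields 150000 us, i.e. an RTO of ~150 ms before
 * tcp_bound_rto() above clamps it to TCP_RTO_MAX.
 */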

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	/* mptcp hooks are only on the slow path */
	if (sk_is_mptcp((struct sock *)tp))
		return;

	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

u32 tcp_delack_max(const struct sock *sk);

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(const struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = inet_csk(sk)->icsk_rto_min;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

static inline u32 tcp_rto_min_us(const struct sock *sk)
{
	return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
	return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
	return minmax_get(&tp->rtt_min);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result. The caller does these things
 * if necessary. This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP uses 32bit jiffies to save some space.
 * Note that this differs from tcp_time_stamp, which historically
 * matched it until linux-4.13.
 */
#define tcp_jiffies32 ((u32)jiffies)

/*
 * Deliver a 32bit value for TCP timestamp option (RFC 7323).
 * It is no longer tied to jiffies, but to a 1 ms clock.
 * Note: double check if you want to use tcp_jiffies32 instead of this.
 */
#define TCP_TS_HZ	1000

static inline u64 tcp_clock_ns(void)
{
	return ktime_get_ns();
}

static inline u64 tcp_clock_us(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
}

static inline u64 tcp_clock_ms(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_MSEC);
}

/* TCP Timestamp included in TS option (RFC 1323) can either use ms
 * or usec resolution. Each socket carries a flag to select one or the
 * other resolution, as the route attribute could change anytime.
 * Each flow must stick to its initial resolution.
 */
static inline u32 tcp_clock_ts(bool usec_ts)
{
	return usec_ts ? tcp_clock_us() : tcp_clock_ms();
}
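
/* Example (illustrative): for the same instant, a flow using ms
 * resolution puts tcp_clock_ms() (a 1 kHz value) in its TSval, while a
 * tcp_usec_ts flow puts tcp_clock_us() (a 1 MHz value). Mixing the two
 * within one flow would corrupt RTT sampling, hence the per-flow flag.
 */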

static inline u32 tcp_time_stamp_ms(const struct tcp_sock *tp)
{
	return div_u64(tp->tcp_mstamp, USEC_PER_MSEC);
}

static inline u32 tcp_time_stamp_ts(const struct tcp_sock *tp)
{
	if (tp->tcp_usec_ts)
		return tp->tcp_mstamp;
	return tcp_time_stamp_ms(tp);
}

void tcp_mstamp_refresh(struct tcp_sock *tp);

static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
{
	return max_t(s64, t1 - t0, 0);
}

/* provide the departure time in us unit */
static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
{
	return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
}

/* Provide skb TSval in usec or ms unit */
static inline u32 tcp_skb_timestamp_ts(bool usec_ts, const struct sk_buff *skb)
{
	if (usec_ts)
		return tcp_skb_timestamp_us(skb);

	return div_u64(skb->skb_mstamp_ns, NSEC_PER_MSEC);
}

static inline u32 tcp_tw_tsval(const struct tcp_timewait_sock *tcptw)
{
	return tcp_clock_ts(tcptw->tw_sk.tw_usec_ts) + tcptw->tw_ts_offset;
}

static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq)
{
	return tcp_clock_ts(treq->req_usec_ts) + treq->ts_off;
}

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)

/* State flags for sacked in struct tcp_skb_cb */
enum tcp_skb_cb_sacked_flags {
	TCPCB_SACKED_ACKED	= (1 << 0),	/* SKB ACK'd by a SACK block */
	TCPCB_SACKED_RETRANS	= (1 << 1),	/* SKB retransmitted */
	TCPCB_LOST		= (1 << 2),	/* SKB is lost */
	TCPCB_TAGBITS		= (TCPCB_SACKED_ACKED | TCPCB_SACKED_RETRANS |
				   TCPCB_LOST),	/* All tag bits */
	TCPCB_REPAIRED		= (1 << 4),	/* SKB repaired (no skb_mstamp_ns) */
	TCPCB_EVER_RETRANS	= (1 << 7),	/* Ever retransmitted frame */
	TCPCB_RETRANS		= (TCPCB_SACKED_RETRANS | TCPCB_EVER_RETRANS |
				   TCPCB_REPAIRED),
};

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers here.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	union {
		/* Note :
		 *	tcp_gso_segs/size are used in write queue only,
		 *	cf tcp_skb_pcount()/tcp_skb_mss()
		 */
		struct {
			u16	tcp_gso_segs;
			u16	tcp_gso_size;
		};
	};
	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/

	__u8		sacked;		/* State flags for SACK.	*/
	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
			eor:1,		/* Is skb MSG_EOR marked? */
			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
			unused:5;
	__u32		ack_seq;	/* Sequence number ACK'd	*/
	union {
		struct {
#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
			/* There is space for up to 24 bytes */
			__u32 is_app_limited:1, /* cwnd not fully used? */
			      delivered_ce:20,
			      unused:11;
			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
			__u32 delivered;
			/* start of send pipeline phase */
			u64 first_tx_mstamp;
			/* when we reached the "delivered" count */
			u64 delivered_mstamp;
		} tx;	/* only used for outgoing skbs */
		union {
			struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_skb_parm	h6;
#endif
		} header;	/* For incoming skbs */
	};
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
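
/* Sanity sketch (assumption: enforced elsewhere in the stack, not by
 * this header): the "adjust skbuff.h" remark above is the kind of
 * constraint typically checked at compile time with something like
 *	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof_field(struct sk_buff, cb));
 * so that any growth of tcp_skb_cb past skb->cb[] fails the build.
 */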

extern const struct inet_connection_sock_af_ops ipv4_specific;

#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[]
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->header.h6.iif;
}

static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);

	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v6_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
		return TCP_SKB_CB(skb)->header.h6.iif;
#endif
	return 0;
}

extern const struct inet_connection_sock_af_ops ipv6_specific;

INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
void tcp_v6_early_demux(struct sk_buff *skb);

#endif

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v4_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
		return TCP_SKB_CB(skb)->header.h4.iif;
#endif
	return 0;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets. To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_size;
}

static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
	return likely(!TCP_SKB_CB(skb)->eor);
}

static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
					const struct sk_buff *from)
{
	/* skb_cmp_decrypted() not needed, use tcp_write_collapse_fence() */
	return likely(tcp_skb_can_collapse_to(to) &&
		      mptcp_skb_can_collapse(to, from) &&
		      skb_pure_zcopy_same(to, from) &&
		      skb_frags_readable(to) == skb_frags_readable(from));
}

static inline bool tcp_skb_can_collapse_rx(const struct sk_buff *to,
					   const struct sk_buff *from)
{
	return likely(mptcp_skb_can_collapse(to, from) &&
		      !skb_cmp_decrypted(to, from));
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CA_UNSPEC	0

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED	0x1
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN	0x2
#define TCP_CONG_MASK	(TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)

union tcp_cc_info;

struct ack_sample {
	u32 pkts_acked;
	s32 rtt_us;
	u32 in_flight;
};

/* A rate sample measures the number of (original/retransmitted) data
 * packets delivered "delivered" over an interval of time "interval_us".
 * The tcp_rate.c code fills in the rate sample, and congestion
 * control modules that define a cong_control function to run at the end
 * of ACK processing can optionally choose to consult this sample when
 * setting cwnd and pacing rate.
 * A sample is invalid if "delivered" or "interval_us" is negative.
 */
struct rate_sample {
	u64  prior_mstamp;	/* starting timestamp for interval */
	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
	u32  prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
	s32  delivered;		/* number of packets delivered over interval */
	s32  delivered_ce;	/* number of packets delivered w/ CE marks*/
	long interval_us;	/* time for tp->delivered to incr "delivered" */
	u32  snd_interval_us;	/* snd interval for delivered packets */
	u32  rcv_interval_us;	/* rcv interval for delivered packets */
	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
	int  losses;		/* number of packets marked lost upon ACK */
	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
	u32  prior_in_flight;	/* in flight before this ACK */
	u32  last_end_seq;	/* end_seq of most recently ACKed packet */
	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
	bool is_retrans;	/* is sample from retransmission? */
	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
};

struct tcp_congestion_ops {
/* fast path fields are put first to fill one cache line */

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);

	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);

	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);

	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);

	/* call when ack arrives (optional) */
	void (*in_ack_event)(struct sock *sk, u32 flags);

	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);

	/* override sysctl_tcp_min_tso_segs */
	u32 (*min_tso_segs)(struct sock *sk);

	/* call when packets are delivered to update cwnd and pacing rate,
	 * after all the ca_state processing. (optional)
	 */
	void (*cong_control)(struct sock *sk, u32 ack, int flag, const struct rate_sample *rs);


	/* new value of cwnd after loss (required) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
	u32 (*sndbuf_expand)(struct sock *sk);

/* control/slow paths put last */
	/* get info for inet_diag (optional) */
	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info);

	char			name[TCP_CA_NAME_MAX];
	struct module		*owner;
	struct list_head	list;
	u32			key;
	u32			flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);
} ____cacheline_aligned_in_smp;

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
int tcp_update_congestion_control(struct tcp_congestion_ops *type,
				  struct tcp_congestion_ops *old_type);
int tcp_validate_congestion_control(struct tcp_congestion_ops *ca);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(struct net *net, const char *name);
void tcp_get_default_congestion_control(struct net *net, char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
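
/* Minimal sketch (illustrative, modeled on the Reno helpers above; the
 * "tcp_example" name is hypothetical) of a module registering a
 * congestion control. Only ssthresh, cong_avoid (or cong_control) and
 * undo_cwnd are required:
 *
 *	static struct tcp_congestion_ops tcp_example __read_mostly = {
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.undo_cwnd	= tcp_reno_undo_cwnd,
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init tcp_example_init(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 */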

struct tcp_congestion_ops *tcp_ca_find(const char *name);
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	return NULL;
}
#endif

static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* From tcp_cong.c */
void tcp_set_ca_state(struct sock *sk, const u8 ca_state);

/* From tcp_rate.c */
void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
			    struct rate_sample *rs);
void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
		  bool is_sack_reneg, struct rate_sample *rs);
void tcp_rate_check_app_limited(struct sock *sk);

static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return likely(tp->rx_opt.sack_ok);
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge. In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
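
/* Worked example (illustrative): with packets_out = 10, sacked_out = 2,
 * lost_out = 1 and retrans_out = 1, tcp_left_out() is 3 and
 * tcp_packets_in_flight() is 10 - 3 + 1 = 8 segments still presumed
 * in the network.
 */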

#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
{
	return tp->snd_cwnd;
}

static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
{
	WARN_ON_ONCE((int)val <= 0);
	tp->snd_cwnd = val;
}

static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
	return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
}

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
	       (1 << inet_csk(sk)->icsk_ca_state);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is cwnd reduction phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_in_cwnd_reduction(sk))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tcp_snd_cwnd(tp) >> 1) +
			    (tcp_snd_cwnd(tp) >> 2)));
}

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

void tcp_enter_cwr(struct sock *sk);
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	return 3;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}

/* We follow the spirit of RFC 2861 to validate cwnd but implement a more
 * flexible approach. The RFC suggests cwnd should not be raised unless
 * it was fully used previously. And that's exactly what we do in
 * congestion avoidance mode. But in slow start we allow cwnd to grow
 * as long as the application has used half the cwnd.
 * Example :
 *    cwnd is 10 (IW10), but application sends 9 frames.
 *    We allow cwnd to reach 18 when all frames are ACKed.
 * This check is safe because it's as aggressive as slow start which already
 * risks 100% overshoot. The advantage is that we discourage applications
 * from sending filler packets or extra data just to inflate cwnd usage,
 * and allow application-limited flows to probe bandwidth more aggressively.
 */
static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tp->is_cwnd_limited)
		return true;

	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
	if (tcp_in_slow_start(tp))
		return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;

	return false;
}

/* BBR congestion control needs pacing.
 * Same remark for SO_MAX_PACING_RATE.
 * sch_fq packet scheduler is efficiently handling pacing,
 * but is not always installed/used.
 * Return true if TCP stack should pace packets itself.
 */
static inline bool tcp_needs_internal_pacing(const struct sock *sk)
{
	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}

/* Estimates in how many jiffies next packet for this flow can be sent.
 * Scheduling a retransmit timer too early would be silly.
 */
static inline unsigned long tcp_pacing_delay(const struct sock *sk)
{
	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;

	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
}

static inline void tcp_reset_xmit_timer(struct sock *sk,
					const int what,
					unsigned long when,
					const unsigned long max_when)
{
	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
				  max_when);
}

/* Something is really bad, we could not queue an additional packet,
 * because qdisc is full or receiver sent a 0 window, or we are paced.
 * We do not want to add fuel to the fire, or abort too early,
 * so make sure the timer we arm now is at least 200ms in the future,
 * regardless of current icsk_rto value (as it could be ~2ms)
 */
static inline unsigned long tcp_probe0_base(const struct sock *sk)
{
	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
}

/* Variant of inet_csk_rto_backoff() used for zero window probes */
static inline unsigned long tcp_probe0_when(const struct sock *sk,
					    unsigned long max_when)
{
	u8 backoff = min_t(u8, ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1,
			   inet_csk(sk)->icsk_backoff);
	u64 when = (u64)tcp_probe0_base(sk) << backoff;

	return (unsigned long)min_t(u64, when, max_when);
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
				     tcp_probe0_base(sk), TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__skb_checksum_complete(skb);
}

bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
		     enum skb_drop_reason *reason);


int tcp_filter(struct sock *sk, struct sk_buff *skb);
void tcp_set_state(struct sock *sk, int state);
void tcp_done(struct sock *sk);
int tcp_abort(struct sock *sk, int err);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

void tcp_cwnd_restart(struct sock *sk, s32 delta);

static inline void tcp_slow_start_after_idle_check(struct sock *sk)
{
	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta;

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
	    tp->packets_out || ca_ops->cong_control)
		return;
	delta = tcp_jiffies32 - tp->lsndtime;
	if (delta > inet_csk(sk)->icsk_rto)
		tcp_cwnd_restart(sk, delta);
}

/* Determine a window scaling and initial window to offer. */
void tcp_select_initial_window(const struct sock *sk, int __space,
			       __u32 mss, __u32 *rcv_wnd,
			       __u32 *window_clamp, int wscale_ok,
			       __u8 *rcv_wscale, __u32 init_rcv_wnd);

static inline int __tcp_win_from_space(u8 scaling_ratio, int space)
{
	s64 scaled_space = (s64)space * scaling_ratio;

	return scaled_space >> TCP_RMEM_TO_WIN_SCALE;
}

static inline int tcp_win_from_space(const struct sock *sk, int space)
{
	return __tcp_win_from_space(tcp_sk(sk)->scaling_ratio, space);
}

/* inverse of __tcp_win_from_space() */
static inline int __tcp_space_from_win(u8 scaling_ratio, int win)
{
	u64 val = (u64)win << TCP_RMEM_TO_WIN_SCALE;

	do_div(val, scaling_ratio);
	return val;
}

static inline int tcp_space_from_win(const struct sock *sk, int win)
{
	return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win);
}

/* Assume a 50% default for skb->len/skb->truesize ratio.
 * This may be adjusted later in tcp_measure_rcv_mss().
 */
#define TCP_DEFAULT_SCALING_RATIO (1 << (TCP_RMEM_TO_WIN_SCALE - 1))

static inline void tcp_scaling_ratio_init(struct sock *sk)
{
	tcp_sk(sk)->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
}
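
/* Worked example (illustrative): with the default scaling_ratio of
 * 1 << (TCP_RMEM_TO_WIN_SCALE - 1), i.e. the 50% assumption above,
 * __tcp_win_from_space() maps 128 KB of rcvbuf space to a 64 KB window,
 * and __tcp_space_from_win() inverts that back to 128 KB.
 */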
1572
1573/* Note: caller must be prepared to deal with negative returns */
1574static inline int tcp_space(const struct sock *sk)
1575{
1576 return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
1577 READ_ONCE(sk->sk_backlog.len) -
1578 atomic_read(&sk->sk_rmem_alloc));
1579}
1580
1581static inline int tcp_full_space(const struct sock *sk)
1582{
1583 return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
1584}
1585
1586static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh)
1587{
1588 int unused_mem = sk_unused_reserved_mem(sk);
1589 struct tcp_sock *tp = tcp_sk(sk);
1590
1591 tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh);
1592 if (unused_mem)
1593 tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
1594 tcp_win_from_space(sk, unused_mem));
1595}
1596
1597static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
1598{
1599 __tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);
1600}
1601
1602void tcp_cleanup_rbuf(struct sock *sk, int copied);
1603void __tcp_cleanup_rbuf(struct sock *sk, int copied);
1604
1605
1606/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
1607 * If 87.5 % (7/8) of the space has been consumed, we want to override
1608 * SO_RCVLOWAT constraint, since we are receiving skbs with too small
1609 * len/truesize ratio.
1610 */
1611static inline bool tcp_rmem_pressure(const struct sock *sk)
1612{
1613 int rcvbuf, threshold;
1614
1615 if (tcp_under_memory_pressure(sk))
1616 return true;
1617
1618 rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1619 threshold = rcvbuf - (rcvbuf >> 3);
1620
1621 return atomic_read(&sk->sk_rmem_alloc) > threshold;
1622}
1623
1624static inline bool tcp_epollin_ready(const struct sock *sk, int target)
1625{
1626 const struct tcp_sock *tp = tcp_sk(sk);
1627 int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
1628
1629 if (avail <= 0)
1630 return false;
1631
1632 return (avail >= target) || tcp_rmem_pressure(sk) ||
1633 (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
1634}
1635
1636extern void tcp_openreq_init_rwin(struct request_sock *req,
1637 const struct sock *sk_listener,
1638 const struct dst_entry *dst);
1639
1640void tcp_enter_memory_pressure(struct sock *sk);
1641void tcp_leave_memory_pressure(struct sock *sk);
1642
1643static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1644{
1645 struct net *net = sock_net((struct sock *)tp);
1646 int val;
1647
1648 /* Paired with WRITE_ONCE() in tcp_sock_set_keepintvl()
1649 * and do_tcp_setsockopt().
1650 */
1651 val = READ_ONCE(tp->keepalive_intvl);
1652
1653 return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
1654}
1655
1656static inline int keepalive_time_when(const struct tcp_sock *tp)
1657{
1658 struct net *net = sock_net((struct sock *)tp);
1659 int val;
1660
1661 /* Paired with WRITE_ONCE() in tcp_sock_set_keepidle_locked() */
1662 val = READ_ONCE(tp->keepalive_time);
1663
1664 return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
1665}
1666
1667static inline int keepalive_probes(const struct tcp_sock *tp)
1668{
1669 struct net *net = sock_net((struct sock *)tp);
1670 int val;
1671
1672 /* Paired with WRITE_ONCE() in tcp_sock_set_keepcnt()
1673 * and do_tcp_setsockopt().
1674 */
1675 val = READ_ONCE(tp->keepalive_probes);
1676
1677 return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
1678}
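
/* The "val ? :" form in the helpers above is the GNU "elvis" operator:
 * it evaluates to val when val is non-zero, else to the per-netns sysctl
 * fallback. Open-coded equivalent (illustrative only):
 *
 *	return val ? val : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
 */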
1679
1680static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1681{
1682 const struct inet_connection_sock *icsk = &tp->inet_conn;
1683
1684 return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
1685 tcp_jiffies32 - tp->rcv_tstamp);
1686}
1687
1688static inline int tcp_fin_time(const struct sock *sk)
1689{
1690 int fin_timeout = tcp_sk(sk)->linger2 ? :
1691 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
1692 const int rto = inet_csk(sk)->icsk_rto;
1693
1694 if (fin_timeout < (rto << 2) - (rto >> 1))
1695 fin_timeout = (rto << 2) - (rto >> 1);
1696
1697 return fin_timeout;
1698}
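
/* The expression (rto << 2) - (rto >> 1) above is 3.5 * RTO; e.g. with
 * rto = 200 ms the FIN timeout is floored at 700 ms (illustrative
 * arithmetic only).
 */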
1699
1700static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1701 int paws_win)
1702{
1703 if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1704 return true;
1705 if (unlikely(!time_before32(ktime_get_seconds(),
1706 rx_opt->ts_recent_stamp + TCP_PAWS_WRAP)))
1707 return true;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
	 * while the following TCP segments carry valid values. Ignore a
	 * 0 value, or else a 'negative' tsval might make us reject their
	 * packets.
	 */
1713 if (!rx_opt->ts_recent)
1714 return true;
1715 return false;
1716}
1717
1718static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1719 int rst)
1720{
1721 if (tcp_paws_check(rx_opt, 0))
1722 return false;
1723
	/* RST segments are not recommended to carry a timestamp,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, this is a mistake. One has to understand the reasons
	   for this constraint before relaxing it: if the peer reboots, its
	   clock may go out of sync and half-open connections will not be
	   reset.
	   Actually, the problem would not exist if all implementations
	   followed the draft about maintaining clocks across reboots.
	   Linux-2.2 DOES NOT!

	   However, we can relax the time bounds for RST segments to MSL.
	 */
1736 if (rst && !time_before32(ktime_get_seconds(),
1737 rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
1738 return false;
1739 return true;
1740}
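
/* Illustrative sketch (hypothetical helper, not kernel API): the PAWS
 * delta above is evaluated in signed 32-bit space so that timestamp
 * wraparound is handled; e.g. ts_recent = 0xffffff00 with
 * rcv_tsval = 0x00000010 yields a small negative delta and is accepted.
 */
static inline bool tcp_example_paws_delta_ok(u32 ts_recent, u32 rcv_tsval)
{
        return (s32)(ts_recent - rcv_tsval) <= 0;
}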
1741
1742bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
1743 int mib_idx, u32 *last_oow_ack_time);
1744
1745static inline void tcp_mib_init(struct net *net)
1746{
1747 /* See RFC 2012 */
1748 TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
1749 TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1750 TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1751 TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
1752}
1753
1754/* from STCP */
1755static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1756{
1757 tp->lost_skb_hint = NULL;
1758}
1759
1760static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1761{
1762 tcp_clear_retrans_hints_partial(tp);
1763 tp->retransmit_skb_hint = NULL;
1764}
1765
1766#define tcp_md5_addr tcp_ao_addr
1767
1768/* - key database */
1769struct tcp_md5sig_key {
1770 struct hlist_node node;
1771 u8 keylen;
1772 u8 family; /* AF_INET or AF_INET6 */
1773 u8 prefixlen;
1774 u8 flags;
1775 union tcp_md5_addr addr;
1776 int l3index; /* set if key added with L3 scope */
1777 u8 key[TCP_MD5SIG_MAXKEYLEN];
1778 struct rcu_head rcu;
1779};
1780
1781/* - sock block */
1782struct tcp_md5sig_info {
1783 struct hlist_head head;
1784 struct rcu_head rcu;
1785};
1786
1787/* - pseudo header */
1788struct tcp4_pseudohdr {
1789 __be32 saddr;
1790 __be32 daddr;
1791 __u8 pad;
1792 __u8 protocol;
1793 __be16 len;
1794};
1795
1796struct tcp6_pseudohdr {
1797 struct in6_addr saddr;
1798 struct in6_addr daddr;
1799 __be32 len;
1800 __be32 protocol; /* including padding */
1801};
1802
1803union tcp_md5sum_block {
1804 struct tcp4_pseudohdr ip4;
1805#if IS_ENABLED(CONFIG_IPV6)
1806 struct tcp6_pseudohdr ip6;
1807#endif
1808};
1809
/**
 * struct tcp_sigpool - per-CPU pool of ahash_requests
 * @scratch: per-CPU temporary area that can be used between
 *	     tcp_sigpool_start() and tcp_sigpool_end() to perform
 *	     a crypto request
 * @req: pre-allocated ahash request
 */
1817struct tcp_sigpool {
1818 void *scratch;
1819 struct ahash_request *req;
1820};
1821
1822int tcp_sigpool_alloc_ahash(const char *alg, size_t scratch_size);
1823void tcp_sigpool_get(unsigned int id);
1824void tcp_sigpool_release(unsigned int id);
1825int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp,
1826 const struct sk_buff *skb,
1827 unsigned int header_len);
1828
1829/**
1830 * tcp_sigpool_start - disable bh and start using tcp_sigpool_ahash
1831 * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
1832 * @c: returned tcp_sigpool for usage (uninitialized on failure)
1833 *
1834 * Returns 0 on success, error otherwise.
1835 */
1836int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c);
1837/**
1838 * tcp_sigpool_end - enable bh and stop using tcp_sigpool
1839 * @c: tcp_sigpool context that was returned by tcp_sigpool_start()
1840 */
1841void tcp_sigpool_end(struct tcp_sigpool *c);
1842size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len);
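
/* Typical call sequence (illustrative sketch; "id" is assumed to come
 * from an earlier tcp_sigpool_alloc_ahash() call, error handling elided):
 *
 *	struct tcp_sigpool hp;
 *
 *	if (!tcp_sigpool_start(id, &hp)) {
 *		tcp_sigpool_hash_skb_data(&hp, skb, header_len);
 *		tcp_sigpool_end(&hp);
 *	}
 */
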
1843/* - functions */
1844int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1845 const struct sock *sk, const struct sk_buff *skb);
1846int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1847 int family, u8 prefixlen, int l3index, u8 flags,
1848 const u8 *newkey, u8 newkeylen);
1849int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
1850 int family, u8 prefixlen, int l3index,
1851 struct tcp_md5sig_key *key);
1852
1853int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1854 int family, u8 prefixlen, int l3index, u8 flags);
1855void tcp_clear_md5_list(struct sock *sk);
1856struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1857 const struct sock *addr_sk);
1858
1859#ifdef CONFIG_TCP_MD5SIG
1860struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1861 const union tcp_md5_addr *addr,
1862 int family, bool any_l3index);
1863static inline struct tcp_md5sig_key *
1864tcp_md5_do_lookup(const struct sock *sk, int l3index,
1865 const union tcp_md5_addr *addr, int family)
1866{
1867 if (!static_branch_unlikely(&tcp_md5_needed.key))
1868 return NULL;
1869 return __tcp_md5_do_lookup(sk, l3index, addr, family, false);
1870}
1871
1872static inline struct tcp_md5sig_key *
1873tcp_md5_do_lookup_any_l3index(const struct sock *sk,
1874 const union tcp_md5_addr *addr, int family)
1875{
1876 if (!static_branch_unlikely(&tcp_md5_needed.key))
1877 return NULL;
1878 return __tcp_md5_do_lookup(sk, 0, addr, family, true);
1879}
1880
1881#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
1882#else
1883static inline struct tcp_md5sig_key *
1884tcp_md5_do_lookup(const struct sock *sk, int l3index,
1885 const union tcp_md5_addr *addr, int family)
1886{
1887 return NULL;
1888}
1889
1890static inline struct tcp_md5sig_key *
1891tcp_md5_do_lookup_any_l3index(const struct sock *sk,
1892 const union tcp_md5_addr *addr, int family)
1893{
1894 return NULL;
1895}
1896
1897#define tcp_twsk_md5_key(twsk) NULL
1898#endif
1899
1900int tcp_md5_alloc_sigpool(void);
1901void tcp_md5_release_sigpool(void);
1902void tcp_md5_add_sigpool(void);
1903extern int tcp_md5_sigpool_id;
1904
1905int tcp_md5_hash_key(struct tcp_sigpool *hp,
1906 const struct tcp_md5sig_key *key);
1907
1908/* From tcp_fastopen.c */
1909void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1910 struct tcp_fastopen_cookie *cookie);
1911void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1912 struct tcp_fastopen_cookie *cookie, bool syn_lost,
1913 u16 try_exp);
1914struct tcp_fastopen_request {
1915 /* Fast Open cookie. Size 0 means a cookie request */
1916 struct tcp_fastopen_cookie cookie;
1917 struct msghdr *data; /* data in MSG_FASTOPEN */
1918 size_t size;
1919 int copied; /* queued in tcp_connect() */
1920 struct ubuf_info *uarg;
1921};
1922void tcp_free_fastopen_req(struct tcp_sock *tp);
1923void tcp_fastopen_destroy_cipher(struct sock *sk);
1924void tcp_fastopen_ctx_destroy(struct net *net);
1925int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
1926 void *primary_key, void *backup_key);
1927int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
1928 u64 *key);
1929void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
1930struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1931 struct request_sock *req,
1932 struct tcp_fastopen_cookie *foc,
1933 const struct dst_entry *dst);
1934void tcp_fastopen_init_key_once(struct net *net);
1935bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
1936 struct tcp_fastopen_cookie *cookie);
1937bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
1938#define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
1939#define TCP_FASTOPEN_KEY_MAX 2
1940#define TCP_FASTOPEN_KEY_BUF_LENGTH \
1941 (TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)
1942
1943/* Fastopen key context */
1944struct tcp_fastopen_context {
1945 siphash_key_t key[TCP_FASTOPEN_KEY_MAX];
1946 int num;
1947 struct rcu_head rcu;
1948};
1949
1950void tcp_fastopen_active_disable(struct sock *sk);
1951bool tcp_fastopen_active_should_disable(struct sock *sk);
1952void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
1953void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
1954
1955/* Caller needs to wrap with rcu_read_(un)lock() */
1956static inline
1957struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
1958{
1959 struct tcp_fastopen_context *ctx;
1960
1961 ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
1962 if (!ctx)
1963 ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
1964 return ctx;
1965}
1966
1967static inline
1968bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
1969 const struct tcp_fastopen_cookie *orig)
1970{
1971 if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
1972 orig->len == foc->len &&
1973 !memcmp(orig->val, foc->val, foc->len))
1974 return true;
1975 return false;
1976}
1977
1978static inline
1979int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
1980{
1981 return ctx->num;
1982}
1983
1984/* Latencies incurred by various limits for a sender. They are
1985 * chronograph-like stats that are mutually exclusive.
1986 */
1987enum tcp_chrono {
1988 TCP_CHRONO_UNSPEC,
1989 TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
1990 TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
1991 TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
1992 __TCP_CHRONO_MAX,
1993};
1994
1995void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
1996void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
1997
/* This helper is needed because skb->tcp_tsorted_anchor uses
 * the same memory storage as skb->destructor/_skb_refdst.
 */
2001static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
2002{
2003 skb->destructor = NULL;
2004 skb->_skb_refdst = 0UL;
2005}
2006
2007#define tcp_skb_tsorted_save(skb) { \
2008 unsigned long _save = skb->_skb_refdst; \
2009 skb->_skb_refdst = 0UL;
2010
2011#define tcp_skb_tsorted_restore(skb) \
2012 skb->_skb_refdst = _save; \
2013}
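
/* The save/restore macros above open and close a C block, so they are
 * used bracket-style around code that needs _skb_refdst temporarily
 * cleared, e.g. (illustrative sketch):
 *
 *	tcp_skb_tsorted_save(skb) {
 *		nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
 *	} tcp_skb_tsorted_restore(skb);
 */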
2014
2015void tcp_write_queue_purge(struct sock *sk);
2016
2017static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
2018{
2019 return skb_rb_first(&sk->tcp_rtx_queue);
2020}
2021
2022static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
2023{
2024 return skb_rb_last(&sk->tcp_rtx_queue);
2025}
2026
2027static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
2028{
2029 return skb_peek_tail(&sk->sk_write_queue);
2030}
2031
2032#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
2033 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
2034
2035static inline struct sk_buff *tcp_send_head(const struct sock *sk)
2036{
2037 return skb_peek(&sk->sk_write_queue);
2038}
2039
2040static inline bool tcp_skb_is_last(const struct sock *sk,
2041 const struct sk_buff *skb)
2042{
2043 return skb_queue_is_last(&sk->sk_write_queue, skb);
2044}
2045
2046/**
2047 * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue
2048 * @sk: socket
2049 *
2050 * Since the write queue can have a temporary empty skb in it,
2051 * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
2052 */
2053static inline bool tcp_write_queue_empty(const struct sock *sk)
2054{
2055 const struct tcp_sock *tp = tcp_sk(sk);
2056
2057 return tp->write_seq == tp->snd_nxt;
2058}
2059
2060static inline bool tcp_rtx_queue_empty(const struct sock *sk)
2061{
2062 return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
2063}
2064
2065static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
2066{
2067 return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
2068}
2069
2070static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
2071{
2072 __skb_queue_tail(&sk->sk_write_queue, skb);
2073
2074 /* Queue it, remembering where we must start sending. */
2075 if (sk->sk_write_queue.next == skb)
2076 tcp_chrono_start(sk, TCP_CHRONO_BUSY);
2077}
2078
2079/* Insert new before skb on the write queue of sk. */
2080static inline void tcp_insert_write_queue_before(struct sk_buff *new,
2081 struct sk_buff *skb,
2082 struct sock *sk)
2083{
2084 __skb_queue_before(&sk->sk_write_queue, skb, new);
2085}
2086
2087static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
2088{
2089 tcp_skb_tsorted_anchor_cleanup(skb);
2090 __skb_unlink(skb, &sk->sk_write_queue);
2091}
2092
2093void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);
2094
2095static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
2096{
2097 tcp_skb_tsorted_anchor_cleanup(skb);
2098 rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
2099}
2100
2101static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
2102{
2103 list_del(&skb->tcp_tsorted_anchor);
2104 tcp_rtx_queue_unlink(skb, sk);
2105 tcp_wmem_free_skb(sk, skb);
2106}
2107
2108static inline void tcp_write_collapse_fence(struct sock *sk)
2109{
2110 struct sk_buff *skb = tcp_write_queue_tail(sk);
2111
2112 if (skb)
2113 TCP_SKB_CB(skb)->eor = 1;
2114}
2115
2116static inline void tcp_push_pending_frames(struct sock *sk)
2117{
2118 if (tcp_send_head(sk)) {
2119 struct tcp_sock *tp = tcp_sk(sk);
2120
2121 __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
2122 }
2123}
2124
/* Start sequence of the skb just after the highest skb with the SACKed
 * bit set, valid only if sacked_out > 0 or when the caller has otherwise
 * ensured its validity.
 */
2129static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
2130{
2131 if (!tp->sacked_out)
2132 return tp->snd_una;
2133
2134 if (tp->highest_sack == NULL)
2135 return tp->snd_nxt;
2136
2137 return TCP_SKB_CB(tp->highest_sack)->seq;
2138}
2139
2140static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
2141{
2142 tcp_sk(sk)->highest_sack = skb_rb_next(skb);
2143}
2144
2145static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
2146{
2147 return tcp_sk(sk)->highest_sack;
2148}
2149
2150static inline void tcp_highest_sack_reset(struct sock *sk)
2151{
2152 tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
2153}
2154
2155/* Called when old skb is about to be deleted and replaced by new skb */
2156static inline void tcp_highest_sack_replace(struct sock *sk,
2157 struct sk_buff *old,
2158 struct sk_buff *new)
2159{
2160 if (old == tcp_highest_sack(sk))
2161 tcp_sk(sk)->highest_sack = new;
2162}
2163
/* This helper checks if the socket has IP_TRANSPARENT set */
2165static inline bool inet_sk_transparent(const struct sock *sk)
2166{
2167 switch (sk->sk_state) {
2168 case TCP_TIME_WAIT:
2169 return inet_twsk(sk)->tw_transparent;
2170 case TCP_NEW_SYN_RECV:
2171 return inet_rsk(inet_reqsk(sk))->no_srccheck;
2172 }
2173 return inet_test_bit(TRANSPARENT, sk);
2174}
2175
2176/* Determines whether this is a thin stream (which may suffer from
2177 * increased latency). Used to trigger latency-reducing mechanisms.
2178 */
2179static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
2180{
2181 return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
2182}
2183
2184/* /proc */
2185enum tcp_seq_states {
2186 TCP_SEQ_STATE_LISTENING,
2187 TCP_SEQ_STATE_ESTABLISHED,
2188};
2189
2190void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
2191void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
2192void tcp_seq_stop(struct seq_file *seq, void *v);
2193
2194struct tcp_seq_afinfo {
2195 sa_family_t family;
2196};
2197
2198struct tcp_iter_state {
2199 struct seq_net_private p;
2200 enum tcp_seq_states state;
2201 struct sock *syn_wait_sk;
2202 int bucket, offset, sbucket, num;
2203 loff_t last_pos;
2204};
2205
2206extern struct request_sock_ops tcp_request_sock_ops;
2207extern struct request_sock_ops tcp6_request_sock_ops;
2208
2209void tcp_v4_destroy_sock(struct sock *sk);
2210
2211struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
2212 netdev_features_t features);
2213struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb);
2214struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th);
2215struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
2216 struct tcphdr *th);
2217INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
2218INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
2219INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
2220INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
2221#ifdef CONFIG_INET
2222void tcp_gro_complete(struct sk_buff *skb);
2223#else
2224static inline void tcp_gro_complete(struct sk_buff *skb) { }
2225#endif
2226
2227void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
2228
2229static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
2230{
2231 struct net *net = sock_net((struct sock *)tp);
2232 u32 val;
2233
2234 val = READ_ONCE(tp->notsent_lowat);
2235
2236 return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
2237}
2238
2239bool tcp_stream_memory_free(const struct sock *sk, int wake);
2240
2241#ifdef CONFIG_PROC_FS
2242int tcp4_proc_init(void);
2243void tcp4_proc_exit(void);
2244#endif
2245
2246int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
2247int tcp_conn_request(struct request_sock_ops *rsk_ops,
2248 const struct tcp_request_sock_ops *af_ops,
2249 struct sock *sk, struct sk_buff *skb);
2250
2251/* TCP af-specific functions */
2252struct tcp_sock_af_ops {
2253#ifdef CONFIG_TCP_MD5SIG
2254 struct tcp_md5sig_key *(*md5_lookup) (const struct sock *sk,
2255 const struct sock *addr_sk);
2256 int (*calc_md5_hash)(char *location,
2257 const struct tcp_md5sig_key *md5,
2258 const struct sock *sk,
2259 const struct sk_buff *skb);
2260 int (*md5_parse)(struct sock *sk,
2261 int optname,
2262 sockptr_t optval,
2263 int optlen);
2264#endif
2265#ifdef CONFIG_TCP_AO
2266 int (*ao_parse)(struct sock *sk, int optname, sockptr_t optval, int optlen);
2267 struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
2268 struct sock *addr_sk,
2269 int sndid, int rcvid);
2270 int (*ao_calc_key_sk)(struct tcp_ao_key *mkt, u8 *key,
2271 const struct sock *sk,
2272 __be32 sisn, __be32 disn, bool send);
2273 int (*calc_ao_hash)(char *location, struct tcp_ao_key *ao,
2274 const struct sock *sk, const struct sk_buff *skb,
2275 const u8 *tkey, int hash_offset, u32 sne);
2276#endif
2277};
2278
2279struct tcp_request_sock_ops {
2280 u16 mss_clamp;
2281#ifdef CONFIG_TCP_MD5SIG
2282 struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
2283 const struct sock *addr_sk);
2284 int (*calc_md5_hash) (char *location,
2285 const struct tcp_md5sig_key *md5,
2286 const struct sock *sk,
2287 const struct sk_buff *skb);
2288#endif
2289#ifdef CONFIG_TCP_AO
2290 struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
2291 struct request_sock *req,
2292 int sndid, int rcvid);
2293 int (*ao_calc_key)(struct tcp_ao_key *mkt, u8 *key, struct request_sock *sk);
2294 int (*ao_synack_hash)(char *ao_hash, struct tcp_ao_key *mkt,
2295 struct request_sock *req, const struct sk_buff *skb,
2296 int hash_offset, u32 sne);
2297#endif
2298#ifdef CONFIG_SYN_COOKIES
2299 __u32 (*cookie_init_seq)(const struct sk_buff *skb,
2300 __u16 *mss);
2301#endif
2302 struct dst_entry *(*route_req)(const struct sock *sk,
2303 struct sk_buff *skb,
2304 struct flowi *fl,
2305 struct request_sock *req,
2306 u32 tw_isn);
2307 u32 (*init_seq)(const struct sk_buff *skb);
2308 u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
2309 int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
2310 struct flowi *fl, struct request_sock *req,
2311 struct tcp_fastopen_cookie *foc,
2312 enum tcp_synack_type synack_type,
2313 struct sk_buff *syn_skb);
2314};
2315
2316extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
2317#if IS_ENABLED(CONFIG_IPV6)
2318extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
2319#endif
2320
2321#ifdef CONFIG_SYN_COOKIES
2322static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2323 const struct sock *sk, struct sk_buff *skb,
2324 __u16 *mss)
2325{
2326 tcp_synq_overflow(sk);
2327 __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
2328 return ops->cookie_init_seq(skb, mss);
2329}
2330#else
2331static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2332 const struct sock *sk, struct sk_buff *skb,
2333 __u16 *mss)
2334{
2335 return 0;
2336}
2337#endif
2338
2339struct tcp_key {
2340 union {
2341 struct {
2342 struct tcp_ao_key *ao_key;
2343 char *traffic_key;
2344 u32 sne;
2345 u8 rcv_next;
2346 };
2347 struct tcp_md5sig_key *md5_key;
2348 };
2349 enum {
2350 TCP_KEY_NONE = 0,
2351 TCP_KEY_MD5,
2352 TCP_KEY_AO,
2353 } type;
2354};
2355
2356static inline void tcp_get_current_key(const struct sock *sk,
2357 struct tcp_key *out)
2358{
2359#if defined(CONFIG_TCP_AO) || defined(CONFIG_TCP_MD5SIG)
2360 const struct tcp_sock *tp = tcp_sk(sk);
2361#endif
2362
2363#ifdef CONFIG_TCP_AO
2364 if (static_branch_unlikely(&tcp_ao_needed.key)) {
2365 struct tcp_ao_info *ao;
2366
2367 ao = rcu_dereference_protected(tp->ao_info,
2368 lockdep_sock_is_held(sk));
2369 if (ao) {
2370 out->ao_key = READ_ONCE(ao->current_key);
2371 out->type = TCP_KEY_AO;
2372 return;
2373 }
2374 }
2375#endif
2376#ifdef CONFIG_TCP_MD5SIG
2377 if (static_branch_unlikely(&tcp_md5_needed.key) &&
2378 rcu_access_pointer(tp->md5sig_info)) {
2379 out->md5_key = tp->af_specific->md5_lookup(sk, sk);
2380 if (out->md5_key) {
2381 out->type = TCP_KEY_MD5;
2382 return;
2383 }
2384 }
2385#endif
2386 out->type = TCP_KEY_NONE;
2387}
2388
2389static inline bool tcp_key_is_md5(const struct tcp_key *key)
2390{
2391 if (static_branch_tcp_md5())
2392 return key->type == TCP_KEY_MD5;
2393 return false;
2394}
2395
2396static inline bool tcp_key_is_ao(const struct tcp_key *key)
2397{
2398 if (static_branch_tcp_ao())
2399 return key->type == TCP_KEY_AO;
2400 return false;
2401}
2402
2403int tcpv4_offload_init(void);
2404
2405void tcp_v4_init(void);
2406void tcp_init(void);
2407
2408/* tcp_recovery.c */
2409void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
2410void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
2411extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
2412 u32 reo_wnd);
2413extern bool tcp_rack_mark_lost(struct sock *sk);
2414extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
2415 u64 xmit_time);
2416extern void tcp_rack_reo_timeout(struct sock *sk);
2417extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);
2418
2419/* tcp_plb.c */
2420
/*
 * Scaling factor for fractions in PLB. For example, tcp_plb_update_state
 * expects a cong_ratio, which represents the fraction of traffic that
 * experienced congestion over a single RTT. To avoid floating point
 * operations, this fraction is scaled by (1 << TCP_PLB_SCALE) before
 * being passed in.
 */
2427#define TCP_PLB_SCALE 8
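
/* Illustrative sketch (hypothetical helper, not used by tcp_plb.c):
 * a round in which 1 of every 4 delivered packets saw congestion maps
 * to cong_ratio = (1 << TCP_PLB_SCALE) / 4 = 64.
 */
static inline u32 tcp_example_plb_cong_ratio(u32 marked, u32 delivered)
{
        return delivered ? (marked << TCP_PLB_SCALE) / delivered : 0;
}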
2428
2429/* State for PLB (Protective Load Balancing) for a single TCP connection. */
2430struct tcp_plb_state {
2431 u8 consec_cong_rounds:5, /* consecutive congested rounds */
2432 unused:3;
2433 u32 pause_until; /* jiffies32 when PLB can resume rerouting */
2434};
2435
2436static inline void tcp_plb_init(const struct sock *sk,
2437 struct tcp_plb_state *plb)
2438{
2439 plb->consec_cong_rounds = 0;
2440 plb->pause_until = 0;
2441}
2442void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
2443 const int cong_ratio);
2444void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb);
2445void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb);
2446
2447static inline void tcp_warn_once(const struct sock *sk, bool cond, const char *str)
2448{
2449 WARN_ONCE(cond,
		  "%scwnd:%u out:%u sacked:%u lost:%u retrans:%u tlp_high_seq:%u sk_state:%u ca_state:%u advmss:%u mss_cache:%u pmtu:%u\n",
2451 str,
2452 tcp_snd_cwnd(tcp_sk(sk)),
2453 tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out,
2454 tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out,
2455 tcp_sk(sk)->tlp_high_seq, sk->sk_state,
2456 inet_csk(sk)->icsk_ca_state,
2457 tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache,
2458 inet_csk(sk)->icsk_pmtu_cookie);
2459}
2460
2461/* At how many usecs into the future should the RTO fire? */
2462static inline s64 tcp_rto_delta_us(const struct sock *sk)
2463{
2464 const struct sk_buff *skb = tcp_rtx_queue_head(sk);
2465 u32 rto = inet_csk(sk)->icsk_rto;
2466
2467 if (likely(skb)) {
2468 u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
2469
2470 return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
2471 } else {
2472 tcp_warn_once(sk, 1, "rtx queue empty: ");
2473 return jiffies_to_usecs(rto);
2474 }
2476}
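
/* Example (illustrative): if the rtx queue head was last (re)transmitted
 * at t = 1,000,000 us, icsk_rto is 200 ms and tcp_mstamp is 1,150,000 us,
 * the timer should fire 1,000,000 + 200,000 - 1,150,000 = 50,000 us from
 * now; a negative result means the RTO is already overdue.
 */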
2477
/*
 * Save and compile IPv4 options, return a pointer to the result
 */
2481static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
2482 struct sk_buff *skb)
2483{
2484 const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
2485 struct ip_options_rcu *dopt = NULL;
2486
2487 if (opt->optlen) {
2488 int opt_size = sizeof(*dopt) + opt->optlen;
2489
2490 dopt = kmalloc(opt_size, GFP_ATOMIC);
2491 if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
2492 kfree(dopt);
2493 dopt = NULL;
2494 }
2495 }
2496 return dopt;
2497}
2498
/* Locally generated TCP pure ACKs have skb->truesize == 2
 * (check tcp_send_ack() in net/ipv4/tcp_output.c).
 * This is much faster than dissecting the packet to find out.
 * (Think of GRE encapsulations, IPv4, IPv6, ...)
 */
2504static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
2505{
2506 return skb->truesize == 2;
2507}
2508
2509static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
2510{
2511 skb->truesize = 2;
2512}
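
/* A received skb always accounts for at least struct sk_buff in its
 * truesize, so the magic value 2 cannot collide with a real packet,
 * e.g. (illustrative only):
 *
 *	BUILD_BUG_ON(sizeof(struct sk_buff) <= 2);
 */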
2513
2514static inline int tcp_inq(struct sock *sk)
2515{
2516 struct tcp_sock *tp = tcp_sk(sk);
2517 int answ;
2518
2519 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
2520 answ = 0;
2521 } else if (sock_flag(sk, SOCK_URGINLINE) ||
2522 !tp->urg_data ||
2523 before(tp->urg_seq, tp->copied_seq) ||
2524 !before(tp->urg_seq, tp->rcv_nxt)) {
2525
2526 answ = tp->rcv_nxt - tp->copied_seq;
2527
2528 /* Subtract 1, if FIN was received */
2529 if (answ && sock_flag(sk, SOCK_DONE))
2530 answ--;
2531 } else {
2532 answ = tp->urg_seq - tp->copied_seq;
2533 }
2534
2535 return answ;
2536}
2537
2538int tcp_peek_len(struct socket *sock);
2539
2540static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
2541{
2542 u16 segs_in;
2543
2544 segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
2545
2546 /* We update these fields while other threads might
2547 * read them from tcp_get_info()
2548 */
2549 WRITE_ONCE(tp->segs_in, tp->segs_in + segs_in);
2550 if (skb->len > tcp_hdrlen(skb))
2551 WRITE_ONCE(tp->data_segs_in, tp->data_segs_in + segs_in);
2552}
2553
/*
 * The TCP listen path runs lockless.
 * We forced "struct sock" to be const qualified to make sure
 * we don't modify one of its fields by mistake.
 * Here, we increment sk_drops, which is an atomic_t, so we can safely
 * cast the const away and make the sock writable again.
 */
2561static inline void tcp_listendrop(const struct sock *sk)
2562{
2563 atomic_inc(&((struct sock *)sk)->sk_drops);
2564 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
2565}
2566
2567enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
2568
2569/*
2570 * Interface for adding Upper Level Protocols over TCP
2571 */
2572
2573#define TCP_ULP_NAME_MAX 16
2574#define TCP_ULP_MAX 128
2575#define TCP_ULP_BUF_MAX (TCP_ULP_NAME_MAX*TCP_ULP_MAX)
2576
2577struct tcp_ulp_ops {
2578 struct list_head list;
2579
2580 /* initialize ulp */
2581 int (*init)(struct sock *sk);
2582 /* update ulp */
2583 void (*update)(struct sock *sk, struct proto *p,
2584 void (*write_space)(struct sock *sk));
2585 /* cleanup ulp */
2586 void (*release)(struct sock *sk);
2587 /* diagnostic */
2588 int (*get_info)(struct sock *sk, struct sk_buff *skb);
2589 size_t (*get_info_size)(const struct sock *sk);
2590 /* clone ulp */
2591 void (*clone)(const struct request_sock *req, struct sock *newsk,
2592 const gfp_t priority);
2593
2594 char name[TCP_ULP_NAME_MAX];
2595 struct module *owner;
2596};
2597int tcp_register_ulp(struct tcp_ulp_ops *type);
2598void tcp_unregister_ulp(struct tcp_ulp_ops *type);
2599int tcp_set_ulp(struct sock *sk, const char *name);
2600void tcp_get_available_ulp(char *buf, size_t len);
2601void tcp_cleanup_ulp(struct sock *sk);
2602void tcp_update_ulp(struct sock *sk, struct proto *p,
2603 void (*write_space)(struct sock *sk));
2604
2605#define MODULE_ALIAS_TCP_ULP(name) \
2606 __MODULE_INFO(alias, alias_userspace, name); \
2607 __MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
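
/* Minimal registration sketch for a hypothetical "demo" ULP (a real
 * user is e.g. net/tls); userspace would then select it with
 * setsockopt(fd, SOL_TCP, TCP_ULP, "demo", sizeof("demo")):
 *
 *	static struct tcp_ulp_ops demo_ulp_ops = {
 *		.name	= "demo",
 *		.owner	= THIS_MODULE,
 *		.init	= demo_ulp_init,
 *	};
 *
 *	tcp_register_ulp(&demo_ulp_ops);
 */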
2608
2609#ifdef CONFIG_NET_SOCK_MSG
2610struct sk_msg;
2611struct sk_psock;
2612
2613#ifdef CONFIG_BPF_SYSCALL
2614int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
2615void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
2616#endif /* CONFIG_BPF_SYSCALL */
2617
2618#ifdef CONFIG_INET
2619void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
2620#else
2621static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
2622{
2623}
2624#endif
2625
2626int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
2627 struct sk_msg *msg, u32 bytes, int flags);
2628#endif /* CONFIG_NET_SOCK_MSG */
2629
2630#if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
2631static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
2632{
2633}
2634#endif
2635
2636#ifdef CONFIG_CGROUP_BPF
2637static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2638 struct sk_buff *skb,
2639 unsigned int end_offset)
2640{
2641 skops->skb = skb;
2642 skops->skb_data_end = skb->data + end_offset;
2643}
2644#else
2645static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2646 struct sk_buff *skb,
2647 unsigned int end_offset)
2648{
2649}
2650#endif
2651
/* Call a BPF_SOCK_OPS program that returns an int. If the return value
 * is < 0, then the BPF op failed (for example if the loaded BPF
 * program does not support the chosen operation or there is no BPF
 * program loaded).
 */
2657#ifdef CONFIG_BPF
2658static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2659{
2660 struct bpf_sock_ops_kern sock_ops;
2661 int ret;
2662
2663 memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
2664 if (sk_fullsock(sk)) {
2665 sock_ops.is_fullsock = 1;
2666 sock_owned_by_me(sk);
2667 }
2668
2669 sock_ops.sk = sk;
2670 sock_ops.op = op;
2671 if (nargs > 0)
2672 memcpy(sock_ops.args, args, nargs * sizeof(*args));
2673
2674 ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
2675 if (ret == 0)
2676 ret = sock_ops.reply;
2677 else
2678 ret = -1;
2679 return ret;
2680}
2681
2682static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2683{
2684 u32 args[2] = {arg1, arg2};
2685
2686 return tcp_call_bpf(sk, op, 2, args);
2687}
2688
2689static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2690 u32 arg3)
2691{
2692 u32 args[3] = {arg1, arg2, arg3};
2693
2694 return tcp_call_bpf(sk, op, 3, args);
2695}
2696
2697#else
2698static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2699{
2700 return -EPERM;
2701}
2702
2703static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2704{
2705 return -EPERM;
2706}
2707
2708static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2709 u32 arg3)
2710{
2711 return -EPERM;
2712}
2713
2714#endif
2715
2716static inline u32 tcp_timeout_init(struct sock *sk)
2717{
2718 int timeout;
2719
2720 timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
2721
2722 if (timeout <= 0)
2723 timeout = TCP_TIMEOUT_INIT;
2724 return min_t(int, timeout, TCP_RTO_MAX);
2725}
2726
2727static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
2728{
2729 int rwnd;
2730
2731 rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
2732
2733 if (rwnd < 0)
2734 rwnd = 0;
2735 return rwnd;
2736}
2737
2738static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
2739{
2740 return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
2741}
2742
2743static inline void tcp_bpf_rtt(struct sock *sk, long mrtt, u32 srtt)
2744{
2745 if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
2746 tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_RTT_CB, mrtt, srtt);
2747}
2748
2749#if IS_ENABLED(CONFIG_SMC)
2750extern struct static_key_false tcp_have_smc;
2751#endif
2752
2753#if IS_ENABLED(CONFIG_TLS_DEVICE)
2754void clean_acked_data_enable(struct inet_connection_sock *icsk,
2755 void (*cad)(struct sock *sk, u32 ack_seq));
2756void clean_acked_data_disable(struct inet_connection_sock *icsk);
2757void clean_acked_data_flush(void);
2758#endif
2759
2760DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
2761static inline void tcp_add_tx_delay(struct sk_buff *skb,
2762 const struct tcp_sock *tp)
2763{
2764 if (static_branch_unlikely(&tcp_tx_delay_enabled))
2765 skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
2766}
2767
/* Compute Earliest Departure Time for some control packets
 * like ACK or RST for TIME_WAIT or non-ESTABLISHED sockets.
 */
2771static inline u64 tcp_transmit_time(const struct sock *sk)
2772{
2773 if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
2774 u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
2775 tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;
2776
2777 return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
2778 }
2779 return 0;
2780}
2781
2782static inline int tcp_parse_auth_options(const struct tcphdr *th,
2783 const u8 **md5_hash, const struct tcp_ao_hdr **aoh)
2784{
2785 const u8 *md5_tmp, *ao_tmp;
2786 int ret;
2787
2788 ret = tcp_do_parse_auth_options(th, &md5_tmp, &ao_tmp);
2789 if (ret)
2790 return ret;
2791
2792 if (md5_hash)
2793 *md5_hash = md5_tmp;
2794
2795 if (aoh) {
2796 if (!ao_tmp)
2797 *aoh = NULL;
2798 else
2799 *aoh = (struct tcp_ao_hdr *)(ao_tmp - 2);
2800 }
2801
2802 return 0;
2803}
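
/* tcp_do_parse_auth_options() hands back a pointer just past the
 * kind/length bytes, so "ao_tmp - 2" above rebases *aoh onto the start
 * of the option. TCP-AO (kind 29) wire layout, for reference:
 *
 *	| kind=29 | len | keyid | rnextkeyid | MAC ... |
 *	  ^ aoh           ^ ao_tmp
 */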
2804
2805static inline bool tcp_ao_required(struct sock *sk, const void *saddr,
2806 int family, int l3index, bool stat_inc)
2807{
2808#ifdef CONFIG_TCP_AO
2809 struct tcp_ao_info *ao_info;
2810 struct tcp_ao_key *ao_key;
2811
2812 if (!static_branch_unlikely(&tcp_ao_needed.key))
2813 return false;
2814
2815 ao_info = rcu_dereference_check(tcp_sk(sk)->ao_info,
2816 lockdep_sock_is_held(sk));
2817 if (!ao_info)
2818 return false;
2819
2820 ao_key = tcp_ao_do_lookup(sk, l3index, saddr, family, -1, -1);
2821 if (ao_info->ao_required || ao_key) {
2822 if (stat_inc) {
2823 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOREQUIRED);
2824 atomic64_inc(&ao_info->counters.ao_required);
2825 }
2826 return true;
2827 }
2828#endif
2829 return false;
2830}
2831
2832enum skb_drop_reason tcp_inbound_hash(struct sock *sk,
2833 const struct request_sock *req, const struct sk_buff *skb,
2834 const void *saddr, const void *daddr,
2835 int family, int dif, int sdif);
2836
2837#endif /* _TCP_H */
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * Definitions for the TCP module.
8 *
9 * Version: @(#)tcp.h 1.0.5 05/23/93
10 *
11 * Authors: Ross Biro
12 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
13 */
14#ifndef _TCP_H
15#define _TCP_H
16
17#define FASTRETRANS_DEBUG 1
18
19#include <linux/list.h>
20#include <linux/tcp.h>
21#include <linux/bug.h>
22#include <linux/slab.h>
23#include <linux/cache.h>
24#include <linux/percpu.h>
25#include <linux/skbuff.h>
26#include <linux/kref.h>
27#include <linux/ktime.h>
28#include <linux/indirect_call_wrapper.h>
29
30#include <net/inet_connection_sock.h>
31#include <net/inet_timewait_sock.h>
32#include <net/inet_hashtables.h>
33#include <net/checksum.h>
34#include <net/request_sock.h>
35#include <net/sock_reuseport.h>
36#include <net/sock.h>
37#include <net/snmp.h>
38#include <net/ip.h>
39#include <net/tcp_states.h>
40#include <net/inet_ecn.h>
41#include <net/dst.h>
42#include <net/mptcp.h>
43
44#include <linux/seq_file.h>
45#include <linux/memcontrol.h>
46#include <linux/bpf-cgroup.h>
47#include <linux/siphash.h>
48
49extern struct inet_hashinfo tcp_hashinfo;
50
51DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
52int tcp_orphan_count_sum(void);
53
54void tcp_time_wait(struct sock *sk, int state, int timeo);
55
56#define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER)
57#define MAX_TCP_OPTION_SPACE 40
58#define TCP_MIN_SND_MSS 48
59#define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
60
61/*
62 * Never offer a window over 32767 without using window scaling. Some
63 * poor stacks do signed 16bit maths!
64 */
65#define MAX_TCP_WINDOW 32767U
66
67/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
68#define TCP_MIN_MSS 88U
69
70/* The initial MTU to use for probing */
71#define TCP_BASE_MSS 1024
72
73/* probing interval, default to 10 minutes as per RFC4821 */
74#define TCP_PROBE_INTERVAL 600
75
76/* Specify interval when tcp mtu probing will stop */
77#define TCP_PROBE_THRESHOLD 8
78
79/* After receiving this amount of duplicate ACKs fast retransmit starts. */
80#define TCP_FASTRETRANS_THRESH 3
81
82/* Maximal number of ACKs sent quickly to accelerate slow-start. */
83#define TCP_MAX_QUICKACKS 16U
84
85/* Maximal number of window scale according to RFC1323 */
86#define TCP_MAX_WSCALE 14U
87
88/* urg_data states */
89#define TCP_URG_VALID 0x0100
90#define TCP_URG_NOTYET 0x0200
91#define TCP_URG_READ 0x0400
92
93#define TCP_RETR1 3 /*
94 * This is how many retries it does before it
95 * tries to figure out if the gateway is
96 * down. Minimal RFC value is 3; it corresponds
97 * to ~3sec-8min depending on RTO.
98 */
99
100#define TCP_RETR2 15 /*
101 * This should take at least
102 * 90 minutes to time out.
103 * RFC1122 says that the limit is 100 sec.
104 * 15 is ~13-30min depending on RTO.
105 */
106
107#define TCP_SYN_RETRIES 6 /* This is how many retries are done
108 * when active opening a connection.
109 * RFC1122 says the minimum retry MUST
110 * be at least 180secs. Nevertheless
111 * this value is corresponding to
112 * 63secs of retransmission with the
113 * current initial RTO.
114 */
115
116#define TCP_SYNACK_RETRIES 5 /* This is how may retries are done
117 * when passive opening a connection.
118 * This is corresponding to 31secs of
119 * retransmission with the current
120 * initial RTO.
121 */
122
123#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
124 * state, about 60 seconds */
125#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
126 /* BSD style FIN_WAIT2 deadlock breaker.
127 * It used to be 3min, new value is 60sec,
128 * to combine FIN-WAIT-2 timeout with
129 * TIME-WAIT timer.
130 */
131#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */
132
133#define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */
134#if HZ >= 100
135#define TCP_DELACK_MIN ((unsigned)(HZ/25)) /* minimal time to delay before sending an ACK */
136#define TCP_ATO_MIN ((unsigned)(HZ/25))
137#else
138#define TCP_DELACK_MIN 4U
139#define TCP_ATO_MIN 4U
140#endif
141#define TCP_RTO_MAX ((unsigned)(120*HZ))
142#define TCP_RTO_MIN ((unsigned)(HZ/5))
143#define TCP_TIMEOUT_MIN (2U) /* Min timeout for TCP timers in jiffies */
144#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
145#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now
146 * used as a fallback RTO for the
147 * initial data transmission if no
148 * valid RTT sample has been acquired,
149 * most likely due to retrans in 3WHS.
150 */
151
152#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
153 * for local resources.
154 */
155#define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */
156#define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */
157#define TCP_KEEPALIVE_INTVL (75*HZ)
158
159#define MAX_TCP_KEEPIDLE 32767
160#define MAX_TCP_KEEPINTVL 32767
161#define MAX_TCP_KEEPCNT 127
162#define MAX_TCP_SYNCNT 127
163
164#define TCP_SYNQ_INTERVAL (HZ/5) /* Period of SYNACK timer */
165
166#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
167#define TCP_PAWS_MSL 60 /* Per-host timestamps are invalidated
168 * after this time. It should be equal
169 * (or greater than) TCP_TIMEWAIT_LEN
170 * to provide reliability equal to one
171 * provided by timewait state.
172 */
173#define TCP_PAWS_WINDOW 1 /* Replay window for per-host
174 * timestamps. It must be less than
175 * minimal timewait lifetime.
176 */
177/*
178 * TCP option
179 */
180
181#define TCPOPT_NOP 1 /* Padding */
182#define TCPOPT_EOL 0 /* End of options */
183#define TCPOPT_MSS 2 /* Segment size negotiating */
184#define TCPOPT_WINDOW 3 /* Window scaling */
185#define TCPOPT_SACK_PERM 4 /* SACK Permitted */
186#define TCPOPT_SACK 5 /* SACK Block */
187#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
188#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
189#define TCPOPT_MPTCP 30 /* Multipath TCP (RFC6824) */
190#define TCPOPT_FASTOPEN 34 /* Fast open (RFC7413) */
191#define TCPOPT_EXP 254 /* Experimental */
192/* Magic number to be after the option value for sharing TCP
193 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
194 */
195#define TCPOPT_FASTOPEN_MAGIC 0xF989
196#define TCPOPT_SMC_MAGIC 0xE2D4C3D9
197
198/*
199 * TCP option lengths
200 */
201
202#define TCPOLEN_MSS 4
203#define TCPOLEN_WINDOW 3
204#define TCPOLEN_SACK_PERM 2
205#define TCPOLEN_TIMESTAMP 10
206#define TCPOLEN_MD5SIG 18
207#define TCPOLEN_FASTOPEN_BASE 2
208#define TCPOLEN_EXP_FASTOPEN_BASE 4
209#define TCPOLEN_EXP_SMC_BASE 6
210
211/* But this is what stacks really send out. */
212#define TCPOLEN_TSTAMP_ALIGNED 12
213#define TCPOLEN_WSCALE_ALIGNED 4
214#define TCPOLEN_SACKPERM_ALIGNED 4
215#define TCPOLEN_SACK_BASE 2
216#define TCPOLEN_SACK_BASE_ALIGNED 4
217#define TCPOLEN_SACK_PERBLOCK 8
218#define TCPOLEN_MD5SIG_ALIGNED 20
219#define TCPOLEN_MSS_ALIGNED 4
220#define TCPOLEN_EXP_SMC_BASE_ALIGNED 8
221
222/* Flags in tp->nonagle */
223#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
224#define TCP_NAGLE_CORK 2 /* Socket is corked */
225#define TCP_NAGLE_PUSH 4 /* Cork is overridden for already queued data */
226
227/* TCP thin-stream limits */
228#define TCP_THIN_LINEAR_RETRIES 6 /* After 6 linear retries, do exp. backoff */
229
230/* TCP initial congestion window as per rfc6928 */
231#define TCP_INIT_CWND 10
232
233/* Bit Flags for sysctl_tcp_fastopen */
234#define TFO_CLIENT_ENABLE 1
235#define TFO_SERVER_ENABLE 2
236#define TFO_CLIENT_NO_COOKIE 4 /* Data in SYN w/o cookie option */
237
238/* Accept SYN data w/o any cookie option */
239#define TFO_SERVER_COOKIE_NOT_REQD 0x200
240
241/* Force enable TFO on all listeners, i.e., not requiring the
242 * TCP_FASTOPEN socket option.
243 */
244#define TFO_SERVER_WO_SOCKOPT1 0x400
245
246
247/* sysctl variables for tcp */
248extern int sysctl_tcp_max_orphans;
249extern long sysctl_tcp_mem[3];
250
251#define TCP_RACK_LOSS_DETECTION 0x1 /* Use RACK to detect losses */
252#define TCP_RACK_STATIC_REO_WND 0x2 /* Use static RACK reo wnd */
253#define TCP_RACK_NO_DUPTHRESH 0x4 /* Do not use DUPACK threshold in RACK */
254
255extern atomic_long_t tcp_memory_allocated;
256DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
257
258extern struct percpu_counter tcp_sockets_allocated;
259extern unsigned long tcp_memory_pressure;
260
261/* optimized version of sk_under_memory_pressure() for TCP sockets */
262static inline bool tcp_under_memory_pressure(const struct sock *sk)
263{
264 if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
265 mem_cgroup_under_socket_pressure(sk->sk_memcg))
266 return true;
267
268 return READ_ONCE(tcp_memory_pressure);
269}
270/*
271 * The next routines deal with comparing 32 bit unsigned ints
272 * and worry about wraparound (automatic with unsigned arithmetic).
273 */
274
275static inline bool before(__u32 seq1, __u32 seq2)
276{
277 return (__s32)(seq1-seq2) < 0;
278}
279#define after(seq2, seq1) before(seq1, seq2)
280
281/* is s2<=s1<=s3 ? */
282static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
283{
284 return seq3 - seq2 >= seq1 - seq2;
285}
286
287static inline bool tcp_out_of_memory(struct sock *sk)
288{
289 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
290 sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
291 return true;
292 return false;
293}
294
295static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
296{
297 sk_wmem_queued_add(sk, -skb->truesize);
298 if (!skb_zcopy_pure(skb))
299 sk_mem_uncharge(sk, skb->truesize);
300 else
301 sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
302 __kfree_skb(skb);
303}
304
305void sk_forced_mem_schedule(struct sock *sk, int size);
306
307bool tcp_check_oom(struct sock *sk, int shift);
308
309
310extern struct proto tcp_prot;
311
312#define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field)
313#define __TCP_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.tcp_statistics, field)
314#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
315#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
316
317void tcp_tasklet_init(void);
318
319int tcp_v4_err(struct sk_buff *skb, u32);
320
321void tcp_shutdown(struct sock *sk, int how);
322
323int tcp_v4_early_demux(struct sk_buff *skb);
324int tcp_v4_rcv(struct sk_buff *skb);
325
326void tcp_remove_empty_skb(struct sock *sk);
327int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
328int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
329int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
330int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
331 size_t size, struct ubuf_info *uarg);
332int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
333 int flags);
334int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
335 size_t size, int flags);
336ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
337 size_t size, int flags);
338int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
339void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
340 int size_goal);
341void tcp_release_cb(struct sock *sk);
342void tcp_wfree(struct sk_buff *skb);
343void tcp_write_timer_handler(struct sock *sk);
344void tcp_delack_timer_handler(struct sock *sk);
345int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
346int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
347void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
348void tcp_rcv_space_adjust(struct sock *sk);
349int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
350void tcp_twsk_destructor(struct sock *sk);
351void tcp_twsk_purge(struct list_head *net_exit_list, int family);
352ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
353 struct pipe_inode_info *pipe, size_t len,
354 unsigned int flags);
355struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
356 bool force_schedule);
357
358void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
359static inline void tcp_dec_quickack_mode(struct sock *sk,
360 const unsigned int pkts)
361{
362 struct inet_connection_sock *icsk = inet_csk(sk);
363
364 if (icsk->icsk_ack.quick) {
365 if (pkts >= icsk->icsk_ack.quick) {
366 icsk->icsk_ack.quick = 0;
367 /* Leaving quickack mode we deflate ATO. */
368 icsk->icsk_ack.ato = TCP_ATO_MIN;
369 } else
370 icsk->icsk_ack.quick -= pkts;
371 }
372}
373
374#define TCP_ECN_OK 1
375#define TCP_ECN_QUEUE_CWR 2
376#define TCP_ECN_DEMAND_CWR 4
377#define TCP_ECN_SEEN 8
378
379enum tcp_tw_status {
380 TCP_TW_SUCCESS = 0,
381 TCP_TW_RST = 1,
382 TCP_TW_ACK = 2,
383 TCP_TW_SYN = 3
384};
385
386
387enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
388 struct sk_buff *skb,
389 const struct tcphdr *th);
390struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
391 struct request_sock *req, bool fastopen,
392 bool *lost_race);
393int tcp_child_process(struct sock *parent, struct sock *child,
394 struct sk_buff *skb);
395void tcp_enter_loss(struct sock *sk);
396void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
397void tcp_clear_retrans(struct tcp_sock *tp);
398void tcp_update_metrics(struct sock *sk);
399void tcp_init_metrics(struct sock *sk);
400void tcp_metrics_init(void);
401bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
402void __tcp_close(struct sock *sk, long timeout);
403void tcp_close(struct sock *sk, long timeout);
404void tcp_init_sock(struct sock *sk);
405void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
406__poll_t tcp_poll(struct file *file, struct socket *sock,
407 struct poll_table_struct *wait);
408int do_tcp_getsockopt(struct sock *sk, int level,
409 int optname, sockptr_t optval, sockptr_t optlen);
410int tcp_getsockopt(struct sock *sk, int level, int optname,
411 char __user *optval, int __user *optlen);
412bool tcp_bpf_bypass_getsockopt(int level, int optname);
413int do_tcp_setsockopt(struct sock *sk, int level, int optname,
414 sockptr_t optval, unsigned int optlen);
415int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
416 unsigned int optlen);
417void tcp_set_keepalive(struct sock *sk, int val);
418void tcp_syn_ack_timeout(const struct request_sock *req);
419int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
420 int flags, int *addr_len);
421int tcp_set_rcvlowat(struct sock *sk, int val);
422int tcp_set_window_clamp(struct sock *sk, int val);
423void tcp_update_recv_tstamps(struct sk_buff *skb,
424 struct scm_timestamping_internal *tss);
425void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
426 struct scm_timestamping_internal *tss);
427void tcp_data_ready(struct sock *sk);
428#ifdef CONFIG_MMU
429int tcp_mmap(struct file *file, struct socket *sock,
430 struct vm_area_struct *vma);
431#endif
432void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
433 struct tcp_options_received *opt_rx,
434 int estab, struct tcp_fastopen_cookie *foc);
435const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
436
437/*
438 * BPF SKB-less helpers
439 */
440u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
441 struct tcphdr *th, u32 *cookie);
442u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
443 struct tcphdr *th, u32 *cookie);
444u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss);
445u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
446 const struct tcp_request_sock_ops *af_ops,
447 struct sock *sk, struct tcphdr *th);
448/*
449 * TCP v4 functions exported for the inet6 API
450 */
451
452void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
453void tcp_v4_mtu_reduced(struct sock *sk);
454void tcp_req_err(struct sock *sk, u32 seq, bool abort);
455void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
456int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
457struct sock *tcp_create_openreq_child(const struct sock *sk,
458 struct request_sock *req,
459 struct sk_buff *skb);
460void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
461struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
462 struct request_sock *req,
463 struct dst_entry *dst,
464 struct request_sock *req_unhash,
465 bool *own_req);
466int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
467int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
468int tcp_connect(struct sock *sk);
469enum tcp_synack_type {
470 TCP_SYNACK_NORMAL,
471 TCP_SYNACK_FASTOPEN,
472 TCP_SYNACK_COOKIE,
473};
474struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
475 struct request_sock *req,
476 struct tcp_fastopen_cookie *foc,
477 enum tcp_synack_type synack_type,
478 struct sk_buff *syn_skb);
479int tcp_disconnect(struct sock *sk, int flags);
480
481void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
482int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
483void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
484
485/* From syncookies.c */
486struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
487 struct request_sock *req,
488 struct dst_entry *dst, u32 tsoff);
489int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
490 u32 cookie);
491struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
492struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
493 const struct tcp_request_sock_ops *af_ops,
494 struct sock *sk, struct sk_buff *skb);
495#ifdef CONFIG_SYN_COOKIES
496
497/* Syncookies use a monotonic timer which increments every 60 seconds.
498 * This counter is used both as a hash input and partially encoded into
499 * the cookie value. A cookie is only validated further if the delta
500 * between the current counter value and the encoded one is less than this,
501 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
502 * the counter advances immediately after a cookie is generated).
503 */
504#define MAX_SYNCOOKIE_AGE 2
505#define TCP_SYNCOOKIE_PERIOD (60 * HZ)
506#define TCP_SYNCOOKIE_VALID (MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)

/* syncookies: remember time of last synqueue overflow
 * But do not dirty this field too often (once per second is enough)
 * It is racy as we do not hold a lock, but the race is very minor.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			if (!time_between32(now, last_overflow,
					    last_overflow + HZ))
				WRITE_ONCE(reuse->synq_overflow_ts, now);
			return;
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
	if (!time_between32(now, last_overflow, last_overflow + HZ))
		WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now);
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			return !time_between32(now, last_overflow - HZ,
					       last_overflow +
					       TCP_SYNCOOKIE_VALID);
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);

	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
	 * then we're under synflood. However, we have to use
	 * 'last_overflow - HZ' as lower bound. That's because a concurrent
	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
	 * jiffies but before we store .ts_recent_stamp into last_overflow,
	 * which could lead to rejecting a valid syncookie.
	 */
	return !time_between32(now, last_overflow - HZ,
			       last_overflow + TCP_SYNCOOKIE_VALID);
}

static inline u32 tcp_cookie_time(void)
{
	u64 val = get_jiffies_64();

	do_div(val, TCP_SYNCOOKIE_PERIOD);
	return val;
}

u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
			     struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
		   const struct net *net, const struct dst_entry *dst);

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */

void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
enum tcp_queue {
	TCP_FRAG_IN_WRITE_QUEUE,
	TCP_FRAG_IN_RTX_QUEUE,
};
int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
		 struct sk_buff *skb, u32 len,
		 unsigned int mss_now, gfp_t gfp);

void tcp_send_probe0(struct sock *);
void tcp_send_partial(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
			     const struct sk_buff *next_skb);

/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk, struct sk_buff *skb);
void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
void tcp_fin(struct sock *sk);
void tcp_check_space(struct sock *sk);

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
		__sock_put(sk);

	if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
		__sock_put(sk);

	inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);

/* Bound MSS / TSO packet size to half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window > TCP_MSS_DEFAULT)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
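
/* Worked example (illustrative, not from the original header): if the peer
 * ever advertised max_window = 65535 (> TCP_MSS_DEFAULT), cutoff is 32767,
 * so a 40000 byte pktsize is clamped to 32767. With a tiny max_window of
 * 500 (<= TCP_MSS_DEFAULT), cutoff is 500 and a 1400 byte pktsize becomes
 * max(500, 68 - tcp_header_len), i.e. 500.
 */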

/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);
int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
void tcp_read_done(struct sock *sk, size_t len);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}
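
/* Worked example (illustrative, not from the original header): srtt_us is
 * kept left-shifted by 3, so srtt_us >> 3 is the smoothed RTT proper, and
 * rttvar_us is the variance term added on top. With srtt_us = 800000
 * (a 100 ms smoothed RTT) and rttvar_us = 50000, this yields
 * usecs_to_jiffies(150000), i.e. a 150 ms RTO before tcp_bound_rto()
 * caps it at TCP_RTO_MAX.
 */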

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	/* mptcp hooks are only on the slow path */
	if (sk_is_mptcp((struct sock *)tp))
		return;

	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = inet_csk(sk)->icsk_rto_min;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

static inline u32 tcp_rto_min_us(struct sock *sk)
{
	return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
	return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
	return minmax_get(&tp->rtt_min);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushed more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result. The caller does these things
 * if necessary. This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP uses 32bit jiffies to save some space.
 * Note that this is different from tcp_time_stamp, which
 * historically was the same thing until linux-4.13.
 */
#define tcp_jiffies32 ((u32)jiffies)

/*
 * Deliver a 32bit value for TCP timestamp option (RFC 7323)
 * It is no longer tied to jiffies, but to 1 ms clock.
 * Note: double check if you want to use tcp_jiffies32 instead of this.
 */
#define TCP_TS_HZ	1000

static inline u64 tcp_clock_ns(void)
{
	return ktime_get_ns();
}

static inline u64 tcp_clock_us(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
}

/* This should only be used in contexts where tp->tcp_mstamp is up to date */
static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
{
	return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
}

/* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
static inline u32 tcp_ns_to_ts(u64 ns)
{
	return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
}
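
/* Worked example (illustrative): NSEC_PER_SEC / TCP_TS_HZ == 1000000, so
 * this maps nanoseconds to milliseconds: tcp_ns_to_ts(5000000000) == 5000,
 * i.e. a 5 second clock reading becomes a TSval of 5000 ms units.
 */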

/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
static inline u32 tcp_time_stamp_raw(void)
{
	return tcp_ns_to_ts(tcp_clock_ns());
}

void tcp_mstamp_refresh(struct tcp_sock *tp);

static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
{
	return max_t(s64, t1 - t0, 0);
}

static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
	return tcp_ns_to_ts(skb->skb_mstamp_ns);
}

/* provide the departure time in us unit */
static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
{
	return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
}

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
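
/* Illustrative note (not from the original header): an ECN-setup SYN per
 * RFC 3168 carries SYN|ECE|CWR, so tcp_flag_byte() reads
 * 0x02 | 0x40 | 0x80 == 0xc2 for such a segment.
 */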

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers here.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	union {
		/* Note : tcp_tw_isn is used in input path only
		 *	  (isn chosen by tcp_timewait_state_process())
		 *
		 *	  tcp_gso_segs/size are used in write queue only,
		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
		 */
		__u32		tcp_tw_isn;
		struct {
			u16	tcp_gso_segs;
			u16	tcp_gso_size;
		};
	};
	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/

	__u8		sacked;		/* State flags for SACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/
#define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp_ns) */
#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
				TCPCB_REPAIRED)

	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
	__u8		txstamp_ack:1,	/* Record TX timestamp for ack?	*/
			eor:1,		/* Is skb MSG_EOR marked?	*/
			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
			unused:5;
	__u32		ack_seq;	/* Sequence number ACK'd	*/
	union {
		struct {
#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
			/* There is space for up to 24 bytes */
			__u32 is_app_limited:1, /* cwnd not fully used? */
			      delivered_ce:20,
			      unused:11;
			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
			__u32 delivered;
			/* start of send pipeline phase */
			u64 first_tx_mstamp;
			/* when we reached the "delivered" count */
			u64 delivered_mstamp;
		} tx;	/* only used for outgoing skbs */
		union {
			struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_skb_parm	h6;
#endif
		} header;	/* For incoming skbs */
	};
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

extern const struct inet_connection_sock_af_ops ipv4_specific;

#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[]
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->header.h6.iif;
}

static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);

	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v6_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
		return TCP_SKB_CB(skb)->header.h6.iif;
#endif
	return 0;
}

extern const struct inet_connection_sock_af_ops ipv6_specific;

INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
void tcp_v6_early_demux(struct sk_buff *skb);

#endif

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v4_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
		return TCP_SKB_CB(skb)->header.h4.iif;
#endif
	return 0;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets. To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_size;
}

static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
	return likely(!TCP_SKB_CB(skb)->eor);
}

static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
					const struct sk_buff *from)
{
	return likely(tcp_skb_can_collapse_to(to) &&
		      mptcp_skb_can_collapse(to, from) &&
		      skb_pure_zcopy_same(to, from));
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CA_UNSPEC	0

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED 0x1
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN	0x2
#define TCP_CONG_MASK	(TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)

union tcp_cc_info;

struct ack_sample {
	u32 pkts_acked;
	s32 rtt_us;
	u32 in_flight;
};

/* A rate sample measures the number of (original/retransmitted) data
 * packets delivered "delivered" over an interval of time "interval_us".
 * The tcp_rate.c code fills in the rate sample, and congestion
 * control modules that define a cong_control function to run at the end
 * of ACK processing can optionally choose to consult this sample when
 * setting cwnd and pacing rate.
 * A sample is invalid if "delivered" or "interval_us" is negative.
 */
struct rate_sample {
	u64  prior_mstamp;	/* starting timestamp for interval */
	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
	u32  prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
	s32  delivered;		/* number of packets delivered over interval */
	s32  delivered_ce;	/* number of packets delivered w/ CE marks*/
	long interval_us;	/* time for tp->delivered to incr "delivered" */
	u32  snd_interval_us;	/* snd interval for delivered packets */
	u32  rcv_interval_us;	/* rcv interval for delivered packets */
	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
	int  losses;		/* number of packets marked lost upon ACK */
	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
	u32  prior_in_flight;	/* in flight before this ACK */
	u32  last_end_seq;	/* end_seq of most recently ACKed packet */
	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
	bool is_retrans;	/* is sample from retransmission? */
	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
};

struct tcp_congestion_ops {
/* fast path fields are put first to fill one cache line */

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);

	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);

	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);

	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);

	/* call when ack arrives (optional) */
	void (*in_ack_event)(struct sock *sk, u32 flags);

	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);

	/* override sysctl_tcp_min_tso_segs */
	u32 (*min_tso_segs)(struct sock *sk);

	/* call when packets are delivered to update cwnd and pacing rate,
	 * after all the ca_state processing. (optional)
	 */
	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);

	/* new value of cwnd after loss (required) */
	u32 (*undo_cwnd)(struct sock *sk);
	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
	u32 (*sndbuf_expand)(struct sock *sk);

/* control/slow paths put last */
	/* get info for inet_diag (optional) */
	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info);

	char			name[TCP_CA_NAME_MAX];
	struct module		*owner;
	struct list_head	list;
	u32			key;
	u32			flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);
} ____cacheline_aligned_in_smp;

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(struct net *net, const char *name);
void tcp_get_default_congestion_control(struct net *net, char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;

struct tcp_congestion_ops *tcp_ca_find(const char *name);
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	return NULL;
}
#endif

static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* From tcp_cong.c */
void tcp_set_ca_state(struct sock *sk, const u8 ca_state);

/* From tcp_rate.c */
void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
			    struct rate_sample *rs);
void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
		  bool is_sack_reneg, struct rate_sample *rs);
void tcp_rate_check_app_limited(struct sock *sk);

static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

/* These functions determine how the current flow behaves with respect to
 * SACK handling. SACK is negotiated with the peer, and therefore it can
 * vary between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return likely(tp->rx_opt.sack_ok);
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge. In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control; use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
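
/* Worked example (illustrative): with packets_out = 10, sacked_out = 2,
 * lost_out = 1 and retrans_out = 1, tcp_left_out() is 3 and the estimate
 * is 10 - 3 + 1 = 8 packets still in the network.
 */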

#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
{
	return tp->snd_cwnd;
}

static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
{
	WARN_ON_ONCE((int)val <= 0);
	tp->snd_cwnd = val;
}

static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
	return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
}

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
	       (1 << inet_csk(sk)->icsk_ca_state);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is cwnd reduction phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_in_cwnd_reduction(sk))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tcp_snd_cwnd(tp) >> 1) +
			    (tcp_snd_cwnd(tp) >> 2)));
}
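
/* Worked example (illustrative): (cwnd >> 1) + (cwnd >> 2) is 3/4 of cwnd,
 * so with cwnd = 40 and ssthresh = 20 this returns max(20, 30) = 30.
 */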

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

void tcp_enter_cwr(struct sock *sk);
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	return 3;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}

/* We follow the spirit of RFC2861 to validate cwnd but implement a more
 * flexible approach. The RFC suggests cwnd should not be raised unless
 * it was fully used previously. And that's exactly what we do in
 * congestion avoidance mode. But in slow start we allow cwnd to grow
 * as long as the application has used half the cwnd.
 * Example :
 *    cwnd is 10 (IW10), but application sends 9 frames.
 *    We allow cwnd to reach 18 when all frames are ACKed.
 * This check is safe because it's as aggressive as slow start, which already
 * risks 100% overshoot. The advantage is that we discourage applications
 * from either sending more filler packets or data to artificially blow up
 * the cwnd usage, and allow application-limited processes to probe bw
 * more aggressively.
 */
static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tp->is_cwnd_limited)
		return true;

	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
	if (tcp_in_slow_start(tp))
		return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;

	return false;
}

/* BBR congestion control needs pacing.
 * Same remark for SO_MAX_PACING_RATE.
 * sch_fq packet scheduler is efficiently handling pacing,
 * but is not always installed/used.
 * Return true if TCP stack should pace packets itself.
 */
static inline bool tcp_needs_internal_pacing(const struct sock *sk)
{
	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}

/* Estimate in how many jiffies the next packet for this flow can be sent.
 * Scheduling a retransmit timer too early would be silly.
 */
static inline unsigned long tcp_pacing_delay(const struct sock *sk)
{
	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;

	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
}

static inline void tcp_reset_xmit_timer(struct sock *sk,
					const int what,
					unsigned long when,
					const unsigned long max_when)
{
	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
				  max_when);
}

/* Something is really bad, we could not queue an additional packet,
 * because qdisc is full or receiver sent a 0 window, or we are paced.
 * We do not want to add fuel to the fire, or abort too early,
 * so make sure the timer we arm now is at least 200ms in the future,
 * regardless of current icsk_rto value (as it could be ~2ms)
 */
static inline unsigned long tcp_probe0_base(const struct sock *sk)
{
	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
}

/* Variant of inet_csk_rto_backoff() used for zero window probes */
static inline unsigned long tcp_probe0_when(const struct sock *sk,
					    unsigned long max_when)
{
	u8 backoff = min_t(u8, ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1,
			   inet_csk(sk)->icsk_backoff);
	u64 when = (u64)tcp_probe0_base(sk) << backoff;

	return (unsigned long)min_t(u64, when, max_when);
}
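
/* Worked example (illustrative, not from the original header): with
 * tcp_probe0_base() clamped to TCP_RTO_MIN (HZ/5, i.e. 200 ms) and
 * icsk_backoff = 3, the probe fires after 200ms << 3 = 1.6 s, unless
 * max_when caps it earlier.
 */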

static inline void tcp_check_probe_timer(struct sock *sk)
{
	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
				     tcp_probe0_base(sk), TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__skb_checksum_complete(skb);
}

bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
		     enum skb_drop_reason *reason);

int tcp_filter(struct sock *sk, struct sk_buff *skb);
void tcp_set_state(struct sock *sk, int state);
void tcp_done(struct sock *sk);
int tcp_abort(struct sock *sk, int err);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

void tcp_cwnd_restart(struct sock *sk, s32 delta);

static inline void tcp_slow_start_after_idle_check(struct sock *sk)
{
	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta;

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
	    tp->packets_out || ca_ops->cong_control)
		return;
	delta = tcp_jiffies32 - tp->lsndtime;
	if (delta > inet_csk(sk)->icsk_rto)
		tcp_cwnd_restart(sk, delta);
}

/* Determine a window scaling and initial window to offer. */
void tcp_select_initial_window(const struct sock *sk, int __space,
			       __u32 mss, __u32 *rcv_wnd,
			       __u32 *window_clamp, int wscale_ok,
			       __u8 *rcv_wscale, __u32 init_rcv_wnd);

static inline int tcp_win_from_space(const struct sock *sk, int space)
{
	int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale);

	return tcp_adv_win_scale <= 0 ?
		(space>>(-tcp_adv_win_scale)) :
		space - (space>>tcp_adv_win_scale);
}
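
/* Worked example (illustrative, not from the original header): with
 * tcp_adv_win_scale = 1, half of the space is advertised:
 * tcp_win_from_space(sk, 65536) == 32768. A scale of 2 yields 3/4
 * (49152), and a scale of -2 yields 1/4 (16384).
 */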

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
				  READ_ONCE(sk->sk_backlog.len) -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}

static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
{
	int unused_mem = sk_unused_reserved_mem(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
	if (unused_mem)
		tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
					 tcp_win_from_space(sk, unused_mem));
}

void tcp_cleanup_rbuf(struct sock *sk, int copied);

/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
 * If 87.5 % (7/8) of the space has been consumed, we want to override
 * SO_RCVLOWAT constraint, since we are receiving skbs with too small
 * len/truesize ratio.
 */
static inline bool tcp_rmem_pressure(const struct sock *sk)
{
	int rcvbuf, threshold;

	if (tcp_under_memory_pressure(sk))
		return true;

	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
	threshold = rcvbuf - (rcvbuf >> 3);

	return atomic_read(&sk->sk_rmem_alloc) > threshold;
}

static inline bool tcp_epollin_ready(const struct sock *sk, int target)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);

	if (avail <= 0)
		return false;

	return (avail >= target) || tcp_rmem_pressure(sk) ||
	       (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
}

extern void tcp_openreq_init_rwin(struct request_sock *req,
				  const struct sock *sk_listener,
				  const struct dst_entry *dst);

void tcp_enter_memory_pressure(struct sock *sk);
void tcp_leave_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_intvl ? :
		READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_time ? :
		READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_probes ? :
		READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
		     tcp_jiffies32 - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? :
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}
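
/* Worked example (illustrative): the floor is (rto << 2) - (rto >> 1),
 * i.e. 3.5 * RTO. With an icsk_rto of 200 ms, a configured fin_timeout
 * below 700 ms is raised to 700 ms.
 */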

static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
				  int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return true;
	if (unlikely(!time_before32(ktime_get_seconds(),
				    rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
		return true;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
	 * while the following tcp messages have valid values. Ignore the
	 * 0 value, or else a 'negative' tsval might forbid us to accept
	 * their packets.
	 */
	if (!rx_opt->ts_recent)
		return true;
	return false;
}
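
/* Worked example (illustrative): with ts_recent = 1000 and an incoming
 * rcv_tsval of 1005, (s32)(1000 - 1005) == -5 <= 0, so the segment passes.
 * An rcv_tsval of 990 gives +10 > 0 and falls through to the 24-day and
 * zero-timestamp escape clauses.
 */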

static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
				   int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return false;

	/* RST segments are not recommended to carry timestamp,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, it is a mistake. It is necessary to understand the
	   reasons of this constraint to relax it: if peer reboots, clock
	   may go out-of-sync and half-open connections will not be reset.
	   Actually, the problem would not exist if all the implementations
	   followed the draft about maintaining clock via reboots.
	   Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && !time_before32(ktime_get_seconds(),
				  rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
		return false;
	return true;
}

bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
			  int mib_idx, u32 *last_oow_ack_time);

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}

union tcp_md5_addr {
	struct in_addr	a4;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr	a6;
#endif
};

/* - key database */
struct tcp_md5sig_key {
	struct hlist_node	node;
	u8			keylen;
	u8			family; /* AF_INET or AF_INET6 */
	u8			prefixlen;
	u8			flags;
	union tcp_md5_addr	addr;
	int			l3index; /* set if key added with L3 scope */
	u8			key[TCP_MD5SIG_MAXKEYLEN];
	struct rcu_head		rcu;
};

/* - sock block */
struct tcp_md5sig_info {
	struct hlist_head	head;
	struct rcu_head		rcu;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if IS_ENABLED(CONFIG_IPV6)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct ahash_request	*md5_req;
	void			*scratch;
};

/* - functions */
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, int l3index, u8 flags,
		   const u8 *newkey, u8 newkeylen);
int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
		     int family, u8 prefixlen, int l3index,
		     struct tcp_md5sig_key *key);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, int l3index, u8 flags);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk);

#ifdef CONFIG_TCP_MD5SIG
#include <linux/jump_label.h>
extern struct static_key_false_deferred tcp_md5_needed;
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
					   const union tcp_md5_addr *addr,
					   int family);
static inline struct tcp_md5sig_key *
tcp_md5_do_lookup(const struct sock *sk, int l3index,
		  const union tcp_md5_addr *addr, int family)
{
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return NULL;
	return __tcp_md5_do_lookup(sk, l3index, addr, family);
}

enum skb_drop_reason
tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
		     const void *saddr, const void *daddr,
		     int family, int dif, int sdif);

#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *
tcp_md5_do_lookup(const struct sock *sk, int l3index,
		  const union tcp_md5_addr *addr, int family)
{
	return NULL;
}

static inline enum skb_drop_reason
tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
		     const void *saddr, const void *daddr,
		     int family, int dif, int sdif)
{
	return SKB_NOT_DROPPED_YET;
}
#define tcp_twsk_md5_key(twsk)	NULL
#endif

bool tcp_alloc_md5sig_pool(void);

struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
static inline void tcp_put_md5sig_pool(void)
{
	local_bh_enable();
}

int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
			  unsigned int header_len);
int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
		     const struct tcp_md5sig_key *key);

/* From tcp_fastopen.c */
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie);
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp);
struct tcp_fastopen_request {
	/* Fast Open cookie. Size 0 means a cookie request */
	struct tcp_fastopen_cookie	cookie;
	struct msghdr			*data;	/* data in MSG_FASTOPEN */
	size_t				size;
	int				copied;	/* queued in tcp_connect() */
	struct ubuf_info		*uarg;
};
void tcp_free_fastopen_req(struct tcp_sock *tp);
void tcp_fastopen_destroy_cipher(struct sock *sk);
void tcp_fastopen_ctx_destroy(struct net *net);
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
			      void *primary_key, void *backup_key);
int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
			    u64 *key);
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      const struct dst_entry *dst);
void tcp_fastopen_init_key_once(struct net *net);
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie);
bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
#define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
#define TCP_FASTOPEN_KEY_MAX 2
#define TCP_FASTOPEN_KEY_BUF_LENGTH \
	(TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)

/* Fastopen key context */
struct tcp_fastopen_context {
	siphash_key_t	key[TCP_FASTOPEN_KEY_MAX];
	int		num;
	struct rcu_head	rcu;
};

void tcp_fastopen_active_disable(struct sock *sk);
bool tcp_fastopen_active_should_disable(struct sock *sk);
void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);

/* Caller needs to wrap with rcu_read_(un)lock() */
static inline
struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
{
	struct tcp_fastopen_context *ctx;

	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
	if (!ctx)
		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
	return ctx;
}

static inline
bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
			       const struct tcp_fastopen_cookie *orig)
{
	if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
	    orig->len == foc->len &&
	    !memcmp(orig->val, foc->val, foc->len))
		return true;
	return false;
}

static inline
int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
{
	return ctx->num;
}

/* Latencies incurred by various limits for a sender. They are
 * chronograph-like stats that are mutually exclusive.
 */
enum tcp_chrono {
	TCP_CHRONO_UNSPEC,
	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
	__TCP_CHRONO_MAX,
};

void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);

/* This helper is needed, because skb->tcp_tsorted_anchor uses
 * the same memory storage as skb->destructor/_skb_refdst
 */
static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
{
	skb->destructor = NULL;
	skb->_skb_refdst = 0UL;
}

#define tcp_skb_tsorted_save(skb) {		\
	unsigned long _save = skb->_skb_refdst;	\
	skb->_skb_refdst = 0UL;

#define tcp_skb_tsorted_restore(skb)		\
	skb->_skb_refdst = _save;		\
}
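
/* Illustrative usage (a sketch mirroring the pattern used in
 * net/ipv4/tcp_output.c; not part of the original header). The braces come
 * from the macros themselves, which open and close the scope holding _save:
 *
 *	tcp_skb_tsorted_save(skb) {
 *		err = do_transmit(skb);		// hypothetical helper
 *	} tcp_skb_tsorted_restore(skb);
 */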

void tcp_write_queue_purge(struct sock *sk);

static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
{
	return skb_rb_first(&sk->tcp_rtx_queue);
}

static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
{
	return skb_rb_last(&sk->tcp_rtx_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

/**
 * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue
 * @sk: socket
 *
 * Since the write queue can have a temporary empty skb in it,
 * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
 */
static inline bool tcp_write_queue_empty(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return tp->write_seq == tp->snd_nxt;
}

static inline bool tcp_rtx_queue_empty(const struct sock *sk)
{
	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
}

static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
{
	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_write_queue.next == skb)
		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
}

/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	tcp_skb_tsorted_anchor_cleanup(skb);
	__skb_unlink(skb, &sk->sk_write_queue);
}

void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);

static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
{
	tcp_skb_tsorted_anchor_cleanup(skb);
	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
}

static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
{
	list_del(&skb->tcp_tsorted_anchor);
	tcp_rtx_queue_unlink(skb, sk);
	tcp_wmem_free_skb(sk, skb);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	if (tcp_send_head(sk)) {
		struct tcp_sock *tp = tcp_sk(sk);

		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
	}
}

/* Start sequence of the skb just after the highest skb with SACKed
 * bit, valid only if sacked_out > 0 or when the caller has ensured
 * validity by itself.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;

	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}

static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
}

/* Called when old skb is about to be deleted and replaced by new skb */
static inline void tcp_highest_sack_replace(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	if (old == tcp_highest_sack(sk))
		tcp_sk(sk)->highest_sack = new;
}

/* This helper checks if socket has IP_TRANSPARENT set */
static inline bool inet_sk_transparent(const struct sock *sk)
{
	switch (sk->sk_state) {
	case TCP_TIME_WAIT:
		return inet_twsk(sk)->tw_transparent;
	case TCP_NEW_SYN_RECV:
		return inet_rsk(inet_reqsk(sk))->no_srccheck;
	}
	return inet_sk(sk)->transparent;
}

/* Determines whether this is a thin stream (which may suffer from
 * increased latency). Used to trigger latency-reducing mechanisms.
 */
static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
{
	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
}

/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_ESTABLISHED,
};

void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void tcp_seq_stop(struct seq_file *seq, void *v);

struct tcp_seq_afinfo {
	sa_family_t			family;
};

struct tcp_iter_state {
	struct seq_net_private	p;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, offset, sbucket, num;
	loff_t			last_pos;
};

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

void tcp_v4_destroy_sock(struct sock *sk);

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features);
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
int tcp_gro_complete(struct sk_buff *skb);

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);

static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->notsent_lowat ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
}

bool tcp_stream_memory_free(const struct sock *sk, int wake);

#ifdef CONFIG_PROC_FS
int tcp4_proc_init(void);
void tcp4_proc_exit(void);
#endif

int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
int tcp_conn_request(struct request_sock_ops *rsk_ops,
		     const struct tcp_request_sock_ops *af_ops,
		     struct sock *sk, struct sk_buff *skb);

/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup)(const struct sock *sk,
					       const struct sock *addr_sk);
	int	(*calc_md5_hash)(char *location,
				 const struct tcp_md5sig_key *md5,
				 const struct sock *sk,
				 const struct sk_buff *skb);
	int	(*md5_parse)(struct sock *sk,
			     int optname,
			     sockptr_t optval,
			     int optlen);
#endif
};

struct tcp_request_sock_ops {
	u16 mss_clamp;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
						 const struct sock *addr_sk);
	int	(*calc_md5_hash)(char *location,
				 const struct tcp_md5sig_key *md5,
				 const struct sock *sk,
				 const struct sk_buff *skb);
#endif
#ifdef CONFIG_SYN_COOKIES
	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
				 __u16 *mss);
#endif
	struct dst_entry *(*route_req)(const struct sock *sk,
				       struct sk_buff *skb,
				       struct flowi *fl,
				       struct request_sock *req);
	u32 (*init_seq)(const struct sk_buff *skb);
	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
			   struct flowi *fl, struct request_sock *req,
			   struct tcp_fastopen_cookie *foc,
			   enum tcp_synack_type synack_type,
			   struct sk_buff *syn_skb);
};

extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
#if IS_ENABLED(CONFIG_IPV6)
extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
#endif

#ifdef CONFIG_SYN_COOKIES
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 const struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	tcp_synq_overflow(sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
	return ops->cookie_init_seq(skb, mss);
}
#else
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 const struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	return 0;
}
#endif

int tcpv4_offload_init(void);

void tcp_v4_init(void);
void tcp_init(void);

/* tcp_recovery.c */
void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
				u32 reo_wnd);
extern bool tcp_rack_mark_lost(struct sock *sk);
extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
			     u64 xmit_time);
extern void tcp_rack_reo_timeout(struct sock *sk);
extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);

/* tcp_plb.c */

/*
 * Scaling factor for fractions in PLB. For example, tcp_plb_update_state
 * expects cong_ratio which represents fraction of traffic that experienced
 * congestion over a single RTT. In order to avoid floating point operations,
 * this fraction should be mapped to (1 << TCP_PLB_SCALE) and passed in.
 */
#define TCP_PLB_SCALE 8
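
/* Worked example (illustrative): if half the traffic in an RTT saw
 * congestion, the caller passes cong_ratio = 0.5 * (1 << TCP_PLB_SCALE)
 * = 128; full congestion maps to 256.
 */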
2156
2157/* State for PLB (Protective Load Balancing) for a single TCP connection. */
2158struct tcp_plb_state {
2159 u8 consec_cong_rounds:5, /* consecutive congested rounds */
2160 unused:3;
2161 u32 pause_until; /* jiffies32 when PLB can resume rerouting */
2162};
2163
2164static inline void tcp_plb_init(const struct sock *sk,
2165 struct tcp_plb_state *plb)
2166{
2167 plb->consec_cong_rounds = 0;
2168 plb->pause_until = 0;
2169}
2170void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
2171 const int cong_ratio);
2172void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb);
2173void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb);
2174
2175/* At how many usecs into the future should the RTO fire? */
2176static inline s64 tcp_rto_delta_us(const struct sock *sk)
2177{
2178 const struct sk_buff *skb = tcp_rtx_queue_head(sk);
2179 u32 rto = inet_csk(sk)->icsk_rto;
2180 u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
2181
2182 return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
2183}
2184
2185/*
2186 * Save and compile IPv4 options, return a pointer to it
2187 */
2188static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
2189 struct sk_buff *skb)
2190{
2191 const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
2192 struct ip_options_rcu *dopt = NULL;
2193
2194 if (opt->optlen) {
2195 int opt_size = sizeof(*dopt) + opt->optlen;
2196
2197 dopt = kmalloc(opt_size, GFP_ATOMIC);
2198 if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
2199 kfree(dopt);
2200 dopt = NULL;
2201 }
2202 }
2203 return dopt;
2204}

/* Locally generated TCP pure ACKs have skb->truesize == 2
 * (check tcp_send_ack() in net/ipv4/tcp_output.c).
 * This is much faster than dissecting the packet to find out
 * (think of GRE encapsulations, IPv4, IPv6, ...).
 */
static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
{
	return skb->truesize == 2;
}

static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
{
	skb->truesize = 2;
}
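
/* Illustrative check (sketch): a queueing or pacing layer can detect such
 * ACKs without parsing any headers:
 *
 *	if (skb_is_tcp_pure_ack(skb))
 *		... treat as a pure ACK (e.g. skip payload accounting) ...
 */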

static inline int tcp_inq(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		answ = 0;
	} else if (sock_flag(sk, SOCK_URGINLINE) ||
		   !tp->urg_data ||
		   before(tp->urg_seq, tp->copied_seq) ||
		   !before(tp->urg_seq, tp->rcv_nxt)) {

		answ = tp->rcv_nxt - tp->copied_seq;

		/* Subtract 1, if FIN was received */
		if (answ && sock_flag(sk, SOCK_DONE))
			answ--;
	} else {
		answ = tp->urg_seq - tp->copied_seq;
	}

	return answ;
}
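
/* Userspace counterpart (sketch): tcp_inq() mirrors what SIOCINQ/FIONREAD
 * reports for a TCP socket:
 *
 *	int n;
 *
 *	if (ioctl(fd, FIONREAD, &n) == 0)
 *		... n bytes are readable without blocking ...
 */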

int tcp_peek_len(struct socket *sock);

static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
{
	u16 segs_in;

	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);

	/* We update these fields while other threads might
	 * read them from tcp_get_info()
	 */
	WRITE_ONCE(tp->segs_in, tp->segs_in + segs_in);
	if (skb->len > tcp_hdrlen(skb))
		WRITE_ONCE(tp->data_segs_in, tp->data_segs_in + segs_in);
}

/*
 * TCP listen path runs lockless.
 * We forced "struct sock" to be const qualified to make sure
 * we don't modify one of its fields by mistake.
 * Here, we increment sk_drops which is an atomic_t, so we can safely
 * make the sock writable again.
 */
static inline void tcp_listendrop(const struct sock *sk)
{
	atomic_inc(&((struct sock *)sk)->sk_drops);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
}

enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);

/*
 * Interface for adding Upper Level Protocols over TCP
 */

#define TCP_ULP_NAME_MAX	16
#define TCP_ULP_MAX		128
#define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)

struct tcp_ulp_ops {
	struct list_head	list;

	/* initialize ulp */
	int (*init)(struct sock *sk);
	/* update ulp */
	void (*update)(struct sock *sk, struct proto *p,
		       void (*write_space)(struct sock *sk));
	/* cleanup ulp */
	void (*release)(struct sock *sk);
	/* diagnostic */
	int (*get_info)(const struct sock *sk, struct sk_buff *skb);
	size_t (*get_info_size)(const struct sock *sk);
	/* clone ulp */
	void (*clone)(const struct request_sock *req, struct sock *newsk,
		      const gfp_t priority);

	char		name[TCP_ULP_NAME_MAX];
	struct module	*owner;
};
int tcp_register_ulp(struct tcp_ulp_ops *type);
void tcp_unregister_ulp(struct tcp_ulp_ops *type);
int tcp_set_ulp(struct sock *sk, const char *name);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
void tcp_update_ulp(struct sock *sk, struct proto *p,
		    void (*write_space)(struct sock *sk));

#define MODULE_ALIAS_TCP_ULP(name)				\
	__MODULE_INFO(alias, alias_userspace, name);		\
	__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
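
/* Illustrative sketch (hypothetical module, not in-tree): registering a
 * minimal ULP; real users such as net/tls follow the same pattern.
 *
 *	static int my_ulp_init(struct sock *sk)
 *	{
 *		return 0;	// take over sk->sk_prot etc. here
 *	}
 *
 *	static struct tcp_ulp_ops my_ulp_ops __read_mostly = {
 *		.name	= "my_ulp",
 *		.owner	= THIS_MODULE,
 *		.init	= my_ulp_init,
 *	};
 *
 *	// module init/exit call tcp_register_ulp(&my_ulp_ops) /
 *	// tcp_unregister_ulp(&my_ulp_ops); MODULE_ALIAS_TCP_ULP("my_ulp")
 *	// lets the module be auto-loaded on demand.
 *
 * A socket then selects it with:
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_ULP, "my_ulp", sizeof("my_ulp"));
 */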

#ifdef CONFIG_NET_SOCK_MSG
struct sk_msg;
struct sk_psock;

#ifdef CONFIG_BPF_SYSCALL
struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
#endif /* CONFIG_BPF_SYSCALL */

int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
			  struct sk_msg *msg, u32 bytes, int flags);
#endif /* CONFIG_NET_SOCK_MSG */

#if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
}
#endif

#ifdef CONFIG_CGROUP_BPF
static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
				      struct sk_buff *skb,
				      unsigned int end_offset)
{
	skops->skb = skb;
	skops->skb_data_end = skb->data + end_offset;
}
#else
static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
				      struct sk_buff *skb,
				      unsigned int end_offset)
{
}
#endif

/* Call BPF_SOCK_OPS program that returns an int. If the return value
 * is < 0, then the BPF op failed (for example if the loaded BPF
 * program does not support the chosen operation or there is no BPF
 * program loaded).
 */
#ifdef CONFIG_BPF
static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
	struct bpf_sock_ops_kern sock_ops;
	int ret;

	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
	if (sk_fullsock(sk)) {
		sock_ops.is_fullsock = 1;
		sock_owned_by_me(sk);
	}

	sock_ops.sk = sk;
	sock_ops.op = op;
	if (nargs > 0)
		memcpy(sock_ops.args, args, nargs * sizeof(*args));

	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
	if (ret == 0)
		ret = sock_ops.reply;
	else
		ret = -1;
	return ret;
}

static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
{
	u32 args[2] = {arg1, arg2};

	return tcp_call_bpf(sk, op, 2, args);
}

static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
				    u32 arg3)
{
	u32 args[3] = {arg1, arg2, arg3};

	return tcp_call_bpf(sk, op, 3, args);
}

#else
static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
	return -EPERM;
}

static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
{
	return -EPERM;
}

static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
				    u32 arg3)
{
	return -EPERM;
}

#endif

static inline u32 tcp_timeout_init(struct sock *sk)
{
	int timeout;

	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);

	if (timeout <= 0)
		timeout = TCP_TIMEOUT_INIT;
	return min_t(int, timeout, TCP_RTO_MAX);
}
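
/* BPF-side counterpart (sketch): a cgroup sockops program can answer
 * BPF_SOCK_OPS_TIMEOUT_INIT via the reply field; a reply <= 0 makes
 * tcp_timeout_init() fall back to TCP_TIMEOUT_INIT. Note the reply is
 * interpreted in jiffies here.
 *
 *	SEC("sockops")
 *	int set_init_timeout(struct bpf_sock_ops *skops)
 *	{
 *		if (skops->op == BPF_SOCK_OPS_TIMEOUT_INIT)
 *			skops->reply = 250;	// assumes CONFIG_HZ=250: ~1s
 *		return 1;
 *	}
 */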

static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
{
	int rwnd;

	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);

	if (rwnd < 0)
		rwnd = 0;
	return rwnd;
}

static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
{
	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
}

static inline void tcp_bpf_rtt(struct sock *sk)
{
	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
		tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, NULL);
}

#if IS_ENABLED(CONFIG_SMC)
extern struct static_key_false tcp_have_smc;
#endif

#if IS_ENABLED(CONFIG_TLS_DEVICE)
void clean_acked_data_enable(struct inet_connection_sock *icsk,
			     void (*cad)(struct sock *sk, u32 ack_seq));
void clean_acked_data_disable(struct inet_connection_sock *icsk);
void clean_acked_data_flush(void);
#endif

DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
static inline void tcp_add_tx_delay(struct sk_buff *skb,
				    const struct tcp_sock *tp)
{
	if (static_branch_unlikely(&tcp_tx_delay_enabled))
		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
}

/* Compute Earliest Departure Time for some control packets
 * like ACK or RST for TIME_WAIT or non-ESTABLISHED sockets.
 */
static inline u64 tcp_transmit_time(const struct sock *sk)
{
	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
			tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;

		return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
	}
	return 0;
}
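
/* Userspace counterpart (sketch): the per-socket delay used above is set
 * with the TCP_TX_DELAY socket option, in usecs:
 *
 *	int delay_us = 100000;	// emulate ~100 ms of extra latency
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_TX_DELAY, &delay_us, sizeof(delay_us));
 */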

#endif	/* _TCP_H */