/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET An implementation of the TCP/IP protocol suite for the LINUX
 * operating system. INET is implemented using the BSD Socket
 * interface as the means of communication with the user level.
 *
 * Definitions for the TCP module.
 *
 * Version: @(#)tcp.h 1.0.5 05/23/93
 *
 * Authors: Ross Biro
 *          Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/indirect_call_wrapper.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock_reuseport.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/tcp_ao.h>
#include <net/inet_ecn.h>
#include <net/dst.h>
#include <net/mptcp.h>
#include <net/xfrm.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/bpf-cgroup.h>
#include <linux/siphash.h>

extern struct inet_hashinfo tcp_hashinfo;

DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
int tcp_orphan_count_sum(void);

DECLARE_PER_CPU(u32, tcp_tw_isn);

void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
#define TCP_MIN_SND_MSS 48
#define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
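
/* i.e. even with full option space in use, at least 48 - 40 = 8 bytes of
 * payload remain per GSO segment.
 */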

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW 32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS 88U

/* The initial MTU to use for probing */
#define TCP_BASE_MSS 1024

/* probing interval, defaults to 10 minutes as per RFC4821 */
#define TCP_PROBE_INTERVAL 600

/* Specify interval when tcp mtu probing will stop */
#define TCP_PROBE_THRESHOLD 8

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS 16U

/* Maximal value of window scale according to RFC1323 */
#define TCP_MAX_WSCALE 14U

/* urg_data states */
#define TCP_URG_VALID 0x0100
#define TCP_URG_NOTYET 0x0200
#define TCP_URG_READ 0x0400

#define TCP_RETR1 3 /*
		     * This is how many retries it does before it
		     * tries to figure out if the gateway is
		     * down. Minimal RFC value is 3; it corresponds
		     * to ~3sec-8min depending on RTO.
		     */

#define TCP_RETR2 15 /*
		      * This should take at least
		      * 90 minutes to time out.
		      * RFC1122 says that the limit is 100 sec.
		      * 15 is ~13-30min depending on RTO.
		      */

#define TCP_SYN_RETRIES 6 /* This is how many retries are done
			   * when actively opening a connection.
			   * RFC1122 says the minimum retry MUST
			   * be at least 180secs. Nevertheless
			   * this value corresponds to
			   * 63secs of retransmission with the
			   * current initial RTO.
			   */

#define TCP_SYNACK_RETRIES 5 /* This is how many retries are done
			      * when passively opening a connection.
			      * This corresponds to 31secs of
			      * retransmission with the current
			      * initial RTO.
			      */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds */
#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */
#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */

#define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */
static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);

#if HZ >= 100
#define TCP_DELACK_MIN ((unsigned)(HZ/25)) /* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN ((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN 4U
#define TCP_ATO_MIN 4U
#endif
#define TCP_RTO_MAX ((unsigned)(120*HZ))
#define TCP_RTO_MIN ((unsigned)(HZ/5))
#define TCP_TIMEOUT_MIN (2U) /* Min timeout for TCP timers in jiffies */

#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */

#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */
#define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */
#define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */
#define TCP_KEEPALIVE_INTVL (75*HZ)

#define MAX_TCP_KEEPIDLE 32767
#define MAX_TCP_KEEPINTVL 32767
#define MAX_TCP_KEEPCNT 127
#define MAX_TCP_SYNCNT 127

/* Ensure that TCP PAWS checks are relaxed after ~2147 seconds
 * to avoid overflows. This assumes a clock smaller than 1 MHz.
 * Default clock is 1 kHz, tcp_usec_ts uses 1 MHz.
 */
#define TCP_PAWS_WRAP (INT_MAX / USEC_PER_SEC)

#define TCP_PAWS_MSL 60 /* Per-host timestamps are invalidated
			 * after this time. It should be equal
			 * (or greater than) TCP_TIMEWAIT_LEN
			 * to provide reliability equal to one
			 * provided by timewait state.
			 */
#define TCP_PAWS_WINDOW 1 /* Replay window for per-host
			   * timestamps. It must be less than
			   * minimal timewait lifetime.
			   */
/*
 * TCP option
 */

#define TCPOPT_NOP 1 /* Padding */
#define TCPOPT_EOL 0 /* End of options */
#define TCPOPT_MSS 2 /* Segment size negotiating */
#define TCPOPT_WINDOW 3 /* Window scaling */
#define TCPOPT_SACK_PERM 4 /* SACK Permitted */
#define TCPOPT_SACK 5 /* SACK Block */
#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
#define TCPOPT_AO 29 /* Authentication Option (RFC5925) */
#define TCPOPT_MPTCP 30 /* Multipath TCP (RFC6824) */
#define TCPOPT_FASTOPEN 34 /* Fast open (RFC7413) */
#define TCPOPT_EXP 254 /* Experimental */
/* Magic number to be after the option value for sharing TCP
 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC 0xF989
#define TCPOPT_SMC_MAGIC 0xE2D4C3D9

/*
 * TCP option lengths
 */

#define TCPOLEN_MSS 4
#define TCPOLEN_WINDOW 3
#define TCPOLEN_SACK_PERM 2
#define TCPOLEN_TIMESTAMP 10
#define TCPOLEN_MD5SIG 18
#define TCPOLEN_FASTOPEN_BASE 2
#define TCPOLEN_EXP_FASTOPEN_BASE 4
#define TCPOLEN_EXP_SMC_BASE 6

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED 12
#define TCPOLEN_WSCALE_ALIGNED 4
#define TCPOLEN_SACKPERM_ALIGNED 4
#define TCPOLEN_SACK_BASE 2
#define TCPOLEN_SACK_BASE_ALIGNED 4
#define TCPOLEN_SACK_PERBLOCK 8
#define TCPOLEN_MD5SIG_ALIGNED 20
#define TCPOLEN_MSS_ALIGNED 4
#define TCPOLEN_EXP_SMC_BASE_ALIGNED 8

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
#define TCP_NAGLE_CORK 2 /* Socket is corked */
#define TCP_NAGLE_PUSH 4 /* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6 /* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per rfc6928 */
#define TCP_INIT_CWND 10

/* Bit Flags for sysctl_tcp_fastopen */
#define TFO_CLIENT_ENABLE 1
#define TFO_SERVER_ENABLE 2
#define TFO_CLIENT_NO_COOKIE 4 /* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define TFO_SERVER_COOKIE_NOT_REQD 0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option.
 */
#define TFO_SERVER_WO_SOCKOPT1 0x400


/* sysctl variables for tcp */
extern int sysctl_tcp_max_orphans;
extern long sysctl_tcp_mem[3];

#define TCP_RACK_LOSS_DETECTION 0x1 /* Use RACK to detect losses */
#define TCP_RACK_STATIC_REO_WND 0x2 /* Use static RACK reo wnd */
#define TCP_RACK_NO_DUPTHRESH 0x4 /* Do not use DUPACK threshold in RACK */

extern atomic_long_t tcp_memory_allocated;
DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);

extern struct percpu_counter tcp_sockets_allocated;
extern unsigned long tcp_memory_pressure;

/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
		return true;

	return READ_ONCE(tcp_memory_pressure);
}
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1) before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
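
/* These compare correctly across wraparound, e.g. before(0xfffffff0, 0x10)
 * is true since (__s32)(0xfffffff0 - 0x10) is negative, and
 * between(2, 0xfffffff0, 0x10) holds because both differences are taken
 * modulo 2^32.
 */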

static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sk_wmem_queued_add(sk, -skb->truesize);
	if (!skb_zcopy_pure(skb))
		sk_mem_uncharge(sk, skb->truesize);
	else
		sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
	__kfree_skb(skb);
}

void sk_forced_mem_schedule(struct sock *sk, int size);

bool tcp_check_oom(const struct sock *sk, int shift);


extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define __TCP_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

void tcp_tasklet_init(void);

int tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

void tcp_remove_empty_skb(struct sock *sk);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
			 size_t size, struct ubuf_info *uarg);
void tcp_splice_eof(struct socket *sock);
int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
int tcp_wmem_schedule(struct sock *sk, int copy);
void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
	      int size_goal);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, int *karg);
enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
void tcp_twsk_purge(struct list_head *net_exit_list);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags);
struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
				     bool force_schedule);

static inline void tcp_dec_quickack_mode(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		/* How many ACKs S/ACKing new data have we sent? */
		const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;

		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define TCP_ECN_OK 1
#define TCP_ECN_QUEUE_CWR 2
#define TCP_ECN_DEMAND_CWR 4
#define TCP_ECN_SEEN 8

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
					      struct sk_buff *skb,
					      const struct tcphdr *th,
					      u32 *tw_isn);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req, bool fastopen,
			   bool *lost_race);
enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
				       struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void __tcp_close(struct sock *sk, long timeout);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
__poll_t tcp_poll(struct file *file, struct socket *sock,
		  struct poll_table_struct *wait);
int do_tcp_getsockopt(struct sock *sk, int level,
		      int optname, sockptr_t optval, sockptr_t optlen);
int tcp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen);
bool tcp_bpf_bypass_getsockopt(int level, int optname);
int do_tcp_setsockopt(struct sock *sk, int level, int optname,
		      sockptr_t optval, unsigned int optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		   unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		int flags, int *addr_len);
int tcp_set_rcvlowat(struct sock *sk, int val);
int tcp_set_window_clamp(struct sock *sk, int val);
void tcp_update_recv_tstamps(struct sk_buff *skb,
			     struct scm_timestamping_internal *tss);
void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
			struct scm_timestamping_internal *tss);
void tcp_data_ready(struct sock *sk);
#ifdef CONFIG_MMU
int tcp_mmap(struct file *file, struct socket *sock,
	     struct vm_area_struct *vma);
#endif
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx,
		       int estab, struct tcp_fastopen_cookie *foc);

/*
 * BPF SKB-less helpers
 */
u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss);
u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
			  const struct tcp_request_sock_ops *af_ops,
			  struct sock *sk, struct tcphdr *th);
/*
 * TCP v4 functions exported for the inet6 API
 */

void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
enum tcp_synack_type {
	TCP_SYNACK_NORMAL,
	TCP_SYNACK_FASTOPEN,
	TCP_SYNACK_COOKIE,
};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
				struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				enum tcp_synack_type synack_type,
				struct sk_buff *syn_skb);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
				 struct dst_entry *dst);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
					    struct sock *sk, struct sk_buff *skb,
					    struct tcp_options_received *tcp_opt,
					    int mss, u32 tsoff);

#if IS_ENABLED(CONFIG_BPF)
struct bpf_tcp_req_attrs {
	u32 rcv_tsval;
	u32 rcv_tsecr;
	u16 mss;
	u8 rcv_wscale;
	u8 snd_wscale;
	u8 ecn_ok;
	u8 wscale_ok;
	u8 sack_ok;
	u8 tstamp_ok;
	u8 usec_ts_ok;
	u8 reserved[3];
};
#endif

#ifdef CONFIG_SYN_COOKIES

/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash input and partially encoded into
 * the cookie value. A cookie is only validated further if the delta
 * between the current counter value and the encoded one is less than this,
 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 * the counter advances immediately after a cookie is generated).
 */
#define MAX_SYNCOOKIE_AGE 2
#define TCP_SYNCOOKIE_PERIOD (60 * HZ)
#define TCP_SYNCOOKIE_VALID (MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)

/* syncookies: remember time of last synqueue overflow,
 * but do not dirty this field too often (once per second is enough).
 * It is racy as we do not hold a lock, but the race is very minor.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			if (!time_between32(now, last_overflow,
					    last_overflow + HZ))
				WRITE_ONCE(reuse->synq_overflow_ts, now);
			return;
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
	if (!time_between32(now, last_overflow, last_overflow + HZ))
		WRITE_ONCE(tcp_sk_rw(sk)->rx_opt.ts_recent_stamp, now);
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			return !time_between32(now, last_overflow - HZ,
					       last_overflow + TCP_SYNCOOKIE_VALID);
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);

	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
	 * then we're under synflood. However, we have to use
	 * 'last_overflow - HZ' as lower bound. That's because a concurrent
	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
	 * jiffies but before we store .ts_recent_stamp into last_overflow,
	 * which could lead to rejecting a valid syncookie.
	 */
	return !time_between32(now, last_overflow - HZ,
			       last_overflow + TCP_SYNCOOKIE_VALID);
}

static inline u32 tcp_cookie_time(void)
{
	u64 val = get_jiffies_64();

	do_div(val, TCP_SYNCOOKIE_PERIOD);
	return val;
}

/* Convert one nsec 64bit timestamp to ts (ms or usec resolution) */
static inline u64 tcp_ns_to_ts(bool usec_ts, u64 val)
{
	if (usec_ts)
		return div_u64(val, NSEC_PER_USEC);

	return div_u64(val, NSEC_PER_MSEC);
}

u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
			     struct tcp_options_received *opt);

static inline bool cookie_ecn_ok(const struct net *net, const struct dst_entry *dst)
{
	return READ_ONCE(net->ipv4.sysctl_tcp_ecn) ||
	       dst_feature(dst, RTAX_FEATURE_ECN);
}

#if IS_ENABLED(CONFIG_BPF)
static inline bool cookie_bpf_ok(struct sk_buff *skb)
{
	return skb->sk;
}

struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb);
#else
static inline bool cookie_bpf_ok(struct sk_buff *skb)
{
	return false;
}

static inline struct request_sock *cookie_bpf_check(struct net *net, struct sock *sk,
						    struct sk_buff *skb)
{
	return NULL;
}
#endif

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */

void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
enum tcp_queue {
	TCP_FRAG_IN_WRITE_QUEUE,
	TCP_FRAG_IN_RTX_QUEUE,
};
int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
		 struct sk_buff *skb, u32 len,
		 unsigned int mss_now, gfp_t gfp);

void tcp_send_probe0(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority,
			   enum sk_rst_reason reason);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
			     const struct sk_buff *next_skb);

/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_done_with_error(struct sock *sk, int err);
void tcp_reset(struct sock *sk, struct sk_buff *skb);
void tcp_fin(struct sock *sk);
void tcp_check_space(struct sock *sk);
void tcp_sack_compress_send_ack(struct sock *sk);

static inline void tcp_cleanup_skb(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	secpath_reset(skb);
}

static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb)
{
	DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
	DEBUG_NET_WARN_ON_ONCE(secpath_exists(skb));
	__skb_queue_tail(&sk->sk_receive_queue, skb);
}

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
		__sock_put(sk);

	if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
		__sock_put(sk);

	inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);

/* Bound MSS / TSO packet size to half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window > TCP_MSS_DEFAULT)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
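
/* For example, with max_window = 10000 (> TCP_MSS_DEFAULT) the cutoff is
 * 5000, so a pktsize of 9000 is bounded to 5000 while a pktsize of 4000
 * passes through unchanged.
 */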

/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);
int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
void tcp_read_done(struct sock *sk, size_t len);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);

static inline void tcp_bound_rto(struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}
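
/* Note: tp->srtt_us holds the smoothed RTT left-shifted by 3 (8 * SRTT),
 * hence the >> 3 above; the sum is the usual "SRTT plus variance term"
 * RTO of RFC 6298, converted from usecs to jiffies.
 */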

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	/* mptcp hooks are only on the slow path */
	if (sk_is_mptcp((struct sock *)tp))
		return;

	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

u32 tcp_delack_max(const struct sock *sk);

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(const struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = inet_csk(sk)->icsk_rto_min;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

static inline u32 tcp_rto_min_us(const struct sock *sk)
{
	return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
	return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
	return minmax_get(&tp->rtt_min);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
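
/* For example, rcv_wup = 1000, rcv_wnd = 500 and rcv_nxt = 1300 leave an
 * advertised window of 200; had the peer pushed past the offered window
 * (say rcv_nxt = 1600), the negative result would be clamped to 0.
 */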

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result. The caller does these things
 * if necessary. This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP uses 32bit jiffies to save some space.
 * Note that this is different from tcp_time_stamp, which
 * historically was the same until linux-4.13.
 */
#define tcp_jiffies32 ((u32)jiffies)

/*
 * Deliver a 32bit value for TCP timestamp option (RFC 7323)
 * It is no longer tied to jiffies, but to a 1 ms clock.
 * Note: double check if you want to use tcp_jiffies32 instead of this.
 */
#define TCP_TS_HZ 1000

static inline u64 tcp_clock_ns(void)
{
	return ktime_get_ns();
}

static inline u64 tcp_clock_us(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
}

static inline u64 tcp_clock_ms(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_MSEC);
}

/* TCP Timestamp included in TS option (RFC 1323) can either use ms
 * or usec resolution. Each socket carries a flag to select one or the
 * other resolution, as the route attribute could change anytime.
 * Each flow must stick to the initial resolution.
 */
static inline u32 tcp_clock_ts(bool usec_ts)
{
	return usec_ts ? tcp_clock_us() : tcp_clock_ms();
}

static inline u32 tcp_time_stamp_ms(const struct tcp_sock *tp)
{
	return div_u64(tp->tcp_mstamp, USEC_PER_MSEC);
}

static inline u32 tcp_time_stamp_ts(const struct tcp_sock *tp)
{
	if (tp->tcp_usec_ts)
		return tp->tcp_mstamp;
	return tcp_time_stamp_ms(tp);
}

void tcp_mstamp_refresh(struct tcp_sock *tp);

static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
{
	return max_t(s64, t1 - t0, 0);
}

/* provide the departure time in us unit */
static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
{
	return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
}

/* Provide skb TSval in usec or ms unit */
static inline u32 tcp_skb_timestamp_ts(bool usec_ts, const struct sk_buff *skb)
{
	if (usec_ts)
		return tcp_skb_timestamp_us(skb);

	return div_u64(skb->skb_mstamp_ns, NSEC_PER_MSEC);
}

static inline u32 tcp_tw_tsval(const struct tcp_timewait_sock *tcptw)
{
	return tcp_clock_ts(tcptw->tw_sk.tw_usec_ts) + tcptw->tw_ts_offset;
}

static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq)
{
	return tcp_clock_ts(treq->req_usec_ts) + treq->ts_off;
}

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

#define TCPHDR_SYN_ECN (TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)

/* State flags for sacked in struct tcp_skb_cb */
enum tcp_skb_cb_sacked_flags {
	TCPCB_SACKED_ACKED = (1 << 0), /* SKB ACK'd by a SACK block */
	TCPCB_SACKED_RETRANS = (1 << 1), /* SKB retransmitted */
	TCPCB_LOST = (1 << 2), /* SKB is lost */
	TCPCB_TAGBITS = (TCPCB_SACKED_ACKED | TCPCB_SACKED_RETRANS |
			 TCPCB_LOST), /* All tag bits */
	TCPCB_REPAIRED = (1 << 4), /* SKB repaired (no skb_mstamp_ns) */
	TCPCB_EVER_RETRANS = (1 << 7), /* Ever retransmitted frame */
	TCPCB_RETRANS = (TCPCB_SACKED_RETRANS | TCPCB_EVER_RETRANS |
			 TCPCB_REPAIRED),
};

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	__u32 seq; /* Starting sequence number */
	__u32 end_seq; /* SEQ + FIN + SYN + datalen */
	union {
		/* Note :
		 * tcp_gso_segs/size are used in write queue only,
		 * cf tcp_skb_pcount()/tcp_skb_mss()
		 */
		struct {
			u16 tcp_gso_segs;
			u16 tcp_gso_size;
		};
	};
	__u8 tcp_flags; /* TCP header flags. (tcp[13]) */

	__u8 sacked; /* State flags for SACK. */
	__u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
	__u8 txstamp_ack:1, /* Record TX timestamp for ack? */
	     eor:1, /* Is skb MSG_EOR marked? */
	     has_rxtstamp:1, /* SKB has a RX timestamp */
	     unused:5;
	__u32 ack_seq; /* Sequence number ACK'd */
	union {
		struct {
#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
			/* There is space for up to 24 bytes */
			__u32 is_app_limited:1, /* cwnd not fully used? */
			      delivered_ce:20,
			      unused:11;
			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
			__u32 delivered;
			/* start of send pipeline phase */
			u64 first_tx_mstamp;
			/* when we reached the "delivered" count */
			u64 delivered_mstamp;
		} tx; /* only used for outgoing skbs */
		union {
			struct inet_skb_parm h4;
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_skb_parm h6;
#endif
		} header; /* For incoming skbs */
	};
};

#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))

extern const struct inet_connection_sock_af_ops ipv4_specific;

#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[]
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->header.h6.iif;
}

static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);

	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v6_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
		return TCP_SKB_CB(skb)->header.h6.iif;
#endif
	return 0;
}

extern const struct inet_connection_sock_af_ops ipv6_specific;

INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
void tcp_v6_early_demux(struct sk_buff *skb);

#endif

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v4_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
		return TCP_SKB_CB(skb)->header.h4.iif;
#endif
	return 0;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets. To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_size;
}

static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
	return likely(!TCP_SKB_CB(skb)->eor);
}

static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
					const struct sk_buff *from)
{
	/* skb_cmp_decrypted() not needed, use tcp_write_collapse_fence() */
	return likely(tcp_skb_can_collapse_to(to) &&
		      mptcp_skb_can_collapse(to, from) &&
		      skb_pure_zcopy_same(to, from) &&
		      skb_frags_readable(to) == skb_frags_readable(from));
}

static inline bool tcp_skb_can_collapse_rx(const struct sk_buff *to,
					   const struct sk_buff *from)
{
	return likely(mptcp_skb_can_collapse(to, from) &&
		      !skb_cmp_decrypted(to, from));
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START, /* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART, /* congestion window restart */
	CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */
	CA_EVENT_LOSS, /* loss timeout */
	CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */
	CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
	CA_ACK_SLOWPATH = (1 << 0), /* In slow path processing */
	CA_ACK_WIN_UPDATE = (1 << 1), /* ACK updated window */
	CA_ACK_ECE = (1 << 2), /* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX 16
#define TCP_CA_MAX 128
#define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CA_UNSPEC 0

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED 0x1
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN 0x2
#define TCP_CONG_MASK (TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)

union tcp_cc_info;

struct ack_sample {
	u32 pkts_acked;
	s32 rtt_us;
	u32 in_flight;
};

/* A rate sample measures the number of (original/retransmitted) data
 * packets delivered "delivered" over an interval of time "interval_us".
 * The tcp_rate.c code fills in the rate sample, and congestion
 * control modules that define a cong_control function to run at the end
 * of ACK processing can optionally choose to consult this sample when
 * setting cwnd and pacing rate.
 * A sample is invalid if "delivered" or "interval_us" is negative.
 */
struct rate_sample {
	u64 prior_mstamp; /* starting timestamp for interval */
	u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
	u32 prior_delivered_ce; /* tp->delivered_ce at "prior_mstamp" */
	s32 delivered; /* number of packets delivered over interval */
	s32 delivered_ce; /* number of packets delivered w/ CE marks */
	long interval_us; /* time for tp->delivered to incr "delivered" */
	u32 snd_interval_us; /* snd interval for delivered packets */
	u32 rcv_interval_us; /* rcv interval for delivered packets */
	long rtt_us; /* RTT of last (S)ACKed packet (or -1) */
	int losses; /* number of packets marked lost upon ACK */
	u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */
	u32 prior_in_flight; /* in flight before this ACK */
	u32 last_end_seq; /* end_seq of most recently ACKed packet */
	bool is_app_limited; /* is sample from packet with bubble in pipe? */
	bool is_retrans; /* is sample from retransmission? */
	bool is_ack_delayed; /* is this (likely) a delayed ACK? */
};

struct tcp_congestion_ops {
/* fast path fields are put first to fill one cache line */

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);

	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);

	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);

	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);

	/* call when ack arrives (optional) */
	void (*in_ack_event)(struct sock *sk, u32 flags);

	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);

	/* override sysctl_tcp_min_tso_segs */
	u32 (*min_tso_segs)(struct sock *sk);

	/* call when packets are delivered to update cwnd and pacing rate,
	 * after all the ca_state processing. (optional)
	 */
	void (*cong_control)(struct sock *sk, u32 ack, int flag, const struct rate_sample *rs);


	/* new value of cwnd after loss (required) */
	u32 (*undo_cwnd)(struct sock *sk);
	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
	u32 (*sndbuf_expand)(struct sock *sk);

/* control/slow paths put last */
	/* get info for inet_diag (optional) */
	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info);

	char name[TCP_CA_NAME_MAX];
	struct module *owner;
	struct list_head list;
	u32 key;
	u32 flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);
} ____cacheline_aligned_in_smp;

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
int tcp_update_congestion_control(struct tcp_congestion_ops *type,
				  struct tcp_congestion_ops *old_type);
int tcp_validate_congestion_control(struct tcp_congestion_ops *ca);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(struct net *net, const char *name);
void tcp_get_default_congestion_control(struct net *net, char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;

struct tcp_congestion_ops *tcp_ca_find(const char *name);
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	return NULL;
}
#endif

static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* From tcp_cong.c */
void tcp_set_ca_state(struct sock *sk, const u8 ca_state);

/* From tcp_rate.c */
void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
			    struct rate_sample *rs);
void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
		  bool is_sack_reneg, struct rate_sample *rs);
void tcp_rate_check_app_limited(struct sock *sk);

static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

/* These functions determine how the current flow behaves with respect to SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return likely(tp->rx_opt.sack_ok);
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge. In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
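
/* For example, packets_out = 10, sacked_out = 2, lost_out = 1 and
 * retrans_out = 1 give an estimate of 10 - (2 + 1) + 1 = 8 packets
 * in flight.
 */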

#define TCP_INFINITE_SSTHRESH 0x7fffffff

static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
{
	return tp->snd_cwnd;
}

static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
{
	WARN_ON_ONCE((int)val <= 0);
	tp->snd_cwnd = val;
}

static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
	return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
}

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
	       (1 << inet_csk(sk)->icsk_ca_state);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is cwnd reduction phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_in_cwnd_reduction(sk))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tcp_snd_cwnd(tp) >> 1) +
			    (tcp_snd_cwnd(tp) >> 2)));
}
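
/* (cwnd >> 1) + (cwnd >> 2) is 3/4 of cwnd, so e.g. cwnd = 16 yields
 * max(snd_ssthresh, 12).
 */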

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)

void tcp_enter_cwr(struct sock *sk);
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	return 3;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}

/* We follow the spirit of RFC2861 to validate cwnd but implement a more
 * flexible approach. The RFC suggests cwnd should not be raised unless
 * it was fully used previously. And that's exactly what we do in
 * congestion avoidance mode. But in slow start we allow cwnd to grow
 * as long as the application has used half the cwnd.
 * Example :
 * cwnd is 10 (IW10), but application sends 9 frames.
 * We allow cwnd to reach 18 when all frames are ACKed.
 * This check is safe because it's as aggressive as slow start which already
 * risks 100% overshoot. The advantage is that we discourage the application
 * from sending more filler packets or data to artificially blow up the cwnd
 * usage, and allow an application-limited process to probe bw more
 * aggressively.
 */
static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tp->is_cwnd_limited)
		return true;

	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
	if (tcp_in_slow_start(tp))
		return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;

	return false;
}

/* BBR congestion control needs pacing.
 * Same remark for SO_MAX_PACING_RATE.
 * sch_fq packet scheduler is efficiently handling pacing,
 * but is not always installed/used.
 * Return true if TCP stack should pace packets itself.
 */
static inline bool tcp_needs_internal_pacing(const struct sock *sk)
{
	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}

/* Estimate in how many jiffies the next packet for this flow can be sent.
 * Scheduling a retransmit timer too early would be silly.
 */
static inline unsigned long tcp_pacing_delay(const struct sock *sk)
{
	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;

	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
}

static inline void tcp_reset_xmit_timer(struct sock *sk,
					const int what,
					unsigned long when,
					const unsigned long max_when)
{
	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
				  max_when);
}

/* Something is really bad, we could not queue an additional packet,
 * because qdisc is full or receiver sent a 0 window, or we are paced.
 * We do not want to add fuel to the fire, or abort too early,
 * so make sure the timer we arm now is at least 200ms in the future,
 * regardless of current icsk_rto value (as it could be ~2ms).
 */
static inline unsigned long tcp_probe0_base(const struct sock *sk)
{
	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
}

/* Variant of inet_csk_rto_backoff() used for zero window probes */
static inline unsigned long tcp_probe0_when(const struct sock *sk,
					    unsigned long max_when)
{
	u8 backoff = min_t(u8, ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1,
			   inet_csk(sk)->icsk_backoff);
	u64 when = (u64)tcp_probe0_base(sk) << backoff;

	return (unsigned long)min_t(u64, when, max_when);
}
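
/* TCP_RTO_MAX / TCP_RTO_MIN is (120*HZ) / (HZ/5) = 600, so the backoff
 * exponent is capped at ilog2(600) + 1 = 10: the probe interval never
 * exceeds 1024 times tcp_probe0_base() before the max_when clamp.
 */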

static inline void tcp_check_probe_timer(struct sock *sk)
{
	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
				     tcp_probe0_base(sk), TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
	       __skb_checksum_complete(skb);
}

bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
		     enum skb_drop_reason *reason);


int tcp_filter(struct sock *sk, struct sk_buff *skb);
void tcp_set_state(struct sock *sk, int state);
void tcp_done(struct sock *sk);
int tcp_abort(struct sock *sk, int err);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

void tcp_cwnd_restart(struct sock *sk, s32 delta);

static inline void tcp_slow_start_after_idle_check(struct sock *sk)
{
	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta;

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
	    tp->packets_out || ca_ops->cong_control)
		return;
	delta = tcp_jiffies32 - tp->lsndtime;
	if (delta > inet_csk(sk)->icsk_rto)
		tcp_cwnd_restart(sk, delta);
}

/* Determine a window scaling and initial window to offer. */
void tcp_select_initial_window(const struct sock *sk, int __space,
			       __u32 mss, __u32 *rcv_wnd,
			       __u32 *window_clamp, int wscale_ok,
			       __u8 *rcv_wscale, __u32 init_rcv_wnd);

static inline int __tcp_win_from_space(u8 scaling_ratio, int space)
{
	s64 scaled_space = (s64)space * scaling_ratio;

	return scaled_space >> TCP_RMEM_TO_WIN_SCALE;
}

static inline int tcp_win_from_space(const struct sock *sk, int space)
{
	return __tcp_win_from_space(tcp_sk(sk)->scaling_ratio, space);
}

/* inverse of __tcp_win_from_space() */
static inline int __tcp_space_from_win(u8 scaling_ratio, int win)
{
	u64 val = (u64)win << TCP_RMEM_TO_WIN_SCALE;

	do_div(val, scaling_ratio);
	return val;
}

static inline int tcp_space_from_win(const struct sock *sk, int win)
{
	return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win);
}

/* Assume a 50% default for skb->len/skb->truesize ratio.
 * This may be adjusted later in tcp_measure_rcv_mss().
 */
#define TCP_DEFAULT_SCALING_RATIO (1 << (TCP_RMEM_TO_WIN_SCALE - 1))

static inline void tcp_scaling_ratio_init(struct sock *sk)
{
	tcp_sk(sk)->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
}
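
/* With the default ratio, win_from_space() reduces to win = space / 2:
 * space * (1 << (TCP_RMEM_TO_WIN_SCALE - 1)) >> TCP_RMEM_TO_WIN_SCALE,
 * matching the assumed 50% len/truesize ratio above.
 */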

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
				      READ_ONCE(sk->sk_backlog.len) -
				      atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}

static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh)
{
	int unused_mem = sk_unused_reserved_mem(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh);
	if (unused_mem)
		tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
					 tcp_win_from_space(sk, unused_mem));
}

static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
{
	__tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);
}

void tcp_cleanup_rbuf(struct sock *sk, int copied);
void __tcp_cleanup_rbuf(struct sock *sk, int copied);


/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
 * If 87.5% (7/8) of the space has been consumed, we want to override
 * SO_RCVLOWAT constraint, since we are receiving skbs with too small
 * len/truesize ratio.
 */
static inline bool tcp_rmem_pressure(const struct sock *sk)
{
	int rcvbuf, threshold;

	if (tcp_under_memory_pressure(sk))
		return true;

	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
	threshold = rcvbuf - (rcvbuf >> 3);

	return atomic_read(&sk->sk_rmem_alloc) > threshold;
}

static inline bool tcp_epollin_ready(const struct sock *sk, int target)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);

	if (avail <= 0)
		return false;

	return (avail >= target) || tcp_rmem_pressure(sk) ||
	       (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
}

extern void tcp_openreq_init_rwin(struct request_sock *req,
				  const struct sock *sk_listener,
				  const struct dst_entry *dst);

void tcp_enter_memory_pressure(struct sock *sk);
void tcp_leave_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);
	int val;

	/* Paired with WRITE_ONCE() in tcp_sock_set_keepintvl()
	 * and do_tcp_setsockopt().
	 */
	val = READ_ONCE(tp->keepalive_intvl);

	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);
	int val;

	/* Paired with WRITE_ONCE() in tcp_sock_set_keepidle_locked() */
	val = READ_ONCE(tp->keepalive_time);

	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);
	int val;

	/* Paired with WRITE_ONCE() in tcp_sock_set_keepcnt()
	 * and do_tcp_setsockopt().
	 */
	val = READ_ONCE(tp->keepalive_probes);

	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
		     tcp_jiffies32 - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? :
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}
1699
static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
				  int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return true;
	if (unlikely(!time_before32(ktime_get_seconds(),
				    rx_opt->ts_recent_stamp + TCP_PAWS_WRAP)))
		return true;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
	 * while the following tcp messages carry valid values. Ignore a
	 * 0 value, or else a 'negative' tsval might prevent us from
	 * accepting their packets.
	 */
	if (!rx_opt->ts_recent)
		return true;
	return false;
}

static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
				   int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return false;

	/* RST segments are not recommended to carry timestamps,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   That is certainly a mistake; the reasons for this constraint
	   need to be understood before relaxing it: if a peer reboots,
	   its clock may go out of sync and half-open connections will
	   not be reset. Actually, the problem would not exist if all
	   implementations followed the draft about maintaining clocks
	   across reboots. Linux-2.2 DOES NOT!

	   However, we can relax the time bounds for RST segments to MSL.
	 */
	if (rst && !time_before32(ktime_get_seconds(),
				  rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
		return false;
	return true;
}

bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
			  int mib_idx, u32 *last_oow_ack_time);

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}

#define tcp_md5_addr tcp_ao_addr

/* - key database */
struct tcp_md5sig_key {
	struct hlist_node	node;
	u8			keylen;
	u8			family; /* AF_INET or AF_INET6 */
	u8			prefixlen;
	u8			flags;
	union tcp_md5_addr	addr;
	int			l3index; /* set if key added with L3 scope */
	u8			key[TCP_MD5SIG_MAXKEYLEN];
	struct rcu_head		rcu;
};

/* - sock block */
struct tcp_md5sig_info {
	struct hlist_head	head;
	struct rcu_head		rcu;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if IS_ENABLED(CONFIG_IPV6)
	struct tcp6_pseudohdr ip6;
#endif
};

/**
 * struct tcp_sigpool - per-CPU pool of ahash_requests
 * @scratch: per-CPU temporary area that can be used between
 *	     tcp_sigpool_start() and tcp_sigpool_end() to perform
 *	     a crypto request
 * @req: pre-allocated ahash request
 */
struct tcp_sigpool {
	void *scratch;
	struct ahash_request *req;
};

int tcp_sigpool_alloc_ahash(const char *alg, size_t scratch_size);
void tcp_sigpool_get(unsigned int id);
void tcp_sigpool_release(unsigned int id);
int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp,
			      const struct sk_buff *skb,
			      unsigned int header_len);

/**
 * tcp_sigpool_start - disable bh and start using tcp_sigpool_ahash
 * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
 * @c: returned tcp_sigpool for usage (uninitialized on failure)
 *
 * Returns 0 on success, error otherwise.
 */
int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c);
/**
 * tcp_sigpool_end - enable bh and stop using tcp_sigpool
 * @c: tcp_sigpool context that was returned by tcp_sigpool_start()
 */
void tcp_sigpool_end(struct tcp_sigpool *c);
size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len);
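
/* Usage sketch (illustrative only; "demo" names are hypothetical):
 * callers bracket the crypto request with tcp_sigpool_start() and
 * tcp_sigpool_end(), which also disable/enable BH around the per-CPU
 * pool access.
 */
#if 0
static int sigpool_hash_demo(unsigned int pool_id, const struct sk_buff *skb,
			     unsigned int header_len)
{
	struct tcp_sigpool hp;
	int err;

	err = tcp_sigpool_start(pool_id, &hp);	/* disables BH on success */
	if (err)
		return err;
	err = tcp_sigpool_hash_skb_data(&hp, skb, header_len);
	tcp_sigpool_end(&hp);			/* re-enables BH */
	return err;
}
#endif
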
/* - functions */
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, int l3index, u8 flags,
		   const u8 *newkey, u8 newkeylen);
int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
		     int family, u8 prefixlen, int l3index,
		     struct tcp_md5sig_key *key);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, int l3index, u8 flags);
void tcp_clear_md5_list(struct sock *sk);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk);

#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
					   const union tcp_md5_addr *addr,
					   int family, bool any_l3index);
static inline struct tcp_md5sig_key *
tcp_md5_do_lookup(const struct sock *sk, int l3index,
		  const union tcp_md5_addr *addr, int family)
{
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return NULL;
	return __tcp_md5_do_lookup(sk, l3index, addr, family, false);
}

static inline struct tcp_md5sig_key *
tcp_md5_do_lookup_any_l3index(const struct sock *sk,
			      const union tcp_md5_addr *addr, int family)
{
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return NULL;
	return __tcp_md5_do_lookup(sk, 0, addr, family, true);
}

#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *
tcp_md5_do_lookup(const struct sock *sk, int l3index,
		  const union tcp_md5_addr *addr, int family)
{
	return NULL;
}

static inline struct tcp_md5sig_key *
tcp_md5_do_lookup_any_l3index(const struct sock *sk,
			      const union tcp_md5_addr *addr, int family)
{
	return NULL;
}

#define tcp_twsk_md5_key(twsk)	NULL
#endif

int tcp_md5_alloc_sigpool(void);
void tcp_md5_release_sigpool(void);
void tcp_md5_add_sigpool(void);
extern int tcp_md5_sigpool_id;

int tcp_md5_hash_key(struct tcp_sigpool *hp,
		     const struct tcp_md5sig_key *key);

/* From tcp_fastopen.c */
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie);
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp);
struct tcp_fastopen_request {
	/* Fast Open cookie. Size 0 means a cookie request */
	struct tcp_fastopen_cookie	cookie;
	struct msghdr			*data;	/* data in MSG_FASTOPEN */
	size_t				size;
	int				copied;	/* queued in tcp_connect() */
	struct ubuf_info		*uarg;
};
void tcp_free_fastopen_req(struct tcp_sock *tp);
void tcp_fastopen_destroy_cipher(struct sock *sk);
void tcp_fastopen_ctx_destroy(struct net *net);
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
			      void *primary_key, void *backup_key);
int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
			    u64 *key);
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      const struct dst_entry *dst);
void tcp_fastopen_init_key_once(struct net *net);
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie);
bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
#define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
#define TCP_FASTOPEN_KEY_MAX 2
#define TCP_FASTOPEN_KEY_BUF_LENGTH \
	(TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)

/* Fastopen key context */
struct tcp_fastopen_context {
	siphash_key_t	key[TCP_FASTOPEN_KEY_MAX];
	int		num;
	struct rcu_head	rcu;
};

void tcp_fastopen_active_disable(struct sock *sk);
bool tcp_fastopen_active_should_disable(struct sock *sk);
void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);

/* Caller needs to wrap with rcu_read_(un)lock() */
static inline
struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
{
	struct tcp_fastopen_context *ctx;

	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
	if (!ctx)
		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
	return ctx;
}

static inline
bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
			       const struct tcp_fastopen_cookie *orig)
{
	if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
	    orig->len == foc->len &&
	    !memcmp(orig->val, foc->val, foc->len))
		return true;
	return false;
}

static inline
int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
{
	return ctx->num;
}
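
/* Usage sketch (illustrative only; "demo" name is hypothetical): as the
 * comment above tcp_fastopen_get_ctx() notes, the lookup and any access
 * to the returned context must happen under rcu_read_lock().
 */
#if 0
static int fastopen_key_count_demo(const struct sock *sk)
{
	struct tcp_fastopen_context *ctx;
	int num = 0;

	rcu_read_lock();
	ctx = tcp_fastopen_get_ctx(sk);
	if (ctx)
		num = tcp_fastopen_context_len(ctx);
	rcu_read_unlock();
	return num;
}
#endif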

/* Latencies incurred by various limits for a sender. They are
 * chronograph-like stats that are mutually exclusive.
 */
enum tcp_chrono {
	TCP_CHRONO_UNSPEC,
	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
	__TCP_CHRONO_MAX,
};

void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);

/* This helper is needed, because skb->tcp_tsorted_anchor uses
 * the same memory storage as skb->destructor/_skb_refdst
 */
static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
{
	skb->destructor = NULL;
	skb->_skb_refdst = 0UL;
}

#define tcp_skb_tsorted_save(skb) {		\
	unsigned long _save = skb->_skb_refdst;	\
	skb->_skb_refdst = 0UL;

#define tcp_skb_tsorted_restore(skb)		\
	skb->_skb_refdst = _save;		\
}
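
/* Usage sketch (illustrative only; "demo" name is hypothetical): the
 * two macros above open and close a block; the anchor value is stashed
 * and skb->_skb_refdst cleared while the block body runs, then restored.
 */
#if 0
static void tsorted_save_demo(struct sk_buff *skb)
{
	tcp_skb_tsorted_save(skb) {
		/* _skb_refdst is zero here, so code that interprets it
		 * (e.g. a transmit path) sees a clean field instead of
		 * the overlapping tcp_tsorted_anchor contents.
		 */
	} tcp_skb_tsorted_restore(skb);
}
#endif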

void tcp_write_queue_purge(struct sock *sk);

static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
{
	return skb_rb_first(&sk->tcp_rtx_queue);
}

static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
{
	return skb_rb_last(&sk->tcp_rtx_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

/**
 * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue
 * @sk: socket
 *
 * Since the write queue can have a temporary empty skb in it,
 * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
 */
static inline bool tcp_write_queue_empty(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return tp->write_seq == tp->snd_nxt;
}

static inline bool tcp_rtx_queue_empty(const struct sock *sk)
{
	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
}

static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
{
	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_write_queue.next == skb)
		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
}

/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	tcp_skb_tsorted_anchor_cleanup(skb);
	__skb_unlink(skb, &sk->sk_write_queue);
}

void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);

static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
{
	tcp_skb_tsorted_anchor_cleanup(skb);
	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
}

static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
{
	list_del(&skb->tcp_tsorted_anchor);
	tcp_rtx_queue_unlink(skb, sk);
	tcp_wmem_free_skb(sk, skb);
}

static inline void tcp_write_collapse_fence(struct sock *sk)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (skb)
		TCP_SKB_CB(skb)->eor = 1;
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	if (tcp_send_head(sk)) {
		struct tcp_sock *tp = tcp_sk(sk);

		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
	}
}

/* Start sequence of the skb just after the highest skb with SACKed
 * bit, valid only if sacked_out > 0 or when the caller has ensured
 * validity by itself.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;

	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}

static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
}

/* Called when old skb is about to be deleted and replaced by new skb */
static inline void tcp_highest_sack_replace(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	if (old == tcp_highest_sack(sk))
		tcp_sk(sk)->highest_sack = new;
}

/* This helper checks if socket has IP_TRANSPARENT set */
static inline bool inet_sk_transparent(const struct sock *sk)
{
	switch (sk->sk_state) {
	case TCP_TIME_WAIT:
		return inet_twsk(sk)->tw_transparent;
	case TCP_NEW_SYN_RECV:
		return inet_rsk(inet_reqsk(sk))->no_srccheck;
	}
	return inet_test_bit(TRANSPARENT, sk);
}

/* Determines whether this is a thin stream (which may suffer from
 * increased latency). Used to trigger latency-reducing mechanisms.
 */
static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
{
	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
}

/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_ESTABLISHED,
};

void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void tcp_seq_stop(struct seq_file *seq, void *v);

struct tcp_seq_afinfo {
	sa_family_t			family;
};

struct tcp_iter_state {
	struct seq_net_private	p;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, offset, sbucket, num;
	loff_t			last_pos;
};

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

void tcp_v4_destroy_sock(struct sock *sk);

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features);
struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb);
struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th);
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct tcphdr *th);
INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
#ifdef CONFIG_INET
void tcp_gro_complete(struct sk_buff *skb);
#else
static inline void tcp_gro_complete(struct sk_buff *skb) { }
#endif

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);

static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);
	u32 val;

	val = READ_ONCE(tp->notsent_lowat);

	return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
}

bool tcp_stream_memory_free(const struct sock *sk, int wake);

#ifdef CONFIG_PROC_FS
int tcp4_proc_init(void);
void tcp4_proc_exit(void);
#endif

int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
int tcp_conn_request(struct request_sock_ops *rsk_ops,
		     const struct tcp_request_sock_ops *af_ops,
		     struct sock *sk, struct sk_buff *skb);

/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
						const struct sock *addr_sk);
	int		(*calc_md5_hash)(char *location,
					 const struct tcp_md5sig_key *md5,
					 const struct sock *sk,
					 const struct sk_buff *skb);
	int		(*md5_parse)(struct sock *sk,
				     int optname,
				     sockptr_t optval,
				     int optlen);
#endif
#ifdef CONFIG_TCP_AO
	int (*ao_parse)(struct sock *sk, int optname, sockptr_t optval, int optlen);
	struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
					struct sock *addr_sk,
					int sndid, int rcvid);
	int (*ao_calc_key_sk)(struct tcp_ao_key *mkt, u8 *key,
			      const struct sock *sk,
			      __be32 sisn, __be32 disn, bool send);
	int (*calc_ao_hash)(char *location, struct tcp_ao_key *ao,
			    const struct sock *sk, const struct sk_buff *skb,
			    const u8 *tkey, int hash_offset, u32 sne);
#endif
};

struct tcp_request_sock_ops {
	u16 mss_clamp;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
						 const struct sock *addr_sk);
	int		(*calc_md5_hash) (char *location,
					  const struct tcp_md5sig_key *md5,
					  const struct sock *sk,
					  const struct sk_buff *skb);
#endif
#ifdef CONFIG_TCP_AO
	struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
					struct request_sock *req,
					int sndid, int rcvid);
	int (*ao_calc_key)(struct tcp_ao_key *mkt, u8 *key, struct request_sock *sk);
	int (*ao_synack_hash)(char *ao_hash, struct tcp_ao_key *mkt,
			      struct request_sock *req, const struct sk_buff *skb,
			      int hash_offset, u32 sne);
#endif
#ifdef CONFIG_SYN_COOKIES
	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
				 __u16 *mss);
#endif
	struct dst_entry *(*route_req)(const struct sock *sk,
				       struct sk_buff *skb,
				       struct flowi *fl,
				       struct request_sock *req,
				       u32 tw_isn);
	u32 (*init_seq)(const struct sk_buff *skb);
	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
			   struct flowi *fl, struct request_sock *req,
			   struct tcp_fastopen_cookie *foc,
			   enum tcp_synack_type synack_type,
			   struct sk_buff *syn_skb);
};

extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
#if IS_ENABLED(CONFIG_IPV6)
extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
#endif

#ifdef CONFIG_SYN_COOKIES
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 const struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	tcp_synq_overflow(sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
	return ops->cookie_init_seq(skb, mss);
}
#else
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 const struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	return 0;
}
#endif

struct tcp_key {
	union {
		struct {
			struct tcp_ao_key *ao_key;
			char *traffic_key;
			u32 sne;
			u8 rcv_next;
		};
		struct tcp_md5sig_key *md5_key;
	};
	enum {
		TCP_KEY_NONE = 0,
		TCP_KEY_MD5,
		TCP_KEY_AO,
	} type;
};

static inline void tcp_get_current_key(const struct sock *sk,
				       struct tcp_key *out)
{
#if defined(CONFIG_TCP_AO) || defined(CONFIG_TCP_MD5SIG)
	const struct tcp_sock *tp = tcp_sk(sk);
#endif

#ifdef CONFIG_TCP_AO
	if (static_branch_unlikely(&tcp_ao_needed.key)) {
		struct tcp_ao_info *ao;

		ao = rcu_dereference_protected(tp->ao_info,
					       lockdep_sock_is_held(sk));
		if (ao) {
			out->ao_key = READ_ONCE(ao->current_key);
			out->type = TCP_KEY_AO;
			return;
		}
	}
#endif
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed.key) &&
	    rcu_access_pointer(tp->md5sig_info)) {
		out->md5_key = tp->af_specific->md5_lookup(sk, sk);
		if (out->md5_key) {
			out->type = TCP_KEY_MD5;
			return;
		}
	}
#endif
	out->type = TCP_KEY_NONE;
}

static inline bool tcp_key_is_md5(const struct tcp_key *key)
{
	if (static_branch_tcp_md5())
		return key->type == TCP_KEY_MD5;
	return false;
}

static inline bool tcp_key_is_ao(const struct tcp_key *key)
{
	if (static_branch_tcp_ao())
		return key->type == TCP_KEY_AO;
	return false;
}

int tcpv4_offload_init(void);

void tcp_v4_init(void);
void tcp_init(void);

/* tcp_recovery.c */
void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
				u32 reo_wnd);
extern bool tcp_rack_mark_lost(struct sock *sk);
extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
			     u64 xmit_time);
extern void tcp_rack_reo_timeout(struct sock *sk);
extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);

/* tcp_plb.c */

/*
 * Scaling factor for fractions in PLB. For example, tcp_plb_update_state
 * expects a cong_ratio argument representing the fraction of traffic that
 * experienced congestion over a single RTT. In order to avoid floating
 * point operations, this fraction should be mapped to (1 << TCP_PLB_SCALE)
 * and passed in.
 */
#define TCP_PLB_SCALE 8

/* State for PLB (Protective Load Balancing) for a single TCP connection. */
struct tcp_plb_state {
	u8	consec_cong_rounds:5, /* consecutive congested rounds */
		unused:3;
	u32	pause_until; /* jiffies32 when PLB can resume rerouting */
};

static inline void tcp_plb_init(const struct sock *sk,
				struct tcp_plb_state *plb)
{
	plb->consec_cong_rounds = 0;
	plb->pause_until = 0;
}
void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
			  const int cong_ratio);
void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb);
void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb);
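
/* Illustrative sketch (not compiled; names are hypothetical): mapping a
 * congestion fraction to the fixed-point cong_ratio argument that
 * tcp_plb_update_state() expects, per the TCP_PLB_SCALE comment above.
 */
#if 0
static void plb_demo(struct sock *sk, struct tcp_plb_state *plb,
		     u32 congested_segs, u32 delivered_segs)
{
	/* e.g. 25% congested traffic maps to (1 << TCP_PLB_SCALE) / 4 == 64 */
	int cong_ratio = (congested_segs << TCP_PLB_SCALE) / delivered_segs;

	tcp_plb_update_state(sk, plb, cong_ratio);
}
#endif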

static inline void tcp_warn_once(const struct sock *sk, bool cond, const char *str)
{
	WARN_ONCE(cond,
		  "%scwnd:%u out:%u sacked:%u lost:%u retrans:%u tlp_high_seq:%u sk_state:%u ca_state:%u advmss:%u mss_cache:%u pmtu:%u\n",
		  str,
		  tcp_snd_cwnd(tcp_sk(sk)),
		  tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out,
		  tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out,
		  tcp_sk(sk)->tlp_high_seq, sk->sk_state,
		  inet_csk(sk)->icsk_ca_state,
		  tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache,
		  inet_csk(sk)->icsk_pmtu_cookie);
}

/* At how many usecs into the future should the RTO fire? */
static inline s64 tcp_rto_delta_us(const struct sock *sk)
{
	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
	u32 rto = inet_csk(sk)->icsk_rto;

	if (likely(skb)) {
		u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);

		return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
	} else {
		tcp_warn_once(sk, 1, "rtx queue empty: ");
		return jiffies_to_usecs(rto);
	}
}

/*
 * Save and compile IPv4 options, return a pointer to it
 */
static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
							 struct sk_buff *skb)
{
	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
	struct ip_options_rcu *dopt = NULL;

	if (opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
			kfree(dopt);
			dopt = NULL;
		}
	}
	return dopt;
}

/* locally generated TCP pure ACKs have skb->truesize == 2
 * (check tcp_send_ack() in net/ipv4/tcp_output.c)
 * This is much faster than dissecting the packet to find out.
 * (Think of GRE encapsulations, IPv4, IPv6, ...)
 */
static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
{
	return skb->truesize == 2;
}

static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
{
	skb->truesize = 2;
}

static inline int tcp_inq(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		answ = 0;
	} else if (sock_flag(sk, SOCK_URGINLINE) ||
		   !tp->urg_data ||
		   before(tp->urg_seq, tp->copied_seq) ||
		   !before(tp->urg_seq, tp->rcv_nxt)) {

		answ = tp->rcv_nxt - tp->copied_seq;

		/* Subtract 1, if FIN was received */
		if (answ && sock_flag(sk, SOCK_DONE))
			answ--;
	} else {
		answ = tp->urg_seq - tp->copied_seq;
	}

	return answ;
}

int tcp_peek_len(struct socket *sock);

static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
{
	u16 segs_in;

	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);

	/* We update these fields while other threads might
	 * read them from tcp_get_info()
	 */
	WRITE_ONCE(tp->segs_in, tp->segs_in + segs_in);
	if (skb->len > tcp_hdrlen(skb))
		WRITE_ONCE(tp->data_segs_in, tp->data_segs_in + segs_in);
}

/*
 * TCP listen path runs lockless.
 * We forced "struct sock" to be const qualified to make sure
 * we don't modify one of its fields by mistake.
 * Here, we increment sk_drops, which is an atomic_t, so we can safely
 * make the sock writable again.
 */
static inline void tcp_listendrop(const struct sock *sk)
{
	atomic_inc(&((struct sock *)sk)->sk_drops);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
}

enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);

/*
 * Interface for adding Upper Level Protocols over TCP
 */

#define TCP_ULP_NAME_MAX	16
#define TCP_ULP_MAX		128
#define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)

struct tcp_ulp_ops {
	struct list_head	list;

	/* initialize ulp */
	int (*init)(struct sock *sk);
	/* update ulp */
	void (*update)(struct sock *sk, struct proto *p,
		       void (*write_space)(struct sock *sk));
	/* cleanup ulp */
	void (*release)(struct sock *sk);
	/* diagnostic */
	int (*get_info)(struct sock *sk, struct sk_buff *skb);
	size_t (*get_info_size)(const struct sock *sk);
	/* clone ulp */
	void (*clone)(const struct request_sock *req, struct sock *newsk,
		      const gfp_t priority);

	char		name[TCP_ULP_NAME_MAX];
	struct module	*owner;
};
int tcp_register_ulp(struct tcp_ulp_ops *type);
void tcp_unregister_ulp(struct tcp_ulp_ops *type);
int tcp_set_ulp(struct sock *sk, const char *name);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
void tcp_update_ulp(struct sock *sk, struct proto *p,
		    void (*write_space)(struct sock *sk));

#define MODULE_ALIAS_TCP_ULP(name)				\
	__MODULE_INFO(alias, alias_userspace, name);		\
	__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
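
/* Registration sketch (illustrative only; the "demo" ULP is
 * hypothetical): a module fills in tcp_ulp_ops and registers it, after
 * which applications can select it via setsockopt(TCP_ULP) with "demo".
 */
#if 0
static int demo_ulp_init(struct sock *sk)
{
	return 0;	/* a real ULP would take over sk->sk_prot here */
}

static struct tcp_ulp_ops demo_ulp_ops = {
	.name	= "demo",
	.owner	= THIS_MODULE,
	.init	= demo_ulp_init,
};

static int __init demo_ulp_register(void)
{
	return tcp_register_ulp(&demo_ulp_ops);
}
MODULE_ALIAS_TCP_ULP("demo");
#endif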

#ifdef CONFIG_NET_SOCK_MSG
struct sk_msg;
struct sk_psock;

#ifdef CONFIG_BPF_SYSCALL
int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
#endif /* CONFIG_BPF_SYSCALL */

#ifdef CONFIG_INET
void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
#else
static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
{
}
#endif

int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
			  struct sk_msg *msg, u32 bytes, int flags);
#endif /* CONFIG_NET_SOCK_MSG */

#if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
}
#endif

#ifdef CONFIG_CGROUP_BPF
static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
				      struct sk_buff *skb,
				      unsigned int end_offset)
{
	skops->skb = skb;
	skops->skb_data_end = skb->data + end_offset;
}
#else
static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
				      struct sk_buff *skb,
				      unsigned int end_offset)
{
}
#endif

/* Call BPF_SOCK_OPS program that returns an int. If the return value
 * is < 0, then the BPF op failed (for example if the loaded BPF
 * program does not support the chosen operation or there is no BPF
 * program loaded).
 */
#ifdef CONFIG_BPF
static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
	struct bpf_sock_ops_kern sock_ops;
	int ret;

	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
	if (sk_fullsock(sk)) {
		sock_ops.is_fullsock = 1;
		sock_owned_by_me(sk);
	}

	sock_ops.sk = sk;
	sock_ops.op = op;
	if (nargs > 0)
		memcpy(sock_ops.args, args, nargs * sizeof(*args));

	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
	if (ret == 0)
		ret = sock_ops.reply;
	else
		ret = -1;
	return ret;
}

static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
{
	u32 args[2] = {arg1, arg2};

	return tcp_call_bpf(sk, op, 2, args);
}

static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
				    u32 arg3)
{
	u32 args[3] = {arg1, arg2, arg3};

	return tcp_call_bpf(sk, op, 3, args);
}

#else
static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
	return -EPERM;
}

static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
{
	return -EPERM;
}

static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
				    u32 arg3)
{
	return -EPERM;
}

#endif

static inline u32 tcp_timeout_init(struct sock *sk)
{
	int timeout;

	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);

	if (timeout <= 0)
		timeout = TCP_TIMEOUT_INIT;
	return min_t(int, timeout, TCP_RTO_MAX);
}

static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
{
	int rwnd;

	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);

	if (rwnd < 0)
		rwnd = 0;
	return rwnd;
}

static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
{
	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
}

static inline void tcp_bpf_rtt(struct sock *sk, long mrtt, u32 srtt)
{
	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
		tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_RTT_CB, mrtt, srtt);
}

#if IS_ENABLED(CONFIG_SMC)
extern struct static_key_false tcp_have_smc;
#endif

#if IS_ENABLED(CONFIG_TLS_DEVICE)
void clean_acked_data_enable(struct inet_connection_sock *icsk,
			     void (*cad)(struct sock *sk, u32 ack_seq));
void clean_acked_data_disable(struct inet_connection_sock *icsk);
void clean_acked_data_flush(void);
#endif

DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
static inline void tcp_add_tx_delay(struct sk_buff *skb,
				    const struct tcp_sock *tp)
{
	if (static_branch_unlikely(&tcp_tx_delay_enabled))
		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
}

/* Compute Earliest Departure Time for some control packets
 * like ACK or RST for TIME_WAIT or non ESTABLISHED sockets.
 */
static inline u64 tcp_transmit_time(const struct sock *sk)
{
	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
			tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;

		return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
	}
	return 0;
}

static inline int tcp_parse_auth_options(const struct tcphdr *th,
	const u8 **md5_hash, const struct tcp_ao_hdr **aoh)
{
	const u8 *md5_tmp, *ao_tmp;
	int ret;

	ret = tcp_do_parse_auth_options(th, &md5_tmp, &ao_tmp);
	if (ret)
		return ret;

	if (md5_hash)
		*md5_hash = md5_tmp;

	if (aoh) {
		if (!ao_tmp)
			*aoh = NULL;
		else
			*aoh = (struct tcp_ao_hdr *)(ao_tmp - 2);
	}

	return 0;
}
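
/* Usage sketch (illustrative only; "demo" name is hypothetical): a
 * receive-path caller distinguishes the two authentication options;
 * tcp_do_parse_auth_options() reports malformed or conflicting option
 * layouts via a nonzero return.
 */
#if 0
static bool has_md5_option_demo(const struct tcphdr *th)
{
	const struct tcp_ao_hdr *aoh;
	const u8 *md5;

	if (tcp_parse_auth_options(th, &md5, &aoh))
		return false;	/* parse error */
	return md5 && !aoh;
}
#endif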

static inline bool tcp_ao_required(struct sock *sk, const void *saddr,
				   int family, int l3index, bool stat_inc)
{
#ifdef CONFIG_TCP_AO
	struct tcp_ao_info *ao_info;
	struct tcp_ao_key *ao_key;

	if (!static_branch_unlikely(&tcp_ao_needed.key))
		return false;

	ao_info = rcu_dereference_check(tcp_sk(sk)->ao_info,
					lockdep_sock_is_held(sk));
	if (!ao_info)
		return false;

	ao_key = tcp_ao_do_lookup(sk, l3index, saddr, family, -1, -1);
	if (ao_info->ao_required || ao_key) {
		if (stat_inc) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOREQUIRED);
			atomic64_inc(&ao_info->counters.ao_required);
		}
		return true;
	}
#endif
	return false;
}

enum skb_drop_reason tcp_inbound_hash(struct sock *sk,
		const struct request_sock *req, const struct sk_buff *skb,
		const void *saddr, const void *daddr,
		int family, int dif, int sdif);

#endif /* _TCP_H */
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * Definitions for the TCP module.
8 *
9 * Version: @(#)tcp.h 1.0.5 05/23/93
10 *
11 * Authors: Ross Biro
12 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
13 */
14#ifndef _TCP_H
15#define _TCP_H
16
17#define FASTRETRANS_DEBUG 1
18
19#include <linux/list.h>
20#include <linux/tcp.h>
21#include <linux/bug.h>
22#include <linux/slab.h>
23#include <linux/cache.h>
24#include <linux/percpu.h>
25#include <linux/skbuff.h>
26#include <linux/kref.h>
27#include <linux/ktime.h>
28#include <linux/indirect_call_wrapper.h>
29
30#include <net/inet_connection_sock.h>
31#include <net/inet_timewait_sock.h>
32#include <net/inet_hashtables.h>
33#include <net/checksum.h>
34#include <net/request_sock.h>
35#include <net/sock_reuseport.h>
36#include <net/sock.h>
37#include <net/snmp.h>
38#include <net/ip.h>
39#include <net/tcp_states.h>
40#include <net/tcp_ao.h>
41#include <net/inet_ecn.h>
42#include <net/dst.h>
43#include <net/mptcp.h>
44
45#include <linux/seq_file.h>
46#include <linux/memcontrol.h>
47#include <linux/bpf-cgroup.h>
48#include <linux/siphash.h>
49
50extern struct inet_hashinfo tcp_hashinfo;
51
52DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
53int tcp_orphan_count_sum(void);
54
55void tcp_time_wait(struct sock *sk, int state, int timeo);
56
57#define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER)
58#define MAX_TCP_OPTION_SPACE 40
59#define TCP_MIN_SND_MSS 48
60#define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
61
62/*
63 * Never offer a window over 32767 without using window scaling. Some
64 * poor stacks do signed 16bit maths!
65 */
66#define MAX_TCP_WINDOW 32767U
67
68/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
69#define TCP_MIN_MSS 88U
70
71/* The initial MTU to use for probing */
72#define TCP_BASE_MSS 1024
73
74/* probing interval, default to 10 minutes as per RFC4821 */
75#define TCP_PROBE_INTERVAL 600
76
77/* Specify interval when tcp mtu probing will stop */
78#define TCP_PROBE_THRESHOLD 8
79
80/* After receiving this amount of duplicate ACKs fast retransmit starts. */
81#define TCP_FASTRETRANS_THRESH 3
82
83/* Maximal number of ACKs sent quickly to accelerate slow-start. */
84#define TCP_MAX_QUICKACKS 16U
85
86/* Maximal number of window scale according to RFC1323 */
87#define TCP_MAX_WSCALE 14U
88
89/* urg_data states */
90#define TCP_URG_VALID 0x0100
91#define TCP_URG_NOTYET 0x0200
92#define TCP_URG_READ 0x0400
93
94#define TCP_RETR1 3 /*
95 * This is how many retries it does before it
96 * tries to figure out if the gateway is
97 * down. Minimal RFC value is 3; it corresponds
98 * to ~3sec-8min depending on RTO.
99 */
100
101#define TCP_RETR2 15 /*
102 * This should take at least
103 * 90 minutes to time out.
104 * RFC1122 says that the limit is 100 sec.
105 * 15 is ~13-30min depending on RTO.
106 */
107
108#define TCP_SYN_RETRIES 6 /* This is how many retries are done
109 * when active opening a connection.
110 * RFC1122 says the minimum retry MUST
111 * be at least 180secs. Nevertheless
112 * this value is corresponding to
113 * 63secs of retransmission with the
114 * current initial RTO.
115 */
116
117#define TCP_SYNACK_RETRIES 5 /* This is how may retries are done
118 * when passive opening a connection.
119 * This is corresponding to 31secs of
120 * retransmission with the current
121 * initial RTO.
122 */
123
124#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
125 * state, about 60 seconds */
126#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
127 /* BSD style FIN_WAIT2 deadlock breaker.
128 * It used to be 3min, new value is 60sec,
129 * to combine FIN-WAIT-2 timeout with
130 * TIME-WAIT timer.
131 */
132#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */
133
134#define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */
135static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);
136
137#if HZ >= 100
138#define TCP_DELACK_MIN ((unsigned)(HZ/25)) /* minimal time to delay before sending an ACK */
139#define TCP_ATO_MIN ((unsigned)(HZ/25))
140#else
141#define TCP_DELACK_MIN 4U
142#define TCP_ATO_MIN 4U
143#endif
144#define TCP_RTO_MAX ((unsigned)(120*HZ))
145#define TCP_RTO_MIN ((unsigned)(HZ/5))
146#define TCP_TIMEOUT_MIN (2U) /* Min timeout for TCP timers in jiffies */
147
148#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */
149
150#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
151#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now
152 * used as a fallback RTO for the
153 * initial data transmission if no
154 * valid RTT sample has been acquired,
155 * most likely due to retrans in 3WHS.
156 */
157
158#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
159 * for local resources.
160 */
161#define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */
162#define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */
163#define TCP_KEEPALIVE_INTVL (75*HZ)
164
165#define MAX_TCP_KEEPIDLE 32767
166#define MAX_TCP_KEEPINTVL 32767
167#define MAX_TCP_KEEPCNT 127
168#define MAX_TCP_SYNCNT 127
169
170/* Ensure that TCP PAWS checks are relaxed after ~2147 seconds
171 * to avoid overflows. This assumes a clock smaller than 1 Mhz.
172 * Default clock is 1 Khz, tcp_usec_ts uses 1 Mhz.
173 */
174#define TCP_PAWS_WRAP (INT_MAX / USEC_PER_SEC)
175
176#define TCP_PAWS_MSL 60 /* Per-host timestamps are invalidated
177 * after this time. It should be equal
178 * (or greater than) TCP_TIMEWAIT_LEN
179 * to provide reliability equal to one
180 * provided by timewait state.
181 */
182#define TCP_PAWS_WINDOW 1 /* Replay window for per-host
183 * timestamps. It must be less than
184 * minimal timewait lifetime.
185 */
186/*
187 * TCP option
188 */
189
190#define TCPOPT_NOP 1 /* Padding */
191#define TCPOPT_EOL 0 /* End of options */
192#define TCPOPT_MSS 2 /* Segment size negotiating */
193#define TCPOPT_WINDOW 3 /* Window scaling */
194#define TCPOPT_SACK_PERM 4 /* SACK Permitted */
195#define TCPOPT_SACK 5 /* SACK Block */
196#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
197#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
198#define TCPOPT_AO 29 /* Authentication Option (RFC5925) */
199#define TCPOPT_MPTCP 30 /* Multipath TCP (RFC6824) */
200#define TCPOPT_FASTOPEN 34 /* Fast open (RFC7413) */
201#define TCPOPT_EXP 254 /* Experimental */
202/* Magic number to be after the option value for sharing TCP
203 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
204 */
205#define TCPOPT_FASTOPEN_MAGIC 0xF989
206#define TCPOPT_SMC_MAGIC 0xE2D4C3D9
207
208/*
209 * TCP option lengths
210 */
211
212#define TCPOLEN_MSS 4
213#define TCPOLEN_WINDOW 3
214#define TCPOLEN_SACK_PERM 2
215#define TCPOLEN_TIMESTAMP 10
216#define TCPOLEN_MD5SIG 18
217#define TCPOLEN_FASTOPEN_BASE 2
218#define TCPOLEN_EXP_FASTOPEN_BASE 4
219#define TCPOLEN_EXP_SMC_BASE 6
220
221/* But this is what stacks really send out. */
222#define TCPOLEN_TSTAMP_ALIGNED 12
223#define TCPOLEN_WSCALE_ALIGNED 4
224#define TCPOLEN_SACKPERM_ALIGNED 4
225#define TCPOLEN_SACK_BASE 2
226#define TCPOLEN_SACK_BASE_ALIGNED 4
227#define TCPOLEN_SACK_PERBLOCK 8
228#define TCPOLEN_MD5SIG_ALIGNED 20
229#define TCPOLEN_MSS_ALIGNED 4
230#define TCPOLEN_EXP_SMC_BASE_ALIGNED 8
231
232/* Flags in tp->nonagle */
233#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
234#define TCP_NAGLE_CORK 2 /* Socket is corked */
235#define TCP_NAGLE_PUSH 4 /* Cork is overridden for already queued data */
236
237/* TCP thin-stream limits */
238#define TCP_THIN_LINEAR_RETRIES 6 /* After 6 linear retries, do exp. backoff */
239
240/* TCP initial congestion window as per rfc6928 */
241#define TCP_INIT_CWND 10
242
243/* Bit Flags for sysctl_tcp_fastopen */
244#define TFO_CLIENT_ENABLE 1
245#define TFO_SERVER_ENABLE 2
246#define TFO_CLIENT_NO_COOKIE 4 /* Data in SYN w/o cookie option */
247
248/* Accept SYN data w/o any cookie option */
249#define TFO_SERVER_COOKIE_NOT_REQD 0x200
250
251/* Force enable TFO on all listeners, i.e., not requiring the
252 * TCP_FASTOPEN socket option.
253 */
254#define TFO_SERVER_WO_SOCKOPT1 0x400
255
256
257/* sysctl variables for tcp */
258extern int sysctl_tcp_max_orphans;
259extern long sysctl_tcp_mem[3];
260
261#define TCP_RACK_LOSS_DETECTION 0x1 /* Use RACK to detect losses */
262#define TCP_RACK_STATIC_REO_WND 0x2 /* Use static RACK reo wnd */
263#define TCP_RACK_NO_DUPTHRESH 0x4 /* Do not use DUPACK threshold in RACK */
264
265extern atomic_long_t tcp_memory_allocated;
266DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
267
268extern struct percpu_counter tcp_sockets_allocated;
269extern unsigned long tcp_memory_pressure;
270
271/* optimized version of sk_under_memory_pressure() for TCP sockets */
272static inline bool tcp_under_memory_pressure(const struct sock *sk)
273{
274 if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
275 mem_cgroup_under_socket_pressure(sk->sk_memcg))
276 return true;
277
278 return READ_ONCE(tcp_memory_pressure);
279}
280/*
281 * The next routines deal with comparing 32 bit unsigned ints
282 * and worry about wraparound (automatic with unsigned arithmetic).
283 */
284
285static inline bool before(__u32 seq1, __u32 seq2)
286{
287 return (__s32)(seq1-seq2) < 0;
288}
289#define after(seq2, seq1) before(seq1, seq2)
290
291/* is s2<=s1<=s3 ? */
292static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
293{
294 return seq3 - seq2 >= seq1 - seq2;
295}
296
297static inline bool tcp_out_of_memory(struct sock *sk)
298{
299 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
300 sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
301 return true;
302 return false;
303}
304
305static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
306{
307 sk_wmem_queued_add(sk, -skb->truesize);
308 if (!skb_zcopy_pure(skb))
309 sk_mem_uncharge(sk, skb->truesize);
310 else
311 sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
312 __kfree_skb(skb);
313}
314
315void sk_forced_mem_schedule(struct sock *sk, int size);
316
317bool tcp_check_oom(struct sock *sk, int shift);
318
319
320extern struct proto tcp_prot;
321
322#define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field)
323#define __TCP_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.tcp_statistics, field)
324#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
325#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
326
327void tcp_tasklet_init(void);
328
329int tcp_v4_err(struct sk_buff *skb, u32);
330
331void tcp_shutdown(struct sock *sk, int how);
332
333int tcp_v4_early_demux(struct sk_buff *skb);
334int tcp_v4_rcv(struct sk_buff *skb);
335
336void tcp_remove_empty_skb(struct sock *sk);
337int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
338int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
339int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
340 size_t size, struct ubuf_info *uarg);
341void tcp_splice_eof(struct socket *sock);
342int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
343int tcp_wmem_schedule(struct sock *sk, int copy);
344void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
345 int size_goal);
346void tcp_release_cb(struct sock *sk);
347void tcp_wfree(struct sk_buff *skb);
348void tcp_write_timer_handler(struct sock *sk);
349void tcp_delack_timer_handler(struct sock *sk);
350int tcp_ioctl(struct sock *sk, int cmd, int *karg);
351enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
352void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
353void tcp_rcv_space_adjust(struct sock *sk);
354int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
355void tcp_twsk_destructor(struct sock *sk);
356void tcp_twsk_purge(struct list_head *net_exit_list, int family);
357ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
358 struct pipe_inode_info *pipe, size_t len,
359 unsigned int flags);
360struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
361 bool force_schedule);
362
363static inline void tcp_dec_quickack_mode(struct sock *sk)
364{
365 struct inet_connection_sock *icsk = inet_csk(sk);
366
367 if (icsk->icsk_ack.quick) {
368 /* How many ACKs S/ACKing new data have we sent? */
369 const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;
370
371 if (pkts >= icsk->icsk_ack.quick) {
372 icsk->icsk_ack.quick = 0;
373 /* Leaving quickack mode we deflate ATO. */
374 icsk->icsk_ack.ato = TCP_ATO_MIN;
375 } else
376 icsk->icsk_ack.quick -= pkts;
377 }
378}
379
380#define TCP_ECN_OK 1
381#define TCP_ECN_QUEUE_CWR 2
382#define TCP_ECN_DEMAND_CWR 4
383#define TCP_ECN_SEEN 8
384
385enum tcp_tw_status {
386 TCP_TW_SUCCESS = 0,
387 TCP_TW_RST = 1,
388 TCP_TW_ACK = 2,
389 TCP_TW_SYN = 3
390};
391
392
393enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
394 struct sk_buff *skb,
395 const struct tcphdr *th);
396struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
397 struct request_sock *req, bool fastopen,
398 bool *lost_race);
399enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
400 struct sk_buff *skb);
401void tcp_enter_loss(struct sock *sk);
402void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
403void tcp_clear_retrans(struct tcp_sock *tp);
404void tcp_update_metrics(struct sock *sk);
405void tcp_init_metrics(struct sock *sk);
406void tcp_metrics_init(void);
407bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
408void __tcp_close(struct sock *sk, long timeout);
409void tcp_close(struct sock *sk, long timeout);
410void tcp_init_sock(struct sock *sk);
411void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
412__poll_t tcp_poll(struct file *file, struct socket *sock,
413 struct poll_table_struct *wait);
414int do_tcp_getsockopt(struct sock *sk, int level,
415 int optname, sockptr_t optval, sockptr_t optlen);
416int tcp_getsockopt(struct sock *sk, int level, int optname,
417 char __user *optval, int __user *optlen);
418bool tcp_bpf_bypass_getsockopt(int level, int optname);
419int do_tcp_setsockopt(struct sock *sk, int level, int optname,
420 sockptr_t optval, unsigned int optlen);
421int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
422 unsigned int optlen);
423void tcp_set_keepalive(struct sock *sk, int val);
424void tcp_syn_ack_timeout(const struct request_sock *req);
425int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
426 int flags, int *addr_len);
427int tcp_set_rcvlowat(struct sock *sk, int val);
428int tcp_set_window_clamp(struct sock *sk, int val);
429void tcp_update_recv_tstamps(struct sk_buff *skb,
430 struct scm_timestamping_internal *tss);
431void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
432 struct scm_timestamping_internal *tss);
433void tcp_data_ready(struct sock *sk);
434#ifdef CONFIG_MMU
435int tcp_mmap(struct file *file, struct socket *sock,
436 struct vm_area_struct *vma);
437#endif
438void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
439 struct tcp_options_received *opt_rx,
440 int estab, struct tcp_fastopen_cookie *foc);
441
442/*
443 * BPF SKB-less helpers
444 */
445u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
446 struct tcphdr *th, u32 *cookie);
447u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
448 struct tcphdr *th, u32 *cookie);
449u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss);
450u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
451 const struct tcp_request_sock_ops *af_ops,
452 struct sock *sk, struct tcphdr *th);
453/*
454 * TCP v4 functions exported for the inet6 API
455 */
456
457void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
458void tcp_v4_mtu_reduced(struct sock *sk);
459void tcp_req_err(struct sock *sk, u32 seq, bool abort);
460void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
461int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
462struct sock *tcp_create_openreq_child(const struct sock *sk,
463 struct request_sock *req,
464 struct sk_buff *skb);
465void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
466struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
467 struct request_sock *req,
468 struct dst_entry *dst,
469 struct request_sock *req_unhash,
470 bool *own_req);
471int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
472int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
473int tcp_connect(struct sock *sk);
474enum tcp_synack_type {
475 TCP_SYNACK_NORMAL,
476 TCP_SYNACK_FASTOPEN,
477 TCP_SYNACK_COOKIE,
478};
479struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
480 struct request_sock *req,
481 struct tcp_fastopen_cookie *foc,
482 enum tcp_synack_type synack_type,
483 struct sk_buff *syn_skb);
484int tcp_disconnect(struct sock *sk, int flags);
485
486void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
487int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
488void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
489
490/* From syncookies.c */
491struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
492 struct request_sock *req,
493 struct dst_entry *dst);
494int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th);
495struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
496struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
497 struct sock *sk, struct sk_buff *skb,
498 struct tcp_options_received *tcp_opt,
499 int mss, u32 tsoff);
500
501#if IS_ENABLED(CONFIG_BPF)
502struct bpf_tcp_req_attrs {
503 u32 rcv_tsval;
504 u32 rcv_tsecr;
505 u16 mss;
506 u8 rcv_wscale;
507 u8 snd_wscale;
508 u8 ecn_ok;
509 u8 wscale_ok;
510 u8 sack_ok;
511 u8 tstamp_ok;
512 u8 usec_ts_ok;
513 u8 reserved[3];
514};
515#endif
516
517#ifdef CONFIG_SYN_COOKIES
518
519/* Syncookies use a monotonic timer which increments every 60 seconds.
520 * This counter is used both as a hash input and partially encoded into
521 * the cookie value. A cookie is only validated further if the delta
522 * between the current counter value and the encoded one is less than this,
523 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
524 * the counter advances immediately after a cookie is generated).
525 */
526#define MAX_SYNCOOKIE_AGE 2
527#define TCP_SYNCOOKIE_PERIOD (60 * HZ)
528#define TCP_SYNCOOKIE_VALID (MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)

/* syncookies: remember time of last synqueue overflow
 * But do not dirty this field too often (once per second is enough)
 * It is racy as we do not hold a lock, but race is very minor.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			if (!time_between32(now, last_overflow,
					    last_overflow + HZ))
				WRITE_ONCE(reuse->synq_overflow_ts, now);
			return;
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
	if (!time_between32(now, last_overflow, last_overflow + HZ))
		WRITE_ONCE(tcp_sk_rw(sk)->rx_opt.ts_recent_stamp, now);
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			return !time_between32(now, last_overflow - HZ,
					       last_overflow +
					       TCP_SYNCOOKIE_VALID);
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);

	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
	 * then we're under synflood. However, we have to use
	 * 'last_overflow - HZ' as lower bound. That's because a concurrent
	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
	 * jiffies but before we store .ts_recent_stamp into last_overflow,
	 * which could lead to rejecting a valid syncookie.
	 */
	return !time_between32(now, last_overflow - HZ,
			       last_overflow + TCP_SYNCOOKIE_VALID);
}

static inline u32 tcp_cookie_time(void)
{
	u64 val = get_jiffies_64();

	do_div(val, TCP_SYNCOOKIE_PERIOD);
	return val;
}

/* Convert one nsec 64bit timestamp to ts (ms or usec resolution) */
static inline u64 tcp_ns_to_ts(bool usec_ts, u64 val)
{
	if (usec_ts)
		return div_u64(val, NSEC_PER_USEC);

	return div_u64(val, NSEC_PER_MSEC);
}

u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
			     struct tcp_options_received *opt);

static inline bool cookie_ecn_ok(const struct net *net, const struct dst_entry *dst)
{
	return READ_ONCE(net->ipv4.sysctl_tcp_ecn) ||
		dst_feature(dst, RTAX_FEATURE_ECN);
}

#if IS_ENABLED(CONFIG_BPF)
static inline bool cookie_bpf_ok(struct sk_buff *skb)
{
	return skb->sk;
}

struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb);
#else
static inline bool cookie_bpf_ok(struct sk_buff *skb)
{
	return false;
}

static inline struct request_sock *cookie_bpf_check(struct sock *sk,
						    struct sk_buff *skb)
{
	return NULL;
}
#endif

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */

void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
enum tcp_queue {
	TCP_FRAG_IN_WRITE_QUEUE,
	TCP_FRAG_IN_RTX_QUEUE,
};
int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
		 struct sk_buff *skb, u32 len,
		 unsigned int mss_now, gfp_t gfp);

void tcp_send_probe0(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
			     const struct sk_buff *next_skb);

/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk, struct sk_buff *skb);
void tcp_fin(struct sock *sk);
void tcp_check_space(struct sock *sk);
void tcp_sack_compress_send_ack(struct sock *sk);

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
		__sock_put(sk);

	if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
		__sock_put(sk);

	inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);

/* Bound MSS / TSO packet size to half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window > TCP_MSS_DEFAULT)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
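
/* Worked example (illustrative numbers): with max_window = 64000, which
 * exceeds TCP_MSS_DEFAULT, cutoff = 32000; a 48000-byte TSO chunk is
 * bounded to 32000 bytes, while a 16000-byte one passes through
 * unchanged.
 */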

/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);
int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
void tcp_read_done(struct sock *sk, size_t len);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	/* mptcp hooks are only on the slow path */
	if (sk_is_mptcp((struct sock *)tp))
		return;

	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

u32 tcp_delack_max(const struct sock *sk);

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(const struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = inet_csk(sk)->icsk_rto_min;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

static inline u32 tcp_rto_min_us(const struct sock *sk)
{
	return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
	return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
	return minmax_get(&tp->rtt_min);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be beyond the window if our peer pushes more data
 * than the offered window allows.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result. The caller does these things
 * if necessary. This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP uses 32bit jiffies to save some space.
 * Note that this is different from tcp_time_stamp, which
 * historically was the same thing until linux-4.13.
 */
#define tcp_jiffies32 ((u32)jiffies)

/*
 * Deliver a 32bit value for TCP timestamp option (RFC 7323)
 * It is no longer tied to jiffies, but to 1 ms clock.
 * Note: double check if you want to use tcp_jiffies32 instead of this.
 */
#define TCP_TS_HZ	1000

static inline u64 tcp_clock_ns(void)
{
	return ktime_get_ns();
}

static inline u64 tcp_clock_us(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
}

static inline u64 tcp_clock_ms(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_MSEC);
}

/* TCP Timestamp included in TS option (RFC 1323) can use either ms
 * or usec resolution. Each socket carries a flag to select one or the
 * other resolution, as the route attribute could change anytime.
 * Each flow must stick to its initial resolution.
 */
static inline u32 tcp_clock_ts(bool usec_ts)
{
	return usec_ts ? tcp_clock_us() : tcp_clock_ms();
}
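
/* Usage sketch (illustrative, not a definitive recipe): a TSval for an
 * established flow combines the clock above with the per-flow offset,
 * assuming the tcp_sock fields tcp_usec_ts and tsoffset:
 *
 *	u32 tsval = tcp_clock_ts(tp->tcp_usec_ts) + tp->tsoffset;
 *
 * The resolution picked at connection setup must be kept for the whole
 * life of the flow, as the comment above tcp_clock_ts() notes.
 */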

static inline u32 tcp_time_stamp_ms(const struct tcp_sock *tp)
{
	return div_u64(tp->tcp_mstamp, USEC_PER_MSEC);
}

static inline u32 tcp_time_stamp_ts(const struct tcp_sock *tp)
{
	if (tp->tcp_usec_ts)
		return tp->tcp_mstamp;
	return tcp_time_stamp_ms(tp);
}

void tcp_mstamp_refresh(struct tcp_sock *tp);

static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
{
	return max_t(s64, t1 - t0, 0);
}

/* provide the departure time in us unit */
static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
{
	return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
}

/* Provide skb TSval in usec or ms unit */
static inline u32 tcp_skb_timestamp_ts(bool usec_ts, const struct sk_buff *skb)
{
	if (usec_ts)
		return tcp_skb_timestamp_us(skb);

	return div_u64(skb->skb_mstamp_ns, NSEC_PER_MSEC);
}

static inline u32 tcp_tw_tsval(const struct tcp_timewait_sock *tcptw)
{
	return tcp_clock_ts(tcptw->tw_sk.tw_usec_ts) + tcptw->tw_ts_offset;
}

static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq)
{
	return tcp_clock_ts(treq->req_usec_ts) + treq->ts_off;
}

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	union {
		/* Note : tcp_tw_isn is used in input path only
		 *	  (isn chosen by tcp_timewait_state_process())
		 *
		 *	  tcp_gso_segs/size are used in write queue only,
		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
		 */
		__u32		tcp_tw_isn;
		struct {
			u16	tcp_gso_segs;
			u16	tcp_gso_size;
		};
	};
	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/

	__u8		sacked;		/* State flags for SACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/
#define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp_ns) */
#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
				TCPCB_REPAIRED)

	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
			eor:1,		/* Is skb MSG_EOR marked? */
			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
			unused:5;
	__u32		ack_seq;	/* Sequence number ACK'd	*/
	union {
		struct {
#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
			/* There is space for up to 24 bytes */
			__u32 is_app_limited:1, /* cwnd not fully used? */
			      delivered_ce:20,
			      unused:11;
			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
			__u32 delivered;
			/* start of send pipeline phase */
			u64 first_tx_mstamp;
			/* when we reached the "delivered" count */
			u64 delivered_mstamp;
		} tx;	/* only used for outgoing skbs */
		union {
			struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_skb_parm	h6;
#endif
		} header;	/* For incoming skbs */
	};
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
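
/* Usage sketch (illustrative): per-packet control data lives in
 * skb->cb[], so e.g. the transmit path can read the sequence bounds as
 *
 *	u32 seq     = TCP_SKB_CB(skb)->seq;
 *	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 *
 * The struct must keep fitting into sizeof(skb->cb) (48 bytes in
 * skbuff.h at the time of writing).
 */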

extern const struct inet_connection_sock_af_ops ipv4_specific;

#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[]
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->header.h6.iif;
}

static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);

	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}

/* A TCP_SKB_CB reference means this cannot be used from early demux */
static inline int tcp_v6_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
		return TCP_SKB_CB(skb)->header.h6.iif;
#endif
	return 0;
}

extern const struct inet_connection_sock_af_ops ipv6_specific;

INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
void tcp_v6_early_demux(struct sk_buff *skb);

#endif

/* A TCP_SKB_CB reference means this cannot be used from early demux */
static inline int tcp_v4_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
		return TCP_SKB_CB(skb)->header.h4.iif;
#endif
	return 0;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets. To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_size;
}
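
/* Worked example (illustrative numbers): a 30000-byte TSO skb with
 * tcp_gso_size == 1500 carries tcp_gso_segs == 20, so tcp_skb_pcount()
 * accounts this single skb as 20 packets for cwnd bookkeeping.
 */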

static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
	return likely(!TCP_SKB_CB(skb)->eor);
}

static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
					const struct sk_buff *from)
{
	return likely(tcp_skb_can_collapse_to(to) &&
		      mptcp_skb_can_collapse(to, from) &&
		      skb_pure_zcopy_same(to, from));
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CA_UNSPEC	0

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED 0x1
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN	0x2
#define TCP_CONG_MASK	(TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)

union tcp_cc_info;

struct ack_sample {
	u32 pkts_acked;
	s32 rtt_us;
	u32 in_flight;
};

/* A rate sample measures the number of (original/retransmitted) data
 * packets delivered "delivered" over an interval of time "interval_us".
 * The tcp_rate.c code fills in the rate sample, and congestion
 * control modules that define a cong_control function to run at the end
 * of ACK processing can optionally choose to consult this sample when
 * setting cwnd and pacing rate.
 * A sample is invalid if "delivered" or "interval_us" is negative.
 */
struct rate_sample {
	u64  prior_mstamp;	/* starting timestamp for interval */
	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
	u32  prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
	s32  delivered;		/* number of packets delivered over interval */
	s32  delivered_ce;	/* number of packets delivered w/ CE marks */
	long interval_us;	/* time for tp->delivered to incr "delivered" */
	u32  snd_interval_us;	/* snd interval for delivered packets */
	u32  rcv_interval_us;	/* rcv interval for delivered packets */
	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
	int  losses;		/* number of packets marked lost upon ACK */
	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
	u32  prior_in_flight;	/* in flight before this ACK */
	u32  last_end_seq;	/* end_seq of most recently ACKed packet */
	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
	bool is_retrans;	/* is sample from retransmission? */
	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
};
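
/* Illustrative consumer sketch (not a definitive recipe): a cong_control
 * hook can turn a valid sample into a delivery rate in packets/sec:
 *
 *	if (rs->delivered > 0 && rs->interval_us > 0)
 *		rate = (u64)rs->delivered * USEC_PER_SEC / rs->interval_us;
 *
 * Samples with negative "delivered" or "interval_us" are invalid and
 * must be skipped, per the comment above.
 */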

struct tcp_congestion_ops {
/* fast path fields are put first to fill one cache line */

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);

	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);

	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);

	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);

	/* call when ack arrives (optional) */
	void (*in_ack_event)(struct sock *sk, u32 flags);

	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);

	/* override sysctl_tcp_min_tso_segs */
	u32 (*min_tso_segs)(struct sock *sk);

	/* call when packets are delivered to update cwnd and pacing rate,
	 * after all the ca_state processing. (optional)
	 */
	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);

	/* new value of cwnd after loss (required) */
	u32 (*undo_cwnd)(struct sock *sk);
	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
	u32 (*sndbuf_expand)(struct sock *sk);

/* control/slow paths put last */
	/* get info for inet_diag (optional) */
	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info);

	char			name[TCP_CA_NAME_MAX];
	struct module		*owner;
	struct list_head	list;
	u32			key;
	u32			flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);
} ____cacheline_aligned_in_smp;

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
int tcp_update_congestion_control(struct tcp_congestion_ops *type,
				  struct tcp_congestion_ops *old_type);
int tcp_validate_congestion_control(struct tcp_congestion_ops *ca);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(struct net *net, const char *name);
void tcp_get_default_congestion_control(struct net *net, char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
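
/* Minimal sketch of a congestion control module (illustrative only; the
 * name "cc_sketch" is made up). It reuses the generic Reno helpers above;
 * ->ssthresh, ->cong_avoid and ->undo_cwnd are the required ops:
 *
 *	static struct tcp_congestion_ops cc_sketch __read_mostly = {
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.undo_cwnd	= tcp_reno_undo_cwnd,
 *		.name		= "cc_sketch",
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init cc_sketch_register(void)
 *	{
 *		return tcp_register_congestion_control(&cc_sketch);
 *	}
 */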

struct tcp_congestion_ops *tcp_ca_find(const char *name);
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	return NULL;
}
#endif

static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* From tcp_cong.c */
void tcp_set_ca_state(struct sock *sk, const u8 ca_state);

/* From tcp_rate.c */
void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
			    struct rate_sample *rs);
void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
		  bool is_sack_reneg, struct rate_sample *rs);
void tcp_rate_check_app_limited(struct sock *sk);

static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return likely(tp->rx_opt.sack_ok);
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge. In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
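
/* Worked example (illustrative numbers): with packets_out = 10,
 * sacked_out = 3, lost_out = 2 and retrans_out = 1, tcp_left_out() is 5
 * and tcp_packets_in_flight() = 10 - 5 + 1 = 6 packets in the network.
 */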

#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
{
	return tp->snd_cwnd;
}

static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
{
	WARN_ON_ONCE((int)val <= 0);
	tp->snd_cwnd = val;
}

static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
	return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
}

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
	       (1 << inet_csk(sk)->icsk_ca_state);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is cwnd reduction phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_in_cwnd_reduction(sk))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tcp_snd_cwnd(tp) >> 1) +
			    (tcp_snd_cwnd(tp) >> 2)));
}

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

void tcp_enter_cwr(struct sock *sk);
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	return 3;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}

/* We follow the spirit of RFC2861 to validate cwnd but implement a more
 * flexible approach. The RFC suggests cwnd should not be raised unless
 * it was fully used previously. And that's exactly what we do in
 * congestion avoidance mode. But in slow start we allow cwnd to grow
 * as long as the application has used half the cwnd.
 * Example :
 *    cwnd is 10 (IW10), but application sends 9 frames.
 *    We allow cwnd to reach 18 when all frames are ACKed.
 * This check is safe because it's as aggressive as slow start which already
 * risks 100% overshoot. The advantage is that we discourage applications
 * from sending filler packets or extra data just to artificially blow up
 * the cwnd usage, and allow application-limited processes to probe bw
 * more aggressively.
 */
static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tp->is_cwnd_limited)
		return true;

	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
	if (tcp_in_slow_start(tp))
		return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;

	return false;
}

/* BBR congestion control needs pacing.
 * Same remark for SO_MAX_PACING_RATE.
 * sch_fq packet scheduler is efficiently handling pacing,
 * but is not always installed/used.
 * Return true if TCP stack should pace packets itself.
 */
static inline bool tcp_needs_internal_pacing(const struct sock *sk)
{
	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}

/* Estimates in how many jiffies the next packet for this flow can be sent.
 * Scheduling a retransmit timer too early would be silly.
 */
static inline unsigned long tcp_pacing_delay(const struct sock *sk)
{
	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;

	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
}

static inline void tcp_reset_xmit_timer(struct sock *sk,
					const int what,
					unsigned long when,
					const unsigned long max_when)
{
	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
				  max_when);
}

/* Something is really bad, we could not queue an additional packet,
 * because qdisc is full or receiver sent a 0 window, or we are paced.
 * We do not want to add fuel to the fire, or abort too early,
 * so make sure the timer we arm now is at least 200ms in the future,
 * regardless of current icsk_rto value (as it could be ~2ms)
 */
static inline unsigned long tcp_probe0_base(const struct sock *sk)
{
	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
}

/* Variant of inet_csk_rto_backoff() used for zero window probes */
static inline unsigned long tcp_probe0_when(const struct sock *sk,
					    unsigned long max_when)
{
	u8 backoff = min_t(u8, ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1,
			   inet_csk(sk)->icsk_backoff);
	u64 when = (u64)tcp_probe0_base(sk) << backoff;

	return (unsigned long)min_t(u64, when, max_when);
}
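
/* Worked example (illustrative, assuming TCP_RTO_MIN == HZ/5 == 200ms
 * and TCP_RTO_MAX == 120s): with icsk_rto ~2ms, tcp_probe0_base() is
 * clamped to 200ms; with icsk_backoff == 3 the probe is scheduled after
 * min(200ms << 3, max_when), i.e. 1.6 seconds unless capped by max_when.
 */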

static inline void tcp_check_probe_timer(struct sock *sk)
{
	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
				     tcp_probe0_base(sk), TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__skb_checksum_complete(skb);
}

bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
		     enum skb_drop_reason *reason);

int tcp_filter(struct sock *sk, struct sk_buff *skb);
void tcp_set_state(struct sock *sk, int state);
void tcp_done(struct sock *sk);
int tcp_abort(struct sock *sk, int err);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

void tcp_cwnd_restart(struct sock *sk, s32 delta);

static inline void tcp_slow_start_after_idle_check(struct sock *sk)
{
	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta;

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
	    tp->packets_out || ca_ops->cong_control)
		return;
	delta = tcp_jiffies32 - tp->lsndtime;
	if (delta > inet_csk(sk)->icsk_rto)
		tcp_cwnd_restart(sk, delta);
}

/* Determine a window scaling and initial window to offer. */
void tcp_select_initial_window(const struct sock *sk, int __space,
			       __u32 mss, __u32 *rcv_wnd,
			       __u32 *window_clamp, int wscale_ok,
			       __u8 *rcv_wscale, __u32 init_rcv_wnd);

static inline int __tcp_win_from_space(u8 scaling_ratio, int space)
{
	s64 scaled_space = (s64)space * scaling_ratio;

	return scaled_space >> TCP_RMEM_TO_WIN_SCALE;
}

static inline int tcp_win_from_space(const struct sock *sk, int space)
{
	return __tcp_win_from_space(tcp_sk(sk)->scaling_ratio, space);
}

/* inverse of __tcp_win_from_space() */
static inline int __tcp_space_from_win(u8 scaling_ratio, int win)
{
	u64 val = (u64)win << TCP_RMEM_TO_WIN_SCALE;

	do_div(val, scaling_ratio);
	return val;
}

static inline int tcp_space_from_win(const struct sock *sk, int win)
{
	return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win);
}

/* Assume a 50% default for skb->len/skb->truesize ratio.
 * This may be adjusted later in tcp_measure_rcv_mss().
 */
#define TCP_DEFAULT_SCALING_RATIO (1 << (TCP_RMEM_TO_WIN_SCALE - 1))

static inline void tcp_scaling_ratio_init(struct sock *sk)
{
	tcp_sk(sk)->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
}
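
/* Worked example (illustrative, assuming TCP_RMEM_TO_WIN_SCALE == 8):
 * the default 50% ratio is 1 << 7 == 128, so a 256 KB sk_rcvbuf gives
 * __tcp_win_from_space(128, 262144) == 262144 * 128 >> 8 == 131072 bytes
 * of window, and __tcp_space_from_win() inverts the mapping.
 */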

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
				  READ_ONCE(sk->sk_backlog.len) -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}

static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh)
{
	int unused_mem = sk_unused_reserved_mem(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh);
	if (unused_mem)
		tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
					 tcp_win_from_space(sk, unused_mem));
}

static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
{
	__tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);
}

void tcp_cleanup_rbuf(struct sock *sk, int copied);
void __tcp_cleanup_rbuf(struct sock *sk, int copied);

/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
 * If 87.5% (7/8) of the space has been consumed, we want to override
 * SO_RCVLOWAT constraint, since we are receiving skbs with too small
 * len/truesize ratio.
 */
static inline bool tcp_rmem_pressure(const struct sock *sk)
{
	int rcvbuf, threshold;

	if (tcp_under_memory_pressure(sk))
		return true;

	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
	threshold = rcvbuf - (rcvbuf >> 3);

	return atomic_read(&sk->sk_rmem_alloc) > threshold;
}
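
/* Worked example (illustrative numbers): with sk_rcvbuf == 131072 the
 * threshold is 131072 - 16384 == 114688, i.e. pressure is reported once
 * 7/8 of the receive buffer is consumed.
 */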

static inline bool tcp_epollin_ready(const struct sock *sk, int target)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);

	if (avail <= 0)
		return false;

	return (avail >= target) || tcp_rmem_pressure(sk) ||
	       (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
}

extern void tcp_openreq_init_rwin(struct request_sock *req,
				  const struct sock *sk_listener,
				  const struct dst_entry *dst);

void tcp_enter_memory_pressure(struct sock *sk);
void tcp_leave_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);
	int val;

	/* Paired with WRITE_ONCE() in tcp_sock_set_keepintvl()
	 * and do_tcp_setsockopt().
	 */
	val = READ_ONCE(tp->keepalive_intvl);

	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);
	int val;

	/* Paired with WRITE_ONCE() in tcp_sock_set_keepidle_locked() */
	val = READ_ONCE(tp->keepalive_time);

	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);
	int val;

	/* Paired with WRITE_ONCE() in tcp_sock_set_keepcnt()
	 * and do_tcp_setsockopt().
	 */
	val = READ_ONCE(tp->keepalive_probes);

	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
		     tcp_jiffies32 - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? :
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}

static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
				  int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return true;
	if (unlikely(!time_before32(ktime_get_seconds(),
				    rx_opt->ts_recent_stamp + TCP_PAWS_WRAP)))
		return true;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
	 * while the following tcp messages carry valid values. Ignore a
	 * zero value, or else a 'negative' tsval might prevent us from
	 * accepting their packets.
	 */
	if (!rx_opt->ts_recent)
		return true;
	return false;
}
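
/* Illustrative example: with ts_recent == 1000 and rcv_tsval == 900,
 * (s32)(1000 - 900) == 100 exceeds paws_win == 0, so the first test
 * fails; the segment is then accepted only if ts_recent_stamp is older
 * than TCP_PAWS_WRAP seconds, or if ts_recent was never set.
 */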

static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
				   int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return false;

	/* RST segments are not recommended to carry timestamp,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, this is a mistake. It is necessary to understand the
	   reasons of this constraint to relax it: if peer reboots, clock
	   may go out-of-sync and half-open connections will not be reset.
	   Actually, the problem would not exist if all the implementations
	   followed the draft about maintaining clock via reboots.
	   Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && !time_before32(ktime_get_seconds(),
				  rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
		return false;
	return true;
}

bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
			  int mib_idx, u32 *last_oow_ack_time);

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}

#define tcp_md5_addr tcp_ao_addr

/* - key database */
struct tcp_md5sig_key {
	struct hlist_node	node;
	u8			keylen;
	u8			family; /* AF_INET or AF_INET6 */
	u8			prefixlen;
	u8			flags;
	union tcp_md5_addr	addr;
	int			l3index; /* set if key added with L3 scope */
	u8			key[TCP_MD5SIG_MAXKEYLEN];
	struct rcu_head		rcu;
};

/* - sock block */
struct tcp_md5sig_info {
	struct hlist_head	head;
	struct rcu_head		rcu;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if IS_ENABLED(CONFIG_IPV6)
	struct tcp6_pseudohdr ip6;
#endif
};

/*
 * struct tcp_sigpool - per-CPU pool of ahash_requests
 * @scratch: per-CPU temporary area, that can be used between
 *	     tcp_sigpool_start() and tcp_sigpool_end() to perform
 *	     crypto request
 * @req: pre-allocated ahash request
 */
struct tcp_sigpool {
	void *scratch;
	struct ahash_request *req;
};

int tcp_sigpool_alloc_ahash(const char *alg, size_t scratch_size);
void tcp_sigpool_get(unsigned int id);
void tcp_sigpool_release(unsigned int id);
int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp,
			      const struct sk_buff *skb,
			      unsigned int header_len);

/**
 * tcp_sigpool_start - disable bh and start using tcp_sigpool_ahash
 * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
 * @c: returned tcp_sigpool for usage (uninitialized on failure)
 *
 * Returns 0 on success, error otherwise.
 */
int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c);
/**
 * tcp_sigpool_end - enable bh and stop using tcp_sigpool
 * @c: tcp_sigpool context that was returned by tcp_sigpool_start()
 */
void tcp_sigpool_end(struct tcp_sigpool *c);
size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len);
/* - functions */
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, int l3index, u8 flags,
		   const u8 *newkey, u8 newkeylen);
int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
		     int family, u8 prefixlen, int l3index,
		     struct tcp_md5sig_key *key);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, int l3index, u8 flags);
void tcp_clear_md5_list(struct sock *sk);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk);

#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
					   const union tcp_md5_addr *addr,
					   int family, bool any_l3index);
static inline struct tcp_md5sig_key *
tcp_md5_do_lookup(const struct sock *sk, int l3index,
		  const union tcp_md5_addr *addr, int family)
{
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return NULL;
	return __tcp_md5_do_lookup(sk, l3index, addr, family, false);
}

static inline struct tcp_md5sig_key *
tcp_md5_do_lookup_any_l3index(const struct sock *sk,
			      const union tcp_md5_addr *addr, int family)
{
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return NULL;
	return __tcp_md5_do_lookup(sk, 0, addr, family, true);
}

enum skb_drop_reason
tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
		     const void *saddr, const void *daddr,
		     int family, int l3index, const __u8 *hash_location);

#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *
tcp_md5_do_lookup(const struct sock *sk, int l3index,
		  const union tcp_md5_addr *addr, int family)
{
	return NULL;
}

static inline struct tcp_md5sig_key *
tcp_md5_do_lookup_any_l3index(const struct sock *sk,
			      const union tcp_md5_addr *addr, int family)
{
	return NULL;
}

static inline enum skb_drop_reason
tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
		     const void *saddr, const void *daddr,
		     int family, int l3index, const __u8 *hash_location)
{
	return SKB_NOT_DROPPED_YET;
}
#define tcp_twsk_md5_key(twsk)	NULL
#endif

int tcp_md5_alloc_sigpool(void);
void tcp_md5_release_sigpool(void);
void tcp_md5_add_sigpool(void);
extern int tcp_md5_sigpool_id;

int tcp_md5_hash_key(struct tcp_sigpool *hp,
		     const struct tcp_md5sig_key *key);

/* From tcp_fastopen.c */
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie);
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp);
struct tcp_fastopen_request {
	/* Fast Open cookie. Size 0 means a cookie request */
	struct tcp_fastopen_cookie	cookie;
	struct msghdr			*data;	/* data in MSG_FASTOPEN */
	size_t				size;
	int				copied;	/* queued in tcp_connect() */
	struct ubuf_info		*uarg;
};
void tcp_free_fastopen_req(struct tcp_sock *tp);
void tcp_fastopen_destroy_cipher(struct sock *sk);
void tcp_fastopen_ctx_destroy(struct net *net);
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
			      void *primary_key, void *backup_key);
int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
			    u64 *key);
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      const struct dst_entry *dst);
void tcp_fastopen_init_key_once(struct net *net);
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie);
bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
#define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
#define TCP_FASTOPEN_KEY_MAX 2
#define TCP_FASTOPEN_KEY_BUF_LENGTH \
	(TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)

/* Fastopen key context */
struct tcp_fastopen_context {
	siphash_key_t	key[TCP_FASTOPEN_KEY_MAX];
	int		num;
	struct rcu_head	rcu;
};

void tcp_fastopen_active_disable(struct sock *sk);
bool tcp_fastopen_active_should_disable(struct sock *sk);
void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);

/* Caller needs to wrap with rcu_read_(un)lock() */
static inline
struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
{
	struct tcp_fastopen_context *ctx;

	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
	if (!ctx)
		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
	return ctx;
}
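
/* Usage sketch (illustrative): per the note above, the lookup and any
 * use of the returned context must sit inside an RCU read-side section:
 *
 *	rcu_read_lock();
 *	ctx = tcp_fastopen_get_ctx(sk);
 *	if (ctx)
 *		num = tcp_fastopen_context_len(ctx);
 *	rcu_read_unlock();
 */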

static inline
bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
			       const struct tcp_fastopen_cookie *orig)
{
	if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
	    orig->len == foc->len &&
	    !memcmp(orig->val, foc->val, foc->len))
		return true;
	return false;
}

static inline
int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
{
	return ctx->num;
}

/* Latencies incurred by various limits for a sender. They are
 * chronograph-like stats that are mutually exclusive.
 */
enum tcp_chrono {
	TCP_CHRONO_UNSPEC,
	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
	__TCP_CHRONO_MAX,
};

void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);

/* This helper is needed, because skb->tcp_tsorted_anchor uses
 * the same memory storage as skb->destructor/_skb_refdst
 */
static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
{
	skb->destructor = NULL;
	skb->_skb_refdst = 0UL;
}

#define tcp_skb_tsorted_save(skb) {		\
	unsigned long _save = skb->_skb_refdst;	\
	skb->_skb_refdst = 0UL;

#define tcp_skb_tsorted_restore(skb)		\
	skb->_skb_refdst = _save;		\
}
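
/* Usage sketch (illustrative; the clone step mirrors how tcp_output.c
 * uses the pair, but treat it as an assumed example): the save/restore
 * macros bracket code that would otherwise clobber the tsorted anchor
 * through _skb_refdst:
 *
 *	tcp_skb_tsorted_save(oskb) {
 *		skb = skb_clone(oskb, GFP_ATOMIC);
 *	} tcp_skb_tsorted_restore(oskb);
 */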

void tcp_write_queue_purge(struct sock *sk);

static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
{
	return skb_rb_first(&sk->tcp_rtx_queue);
}

static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
{
	return skb_rb_last(&sk->tcp_rtx_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

/**
 * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue
 * @sk: socket
 *
 * Since the write queue can have a temporary empty skb in it,
 * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
 */
static inline bool tcp_write_queue_empty(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return tp->write_seq == tp->snd_nxt;
}

static inline bool tcp_rtx_queue_empty(const struct sock *sk)
{
	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
}

static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
{
	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_write_queue.next == skb)
		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
}

/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	tcp_skb_tsorted_anchor_cleanup(skb);
	__skb_unlink(skb, &sk->sk_write_queue);
}

void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);

static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
{
	tcp_skb_tsorted_anchor_cleanup(skb);
	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
}

static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
{
	list_del(&skb->tcp_tsorted_anchor);
	tcp_rtx_queue_unlink(skb, sk);
	tcp_wmem_free_skb(sk, skb);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	if (tcp_send_head(sk)) {
		struct tcp_sock *tp = tcp_sk(sk);

		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
	}
}

/* Start sequence of the skb just after the highest skb with SACKed
 * bit, valid only if sacked_out > 0 or when the caller has ensured
 * validity itself.
 */
2113static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
2114{
2115 if (!tp->sacked_out)
2116 return tp->snd_una;
2117
2118 if (tp->highest_sack == NULL)
2119 return tp->snd_nxt;
2120
2121 return TCP_SKB_CB(tp->highest_sack)->seq;
2122}
2123
2124static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
2125{
2126 tcp_sk(sk)->highest_sack = skb_rb_next(skb);
2127}
2128
2129static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
2130{
2131 return tcp_sk(sk)->highest_sack;
2132}
2133
2134static inline void tcp_highest_sack_reset(struct sock *sk)
2135{
2136 tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
2137}
2138
2139/* Called when old skb is about to be deleted and replaced by new skb */
2140static inline void tcp_highest_sack_replace(struct sock *sk,
2141 struct sk_buff *old,
2142 struct sk_buff *new)
2143{
2144 if (old == tcp_highest_sack(sk))
2145 tcp_sk(sk)->highest_sack = new;
2146}
2147
2148/* This helper checks if socket has IP_TRANSPARENT set */
2149static inline bool inet_sk_transparent(const struct sock *sk)
2150{
2151 switch (sk->sk_state) {
2152 case TCP_TIME_WAIT:
2153 return inet_twsk(sk)->tw_transparent;
2154 case TCP_NEW_SYN_RECV:
2155 return inet_rsk(inet_reqsk(sk))->no_srccheck;
2156 }
2157 return inet_test_bit(TRANSPARENT, sk);
2158}
2159
2160/* Determines whether this is a thin stream (which may suffer from
2161 * increased latency). Used to trigger latency-reducing mechanisms.
2162 */
2163static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
2164{
2165 return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
2166}
2167
2168/* /proc */
2169enum tcp_seq_states {
2170 TCP_SEQ_STATE_LISTENING,
2171 TCP_SEQ_STATE_ESTABLISHED,
2172};
2173
2174void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
2175void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
2176void tcp_seq_stop(struct seq_file *seq, void *v);
2177
2178struct tcp_seq_afinfo {
2179 sa_family_t family;
2180};
2181
2182struct tcp_iter_state {
2183 struct seq_net_private p;
2184 enum tcp_seq_states state;
2185 struct sock *syn_wait_sk;
2186 int bucket, offset, sbucket, num;
2187 loff_t last_pos;
2188};
2189
2190extern struct request_sock_ops tcp_request_sock_ops;
2191extern struct request_sock_ops tcp6_request_sock_ops;
2192
2193void tcp_v4_destroy_sock(struct sock *sk);
2194
2195struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
2196 netdev_features_t features);
2197struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
2198INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
2199INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
2200INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
2201INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
2202#ifdef CONFIG_INET
2203void tcp_gro_complete(struct sk_buff *skb);
2204#else
2205static inline void tcp_gro_complete(struct sk_buff *skb) { }
2206#endif
2207
2208void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
2209
2210static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
2211{
2212 struct net *net = sock_net((struct sock *)tp);
2213 u32 val;
2214
2215 val = READ_ONCE(tp->notsent_lowat);
2216
2217 return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
2218}

bool tcp_stream_memory_free(const struct sock *sk, int wake);

#ifdef CONFIG_PROC_FS
int tcp4_proc_init(void);
void tcp4_proc_exit(void);
#endif

int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
int tcp_conn_request(struct request_sock_ops *rsk_ops,
		     const struct tcp_request_sock_ops *af_ops,
		     struct sock *sk, struct sk_buff *skb);

/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *(*md5_lookup)(const struct sock *sk,
					     const struct sock *addr_sk);
	int (*calc_md5_hash)(char *location,
			     const struct tcp_md5sig_key *md5,
			     const struct sock *sk,
			     const struct sk_buff *skb);
	int (*md5_parse)(struct sock *sk,
			 int optname,
			 sockptr_t optval,
			 int optlen);
#endif
#ifdef CONFIG_TCP_AO
	int (*ao_parse)(struct sock *sk, int optname, sockptr_t optval, int optlen);
	struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
					struct sock *addr_sk,
					int sndid, int rcvid);
	int (*ao_calc_key_sk)(struct tcp_ao_key *mkt, u8 *key,
			      const struct sock *sk,
			      __be32 sisn, __be32 disn, bool send);
	int (*calc_ao_hash)(char *location, struct tcp_ao_key *ao,
			    const struct sock *sk, const struct sk_buff *skb,
			    const u8 *tkey, int hash_offset, u32 sne);
#endif
};

struct tcp_request_sock_ops {
	u16 mss_clamp;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
						 const struct sock *addr_sk);
	int (*calc_md5_hash)(char *location,
			     const struct tcp_md5sig_key *md5,
			     const struct sock *sk,
			     const struct sk_buff *skb);
#endif
#ifdef CONFIG_TCP_AO
	struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
					struct request_sock *req,
					int sndid, int rcvid);
	int (*ao_calc_key)(struct tcp_ao_key *mkt, u8 *key, struct request_sock *sk);
	int (*ao_synack_hash)(char *ao_hash, struct tcp_ao_key *mkt,
			      struct request_sock *req, const struct sk_buff *skb,
			      int hash_offset, u32 sne);
#endif
#ifdef CONFIG_SYN_COOKIES
	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
				 __u16 *mss);
#endif
	struct dst_entry *(*route_req)(const struct sock *sk,
				       struct sk_buff *skb,
				       struct flowi *fl,
				       struct request_sock *req);
	u32 (*init_seq)(const struct sk_buff *skb);
	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
			   struct flowi *fl, struct request_sock *req,
			   struct tcp_fastopen_cookie *foc,
			   enum tcp_synack_type synack_type,
			   struct sk_buff *syn_skb);
};

extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
#if IS_ENABLED(CONFIG_IPV6)
extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
#endif

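/* Wrapper around the af-specific cookie_init_seq(): sending a cookie is
 * recorded as a synq overflow and accounted under LINUX_MIB_SYNCOOKIESSENT
 * before the ISN is generated.
 */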
#ifdef CONFIG_SYN_COOKIES
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 const struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	tcp_synq_overflow(sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
	return ops->cookie_init_seq(skb, mss);
}
#else
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 const struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	return 0;
}
#endif

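/* Signing material for a single segment: either a TCP-AO key (plus its
 * traffic key, SNE and receive-next key id) or a TCP-MD5 key; @type
 * says which union member, if any, is valid.
 */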
struct tcp_key {
	union {
		struct {
			struct tcp_ao_key *ao_key;
			char *traffic_key;
			u32 sne;
			u8 rcv_next;
		};
		struct tcp_md5sig_key *md5_key;
	};
	enum {
		TCP_KEY_NONE = 0,
		TCP_KEY_MD5,
		TCP_KEY_AO,
	} type;
};

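/* Fill @out with the key currently used to sign segments on @sk.
 * TCP-AO's current_key takes precedence over MD5; with neither
 * configured, @out->type is set to TCP_KEY_NONE. The socket lock must
 * be held (see the lockdep_sock_is_held() annotation below).
 */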
static inline void tcp_get_current_key(const struct sock *sk,
				       struct tcp_key *out)
{
#if defined(CONFIG_TCP_AO) || defined(CONFIG_TCP_MD5SIG)
	const struct tcp_sock *tp = tcp_sk(sk);
#endif

#ifdef CONFIG_TCP_AO
	if (static_branch_unlikely(&tcp_ao_needed.key)) {
		struct tcp_ao_info *ao;

		ao = rcu_dereference_protected(tp->ao_info,
					       lockdep_sock_is_held(sk));
		if (ao) {
			out->ao_key = READ_ONCE(ao->current_key);
			out->type = TCP_KEY_AO;
			return;
		}
	}
#endif
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed.key) &&
	    rcu_access_pointer(tp->md5sig_info)) {
		out->md5_key = tp->af_specific->md5_lookup(sk, sk);
		if (out->md5_key) {
			out->type = TCP_KEY_MD5;
			return;
		}
	}
#endif
	out->type = TCP_KEY_NONE;
}

static inline bool tcp_key_is_md5(const struct tcp_key *key)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed.key) &&
	    key->type == TCP_KEY_MD5)
		return true;
#endif
	return false;
}

static inline bool tcp_key_is_ao(const struct tcp_key *key)
{
#ifdef CONFIG_TCP_AO
	if (static_branch_unlikely(&tcp_ao_needed.key) &&
	    key->type == TCP_KEY_AO)
		return true;
#endif
	return false;
}

int tcpv4_offload_init(void);

void tcp_v4_init(void);
void tcp_init(void);

/* tcp_recovery.c */
void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
				u32 reo_wnd);
extern bool tcp_rack_mark_lost(struct sock *sk);
extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
			     u64 xmit_time);
extern void tcp_rack_reo_timeout(struct sock *sk);
extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);

/* tcp_plb.c */

2407/*
2408 * Scaling factor for fractions in PLB. For example, tcp_plb_update_state
2409 * expects cong_ratio which represents fraction of traffic that experienced
2410 * congestion over a single RTT. In order to avoid floating point operations,
2411 * this fraction should be mapped to (1 << TCP_PLB_SCALE) and passed in.
2412 */
#define TCP_PLB_SCALE 8
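
/* Worked example (with the default TCP_PLB_SCALE of 8): if half of the
 * traffic in an RTT experienced congestion (e.g. was ECN CE-marked),
 * cong_ratio = (1 << 8) / 2 = 128; 25% congestion maps to 64, and
 * 100% to 256.
 */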

/* State for PLB (Protective Load Balancing) for a single TCP connection. */
struct tcp_plb_state {
	u8	consec_cong_rounds:5, /* consecutive congested rounds */
		unused:3;
	u32	pause_until; /* jiffies32 when PLB can resume rerouting */
};

static inline void tcp_plb_init(const struct sock *sk,
				struct tcp_plb_state *plb)
{
	plb->consec_cong_rounds = 0;
	plb->pause_until = 0;
}
void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
			  const int cong_ratio);
void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb);
void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb);

/* At how many usecs into the future should the RTO fire?
 * A negative result means the timeout is already overdue; callers are
 * expected to clamp it and arm the timer to fire immediately.
 */
static inline s64 tcp_rto_delta_us(const struct sock *sk)
{
	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
	u32 rto = inet_csk(sk)->icsk_rto;
	u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);

	return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
}

2443/*
2444 * Save and compile IPv4 options, return a pointer to it
2445 */
static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
							 struct sk_buff *skb)
{
	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
	struct ip_options_rcu *dopt = NULL;

	if (opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
			kfree(dopt);
			dopt = NULL;
		}
	}
	return dopt;
}

/* Locally generated TCP pure ACKs have skb->truesize == 2
 * (see tcp_send_ack() in net/ipv4/tcp_output.c).
 * This is much faster than dissecting the packet to find out.
 * (Think of GRE encapsulations, IPv4, IPv6, ...)
 */
static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
{
	return skb->truesize == 2;
}

static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
{
	skb->truesize = 2;
}

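/* Bytes of received data not yet read by the application, i.e. the
 * value reported to userspace via SIOCINQ/FIONREAD and the TCP_INQ cmsg.
 */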
static inline int tcp_inq(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		answ = 0;
	} else if (sock_flag(sk, SOCK_URGINLINE) ||
		   !tp->urg_data ||
		   before(tp->urg_seq, tp->copied_seq) ||
		   !before(tp->urg_seq, tp->rcv_nxt)) {

		answ = tp->rcv_nxt - tp->copied_seq;

		/* Subtract 1, if FIN was received */
		if (answ && sock_flag(sk, SOCK_DONE))
			answ--;
	} else {
		answ = tp->urg_seq - tp->copied_seq;
	}

	return answ;
}

int tcp_peek_len(struct socket *sock);

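/* Account an incoming skb against tp->segs_in (and data_segs_in when it
 * carries payload); gso_segs is used so that an aggregated GRO skb
 * counts as the number of wire segments it represents.
 */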
static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
{
	u16 segs_in;

	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);

	/* We update these fields while other threads might
	 * read them from tcp_get_info()
	 */
	WRITE_ONCE(tp->segs_in, tp->segs_in + segs_in);
	if (skb->len > tcp_hdrlen(skb))
		WRITE_ONCE(tp->data_segs_in, tp->data_segs_in + segs_in);
}

2519/*
2520 * TCP listen path runs lockless.
2521 * We forced "struct sock" to be const qualified to make sure
2522 * we don't modify one of its field by mistake.
2523 * Here, we increment sk_drops which is an atomic_t, so we can safely
2524 * make sock writable again.
2525 */
static inline void tcp_listendrop(const struct sock *sk)
{
	atomic_inc(&((struct sock *)sk)->sk_drops);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
}

enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);

/*
 * Interface for adding Upper Level Protocols over TCP
 */

#define TCP_ULP_NAME_MAX	16
#define TCP_ULP_MAX		128
#define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)

struct tcp_ulp_ops {
	struct list_head	list;

	/* initialize ulp */
	int (*init)(struct sock *sk);
	/* update ulp */
	void (*update)(struct sock *sk, struct proto *p,
		       void (*write_space)(struct sock *sk));
	/* cleanup ulp */
	void (*release)(struct sock *sk);
	/* diagnostic */
	int (*get_info)(struct sock *sk, struct sk_buff *skb);
	size_t (*get_info_size)(const struct sock *sk);
	/* clone ulp */
	void (*clone)(const struct request_sock *req, struct sock *newsk,
		      const gfp_t priority);

	char		name[TCP_ULP_NAME_MAX];
	struct module	*owner;
};
int tcp_register_ulp(struct tcp_ulp_ops *type);
void tcp_unregister_ulp(struct tcp_ulp_ops *type);
int tcp_set_ulp(struct sock *sk, const char *name);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
void tcp_update_ulp(struct sock *sk, struct proto *p,
		    void (*write_space)(struct sock *sk));

#define MODULE_ALIAS_TCP_ULP(name)				\
	__MODULE_INFO(alias, alias_userspace, name);		\
	__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
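
/* Minimal registration sketch (illustrative only; "demo" and
 * demo_ulp_init() are hypothetical, see net/tls for a real in-tree ULP):
 *
 *	static int demo_ulp_init(struct sock *sk)
 *	{
 *		return 0;	// e.g. swap in a ULP-specific sk->sk_prot
 *	}
 *
 *	static struct tcp_ulp_ops demo_ulp_ops __read_mostly = {
 *		.name	= "demo",
 *		.owner	= THIS_MODULE,
 *		.init	= demo_ulp_init,
 *	};
 *
 * Register with tcp_register_ulp(&demo_ulp_ops) from module init and add
 * MODULE_ALIAS_TCP_ULP("demo") so the module can be autoloaded when
 * userspace calls setsockopt(fd, IPPROTO_TCP, TCP_ULP, "demo", sizeof("demo")).
 */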

#ifdef CONFIG_NET_SOCK_MSG
struct sk_msg;
struct sk_psock;

#ifdef CONFIG_BPF_SYSCALL
int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
#endif /* CONFIG_BPF_SYSCALL */

#ifdef CONFIG_INET
void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
#else
static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
{
}
#endif

int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
			  struct sk_msg *msg, u32 bytes, int flags);
#endif /* CONFIG_NET_SOCK_MSG */

#if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
}
#endif

#ifdef CONFIG_CGROUP_BPF
static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
				      struct sk_buff *skb,
				      unsigned int end_offset)
{
	skops->skb = skb;
	skops->skb_data_end = skb->data + end_offset;
}
#else
static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
				      struct sk_buff *skb,
				      unsigned int end_offset)
{
}
#endif

/* Call a BPF_SOCK_OPS program that returns an int. If the return value
 * is < 0, then the BPF op failed (for example if the loaded BPF
 * program does not support the chosen operation or there is no BPF
 * program loaded).
 */
#ifdef CONFIG_BPF
static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
	struct bpf_sock_ops_kern sock_ops;
	int ret;

	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
	if (sk_fullsock(sk)) {
		sock_ops.is_fullsock = 1;
		sock_owned_by_me(sk);
	}

	sock_ops.sk = sk;
	sock_ops.op = op;
	if (nargs > 0)
		memcpy(sock_ops.args, args, nargs * sizeof(*args));

	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
	if (ret == 0)
		ret = sock_ops.reply;
	else
		ret = -1;
	return ret;
}

static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
{
	u32 args[2] = {arg1, arg2};

	return tcp_call_bpf(sk, op, 2, args);
}

static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
				    u32 arg3)
{
	u32 args[3] = {arg1, arg2, arg3};

	return tcp_call_bpf(sk, op, 3, args);
}

#else
static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
	return -EPERM;
}

static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
{
	return -EPERM;
}

static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
				    u32 arg3)
{
	return -EPERM;
}

#endif

static inline u32 tcp_timeout_init(struct sock *sk)
{
	int timeout;

	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);

	if (timeout <= 0)
		timeout = TCP_TIMEOUT_INIT;
	return min_t(int, timeout, TCP_RTO_MAX);
}

static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
{
	int rwnd;

	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);

	if (rwnd < 0)
		rwnd = 0;
	return rwnd;
}

static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
{
	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
}

static inline void tcp_bpf_rtt(struct sock *sk)
{
	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
		tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, NULL);
}

#if IS_ENABLED(CONFIG_SMC)
extern struct static_key_false tcp_have_smc;
#endif

#if IS_ENABLED(CONFIG_TLS_DEVICE)
void clean_acked_data_enable(struct inet_connection_sock *icsk,
			     void (*cad)(struct sock *sk, u32 ack_seq));
void clean_acked_data_disable(struct inet_connection_sock *icsk);
void clean_acked_data_flush(void);
#endif

DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
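/* tp->tcp_tx_delay holds the per-socket TCP_TX_DELAY value in usec;
 * the static key above is flipped on once any socket enables a delay.
 */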
static inline void tcp_add_tx_delay(struct sk_buff *skb,
				    const struct tcp_sock *tp)
{
	if (static_branch_unlikely(&tcp_tx_delay_enabled))
		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
}

/* Compute the Earliest Departure Time for some control packets,
 * like an ACK or a RST, for TIME_WAIT or non-ESTABLISHED sockets.
 */
static inline u64 tcp_transmit_time(const struct sock *sk)
{
	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
			tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;

		return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
	}
	return 0;
}

static inline int tcp_parse_auth_options(const struct tcphdr *th,
	const u8 **md5_hash, const struct tcp_ao_hdr **aoh)
{
	const u8 *md5_tmp, *ao_tmp;
	int ret;

	ret = tcp_do_parse_auth_options(th, &md5_tmp, &ao_tmp);
	if (ret)
		return ret;

	if (md5_hash)
		*md5_hash = md5_tmp;

	if (aoh) {
		if (!ao_tmp)
			*aoh = NULL;
		else
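			/* ao_tmp points at the option payload; step back
			 * over the kind/length octets to the tcp_ao_hdr.
			 */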
			*aoh = (struct tcp_ao_hdr *)(ao_tmp - 2);
	}

	return 0;
}

static inline bool tcp_ao_required(struct sock *sk, const void *saddr,
				   int family, int l3index, bool stat_inc)
{
#ifdef CONFIG_TCP_AO
	struct tcp_ao_info *ao_info;
	struct tcp_ao_key *ao_key;

	if (!static_branch_unlikely(&tcp_ao_needed.key))
		return false;

	ao_info = rcu_dereference_check(tcp_sk(sk)->ao_info,
					lockdep_sock_is_held(sk));
	if (!ao_info)
		return false;

	ao_key = tcp_ao_do_lookup(sk, l3index, saddr, family, -1, -1);
	if (ao_info->ao_required || ao_key) {
		if (stat_inc) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOREQUIRED);
			atomic64_inc(&ao_info->counters.ao_required);
		}
		return true;
	}
#endif
	return false;
}

/* Called with rcu_read_lock() */
static inline enum skb_drop_reason
tcp_inbound_hash(struct sock *sk, const struct request_sock *req,
		 const struct sk_buff *skb,
		 const void *saddr, const void *daddr,
		 int family, int dif, int sdif)
{
	const struct tcphdr *th = tcp_hdr(skb);
	const struct tcp_ao_hdr *aoh;
	const __u8 *md5_location;
	int l3index;

	/* Invalid option, or an auth option present more than once */
	if (tcp_parse_auth_options(th, &md5_location, &aoh)) {
		tcp_hash_fail("TCP segment has incorrect auth options set",
			      family, skb, "");
		return SKB_DROP_REASON_TCP_AUTH_HDR;
	}

	if (req) {
		if (tcp_rsk_used_ao(req) != !!aoh) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD);
			tcp_hash_fail("TCP connection can't start/end using TCP-AO",
				      family, skb, "%s",
				      !aoh ? "missing AO" : "AO signed");
			return SKB_DROP_REASON_TCP_AOFAILURE;
		}
	}

	/* sdif set means the packet ingressed via a device in an L3
	 * domain, and dif is set to the l3mdev
	 */
	l3index = sdif ? dif : 0;

	/* Fast path: unsigned segments */
	if (likely(!md5_location && !aoh)) {
		/* Drop if there's a TCP-MD5 or TCP-AO key with any rcvid/sndid
		 * for the remote peer. On an established TCP-AO connection
		 * the last key is impossible to remove, so there's
		 * always at least one current_key.
		 */
		if (tcp_ao_required(sk, saddr, family, l3index, true)) {
			tcp_hash_fail("AO hash is required, but not found",
				      family, skb, "L3 index %d", l3index);
			return SKB_DROP_REASON_TCP_AONOTFOUND;
		}
		if (unlikely(tcp_md5_do_lookup(sk, l3index, saddr, family))) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
			tcp_hash_fail("MD5 Hash not found",
				      family, skb, "L3 index %d", l3index);
			return SKB_DROP_REASON_TCP_MD5NOTFOUND;
		}
		return SKB_NOT_DROPPED_YET;
	}

	if (aoh)
		return tcp_inbound_ao_hash(sk, skb, family, req, l3index, aoh);

	return tcp_inbound_md5_hash(sk, skb, saddr, daddr, family,
				    l3index, md5_location);
}

#endif /* _TCP_H */