/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/indirect_call_wrapper.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock_reuseport.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>
#include <net/mptcp.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/bpf-cgroup.h>
#include <linux/siphash.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
#define TCP_MIN_SND_MSS 48
#define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW 32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS 88U

/* The initial MTU to use for probing */
#define TCP_BASE_MSS 1024

/* Probing interval for TCP MTU probing, in seconds; defaults to 10 minutes per RFC 4821 */
#define TCP_PROBE_INTERVAL 600

/* Stop MTU probing once the search range has converged to within this many bytes */
#define TCP_PROBE_THRESHOLD 8

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS 16U

/* Maximal window scale value, per RFC 1323 */
#define TCP_MAX_WSCALE 14U

/* urg_data states */
#define TCP_URG_VALID 0x0100
#define TCP_URG_NOTYET 0x0200
#define TCP_URG_READ 0x0400

#define TCP_RETR1 3 /*
                     * This is how many retries it does before it
                     * tries to figure out if the gateway is
                     * down. Minimal RFC value is 3; it corresponds
                     * to ~3sec-8min depending on RTO.
                     */

#define TCP_RETR2 15 /*
                      * This should take at least
                      * 90 minutes to time out.
                      * RFC1122 says that the limit is 100 sec.
                      * 15 is ~13-30min depending on RTO.
                      */

#define TCP_SYN_RETRIES 6 /* This is how many retries are done
                           * when actively opening a connection.
                           * RFC1122 says the minimum retry MUST
                           * be at least 180secs. Nevertheless
                           * this value corresponds to
                           * 63secs of retransmission with the
                           * current initial RTO.
                           */

#define TCP_SYNACK_RETRIES 5 /* This is how many retries are done
                              * when passively opening a connection.
                              * This corresponds to 31secs of
                              * retransmission with the current
                              * initial RTO.
                              */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
                                  * state, about 60 seconds */
#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
                                 /* BSD style FIN_WAIT2 deadlock breaker.
                                  * It used to be 3min, new value is 60sec,
                                  * to combine FIN-WAIT-2 timeout with
                                  * TIME-WAIT timer.
                                  */
#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */

#define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN ((unsigned)(HZ/25)) /* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN ((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN 4U
#define TCP_ATO_MIN 4U
#endif
#define TCP_RTO_MAX ((unsigned)(120*HZ))
#define TCP_RTO_MIN ((unsigned)(HZ/5))
#define TCP_TIMEOUT_MIN (2U) /* Min timeout for TCP timers in jiffies */
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now
                                                 * used as a fallback RTO for the
                                                 * initial data transmission if no
                                                 * valid RTT sample has been acquired,
                                                 * most likely due to retrans in 3WHS.
                                                 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
                                                         * for local resources.
                                                         */
#define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */
#define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */
#define TCP_KEEPALIVE_INTVL (75*HZ)

#define MAX_TCP_KEEPIDLE 32767
#define MAX_TCP_KEEPINTVL 32767
#define MAX_TCP_KEEPCNT 127
#define MAX_TCP_SYNCNT 127

#define TCP_SYNQ_INTERVAL (HZ/5) /* Period of SYNACK timer */

#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
#define TCP_PAWS_MSL 60 /* Per-host timestamps are invalidated
                         * after this time. It should be equal
                         * (or greater than) TCP_TIMEWAIT_LEN
                         * to provide reliability equal to one
                         * provided by timewait state.
                         */
#define TCP_PAWS_WINDOW 1 /* Replay window for per-host
                           * timestamps. It must be less than
                           * minimal timewait lifetime.
                           */
/*
 * TCP option
 */

#define TCPOPT_NOP 1 /* Padding */
#define TCPOPT_EOL 0 /* End of options */
#define TCPOPT_MSS 2 /* Segment size negotiating */
#define TCPOPT_WINDOW 3 /* Window scaling */
#define TCPOPT_SACK_PERM 4 /* SACK Permitted */
#define TCPOPT_SACK 5 /* SACK Block */
#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
#define TCPOPT_MPTCP 30 /* Multipath TCP (RFC6824) */
#define TCPOPT_FASTOPEN 34 /* Fast open (RFC7413) */
#define TCPOPT_EXP 254 /* Experimental */
/* Magic number to be after the option value for sharing TCP
 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC 0xF989
#define TCPOPT_SMC_MAGIC 0xE2D4C3D9

/*
 * TCP option lengths
 */

#define TCPOLEN_MSS 4
#define TCPOLEN_WINDOW 3
#define TCPOLEN_SACK_PERM 2
#define TCPOLEN_TIMESTAMP 10
#define TCPOLEN_MD5SIG 18
#define TCPOLEN_FASTOPEN_BASE 2
#define TCPOLEN_EXP_FASTOPEN_BASE 4
#define TCPOLEN_EXP_SMC_BASE 6

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED 12
#define TCPOLEN_WSCALE_ALIGNED 4
#define TCPOLEN_SACKPERM_ALIGNED 4
#define TCPOLEN_SACK_BASE 2
#define TCPOLEN_SACK_BASE_ALIGNED 4
#define TCPOLEN_SACK_PERBLOCK 8
#define TCPOLEN_MD5SIG_ALIGNED 20
#define TCPOLEN_MSS_ALIGNED 4
#define TCPOLEN_EXP_SMC_BASE_ALIGNED 8

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
#define TCP_NAGLE_CORK 2 /* Socket is corked */
#define TCP_NAGLE_PUSH 4 /* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6 /* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per rfc6928 */
#define TCP_INIT_CWND 10

/* Bit Flags for sysctl_tcp_fastopen */
#define TFO_CLIENT_ENABLE 1
#define TFO_SERVER_ENABLE 2
#define TFO_CLIENT_NO_COOKIE 4 /* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define TFO_SERVER_COOKIE_NOT_REQD 0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option.
 */
#define TFO_SERVER_WO_SOCKOPT1 0x400


/* sysctl variables for tcp */
extern int sysctl_tcp_max_orphans;
extern long sysctl_tcp_mem[3];

#define TCP_RACK_LOSS_DETECTION 0x1 /* Use RACK to detect losses */
#define TCP_RACK_STATIC_REO_WND 0x2 /* Use static RACK reo wnd */
#define TCP_RACK_NO_DUPTHRESH 0x4 /* Do not use DUPACK threshold in RACK */

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern unsigned long tcp_memory_pressure;

/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
        if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
            mem_cgroup_under_socket_pressure(sk->sk_memcg))
                return true;

        return READ_ONCE(tcp_memory_pressure);
}
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
        return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1) before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
        return seq3 - seq2 >= seq1 - seq2;
}
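
/* Illustrative note (not part of the original header): these helpers compare
 * sequence numbers modulo 2^32. For example, before(10, 20) is true, and so is
 * before(0xfffffff0, 0x10), because (__s32)(0xfffffff0 - 0x10) is negative,
 * i.e. 0xfffffff0 precedes 0x10 across the wrap point. Likewise
 * between(5, 0xfffffffa, 0x10) is true: 5 lies inside the wrapped
 * range [0xfffffffa, 0x10].
 */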

static inline bool tcp_out_of_memory(struct sock *sk)
{
        if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
            sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
                return true;
        return false;
}

void sk_forced_mem_schedule(struct sock *sk, int size);

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
        struct percpu_counter *ocp = sk->sk_prot->orphan_count;
        int orphans = percpu_counter_read_positive(ocp);

        if (orphans << shift > sysctl_tcp_max_orphans) {
                orphans = percpu_counter_sum_positive(ocp);
                if (orphans << shift > sysctl_tcp_max_orphans)
                        return true;
        }
        return false;
}

bool tcp_check_oom(struct sock *sk, int shift);


extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define __TCP_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

void tcp_tasklet_init(void);

int tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
                 int flags);
int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
                        size_t size, int flags);
ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
                         size_t size, int flags);
int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
              int size_goal);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
                        struct pipe_inode_info *pipe, size_t len,
                        unsigned int flags);

void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
static inline void tcp_dec_quickack_mode(struct sock *sk,
                                         const unsigned int pkts)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ack.quick) {
                if (pkts >= icsk->icsk_ack.quick) {
                        icsk->icsk_ack.quick = 0;
                        /* Leaving quickack mode we deflate ATO. */
                        icsk->icsk_ack.ato = TCP_ATO_MIN;
                } else
                        icsk->icsk_ack.quick -= pkts;
        }
}

#define TCP_ECN_OK 1
#define TCP_ECN_QUEUE_CWR 2
#define TCP_ECN_DEMAND_CWR 4
#define TCP_ECN_SEEN 8

enum tcp_tw_status {
        TCP_TW_SUCCESS = 0,
        TCP_TW_RST = 1,
        TCP_TW_ACK = 2,
        TCP_TW_SYN = 3
};


enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
                                              struct sk_buff *skb,
                                              const struct tcphdr *th);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                           struct request_sock *req, bool fastopen,
                           bool *lost_race);
int tcp_child_process(struct sock *parent, struct sock *child,
                      struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op);
__poll_t tcp_poll(struct file *file, struct socket *sock,
                  struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
                   char __user *optval, int __user *optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
                   unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                int flags, int *addr_len);
int tcp_set_rcvlowat(struct sock *sk, int val);
void tcp_data_ready(struct sock *sk);
#ifdef CONFIG_MMU
int tcp_mmap(struct file *file, struct socket *sock,
             struct vm_area_struct *vma);
#endif
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
                       struct tcp_options_received *opt_rx,
                       int estab, struct tcp_fastopen_cookie *foc);
const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);

/*
 * BPF SKB-less helpers
 */
u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
                         struct tcphdr *th, u32 *cookie);
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
                         struct tcphdr *th, u32 *cookie);
u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
                          const struct tcp_request_sock_ops *af_ops,
                          struct sock *sk, struct tcphdr *th);
/*
 * TCP v4 functions exported for the inet6 API
 */

void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
                                      struct request_sock *req,
                                      struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req,
                                  struct dst_entry *dst,
                                  struct request_sock *req_unhash,
                                  bool *own_req);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
enum tcp_synack_type {
        TCP_SYNACK_NORMAL,
        TCP_SYNACK_FASTOPEN,
        TCP_SYNACK_COOKIE,
};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
                                struct request_sock *req,
                                struct tcp_fastopen_cookie *foc,
                                enum tcp_synack_type synack_type);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
                                 struct request_sock *req,
                                 struct dst_entry *dst, u32 tsoff);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
                      u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
                                            struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES

/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash input and partially encoded into
 * the cookie value. A cookie is only validated further if the delta
 * between the current counter value and the encoded one is less than this,
 * i.e. a sent cookie is valid for at most 2*60 seconds (or less if
 * the counter advances immediately after a cookie is generated).
 */
#define MAX_SYNCOOKIE_AGE 2
#define TCP_SYNCOOKIE_PERIOD (60 * HZ)
#define TCP_SYNCOOKIE_VALID (MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
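
/* Worked example (illustrative, not from the original header): with
 * MAX_SYNCOOKIE_AGE == 2 and TCP_SYNCOOKIE_PERIOD == 60 * HZ,
 * TCP_SYNCOOKIE_VALID is 120 * HZ, so a cookie is honoured for at most two
 * minutes after the counter value encoded into it was current, and for as
 * little as ~one minute if the counter ticks right after the cookie is
 * generated.
 */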

/* syncookies: remember time of last synqueue overflow
 * But do not dirty this field too often (once per second is enough)
 * It is racy as we do not hold a lock, but race is very minor.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
        unsigned int last_overflow;
        unsigned int now = jiffies;

        if (sk->sk_reuseport) {
                struct sock_reuseport *reuse;

                reuse = rcu_dereference(sk->sk_reuseport_cb);
                if (likely(reuse)) {
                        last_overflow = READ_ONCE(reuse->synq_overflow_ts);
                        if (!time_between32(now, last_overflow,
                                            last_overflow + HZ))
                                WRITE_ONCE(reuse->synq_overflow_ts, now);
                        return;
                }
        }

        last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
        if (!time_between32(now, last_overflow, last_overflow + HZ))
                WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now);
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
        unsigned int last_overflow;
        unsigned int now = jiffies;

        if (sk->sk_reuseport) {
                struct sock_reuseport *reuse;

                reuse = rcu_dereference(sk->sk_reuseport_cb);
                if (likely(reuse)) {
                        last_overflow = READ_ONCE(reuse->synq_overflow_ts);
                        return !time_between32(now, last_overflow - HZ,
                                               last_overflow +
                                               TCP_SYNCOOKIE_VALID);
                }
        }

        last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);

        /* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
         * then we're under synflood. However, we have to use
         * 'last_overflow - HZ' as lower bound. That's because a concurrent
         * tcp_synq_overflow() could update .ts_recent_stamp after we read
         * jiffies but before we store .ts_recent_stamp into last_overflow,
         * which could lead to rejecting a valid syncookie.
         */
        return !time_between32(now, last_overflow - HZ,
                               last_overflow + TCP_SYNCOOKIE_VALID);
}

static inline u32 tcp_cookie_time(void)
{
        u64 val = get_jiffies_64();

        do_div(val, TCP_SYNCOOKIE_PERIOD);
        return val;
}
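
/* Example (illustrative, not from the original header): with HZ == 1000,
 * five minutes after boot get_jiffies_64() is about 300000, so
 * tcp_cookie_time() returns 300000 / (60 * 1000) == 5, i.e. the number of
 * whole 60-second periods elapsed. All cookies generated within the same
 * period share this counter value.
 */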

u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
                              u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
                             struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
                   const struct net *net, const struct dst_entry *dst);

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
                      u32 cookie);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
                              const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */

void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
                               int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
enum tcp_queue {
        TCP_FRAG_IN_WRITE_QUEUE,
        TCP_FRAG_IN_RTX_QUEUE,
};
int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
                 struct sk_buff *skb, u32 len,
                 unsigned int mss_now, gfp_t gfp);

void tcp_send_probe0(struct sock *);
void tcp_send_partial(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
                             const struct sk_buff *next_skb);

/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk);
void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
void tcp_fin(struct sock *sk);

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
        if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
                __sock_put(sk);

        if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
                __sock_put(sk);

        inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size to half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
        int cutoff;

        /* When peer uses tiny windows, there is no use in packetizing
         * to sub-MSS pieces for the sake of SWS or making sure there
         * are enough packets in the pipe for fast recovery.
         *
         * On the other hand, for extremely large MSS devices, handling
         * smaller than MSS windows in this way does make sense.
         */
        if (tp->max_window > TCP_MSS_DEFAULT)
                cutoff = (tp->max_window >> 1);
        else
                cutoff = tp->max_window;

        if (cutoff && pktsize > cutoff)
                return max_t(int, cutoff, 68U - tp->tcp_header_len);
        else
                return pktsize;
}

/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                  sk_read_actor_t recv_actor);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
        if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
                inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
        return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}
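
/* Note (illustrative, relying on how tcp_input.c maintains these fields):
 * tp->srtt_us stores 8 times the smoothed RTT, and tp->rttvar_us is kept
 * with the 4*RTTVAR scaling already applied, so the expression above is
 * the RFC 6298 formula RTO = SRTT + 4*RTTVAR. E.g. SRTT = 100ms and
 * RTTVAR = 10ms give srtt_us == 800000 and rttvar_us == 40000, hence an
 * RTO of 140ms converted to jiffies.
 */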

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
        tp->pred_flags = htonl((tp->tcp_header_len << 26) |
                               ntohl(TCP_FLAG_ACK) |
                               snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
        __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
            tp->rcv_wnd &&
            atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
            !tp->urg_data)
                tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
        const struct dst_entry *dst = __sk_dst_get(sk);
        u32 rto_min = TCP_RTO_MIN;

        if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
                rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
        return rto_min;
}

static inline u32 tcp_rto_min_us(struct sock *sk)
{
        return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
        return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
        return minmax_get(&tp->rtt_min);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
        s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

        if (win < 0)
                win = 0;
        return (u32) win;
}
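
/* Example (illustrative, not from the original header): if rcv_wup == 1000,
 * rcv_wnd == 500 and the peer has pushed data up to rcv_nxt == 1600, the
 * offered window is already overrun, so this returns 0 rather than a
 * negative value.
 */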

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result. The caller does these things
 * if necessary. This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP uses 32bit jiffies to save some space.
 * Note that this is different from tcp_time_stamp, which
 * historically has been the same until linux-4.13.
 */
#define tcp_jiffies32 ((u32)jiffies)

/*
 * Deliver a 32bit value for TCP timestamp option (RFC 7323)
 * It is no longer tied to jiffies, but to 1 ms clock.
 * Note: double check if you want to use tcp_jiffies32 instead of this.
 */
#define TCP_TS_HZ 1000

static inline u64 tcp_clock_ns(void)
{
        return ktime_get_ns();
}

static inline u64 tcp_clock_us(void)
{
        return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
}

/* This should only be used in contexts where tp->tcp_mstamp is up to date */
static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
{
        return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
}

/* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
static inline u32 tcp_ns_to_ts(u64 ns)
{
        return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
}

/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
static inline u32 tcp_time_stamp_raw(void)
{
        return tcp_ns_to_ts(tcp_clock_ns());
}
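
/* Example (illustrative, not from the original header): with TCP_TS_HZ ==
 * 1000, tcp_ns_to_ts() divides by NSEC_PER_SEC / TCP_TS_HZ == 1000000, so
 * 5000000000 ns maps to a TSval of 5000. tcp_time_stamp_raw() therefore
 * ticks once per millisecond, independent of the kernel's HZ setting.
 */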

void tcp_mstamp_refresh(struct tcp_sock *tp);

static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
{
        return max_t(s64, t1 - t0, 0);
}

static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
        return tcp_ns_to_ts(skb->skb_mstamp_ns);
}

/* provide the departure time in us unit */
static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
{
        return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
}


#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

#define TCPHDR_SYN_ECN (TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
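
/* Note (illustrative, not from the original header): TCPHDR_SYN_ECN is 0xc2,
 * a SYN carrying both ECE and CWR, which is how an RFC 3168 client
 * advertises ECN support in its initial segment.
 */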

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
        __u32 seq; /* Starting sequence number */
        __u32 end_seq; /* SEQ + FIN + SYN + datalen */
        union {
                /* Note : tcp_tw_isn is used in input path only
                 * (isn chosen by tcp_timewait_state_process())
                 *
                 * tcp_gso_segs/size are used in write queue only,
                 * cf tcp_skb_pcount()/tcp_skb_mss()
                 */
                __u32 tcp_tw_isn;
                struct {
                        u16 tcp_gso_segs;
                        u16 tcp_gso_size;
                };
        };
        __u8 tcp_flags; /* TCP header flags. (tcp[13]) */

        __u8 sacked; /* State flags for SACK. */
#define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */
#define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
#define TCPCB_LOST 0x04 /* SKB is lost */
#define TCPCB_TAGBITS 0x07 /* All tag bits */
#define TCPCB_REPAIRED 0x10 /* SKB repaired (no skb_mstamp_ns) */
#define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
                       TCPCB_REPAIRED)

        __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
        __u8 txstamp_ack:1, /* Record TX timestamp for ack? */
             eor:1, /* Is skb MSG_EOR marked? */
             has_rxtstamp:1, /* SKB has a RX timestamp */
             unused:5;
        __u32 ack_seq; /* Sequence number ACK'd */
        union {
                struct {
                        /* There is space for up to 24 bytes */
                        __u32 in_flight:30,/* Bytes in flight at transmit */
                              is_app_limited:1, /* cwnd not fully used? */
                              unused:1;
                        /* pkts S/ACKed so far upon tx of skb, incl retrans: */
                        __u32 delivered;
                        /* start of send pipeline phase */
                        u64 first_tx_mstamp;
                        /* when we reached the "delivered" count */
                        u64 delivered_mstamp;
                } tx; /* only used for outgoing skbs */
                union {
                        struct inet_skb_parm h4;
#if IS_ENABLED(CONFIG_IPV6)
                        struct inet6_skb_parm h6;
#endif
                } header; /* For incoming skbs */
                struct {
                        __u32 flags;
                        struct sock *sk_redir;
                        void *data_end;
                } bpf;
        };
};

#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))

static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
{
        TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
}

static inline bool tcp_skb_bpf_ingress(const struct sk_buff *skb)
{
        return TCP_SKB_CB(skb)->bpf.flags & BPF_F_INGRESS;
}

static inline struct sock *tcp_skb_bpf_redirect_fetch(struct sk_buff *skb)
{
        return TCP_SKB_CB(skb)->bpf.sk_redir;
}

static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb)
{
        TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
}

extern const struct inet_connection_sock_af_ops ipv4_specific;

#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[]
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
        return TCP_SKB_CB(skb)->header.h6.iif;
}

static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
{
        bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);

        return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v6_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
        if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
                return TCP_SKB_CB(skb)->header.h6.iif;
#endif
        return 0;
}

extern const struct inet_connection_sock_af_ops ipv6_specific;

INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb));

#endif

static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
        if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
            skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
                return true;
#endif
        return false;
}

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v4_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
        if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
                return TCP_SKB_CB(skb)->header.h4.iif;
#endif
        return 0;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets. To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
        return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
        TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
        TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
        return TCP_SKB_CB(skb)->tcp_gso_size;
}

static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
        return likely(!TCP_SKB_CB(skb)->eor);
}

static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
                                        const struct sk_buff *from)
{
        return likely(tcp_skb_can_collapse_to(to) &&
                      mptcp_skb_can_collapse(to, from));
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
        CA_EVENT_TX_START, /* first transmit when no packets in flight */
        CA_EVENT_CWND_RESTART, /* congestion window restart */
        CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */
        CA_EVENT_LOSS, /* loss timeout */
        CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */
        CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
        CA_ACK_SLOWPATH = (1 << 0), /* In slow path processing */
        CA_ACK_WIN_UPDATE = (1 << 1), /* ACK updated window */
        CA_ACK_ECE = (1 << 2), /* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX 16
#define TCP_CA_MAX 128
#define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CA_UNSPEC 0

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED 0x1
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN 0x2
#define TCP_CONG_MASK (TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)

union tcp_cc_info;

struct ack_sample {
        u32 pkts_acked;
        s32 rtt_us;
        u32 in_flight;
};

/* A rate sample measures the number of (original/retransmitted) data
 * packets delivered "delivered" over an interval of time "interval_us".
 * The tcp_rate.c code fills in the rate sample, and congestion
 * control modules that define a cong_control function to run at the end
 * of ACK processing can optionally choose to consult this sample when
 * setting cwnd and pacing rate.
 * A sample is invalid if "delivered" or "interval_us" is negative.
 */
struct rate_sample {
        u64 prior_mstamp; /* starting timestamp for interval */
        u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
        s32 delivered; /* number of packets delivered over interval */
        long interval_us; /* time for tp->delivered to incr "delivered" */
        u32 snd_interval_us; /* snd interval for delivered packets */
        u32 rcv_interval_us; /* rcv interval for delivered packets */
        long rtt_us; /* RTT of last (S)ACKed packet (or -1) */
        int losses; /* number of packets marked lost upon ACK */
        u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */
        u32 prior_in_flight; /* in flight before this ACK */
        bool is_app_limited; /* is sample from packet with bubble in pipe? */
        bool is_retrans; /* is sample from retransmission? */
        bool is_ack_delayed; /* is this (likely) a delayed ACK? */
};

struct tcp_congestion_ops {
        struct list_head list;
        u32 key;
        u32 flags;

        /* initialize private data (optional) */
        void (*init)(struct sock *sk);
        /* cleanup private data (optional) */
        void (*release)(struct sock *sk);

        /* return slow start threshold (required) */
        u32 (*ssthresh)(struct sock *sk);
        /* do new cwnd calculation (required) */
        void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
        /* call before changing ca_state (optional) */
        void (*set_state)(struct sock *sk, u8 new_state);
        /* call when cwnd event occurs (optional) */
        void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
        /* call when ack arrives (optional) */
        void (*in_ack_event)(struct sock *sk, u32 flags);
        /* new value of cwnd after loss (required) */
        u32 (*undo_cwnd)(struct sock *sk);
        /* hook for packet ack accounting (optional) */
        void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
        /* override sysctl_tcp_min_tso_segs */
        u32 (*min_tso_segs)(struct sock *sk);
        /* returns the multiplier used in tcp_sndbuf_expand (optional) */
        u32 (*sndbuf_expand)(struct sock *sk);
        /* call when packets are delivered to update cwnd and pacing rate,
         * after all the ca_state processing. (optional)
         */
        void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
        /* get info for inet_diag (optional) */
        size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
                           union tcp_cc_info *info);

        char name[TCP_CA_NAME_MAX];
        struct module *owner;
};

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(struct net *net, const char *name);
void tcp_get_default_congestion_control(struct net *net, char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
                               bool reinit, bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;

struct tcp_congestion_ops *tcp_ca_find(const char *name);
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
        return NULL;
}
#endif

static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ca_ops->set_state)
                icsk->icsk_ca_ops->set_state(sk, ca_state);
        icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ca_ops->cwnd_event)
                icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* From tcp_rate.c */
void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
                            struct rate_sample *rs);
void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
                  bool is_sack_reneg, struct rate_sample *rs);
void tcp_rate_check_app_limited(struct sock *sk);

/* These functions determine how the current flow behaves with respect to SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
        return likely(tp->rx_opt.sack_ok);
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
        return !tcp_is_sack(tp);
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
        return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge. In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 * "Packets sent once on transmission queue" MINUS
 * "Packets left network, but not honestly ACKed yet" PLUS
 * "Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
        return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
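
/* Worked example (illustrative, not from the original header): with
 * packets_out == 10, sacked_out == 2, lost_out == 1 and retrans_out == 1,
 * tcp_left_out() is 3 and tcp_packets_in_flight() is 10 - 3 + 1 == 8
 * segments presumed still in the network.
 */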

#define TCP_INFINITE_SSTHRESH 0x7fffffff

static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
        return tp->snd_cwnd < tp->snd_ssthresh;
}

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
        return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
        return (TCPF_CA_CWR | TCPF_CA_Recovery) &
               (1 << inet_csk(sk)->icsk_ca_state);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is cwnd reduction phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);

        if (tcp_in_cwnd_reduction(sk))
                return tp->snd_ssthresh;
        else
                return max(tp->snd_ssthresh,
                           ((tp->snd_cwnd >> 1) +
                            (tp->snd_cwnd >> 2)));
}
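
/* Note (illustrative, not from the original header): (snd_cwnd >> 1) +
 * (snd_cwnd >> 2) is 3/4 of cwnd, so outside of cwnd reduction this returns
 * max(ssthresh, 3*cwnd/4); e.g. cwnd == 40 with ssthresh == 20 yields 30.
 */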

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)

void tcp_enter_cwr(struct sock *sk);
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
        return 3;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
        return tp->snd_una + tp->snd_wnd;
}

/* We follow the spirit of RFC2861 to validate cwnd but implement a more
 * flexible approach. The RFC suggests cwnd should not be raised unless
 * it was fully used previously. And that's exactly what we do in
 * congestion avoidance mode. But in slow start we allow cwnd to grow
 * as long as the application has used half the cwnd.
 * Example :
 * cwnd is 10 (IW10), but application sends 9 frames.
 * We allow cwnd to reach 18 when all frames are ACKed.
 * This check is safe because it's as aggressive as slow start which already
 * risks 100% overshoot. The advantage is that we discourage applications
 * from sending more filler packets or data to artificially blow up the cwnd
 * usage, and allow application-limited processes to probe bw more aggressively.
 */
static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);

        /* If in slow start, ensure cwnd grows to twice what was ACKed. */
        if (tcp_in_slow_start(tp))
                return tp->snd_cwnd < 2 * tp->max_packets_out;

        return tp->is_cwnd_limited;
}

/* BBR congestion control needs pacing.
 * Same remark for SO_MAX_PACING_RATE.
 * sch_fq packet scheduler is efficiently handling pacing,
 * but is not always installed/used.
 * Return true if TCP stack should pace packets itself.
 */
static inline bool tcp_needs_internal_pacing(const struct sock *sk)
{
        return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}

/* Estimates in how many jiffies next packet for this flow can be sent.
 * Scheduling a retransmit timer too early would be silly.
 */
static inline unsigned long tcp_pacing_delay(const struct sock *sk)
{
        s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;

        return delay > 0 ? nsecs_to_jiffies(delay) : 0;
}

static inline void tcp_reset_xmit_timer(struct sock *sk,
                                        const int what,
                                        unsigned long when,
                                        const unsigned long max_when)
{
        inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
                                  max_when);
}

/* Something is really bad, we could not queue an additional packet,
 * because qdisc is full or receiver sent a 0 window, or we are paced.
 * We do not want to add fuel to the fire, or abort too early,
 * so make sure the timer we arm now is at least 200ms in the future,
 * regardless of current icsk_rto value (as it could be ~2ms)
 */
static inline unsigned long tcp_probe0_base(const struct sock *sk)
{
        return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
}

/* Variant of inet_csk_rto_backoff() used for zero window probes */
static inline unsigned long tcp_probe0_when(const struct sock *sk,
                                            unsigned long max_when)
{
        u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;

        return (unsigned long)min_t(u64, when, max_when);
}
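
/* Example (illustrative, not from the original header): with icsk_rto ==
 * HZ/10 (below TCP_RTO_MIN) and icsk_backoff == 3, tcp_probe0_base() is
 * TCP_RTO_MIN == HZ/5, so the probe fires after (HZ/5) << 3, capped at
 * max_when. The u64 intermediate keeps large backoff shifts from
 * overflowing unsigned long arithmetic on 32-bit systems.
 */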

static inline void tcp_check_probe_timer(struct sock *sk)
{
        if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
                tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
                                     tcp_probe0_base(sk), TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
        tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
        tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
                                   __be32 daddr, __wsum base)
{
        return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
        return !skb_csum_unnecessary(skb) &&
               __skb_checksum_complete(skb);
}

bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
int tcp_filter(struct sock *sk, struct sk_buff *skb);
void tcp_set_state(struct sock *sk, int state);
void tcp_done(struct sock *sk);
int tcp_abort(struct sock *sk, int err);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
        rx_opt->dsack = 0;
        rx_opt->num_sacks = 0;
}

void tcp_cwnd_restart(struct sock *sk, s32 delta);

static inline void tcp_slow_start_after_idle_check(struct sock *sk)
{
        const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
        struct tcp_sock *tp = tcp_sk(sk);
        s32 delta;

        if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out ||
            ca_ops->cong_control)
                return;
        delta = tcp_jiffies32 - tp->lsndtime;
        if (delta > inet_csk(sk)->icsk_rto)
                tcp_cwnd_restart(sk, delta);
}

/* Determine a window scaling and initial window to offer. */
void tcp_select_initial_window(const struct sock *sk, int __space,
                               __u32 mss, __u32 *rcv_wnd,
                               __u32 *window_clamp, int wscale_ok,
                               __u8 *rcv_wscale, __u32 init_rcv_wnd);

static inline int tcp_win_from_space(const struct sock *sk, int space)
{
        int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;

        return tcp_adv_win_scale <= 0 ?
               (space>>(-tcp_adv_win_scale)) :
               space - (space>>tcp_adv_win_scale);
}
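
/* Example (illustrative, not from the original header): with
 * tcp_adv_win_scale == 1 (a common setting), tcp_win_from_space(sk, 65536)
 * returns 65536 - (65536 >> 1) == 32768, reserving half of the space as
 * overhead; a negative scale such as -2 would instead return
 * 65536 >> 2 == 16384.
 */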

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
        return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
                                  READ_ONCE(sk->sk_backlog.len) -
                                  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
        return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}

/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
 * If 87.5% (7/8) of the space has been consumed, we want to override
 * SO_RCVLOWAT constraint, since we are receiving skbs with too small
 * len/truesize ratio.
 */
static inline bool tcp_rmem_pressure(const struct sock *sk)
{
        int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
        int threshold = rcvbuf - (rcvbuf >> 3);

        return atomic_read(&sk->sk_rmem_alloc) > threshold;
}

extern void tcp_openreq_init_rwin(struct request_sock *req,
                                  const struct sock *sk_listener,
                                  const struct dst_entry *dst);

void tcp_enter_memory_pressure(struct sock *sk);
void tcp_leave_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
        struct net *net = sock_net((struct sock *)tp);

        return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
        struct net *net = sock_net((struct sock *)tp);

        return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
        struct net *net = sock_net((struct sock *)tp);

        return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
        const struct inet_connection_sock *icsk = &tp->inet_conn;

        return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
                     tcp_jiffies32 - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
        int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
        const int rto = inet_csk(sk)->icsk_rto;

        if (fin_timeout < (rto << 2) - (rto >> 1))
                fin_timeout = (rto << 2) - (rto >> 1);

        return fin_timeout;
}
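
/* Example (illustrative, not from the original header): with
 * tcp_fin_timeout == 60*HZ but an inflated RTO of 30*HZ,
 * (rto << 2) - (rto >> 1) == 105*HZ exceeds the configured value, so
 * FIN-WAIT-2 is kept for 3.5 RTOs instead, leaving room for the peer's
 * retransmissions before the state is destroyed.
 */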

static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
                                  int paws_win)
{
        if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
                return true;
        if (unlikely(!time_before32(ktime_get_seconds(),
                                    rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
                return true;
        /*
         * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
         * then the following tcp messages have valid values. Ignore the 0 value,
         * or else a 'negative' tsval might forbid us from accepting their packets.
         */
        if (!rx_opt->ts_recent)
                return true;
        return false;
}

static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
                                   int rst)
{
        if (tcp_paws_check(rx_opt, 0))
                return false;

        /* RST segments are not recommended to carry timestamps,
           and, if they do, it is recommended to ignore PAWS because
           "their cleanup function should take precedence over timestamps."
           Certainly, this is a mistake. It is necessary to understand the
           reasons for this constraint before relaxing it: if the peer
           reboots, its clock may go out-of-sync and half-open connections
           will not be reset. Actually, the problem would not exist if all
           the implementations followed the draft about maintaining clocks
           via reboots. Linux-2.2 DOES NOT!

           However, we can relax time bounds for RST segments to MSL.
         */
        if (rst && !time_before32(ktime_get_seconds(),
                                  rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
                return false;
        return true;
}
1527
1528bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
1529 int mib_idx, u32 *last_oow_ack_time);
1530
1531static inline void tcp_mib_init(struct net *net)
1532{
1533 /* See RFC 2012 */
1534 TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
1535 TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1536 TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1537 TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
1538}
1539
1540/* from STCP */
1541static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1542{
1543 tp->lost_skb_hint = NULL;
1544}
1545
1546static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1547{
1548 tcp_clear_retrans_hints_partial(tp);
1549 tp->retransmit_skb_hint = NULL;
1550}
1551
1552union tcp_md5_addr {
1553 struct in_addr a4;
1554#if IS_ENABLED(CONFIG_IPV6)
1555 struct in6_addr a6;
1556#endif
1557};
1558
1559/* - key database */
1560struct tcp_md5sig_key {
1561 struct hlist_node node;
1562 u8 keylen;
1563 u8 family; /* AF_INET or AF_INET6 */
1564 u8 prefixlen;
1565 union tcp_md5_addr addr;
1566 int l3index; /* set if key added with L3 scope */
1567 u8 key[TCP_MD5SIG_MAXKEYLEN];
1568 struct rcu_head rcu;
1569};
1570
1571/* - sock block */
1572struct tcp_md5sig_info {
1573 struct hlist_head head;
1574 struct rcu_head rcu;
1575};
1576
1577/* - pseudo header */
1578struct tcp4_pseudohdr {
1579 __be32 saddr;
1580 __be32 daddr;
1581 __u8 pad;
1582 __u8 protocol;
1583 __be16 len;
1584};
1585
1586struct tcp6_pseudohdr {
1587 struct in6_addr saddr;
1588 struct in6_addr daddr;
1589 __be32 len;
1590 __be32 protocol; /* including padding */
1591};
1592
1593union tcp_md5sum_block {
1594 struct tcp4_pseudohdr ip4;
1595#if IS_ENABLED(CONFIG_IPV6)
1596 struct tcp6_pseudohdr ip6;
1597#endif
1598};
1599
1600/* - pool: digest algorithm, hash description and scratch buffer */
1601struct tcp_md5sig_pool {
1602 struct ahash_request *md5_req;
1603 void *scratch;
1604};
1605
1606/* - functions */
1607int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1608 const struct sock *sk, const struct sk_buff *skb);
1609int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1610 int family, u8 prefixlen, int l3index,
1611 const u8 *newkey, u8 newkeylen, gfp_t gfp);
1612int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1613 int family, u8 prefixlen, int l3index);
1614struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1615 const struct sock *addr_sk);
1616
1617#ifdef CONFIG_TCP_MD5SIG
1618#include <linux/jump_label.h>
1619extern struct static_key_false tcp_md5_needed;
1620struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1621 const union tcp_md5_addr *addr,
1622 int family);
1623static inline struct tcp_md5sig_key *
1624tcp_md5_do_lookup(const struct sock *sk, int l3index,
1625 const union tcp_md5_addr *addr, int family)
1626{
1627 if (!static_branch_unlikely(&tcp_md5_needed))
1628 return NULL;
1629 return __tcp_md5_do_lookup(sk, l3index, addr, family);
1630}
1631
1632#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
1633#else
1634static inline struct tcp_md5sig_key *
1635tcp_md5_do_lookup(const struct sock *sk, int l3index,
1636 const union tcp_md5_addr *addr, int family)
1637{
1638 return NULL;
1639}
1640#define tcp_twsk_md5_key(twsk) NULL
1641#endif
1642
1643bool tcp_alloc_md5sig_pool(void);
1644
1645struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
1646static inline void tcp_put_md5sig_pool(void)
1647{
1648 local_bh_enable();
1649}

int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
			  unsigned int header_len);
int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
		     const struct tcp_md5sig_key *key);

/* From tcp_fastopen.c */
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie);
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp);
struct tcp_fastopen_request {
	/* Fast Open cookie. Size 0 means a cookie request */
	struct tcp_fastopen_cookie	cookie;
	struct msghdr			*data;  /* data in MSG_FASTOPEN */
	size_t				size;
	int				copied;	/* queued in tcp_connect() */
	struct ubuf_info		*uarg;
};
void tcp_free_fastopen_req(struct tcp_sock *tp);
void tcp_fastopen_destroy_cipher(struct sock *sk);
void tcp_fastopen_ctx_destroy(struct net *net);
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
			      void *primary_key, void *backup_key);
int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
			    u64 *key);
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      const struct dst_entry *dst);
void tcp_fastopen_init_key_once(struct net *net);
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie);
bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
#define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
#define TCP_FASTOPEN_KEY_MAX 2
#define TCP_FASTOPEN_KEY_BUF_LENGTH \
	(TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)

/* Fastopen key context */
struct tcp_fastopen_context {
	siphash_key_t	key[TCP_FASTOPEN_KEY_MAX];
	int		num;
	struct rcu_head	rcu;
};

extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
void tcp_fastopen_active_disable(struct sock *sk);
bool tcp_fastopen_active_should_disable(struct sock *sk);
void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);

/* Caller needs to wrap with rcu_read_(un)lock() */
static inline
struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
{
	struct tcp_fastopen_context *ctx;

	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
	if (!ctx)
		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
	return ctx;
}
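
/* Usage sketch (illustrative): the context is RCU-protected, so it must
 * only be dereferenced inside an RCU read-side critical section, e.g.:
 *
 *	rcu_read_lock();
 *	ctx = tcp_fastopen_get_ctx(sk);
 *	if (ctx)
 *		nkeys = tcp_fastopen_context_len(ctx);
 *	rcu_read_unlock();
 *
 * The pointer must not be used after rcu_read_unlock().
 */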

static inline
bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
			       const struct tcp_fastopen_cookie *orig)
{
	if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
	    orig->len == foc->len &&
	    !memcmp(orig->val, foc->val, foc->len))
		return true;
	return false;
}

static inline
int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
{
	return ctx->num;
}

/* Latencies incurred by various limits for a sender. They are
 * chronograph-like stats that are mutually exclusive.
 */
enum tcp_chrono {
	TCP_CHRONO_UNSPEC,
	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
	__TCP_CHRONO_MAX,
};

void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
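
/* Usage sketch (illustrative): at most one chrono runs at a time, since
 * the states are mutually exclusive. A sender that detects a
 * receive-window stall would bracket the stalled period with:
 *
 *	tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);
 *	...
 *	tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
 *
 * The accumulated durations are exported to userspace via tcp_get_info().
 */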

/* This helper is needed, because skb->tcp_tsorted_anchor uses
 * the same memory storage as skb->destructor/_skb_refdst
 */
static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
{
	skb->destructor = NULL;
	skb->_skb_refdst = 0UL;
}

#define tcp_skb_tsorted_save(skb) {		\
	unsigned long _save = skb->_skb_refdst;	\
	skb->_skb_refdst = 0UL;

#define tcp_skb_tsorted_restore(skb)		\
	skb->_skb_refdst = _save;		\
}
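
/* Usage sketch (illustrative): the two macros open and close a brace
 * scope and must always be paired, with the code that needs
 * _skb_refdst temporarily cleared placed in between:
 *
 *	tcp_skb_tsorted_save(skb) {
 *		nskb = skb_clone(skb, GFP_ATOMIC);
 *	} tcp_skb_tsorted_restore(skb);
 */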

void tcp_write_queue_purge(struct sock *sk);

static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
{
	return skb_rb_first(&sk->tcp_rtx_queue);
}

static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
{
	return skb_rb_last(&sk->tcp_rtx_queue);
}

static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

/**
 * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue
 * @sk: socket
 *
 * Since the write queue can have a temporary empty skb in it,
 * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
 */
static inline bool tcp_write_queue_empty(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return tp->write_seq == tp->snd_nxt;
}

static inline bool tcp_rtx_queue_empty(const struct sock *sk)
{
	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
}

static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
{
	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_write_queue.next == skb)
		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
}

/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	tcp_skb_tsorted_anchor_cleanup(skb);
	__skb_unlink(skb, &sk->sk_write_queue);
}

void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);

static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
{
	tcp_skb_tsorted_anchor_cleanup(skb);
	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
}

static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
{
	list_del(&skb->tcp_tsorted_anchor);
	tcp_rtx_queue_unlink(skb, sk);
	sk_wmem_free_skb(sk, skb);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	if (tcp_send_head(sk)) {
		struct tcp_sock *tp = tcp_sk(sk);

		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
	}
}

/* Start sequence of the skb just after the highest skb with SACKed
 * bit, valid only if sacked_out > 0 or when the caller has otherwise
 * ensured its validity.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;

	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}

static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
}

/* Called when old skb is about to be deleted and replaced by new skb */
static inline void tcp_highest_sack_replace(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	if (old == tcp_highest_sack(sk))
		tcp_sk(sk)->highest_sack = new;
}

/* This helper checks if socket has IP_TRANSPARENT set */
static inline bool inet_sk_transparent(const struct sock *sk)
{
	switch (sk->sk_state) {
	case TCP_TIME_WAIT:
		return inet_twsk(sk)->tw_transparent;
	case TCP_NEW_SYN_RECV:
		return inet_rsk(inet_reqsk(sk))->no_srccheck;
	}
	return inet_sk(sk)->transparent;
}

/* Determines whether this is a thin stream (which may suffer from
 * increased latency). Used to trigger latency-reducing mechanisms.
 */
static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
{
	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
}

/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_ESTABLISHED,
};

void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void tcp_seq_stop(struct seq_file *seq, void *v);

struct tcp_seq_afinfo {
	sa_family_t			family;
};

struct tcp_iter_state {
	struct seq_net_private	p;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	struct tcp_seq_afinfo	*bpf_seq_afinfo;
	int			bucket, offset, sbucket, num;
	loff_t			last_pos;
};

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

void tcp_v4_destroy_sock(struct sock *sk);

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features);
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
int tcp_gro_complete(struct sk_buff *skb);

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);

static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);
	return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
}

/* @wake is one when sk_stream_write_space() calls us.
 * This sends EPOLLOUT only if notsent_bytes is below half the limit.
 * This mimics the strategy used in sock_def_write_space().
 */
static inline bool tcp_stream_memory_free(const struct sock *sk, int wake)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 notsent_bytes = READ_ONCE(tp->write_seq) -
			    READ_ONCE(tp->snd_nxt);

	return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
}
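
/* Worked example (illustrative): with tcp_notsent_lowat() == 16384 and
 * 10000 not-yet-sent bytes, a plain check (wake == 0) passes since
 * 10000 < 16384, but the wakeup path (wake == 1) compares
 * 10000 << 1 == 20000 against 16384 and stays quiet, i.e. EPOLLOUT is
 * only signalled once notsent_bytes drops below half the limit.
 */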

#ifdef CONFIG_PROC_FS
int tcp4_proc_init(void);
void tcp4_proc_exit(void);
#endif

int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
int tcp_conn_request(struct request_sock_ops *rsk_ops,
		     const struct tcp_request_sock_ops *af_ops,
		     struct sock *sk, struct sk_buff *skb);

/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
						const struct sock *addr_sk);
	int		(*calc_md5_hash)(char *location,
					 const struct tcp_md5sig_key *md5,
					 const struct sock *sk,
					 const struct sk_buff *skb);
	int		(*md5_parse)(struct sock *sk,
				     int optname,
				     sockptr_t optval,
				     int optlen);
#endif
};

struct tcp_request_sock_ops {
	u16 mss_clamp;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
						 const struct sock *addr_sk);
	int		(*calc_md5_hash) (char *location,
					  const struct tcp_md5sig_key *md5,
					  const struct sock *sk,
					  const struct sk_buff *skb);
#endif
	void (*init_req)(struct request_sock *req,
			 const struct sock *sk_listener,
			 struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
				 __u16 *mss);
#endif
	struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
				       const struct request_sock *req);
	u32 (*init_seq)(const struct sk_buff *skb);
	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
			   struct flowi *fl, struct request_sock *req,
			   struct tcp_fastopen_cookie *foc,
			   enum tcp_synack_type synack_type);
};

extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
#if IS_ENABLED(CONFIG_IPV6)
extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
#endif

#ifdef CONFIG_SYN_COOKIES
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 const struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	tcp_synq_overflow(sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
	return ops->cookie_init_seq(skb, mss);
}
#else
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 const struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	return 0;
}
#endif

int tcpv4_offload_init(void);

void tcp_v4_init(void);
void tcp_init(void);

/* tcp_recovery.c */
void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
				u32 reo_wnd);
extern void tcp_rack_mark_lost(struct sock *sk);
extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
			     u64 xmit_time);
extern void tcp_rack_reo_timeout(struct sock *sk);
extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);

/* At how many usecs into the future should the RTO fire? */
static inline s64 tcp_rto_delta_us(const struct sock *sk)
{
	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
	u32 rto = inet_csk(sk)->icsk_rto;
	u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);

	return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
}
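
/* Worked example (illustrative): if the head of the rtx queue was sent
 * at t = 1,000,000 us, icsk_rto is 200 ms (200,000 us) and tcp_mstamp
 * is now 1,150,000 us, the timer should fire in
 * (1,000,000 + 200,000) - 1,150,000 = 50,000 us. A negative result
 * means the RTO is already overdue.
 */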

/*
 * Save and compile IPv4 options, returning a pointer to the result
 */
static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
							 struct sk_buff *skb)
{
	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
	struct ip_options_rcu *dopt = NULL;

	if (opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
			kfree(dopt);
			dopt = NULL;
		}
	}
	return dopt;
}
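
/* Usage sketch (illustrative): the returned buffer is allocated with
 * GFP_ATOMIC and owned by the caller, so it must eventually be freed:
 *
 *	struct ip_options_rcu *opt = tcp_v4_save_options(net, skb);
 *
 *	...
 *	kfree(opt);
 *
 * NULL is returned both when the skb carries no IP options and on
 * allocation/echo failure; kfree(NULL) is a no-op, so neither case
 * needs special handling.
 */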

/* locally generated TCP pure ACKs have skb->truesize == 2
 * (check tcp_send_ack() in net/ipv4/tcp_output.c)
 * This is much faster than dissecting the packet to find out.
 * (Think of GRE encapsulations, IPv4, IPv6, ...)
 */
static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
{
	return skb->truesize == 2;
}

static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
{
	skb->truesize = 2;
}

static inline int tcp_inq(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		answ = 0;
	} else if (sock_flag(sk, SOCK_URGINLINE) ||
		   !tp->urg_data ||
		   before(tp->urg_seq, tp->copied_seq) ||
		   !before(tp->urg_seq, tp->rcv_nxt)) {

		answ = tp->rcv_nxt - tp->copied_seq;

		/* Subtract 1, if FIN was received */
		if (answ && sock_flag(sk, SOCK_DONE))
			answ--;
	} else {
		answ = tp->urg_seq - tp->copied_seq;
	}

	return answ;
}
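
/* Worked example (illustrative): with rcv_nxt == 5000 and
 * copied_seq == 4200 on an established socket without urgent data,
 * tcp_inq() reports 800 readable bytes; this is the kernel-side
 * computation behind the SIOCINQ/FIONREAD ioctl.
 */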

int tcp_peek_len(struct socket *sock);

static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
{
	u16 segs_in;

	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	tp->segs_in += segs_in;
	if (skb->len > tcp_hdrlen(skb))
		tp->data_segs_in += segs_in;
}

/*
 * TCP listen path runs lockless.
 * We forced "struct sock" to be const qualified to make sure
 * we don't modify one of its fields by mistake.
 * Here, we increment sk_drops which is an atomic_t, so we can safely
 * make sock writable again.
 */
static inline void tcp_listendrop(const struct sock *sk)
{
	atomic_inc(&((struct sock *)sk)->sk_drops);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
}

enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);

/*
 * Interface for adding Upper Level Protocols over TCP
 */

#define TCP_ULP_NAME_MAX	16
#define TCP_ULP_MAX		128
#define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)

struct tcp_ulp_ops {
	struct list_head	list;

	/* initialize ulp */
	int (*init)(struct sock *sk);
	/* update ulp */
	void (*update)(struct sock *sk, struct proto *p,
		       void (*write_space)(struct sock *sk));
	/* cleanup ulp */
	void (*release)(struct sock *sk);
	/* diagnostic */
	int (*get_info)(const struct sock *sk, struct sk_buff *skb);
	size_t (*get_info_size)(const struct sock *sk);
	/* clone ulp */
	void (*clone)(const struct request_sock *req, struct sock *newsk,
		      const gfp_t priority);

	char		name[TCP_ULP_NAME_MAX];
	struct module	*owner;
};
int tcp_register_ulp(struct tcp_ulp_ops *type);
void tcp_unregister_ulp(struct tcp_ulp_ops *type);
int tcp_set_ulp(struct sock *sk, const char *name);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
void tcp_update_ulp(struct sock *sk, struct proto *p,
		    void (*write_space)(struct sock *sk));

#define MODULE_ALIAS_TCP_ULP(name)				\
	__MODULE_INFO(alias, alias_userspace, name);		\
	__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
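
/* Registration sketch (illustrative, modeled on how net/tls wires up
 * its ULP; the "my_ulp" names below are hypothetical):
 *
 *	static struct tcp_ulp_ops my_ulp_ops __read_mostly = {
 *		.name		= "my_ulp",
 *		.owner		= THIS_MODULE,
 *		.init		= my_ulp_init,
 *		.release	= my_ulp_release,
 *	};
 *
 *	tcp_register_ulp(&my_ulp_ops);
 *	MODULE_ALIAS_TCP_ULP("my_ulp");
 *
 * Userspace then attaches it with setsockopt(fd, SOL_TCP, TCP_ULP,
 * "my_ulp", sizeof("my_ulp")).
 */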

struct sk_msg;
struct sk_psock;

#ifdef CONFIG_BPF_STREAM_PARSER
struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
#else
static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

#ifdef CONFIG_NET_SOCK_MSG
int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes,
			  int flags);
int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
		      struct msghdr *msg, int len, int flags);
#endif /* CONFIG_NET_SOCK_MSG */

/* Call BPF_SOCK_OPS program that returns an int. If the return value
 * is < 0, then the BPF op failed (for example if the loaded BPF
 * program does not support the chosen operation or there is no BPF
 * program loaded).
 */
#ifdef CONFIG_BPF
static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
	struct bpf_sock_ops_kern sock_ops;
	int ret;

	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
	if (sk_fullsock(sk)) {
		sock_ops.is_fullsock = 1;
		sock_owned_by_me(sk);
	}

	sock_ops.sk = sk;
	sock_ops.op = op;
	if (nargs > 0)
		memcpy(sock_ops.args, args, nargs * sizeof(*args));

	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
	if (ret == 0)
		ret = sock_ops.reply;
	else
		ret = -1;
	return ret;
}

static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
{
	u32 args[2] = {arg1, arg2};

	return tcp_call_bpf(sk, op, 2, args);
}

static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
				    u32 arg3)
{
	u32 args[3] = {arg1, arg2, arg3};

	return tcp_call_bpf(sk, op, 3, args);
}

#else
static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
	return -EPERM;
}

static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
{
	return -EPERM;
}

static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
				    u32 arg3)
{
	return -EPERM;
}

#endif
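
/* Usage sketch (illustrative; op and fallback are placeholders): callers
 * treat any non-positive or negative return as "no answer from BPF" and
 * fall back to a built-in default, exactly as tcp_timeout_init() below
 * does:
 *
 *	ret = tcp_call_bpf(sk, some_sock_ops_op, 0, NULL);
 *	if (ret < 0)
 *		ret = default_value;
 */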

static inline u32 tcp_timeout_init(struct sock *sk)
{
	int timeout;

	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);

	if (timeout <= 0)
		timeout = TCP_TIMEOUT_INIT;
	return timeout;
}

static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
{
	int rwnd;

	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);

	if (rwnd < 0)
		rwnd = 0;
	return rwnd;
}

static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
{
	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
}

static inline void tcp_bpf_rtt(struct sock *sk)
{
	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
		tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, NULL);
}

#if IS_ENABLED(CONFIG_SMC)
extern struct static_key_false tcp_have_smc;
#endif

#if IS_ENABLED(CONFIG_TLS_DEVICE)
void clean_acked_data_enable(struct inet_connection_sock *icsk,
			     void (*cad)(struct sock *sk, u32 ack_seq));
void clean_acked_data_disable(struct inet_connection_sock *icsk);
void clean_acked_data_flush(void);
#endif

DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
static inline void tcp_add_tx_delay(struct sk_buff *skb,
				    const struct tcp_sock *tp)
{
	if (static_branch_unlikely(&tcp_tx_delay_enabled))
		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
}

/* Compute Earliest Departure Time for some control packets
 * like ACK or RST for TIME_WAIT or non-ESTABLISHED sockets.
 */
static inline u64 tcp_transmit_time(const struct sock *sk)
{
	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
			tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;

		return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
	}
	return 0;
}

#endif	/* _TCP_H */
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Definitions for the TCP module.
7 *
8 * Version: @(#)tcp.h 1.0.5 05/23/93
9 *
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18#ifndef _TCP_H
19#define _TCP_H
20
21#define FASTRETRANS_DEBUG 1
22
23#include <linux/list.h>
24#include <linux/tcp.h>
25#include <linux/bug.h>
26#include <linux/slab.h>
27#include <linux/cache.h>
28#include <linux/percpu.h>
29#include <linux/skbuff.h>
30#include <linux/dmaengine.h>
31#include <linux/crypto.h>
32#include <linux/cryptohash.h>
33#include <linux/kref.h>
34
35#include <net/inet_connection_sock.h>
36#include <net/inet_timewait_sock.h>
37#include <net/inet_hashtables.h>
38#include <net/checksum.h>
39#include <net/request_sock.h>
40#include <net/sock.h>
41#include <net/snmp.h>
42#include <net/ip.h>
43#include <net/tcp_states.h>
44#include <net/inet_ecn.h>
45#include <net/dst.h>
46
47#include <linux/seq_file.h>
48#include <linux/memcontrol.h>
49
50extern struct inet_hashinfo tcp_hashinfo;
51
52extern struct percpu_counter tcp_orphan_count;
53extern void tcp_time_wait(struct sock *sk, int state, int timeo);
54
55#define MAX_TCP_HEADER (128 + MAX_HEADER)
56#define MAX_TCP_OPTION_SPACE 40
57
58/*
59 * Never offer a window over 32767 without using window scaling. Some
60 * poor stacks do signed 16bit maths!
61 */
62#define MAX_TCP_WINDOW 32767U
63
64/* Offer an initial receive window of 10 mss. */
65#define TCP_DEFAULT_INIT_RCVWND 10
66
67/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
68#define TCP_MIN_MSS 88U
69
70/* The least MTU to use for probing */
71#define TCP_BASE_MSS 512
72
73/* After receiving this amount of duplicate ACKs fast retransmit starts. */
74#define TCP_FASTRETRANS_THRESH 3
75
76/* Maximal reordering. */
77#define TCP_MAX_REORDERING 127
78
79/* Maximal number of ACKs sent quickly to accelerate slow-start. */
80#define TCP_MAX_QUICKACKS 16U
81
82/* urg_data states */
83#define TCP_URG_VALID 0x0100
84#define TCP_URG_NOTYET 0x0200
85#define TCP_URG_READ 0x0400
86
87#define TCP_RETR1 3 /*
88 * This is how many retries it does before it
89 * tries to figure out if the gateway is
90 * down. Minimal RFC value is 3; it corresponds
91 * to ~3sec-8min depending on RTO.
92 */
93
94#define TCP_RETR2 15 /*
95 * This should take at least
96 * 90 minutes to time out.
97 * RFC1122 says that the limit is 100 sec.
98 * 15 is ~13-30min depending on RTO.
99 */
100
101#define TCP_SYN_RETRIES 5 /* number of times to retry active opening a
102 * connection: ~180sec is RFC minimum */
103
104#define TCP_SYNACK_RETRIES 5 /* number of times to retry passive opening a
105 * connection: ~180sec is RFC minimum */
106
107#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
108 * state, about 60 seconds */
109#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
110 /* BSD style FIN_WAIT2 deadlock breaker.
111 * It used to be 3min, new value is 60sec,
112 * to combine FIN-WAIT-2 timeout with
113 * TIME-WAIT timer.
114 */
115
116#define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */
117#if HZ >= 100
118#define TCP_DELACK_MIN ((unsigned)(HZ/25)) /* minimal time to delay before sending an ACK */
119#define TCP_ATO_MIN ((unsigned)(HZ/25))
120#else
121#define TCP_DELACK_MIN 4U
122#define TCP_ATO_MIN 4U
123#endif
124#define TCP_RTO_MAX ((unsigned)(120*HZ))
125#define TCP_RTO_MIN ((unsigned)(HZ/5))
126#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
127#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now
128 * used as a fallback RTO for the
129 * initial data transmission if no
130 * valid RTT sample has been acquired,
131 * most likely due to retrans in 3WHS.
132 */
133
134#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
135 * for local resources.
136 */
137
138#define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */
139#define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */
140#define TCP_KEEPALIVE_INTVL (75*HZ)
141
142#define MAX_TCP_KEEPIDLE 32767
143#define MAX_TCP_KEEPINTVL 32767
144#define MAX_TCP_KEEPCNT 127
145#define MAX_TCP_SYNCNT 127
146
147#define TCP_SYNQ_INTERVAL (HZ/5) /* Period of SYNACK timer */
148
149#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
150#define TCP_PAWS_MSL 60 /* Per-host timestamps are invalidated
151 * after this time. It should be equal
152 * (or greater than) TCP_TIMEWAIT_LEN
153 * to provide reliability equal to one
154 * provided by timewait state.
155 */
156#define TCP_PAWS_WINDOW 1 /* Replay window for per-host
157 * timestamps. It must be less than
158 * minimal timewait lifetime.
159 */
160/*
161 * TCP option
162 */
163
164#define TCPOPT_NOP 1 /* Padding */
165#define TCPOPT_EOL 0 /* End of options */
166#define TCPOPT_MSS 2 /* Segment size negotiating */
167#define TCPOPT_WINDOW 3 /* Window scaling */
168#define TCPOPT_SACK_PERM 4 /* SACK Permitted */
169#define TCPOPT_SACK 5 /* SACK Block */
170#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
171#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
172#define TCPOPT_COOKIE 253 /* Cookie extension (experimental) */
173
174/*
175 * TCP option lengths
176 */
177
178#define TCPOLEN_MSS 4
179#define TCPOLEN_WINDOW 3
180#define TCPOLEN_SACK_PERM 2
181#define TCPOLEN_TIMESTAMP 10
182#define TCPOLEN_MD5SIG 18
183#define TCPOLEN_COOKIE_BASE 2 /* Cookie-less header extension */
184#define TCPOLEN_COOKIE_PAIR 3 /* Cookie pair header extension */
185#define TCPOLEN_COOKIE_MIN (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MIN)
186#define TCPOLEN_COOKIE_MAX (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MAX)
187
188/* But this is what stacks really send out. */
189#define TCPOLEN_TSTAMP_ALIGNED 12
190#define TCPOLEN_WSCALE_ALIGNED 4
191#define TCPOLEN_SACKPERM_ALIGNED 4
192#define TCPOLEN_SACK_BASE 2
193#define TCPOLEN_SACK_BASE_ALIGNED 4
194#define TCPOLEN_SACK_PERBLOCK 8
195#define TCPOLEN_MD5SIG_ALIGNED 20
196#define TCPOLEN_MSS_ALIGNED 4
197
198/* Flags in tp->nonagle */
199#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
200#define TCP_NAGLE_CORK 2 /* Socket is corked */
201#define TCP_NAGLE_PUSH 4 /* Cork is overridden for already queued data */
202
203/* TCP thin-stream limits */
204#define TCP_THIN_LINEAR_RETRIES 6 /* After 6 linear retries, do exp. backoff */
205
206/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
207#define TCP_INIT_CWND 10
208
209extern struct inet_timewait_death_row tcp_death_row;
210
211/* sysctl variables for tcp */
212extern int sysctl_tcp_timestamps;
213extern int sysctl_tcp_window_scaling;
214extern int sysctl_tcp_sack;
215extern int sysctl_tcp_fin_timeout;
216extern int sysctl_tcp_keepalive_time;
217extern int sysctl_tcp_keepalive_probes;
218extern int sysctl_tcp_keepalive_intvl;
219extern int sysctl_tcp_syn_retries;
220extern int sysctl_tcp_synack_retries;
221extern int sysctl_tcp_retries1;
222extern int sysctl_tcp_retries2;
223extern int sysctl_tcp_orphan_retries;
224extern int sysctl_tcp_syncookies;
225extern int sysctl_tcp_retrans_collapse;
226extern int sysctl_tcp_stdurg;
227extern int sysctl_tcp_rfc1337;
228extern int sysctl_tcp_abort_on_overflow;
229extern int sysctl_tcp_max_orphans;
230extern int sysctl_tcp_fack;
231extern int sysctl_tcp_reordering;
232extern int sysctl_tcp_ecn;
233extern int sysctl_tcp_dsack;
234extern int sysctl_tcp_wmem[3];
235extern int sysctl_tcp_rmem[3];
236extern int sysctl_tcp_app_win;
237extern int sysctl_tcp_adv_win_scale;
238extern int sysctl_tcp_tw_reuse;
239extern int sysctl_tcp_frto;
240extern int sysctl_tcp_frto_response;
241extern int sysctl_tcp_low_latency;
242extern int sysctl_tcp_dma_copybreak;
243extern int sysctl_tcp_nometrics_save;
244extern int sysctl_tcp_moderate_rcvbuf;
245extern int sysctl_tcp_tso_win_divisor;
246extern int sysctl_tcp_abc;
247extern int sysctl_tcp_mtu_probing;
248extern int sysctl_tcp_base_mss;
249extern int sysctl_tcp_workaround_signed_windows;
250extern int sysctl_tcp_slow_start_after_idle;
251extern int sysctl_tcp_max_ssthresh;
252extern int sysctl_tcp_cookie_size;
253extern int sysctl_tcp_thin_linear_timeouts;
254extern int sysctl_tcp_thin_dupack;
255extern int sysctl_tcp_early_retrans;
256
257extern atomic_long_t tcp_memory_allocated;
258extern struct percpu_counter tcp_sockets_allocated;
259extern int tcp_memory_pressure;
260
261/*
262 * The next routines deal with comparing 32 bit unsigned ints
263 * and worry about wraparound (automatic with unsigned arithmetic).
264 */
265
266static inline bool before(__u32 seq1, __u32 seq2)
267{
268 return (__s32)(seq1-seq2) < 0;
269}
270#define after(seq2, seq1) before(seq1, seq2)
271
272/* is s2<=s1<=s3 ? */
273static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
274{
275 return seq3 - seq2 >= seq1 - seq2;
276}
277
278static inline bool tcp_out_of_memory(struct sock *sk)
279{
280 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
281 sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
282 return true;
283 return false;
284}
285
286static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
287{
288 struct percpu_counter *ocp = sk->sk_prot->orphan_count;
289 int orphans = percpu_counter_read_positive(ocp);
290
291 if (orphans << shift > sysctl_tcp_max_orphans) {
292 orphans = percpu_counter_sum_positive(ocp);
293 if (orphans << shift > sysctl_tcp_max_orphans)
294 return true;
295 }
296 return false;
297}
298
299extern bool tcp_check_oom(struct sock *sk, int shift);
300
301/* syncookies: remember time of last synqueue overflow */
302static inline void tcp_synq_overflow(struct sock *sk)
303{
304 tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
305}
306
307/* syncookies: no recent synqueue overflow on this listening socket? */
308static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
309{
310 unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
311 return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
312}
313
314extern struct proto tcp_prot;
315
316#define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field)
317#define TCP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
318#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
319#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
320#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
321
322extern void tcp_init_mem(struct net *net);
323
324extern void tcp_v4_err(struct sk_buff *skb, u32);
325
326extern void tcp_shutdown (struct sock *sk, int how);
327
328extern int tcp_v4_rcv(struct sk_buff *skb);
329
330extern struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it);
331extern void *tcp_v4_tw_get_peer(struct sock *sk);
332extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
333extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
334 size_t size);
335extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
336 size_t size, int flags);
337extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
338extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
339 const struct tcphdr *th, unsigned int len);
340extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
341 const struct tcphdr *th, unsigned int len);
342extern void tcp_rcv_space_adjust(struct sock *sk);
343extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
344extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
345extern void tcp_twsk_destructor(struct sock *sk);
346extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
347 struct pipe_inode_info *pipe, size_t len,
348 unsigned int flags);
349
350static inline void tcp_dec_quickack_mode(struct sock *sk,
351 const unsigned int pkts)
352{
353 struct inet_connection_sock *icsk = inet_csk(sk);
354
355 if (icsk->icsk_ack.quick) {
356 if (pkts >= icsk->icsk_ack.quick) {
357 icsk->icsk_ack.quick = 0;
358 /* Leaving quickack mode we deflate ATO. */
359 icsk->icsk_ack.ato = TCP_ATO_MIN;
360 } else
361 icsk->icsk_ack.quick -= pkts;
362 }
363}
364
365#define TCP_ECN_OK 1
366#define TCP_ECN_QUEUE_CWR 2
367#define TCP_ECN_DEMAND_CWR 4
368#define TCP_ECN_SEEN 8
369
370enum tcp_tw_status {
371 TCP_TW_SUCCESS = 0,
372 TCP_TW_RST = 1,
373 TCP_TW_ACK = 2,
374 TCP_TW_SYN = 3
375};
376
377
378extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
379 struct sk_buff *skb,
380 const struct tcphdr *th);
381extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
382 struct request_sock *req,
383 struct request_sock **prev);
384extern int tcp_child_process(struct sock *parent, struct sock *child,
385 struct sk_buff *skb);
386extern bool tcp_use_frto(struct sock *sk);
387extern void tcp_enter_frto(struct sock *sk);
388extern void tcp_enter_loss(struct sock *sk, int how);
389extern void tcp_clear_retrans(struct tcp_sock *tp);
390extern void tcp_update_metrics(struct sock *sk);
391extern void tcp_close(struct sock *sk, long timeout);
392extern void tcp_init_sock(struct sock *sk);
393extern unsigned int tcp_poll(struct file * file, struct socket *sock,
394 struct poll_table_struct *wait);
395extern int tcp_getsockopt(struct sock *sk, int level, int optname,
396 char __user *optval, int __user *optlen);
397extern int tcp_setsockopt(struct sock *sk, int level, int optname,
398 char __user *optval, unsigned int optlen);
399extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
400 char __user *optval, int __user *optlen);
401extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
402 char __user *optval, unsigned int optlen);
403extern void tcp_set_keepalive(struct sock *sk, int val);
404extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
405extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
406 size_t len, int nonblock, int flags, int *addr_len);
407extern void tcp_parse_options(const struct sk_buff *skb,
408 struct tcp_options_received *opt_rx, const u8 **hvpp,
409 int estab);
410extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
411
412/*
413 * TCP v4 functions exported for the inet6 API
414 */
415
416extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
417extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
418extern struct sock * tcp_create_openreq_child(struct sock *sk,
419 struct request_sock *req,
420 struct sk_buff *skb);
421extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
422 struct request_sock *req,
423 struct dst_entry *dst);
424extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
425extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
426 int addr_len);
427extern int tcp_connect(struct sock *sk);
428extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
429 struct request_sock *req,
430 struct request_values *rvp);
431extern int tcp_disconnect(struct sock *sk, int flags);
432
433void tcp_connect_init(struct sock *sk);
434void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
435int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
436
437/* From syncookies.c */
438extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
439extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
440 struct ip_options *opt);
441#ifdef CONFIG_SYN_COOKIES
442extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
443 __u16 *mss);
444#else
445static inline __u32 cookie_v4_init_sequence(struct sock *sk,
446 struct sk_buff *skb,
447 __u16 *mss)
448{
449 return 0;
450}
451#endif
452
453extern __u32 cookie_init_timestamp(struct request_sock *req);
454extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);
455
456/* From net/ipv6/syncookies.c */
457extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
458#ifdef CONFIG_SYN_COOKIES
459extern __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
460 __u16 *mss);
461#else
462static inline __u32 cookie_v6_init_sequence(struct sock *sk,
463 struct sk_buff *skb,
464 __u16 *mss)
465{
466 return 0;
467}
468#endif
469/* tcp_output.c */
470
471extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
472 int nonagle);
473extern bool tcp_may_send_now(struct sock *sk);
474extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
475extern void tcp_retransmit_timer(struct sock *sk);
476extern void tcp_xmit_retransmit_queue(struct sock *);
477extern void tcp_simple_retransmit(struct sock *);
478extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
479extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
480
481extern void tcp_send_probe0(struct sock *);
482extern void tcp_send_partial(struct sock *);
483extern int tcp_write_wakeup(struct sock *);
484extern void tcp_send_fin(struct sock *sk);
485extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
486extern int tcp_send_synack(struct sock *);
487extern bool tcp_syn_flood_action(struct sock *sk,
488 const struct sk_buff *skb,
489 const char *proto);
490extern void tcp_push_one(struct sock *, unsigned int mss_now);
491extern void tcp_send_ack(struct sock *sk);
492extern void tcp_send_delayed_ack(struct sock *sk);
493
494/* tcp_input.c */
495extern void tcp_cwnd_application_limited(struct sock *sk);
496extern void tcp_resume_early_retransmit(struct sock *sk);
497extern void tcp_rearm_rto(struct sock *sk);
498
499/* tcp_timer.c */
500extern void tcp_init_xmit_timers(struct sock *);
501static inline void tcp_clear_xmit_timers(struct sock *sk)
502{
503 inet_csk_clear_xmit_timers(sk);
504}
505
506extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
507extern unsigned int tcp_current_mss(struct sock *sk);
508
509/* Bound MSS / TSO packet size with the half of the window */
510static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
511{
512 int cutoff;
513
514 /* When peer uses tiny windows, there is no use in packetizing
515 * to sub-MSS pieces for the sake of SWS or making sure there
516 * are enough packets in the pipe for fast recovery.
517 *
518 * On the other hand, for extremely large MSS devices, handling
519 * smaller than MSS windows in this way does make sense.
520 */
521 if (tp->max_window >= 512)
522 cutoff = (tp->max_window >> 1);
523 else
524 cutoff = tp->max_window;
525
526 if (cutoff && pktsize > cutoff)
527 return max_t(int, cutoff, 68U - tp->tcp_header_len);
528 else
529 return pktsize;
530}
531
532/* tcp.c */
533extern void tcp_get_info(const struct sock *, struct tcp_info *);
534
535/* Read 'sendfile()'-style from a TCP socket */
536typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
537 unsigned int, size_t);
538extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
539 sk_read_actor_t recv_actor);
540
541extern void tcp_initialize_rcv_mss(struct sock *sk);
542
543extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
544extern int tcp_mss_to_mtu(struct sock *sk, int mss);
545extern void tcp_mtup_init(struct sock *sk);
546extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);
547
548static inline void tcp_bound_rto(const struct sock *sk)
549{
550 if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
551 inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
552}
553
554static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
555{
556 return (tp->srtt >> 3) + tp->rttvar;
557}
558
559static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
560{
561 tp->pred_flags = htonl((tp->tcp_header_len << 26) |
562 ntohl(TCP_FLAG_ACK) |
563 snd_wnd);
564}
565
566static inline void tcp_fast_path_on(struct tcp_sock *tp)
567{
568 __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
569}
570
571static inline void tcp_fast_path_check(struct sock *sk)
572{
573 struct tcp_sock *tp = tcp_sk(sk);
574
575 if (skb_queue_empty(&tp->out_of_order_queue) &&
576 tp->rcv_wnd &&
577 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
578 !tp->urg_data)
579 tcp_fast_path_on(tp);
580}
581
582/* Compute the actual rto_min value */
583static inline u32 tcp_rto_min(struct sock *sk)
584{
585 const struct dst_entry *dst = __sk_dst_get(sk);
586 u32 rto_min = TCP_RTO_MIN;
587
588 if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
589 rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
590 return rto_min;
591}
592
593/* Compute the actual receive window we are currently advertising.
594 * Rcv_nxt can be after the window if our peer push more data
595 * than the offered window.
596 */
597static inline u32 tcp_receive_window(const struct tcp_sock *tp)
598{
599 s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
600
601 if (win < 0)
602 win = 0;
603 return (u32) win;
604}
605
606/* Choose a new window, without checks for shrinking, and without
607 * scaling applied to the result. The caller does these things
608 * if necessary. This is a "raw" window selection.
609 */
610extern u32 __tcp_select_window(struct sock *sk);
611
612void tcp_send_window_probe(struct sock *sk);
613
614/* TCP timestamps are only 32-bits, this causes a slight
615 * complication on 64-bit systems since we store a snapshot
616 * of jiffies in the buffer control blocks below. We decided
617 * to use only the low 32-bits of jiffies and hide the ugly
618 * casts with the following macro.
619 */
620#define tcp_time_stamp ((__u32)(jiffies))
621
622#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
623
624#define TCPHDR_FIN 0x01
625#define TCPHDR_SYN 0x02
626#define TCPHDR_RST 0x04
627#define TCPHDR_PSH 0x08
628#define TCPHDR_ACK 0x10
629#define TCPHDR_URG 0x20
630#define TCPHDR_ECE 0x40
631#define TCPHDR_CWR 0x80
632
633/* This is what the send packet queuing engine uses to pass
634 * TCP per-packet control information to the transmission code.
635 * We also store the host-order sequence numbers in here too.
636 * This is 44 bytes if IPV6 is enabled.
637 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
638 */
639struct tcp_skb_cb {
640 union {
641 struct inet_skb_parm h4;
642#if IS_ENABLED(CONFIG_IPV6)
643 struct inet6_skb_parm h6;
644#endif
645 } header; /* For incoming frames */
646 __u32 seq; /* Starting sequence number */
647 __u32 end_seq; /* SEQ + FIN + SYN + datalen */
648 __u32 when; /* used to compute rtt's */
649 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
650
651 __u8 sacked; /* State flags for SACK/FACK. */
652#define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */
653#define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
654#define TCPCB_LOST 0x04 /* SKB is lost */
655#define TCPCB_TAGBITS 0x07 /* All tag bits */
656#define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
657#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
658
659 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
660 /* 1 byte hole */
661 __u32 ack_seq; /* Sequence number ACK'd */
662};
663
664#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
665
666/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set
667 *
668 * If we receive a SYN packet with these bits set, it means a network is
669 * playing bad games with TOS bits. In order to avoid possible false congestion
670 * notifications, we disable TCP ECN negociation.
671 */
672static inline void
673TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb)
674{
675 const struct tcphdr *th = tcp_hdr(skb);
676
677 if (sysctl_tcp_ecn && th->ece && th->cwr &&
678 INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield))
679 inet_rsk(req)->ecn_ok = 1;
680}
681
682/* Due to TSO, an SKB can be composed of multiple actual
683 * packets. To keep these tracked properly, we use this.
684 */
685static inline int tcp_skb_pcount(const struct sk_buff *skb)
686{
687 return skb_shinfo(skb)->gso_segs;
688}
689
690/* This is valid iff tcp_skb_pcount() > 1. */
691static inline int tcp_skb_mss(const struct sk_buff *skb)
692{
693 return skb_shinfo(skb)->gso_size;
694}
695
696/* Events passed to congestion control interface */
697enum tcp_ca_event {
698 CA_EVENT_TX_START, /* first transmit when no packets in flight */
699 CA_EVENT_CWND_RESTART, /* congestion window restart */
700 CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */
701 CA_EVENT_FRTO, /* fast recovery timeout */
702 CA_EVENT_LOSS, /* loss timeout */
703 CA_EVENT_FAST_ACK, /* in sequence ack */
704 CA_EVENT_SLOW_ACK, /* other ack */
705};
706
707/*
708 * Interface for adding new TCP congestion control handlers
709 */
710#define TCP_CA_NAME_MAX 16
711#define TCP_CA_MAX 128
712#define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)
713
714#define TCP_CONG_NON_RESTRICTED 0x1
715#define TCP_CONG_RTT_STAMP 0x2
716
717struct tcp_congestion_ops {
718 struct list_head list;
719 unsigned long flags;
720
721 /* initialize private data (optional) */
722 void (*init)(struct sock *sk);
723 /* cleanup private data (optional) */
724 void (*release)(struct sock *sk);
725
726 /* return slow start threshold (required) */
727 u32 (*ssthresh)(struct sock *sk);
728 /* lower bound for congestion window (optional) */
729 u32 (*min_cwnd)(const struct sock *sk);
730 /* do new cwnd calculation (required) */
731 void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
732 /* call before changing ca_state (optional) */
733 void (*set_state)(struct sock *sk, u8 new_state);
734 /* call when cwnd event occurs (optional) */
735 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
736 /* new value of cwnd after loss (optional) */
737 u32 (*undo_cwnd)(struct sock *sk);
738 /* hook for packet ack accounting (optional) */
739 void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
740 /* get info for inet_diag (optional) */
741 void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);
742
743 char name[TCP_CA_NAME_MAX];
744 struct module *owner;
745};
746
747extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
748extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
749
750extern void tcp_init_congestion_control(struct sock *sk);
751extern void tcp_cleanup_congestion_control(struct sock *sk);
752extern int tcp_set_default_congestion_control(const char *name);
753extern void tcp_get_default_congestion_control(char *name);
754extern void tcp_get_available_congestion_control(char *buf, size_t len);
755extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
756extern int tcp_set_allowed_congestion_control(char *allowed);
757extern int tcp_set_congestion_control(struct sock *sk, const char *name);
758extern void tcp_slow_start(struct tcp_sock *tp);
759extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
760
761extern struct tcp_congestion_ops tcp_init_congestion_ops;
762extern u32 tcp_reno_ssthresh(struct sock *sk);
763extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
764extern u32 tcp_reno_min_cwnd(const struct sock *sk);
765extern struct tcp_congestion_ops tcp_reno;
766
767static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
768{
769 struct inet_connection_sock *icsk = inet_csk(sk);
770
771 if (icsk->icsk_ca_ops->set_state)
772 icsk->icsk_ca_ops->set_state(sk, ca_state);
773 icsk->icsk_ca_state = ca_state;
774}
775
776static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
777{
778 const struct inet_connection_sock *icsk = inet_csk(sk);
779
780 if (icsk->icsk_ca_ops->cwnd_event)
781 icsk->icsk_ca_ops->cwnd_event(sk, event);
782}
783
784/* These functions determine how the current flow behaves in respect of SACK
785 * handling. SACK is negotiated with the peer, and therefore it can vary
786 * between different flows.
787 *
788 * tcp_is_sack - SACK enabled
789 * tcp_is_reno - No SACK
790 * tcp_is_fack - FACK enabled, implies SACK enabled
791 */
792static inline int tcp_is_sack(const struct tcp_sock *tp)
793{
794 return tp->rx_opt.sack_ok;
795}
796
797static inline bool tcp_is_reno(const struct tcp_sock *tp)
798{
799 return !tcp_is_sack(tp);
800}
801
802static inline bool tcp_is_fack(const struct tcp_sock *tp)
803{
804 return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
805}
806
807static inline void tcp_enable_fack(struct tcp_sock *tp)
808{
809 tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
810}
811
812/* TCP early-retransmit (ER) is similar to but more conservative than
813 * the thin-dupack feature. Enable ER only if thin-dupack is disabled.
814 */
815static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
816{
817 tp->do_early_retrans = sysctl_tcp_early_retrans &&
818 !sysctl_tcp_thin_dupack && sysctl_tcp_reordering == 3;
819 tp->early_retrans_delayed = 0;
820}
821
822static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
823{
824 tp->do_early_retrans = 0;
825}
826
827static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
828{
829 return tp->sacked_out + tp->lost_out;
830}
831
832/* This determines how many packets are "in the network" to the best
833 * of our knowledge. In many cases it is conservative, but where
834 * detailed information is available from the receiver (via SACK
835 * blocks etc.) we can make more aggressive calculations.
836 *
837 * Use this for decisions involving congestion control, use just
838 * tp->packets_out to determine if the send queue is empty or not.
839 *
840 * Read this equation as:
841 *
842 * "Packets sent once on transmission queue" MINUS
843 * "Packets left network, but not honestly ACKed yet" PLUS
844 * "Packets fast retransmitted"
845 */
846static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
847{
848 return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
849}
850
851#define TCP_INFINITE_SSTHRESH 0x7fffffff
852
853static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
854{
855 return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
856}
857
858/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
859 * The exception is rate halving phase, when cwnd is decreasing towards
860 * ssthresh.
861 */
862static inline __u32 tcp_current_ssthresh(const struct sock *sk)
863{
864 const struct tcp_sock *tp = tcp_sk(sk);
865
866 if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
867 return tp->snd_ssthresh;
868 else
869 return max(tp->snd_ssthresh,
870 ((tp->snd_cwnd >> 1) +
871 (tp->snd_cwnd >> 2)));
872}
873
874/* Use define here intentionally to get WARN_ON location shown at the caller */
875#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)
876
877extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
878extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
879
880/* The maximum number of MSS of available cwnd for which TSO defers
881 * sending if not using sysctl_tcp_tso_win_divisor.
882 */
883static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
884{
885 return 3;
886}
887
888/* Slow start with delack produces 3 packets of burst, so that
889 * it is safe "de facto". This will be the default - same as
890 * the default reordering threshold - but if reordering increases,
891 * we must be able to allow cwnd to burst at least this much in order
892 * to not pull it back when holes are filled.
893 */
894static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
895{
896 return tp->reordering;
897}
898
899/* Returns end sequence number of the receiver's advertised window */
900static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
901{
902 return tp->snd_una + tp->snd_wnd;
903}
904extern bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
905
906static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
907 const struct sk_buff *skb)
908{
909 if (skb->len < mss)
910 tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
911}
912
913static inline void tcp_check_probe_timer(struct sock *sk)
914{
915 const struct tcp_sock *tp = tcp_sk(sk);
916 const struct inet_connection_sock *icsk = inet_csk(sk);
917
918 if (!tp->packets_out && !icsk->icsk_pending)
919 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
920 icsk->icsk_rto, TCP_RTO_MAX);
921}
922
923static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
924{
925 tp->snd_wl1 = seq;
926}
927
928static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
929{
930 tp->snd_wl1 = seq;
931}
932
933/*
934 * Calculate(/check) TCP checksum
935 */
936static inline __sum16 tcp_v4_check(int len, __be32 saddr,
937 __be32 daddr, __wsum base)
938{
939 return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
940}
941
942static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
943{
944 return __skb_checksum_complete(skb);
945}
946
947static inline bool tcp_checksum_complete(struct sk_buff *skb)
948{
949 return !skb_csum_unnecessary(skb) &&
950 __tcp_checksum_complete(skb);
951}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	tp->ucopy.wakeup = 0;
	tp->ucopy.pinned_list = NULL;
	tp->ucopy.dma_cookie = 0;
#endif
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see why it failed. 8)8)				--ANK
 *
 * NOTE: is this not too big to inline?
 */
static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
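
/* Illustrative call site (a sketch of the pattern in tcp_v4_rcv(); see
 * tcp_ipv4.c for the real code):
 *
 *	if (!sock_owned_by_user(sk)) {
 *		if (!tcp_prequeue(sk, skb))
 *			ret = tcp_v4_do_rcv(sk, skb);
 *	} else {
 *		sk_add_backlog(sk, skb);
 *	}
 *
 * i.e. the prequeue is offered the segment first; only when it declines
 * (low-latency mode, or no reader waiting) is the segment processed
 * immediately in softirq context.
 */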

#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
	"Unused", "Established", "Syn Sent", "Syn Recv",
	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
	"Close Wait", "Last ACK", "Listen", "Closing"
};
#endif
extern void tcp_set_state(struct sock *sk, int state);

extern void tcp_done(struct sock *sk);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale,
				      __u32 init_rcv_wnd);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale <= 0 ?
	       (space >> (-sysctl_tcp_adv_win_scale)) :
	       space - (space >> sysctl_tcp_adv_win_scale);
}
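
/* Example: with sysctl_tcp_adv_win_scale = 2 this advertises
 * space - space/4 = 3/4 of the free buffer space, reserving the rest
 * for metadata overhead; a negative value such as -2 advertises only
 * space >> 2 = 1/4 of it.
 */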

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}

static inline void tcp_openreq_init(struct request_sock *req,
				    struct tcp_options_received *rx_opt,
				    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->cookie_ts = 0;
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->rmt_port = tcp_hdr(skb)->source;
	ireq->loc_port = tcp_hdr(skb)->dest;
}

extern void tcp_enter_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
}
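
/* Worked example with the long-standing defaults (7200 s idle time, 75 s
 * probe interval, 9 probes): the first probe goes out after two hours of
 * idleness, and a dead peer is declared roughly 7200 + 9 * 75 = 7875 s
 * after the last received segment.
 */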

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
			  tcp_time_stamp - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}
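
/* The clamp above keeps FIN-WAIT-2 alive for at least
 * (rto << 2) - (rto >> 1) = 3.5 * RTO; e.g. a 200 ms RTO yields a
 * 700 ms floor.
 */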

static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
				  int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return true;
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
		return true;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
	 * while the following TCP messages carry valid values. Ignore
	 * a zero tsval, or else a 'negative' tsval might cause us to
	 * reject the peer's packets.
	 */
	if (!rx_opt->ts_recent)
		return true;
	return false;
}
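
/* Example of the wrap-safe comparison above: ts_recent = 0xfffffff0 and
 * rcv_tsval = 0x00000010 give (s32)(ts_recent - rcv_tsval) = -32 <= 0,
 * so a timestamp that recently wrapped past zero still passes. The
 * 24-day clause handles a peer whose timestamp clock genuinely jumped
 * backwards long ago.
 */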

static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
				   int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return false;

	/* RST segments are not recommended to carry timestamps,
	 * and, if they do, it is recommended to ignore PAWS because
	 * "their cleanup function should take precedence over timestamps."
	 * Certainly, this is a mistake. One needs to understand the reasons
	 * for this constraint before relaxing it: if a peer reboots, its
	 * clock may go out-of-sync, and half-open connections will not
	 * be reset. Actually, the problem would not exist if all the
	 * implementations followed the draft about maintaining the clock
	 * across reboots. Linux-2.2 DOES NOT!
	 *
	 * However, we can relax the time bounds for RST segments to MSL.
	 */
	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return false;
	return true;
}

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
	tp->scoreboard_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}

/* MD5 Signature */
struct crypto_hash;

union tcp_md5_addr {
	struct in_addr a4;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr a6;
#endif
};

/* - key database */
struct tcp_md5sig_key {
	struct hlist_node node;
	u8 keylen;
	u8 family; /* AF_INET or AF_INET6 */
	union tcp_md5_addr addr;
	u8 key[TCP_MD5SIG_MAXKEYLEN];
	struct rcu_head rcu;
};

/* - sock block */
struct tcp_md5sig_info {
	struct hlist_head head;
	struct rcu_head rcu;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32 saddr;
	__be32 daddr;
	__u8 pad;
	__u8 protocol;
	__be16 len;
};

struct tcp6_pseudohdr {
	struct in6_addr saddr;
	struct in6_addr daddr;
	__be32 len;
	__be32 protocol; /* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if IS_ENABLED(CONFIG_IPV6)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct hash_desc md5_desc;
	union tcp_md5sum_block md5_blk;
};

/* - functions */
extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb);
extern int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
			  int family, const u8 *newkey,
			  u8 newkeylen, gfp_t gfp);
extern int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
			  int family);
extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
						struct sock *addr_sk);

#ifdef CONFIG_TCP_MD5SIG
extern struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
			const union tcp_md5_addr *addr, int family);
#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					const union tcp_md5_addr *addr,
					int family)
{
	return NULL;
}
#define tcp_twsk_md5_key(twsk)	NULL
#endif

extern struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *);
extern void tcp_free_md5sig_pool(void);

extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
extern void tcp_put_md5sig_pool(void);

extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
				 unsigned int header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
			    const struct tcp_md5sig_key *key);
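
/* Typical pool usage (a sketch of the pattern behind tcp_v4_md5_hash_skb();
 * see tcp_ipv4.c for the real code):
 *
 *	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
 *
 *	if (hp) {
 *		crypto_hash_init(&hp->md5_desc);
 *		tcp_md5_hash_header(hp, th);
 *		tcp_md5_hash_key(hp, key);
 *		crypto_hash_final(&hp->md5_desc, md5_hash);
 *		tcp_put_md5sig_pool();
 *	}
 *
 * The pool is per-CPU, so the caller must not sleep between the get and
 * the put.
 */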

/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_wmem_free_skb(sk, skb);
	sk_mem_reclaim(sk);
	tcp_clear_all_retrans_hints(tcp_sk(sk));
}

static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
						   const struct sk_buff *skb)
{
	return skb_queue_next(&sk->sk_write_queue, skb);
}

static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
						   const struct sk_buff *skb)
{
	return skb_queue_prev(&sk->sk_write_queue, skb);
}

#define tcp_for_write_queue(skb, sk)					\
	skb_queue_walk(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from(skb, sk)				\
	skb_queue_walk_from(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
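
/* Sketch of a typical walk (illustrative only): callers that care about
 * transmitted data must stop at tcp_send_head(), since skbs from there
 * on have not been sent yet.
 *
 *	struct sk_buff *skb;
 *
 *	tcp_for_write_queue(skb, sk) {
 *		if (skb == tcp_send_head(sk))
 *			break;
 *		... inspect the already-transmitted skb ...
 *	}
 */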

static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
	return sk->sk_send_head;
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
{
	if (tcp_skb_is_last(sk, skb))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = tcp_write_queue_next(sk, skb);
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head == skb_unlinked)
		sk->sk_send_head = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__tcp_add_write_queue_tail(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL) {
		sk->sk_send_head = skb;

		if (tcp_sk(sk)->highest_sack == NULL)
			tcp_sk(sk)->highest_sack = skb;
	}
}

static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}

/* Insert buff after skb on the write queue of sk. */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_queue_after(&sk->sk_write_queue, skb, buff);
}

/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);

	if (sk->sk_send_head == skb)
		sk->sk_send_head = new;
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	__skb_unlink(skb, &sk->sk_write_queue);
}

static inline bool tcp_write_queue_empty(struct sock *sk)
{
	return skb_queue_empty(&sk->sk_write_queue);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	if (tcp_send_head(sk)) {
		struct tcp_sock *tp = tcp_sk(sk);

		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
	}
}

/* Start sequence of the skb just after the highest skb with the SACKed
 * bit; valid only if sacked_out > 0 or if the caller has itself ensured
 * validity.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;

	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}

static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
	tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
						tcp_write_queue_next(sk, skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
}

/* Called when old skb is about to be deleted (to be combined with new skb) */
static inline void tcp_highest_sack_combine(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
		tcp_sk(sk)->highest_sack = new;
}

/* Determines whether this is a thin stream (which may suffer from
 * increased latency). Used to trigger latency-reducing mechanisms.
 */
static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
{
	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
}
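
/* With fewer than four packets in flight, a loss can produce at most two
 * duplicate ACKs - short of the three needed for fast retransmit - so
 * recovery falls back to the much slower RTO; that is what makes thin
 * streams latency-sensitive.
 */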

/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_OPENREQ,
	TCP_SEQ_STATE_ESTABLISHED,
	TCP_SEQ_STATE_TIME_WAIT,
};

int tcp_seq_open(struct inode *inode, struct file *file);

struct tcp_seq_afinfo {
	char *name;
	sa_family_t family;
	const struct file_operations *seq_fops;
	struct seq_operations seq_ops;
};

struct tcp_iter_state {
	struct seq_net_private p;
	sa_family_t family;
	enum tcp_seq_states state;
	struct sock *syn_wait_sk;
	int bucket, offset, sbucket, num, uid;
	loff_t last_pos;
};

extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

extern void tcp_v4_destroy_sock(struct sock *sk);

extern int tcp_v4_gso_send_check(struct sk_buff *skb);
extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
				       netdev_features_t features);
extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
					struct sk_buff *skb);
extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb);
extern int tcp_gro_complete(struct sk_buff *skb);
extern int tcp4_gro_complete(struct sk_buff *skb);

#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
#endif

/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk,
					      struct sock *addr_sk);
	int (*calc_md5_hash) (char *location,
			      struct tcp_md5sig_key *md5,
			      const struct sock *sk,
			      const struct request_sock *req,
			      const struct sk_buff *skb);
	int (*md5_parse) (struct sock *sk,
			  char __user *optval,
			  int optlen);
#endif
};

struct tcp_request_sock_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk,
					      struct request_sock *req);
	int (*calc_md5_hash) (char *location,
			      struct tcp_md5sig_key *md5,
			      const struct sock *sk,
			      const struct request_sock *req,
			      const struct sk_buff *skb);
#endif
};

/* Using SHA1 for now, define some constants. */
#define COOKIE_DIGEST_WORDS (SHA_DIGEST_WORDS)
#define COOKIE_MESSAGE_WORDS (SHA_MESSAGE_BYTES / 4)
#define COOKIE_WORKSPACE_WORDS (COOKIE_DIGEST_WORDS + COOKIE_MESSAGE_WORDS)
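
/* Assuming the usual values from <linux/cryptohash.h> (SHA_DIGEST_WORDS = 5,
 * SHA_MESSAGE_BYTES = 64), the workspace works out to 5 + 16 = 21 u32s.
 */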

extern int tcp_cookie_generator(u32 *bakery);

/**
 * struct tcp_cookie_values - each socket needs extra space for the
 * cookies, together with (optional) space for any SYN data.
 *
 * A tcp_sock contains a pointer to the current value, and this is
 * cloned to the tcp_timewait_sock.
 *
 * @cookie_pair:	variable data from the option exchange.
 *
 * @cookie_desired:	user specified tcpct_cookie_desired. Zero
 *			indicates default (sysctl_tcp_cookie_size).
 *			After cookie sent, remembers size of cookie.
 *			Range 0, TCP_COOKIE_MIN to TCP_COOKIE_MAX.
 *
 * @s_data_desired:	user specified tcpct_s_data_desired. When the
 *			constant payload is specified (@s_data_constant),
 *			holds its length instead.
 *			Range 0 to TCP_MSS_DESIRED.
 *
 * @s_data_payload:	constant data that is to be included in the
 *			payload of SYN or SYNACK segments when the
 *			cookie option is present.
 */
struct tcp_cookie_values {
	struct kref kref;
	u8 cookie_pair[TCP_COOKIE_PAIR_SIZE];
	u8 cookie_pair_size;
	u8 cookie_desired;
	u16 s_data_desired:11,
	    s_data_constant:1,
	    s_data_in:1,
	    s_data_out:1,
	    s_data_unused:2;
	u8 s_data_payload[0];
};

static inline void tcp_cookie_values_release(struct kref *kref)
{
	kfree(container_of(kref, struct tcp_cookie_values, kref));
}

/* The length of constant payload data. Note that s_data_desired is
 * overloaded, depending on s_data_constant: either the length of constant
 * data (returned here) or the limit on variable data.
 */
static inline int tcp_s_data_size(const struct tcp_sock *tp)
{
	return (tp->cookie_values != NULL && tp->cookie_values->s_data_constant)
		? tp->cookie_values->s_data_desired
		: 0;
}

/**
 * struct tcp_extend_values - tcp_ipv?.c to tcp_output.c workspace.
 *
 * As tcp_request_sock has already been extended in other places, the
 * only remaining method is to pass stack values along as function
 * parameters. These parameters are not needed after sending SYNACK.
 *
 * @cookie_bakery:	cryptographic secret and message workspace.
 *
 * @cookie_plus:	bytes in authenticator/cookie option, copied from
 *			struct tcp_options_received (above).
 */
struct tcp_extend_values {
	struct request_values rv;
	u32 cookie_bakery[COOKIE_WORKSPACE_WORDS];
	u8 cookie_plus:6,
	   cookie_out_never:1,
	   cookie_in_always:1;
};

static inline struct tcp_extend_values *tcp_xv(struct request_values *rvp)
{
	return (struct tcp_extend_values *)rvp;
}

extern void tcp_v4_init(void);
extern void tcp_init(void);

#endif	/* _TCP_H */