// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <linux/static_key.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

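/* Illustrative sketch, not part of the upstream file: tcp_in_window()
 * stays correct across the 32-bit sequence-number wrap because before()
 * and after() (from net/tcp.h) compare via a signed 32-bit difference.
 * The demo values below are assumptions chosen to straddle the wrap.
 */
static inline bool tcp_in_window_wrap_demo(void)
{
	u32 s_win = 0xfffffff0U;	/* receive window opens just below the wrap */
	u32 e_win = s_win + 64;		/* ...and closes just after it, at 0x30 */

	/* A 16-byte segment crossing 0xffffffff -> 0 is still "in window". */
	return tcp_in_window(0xfffffff8U, 0xfffffff8U + 16, s_win, e_win);
}
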
static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

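/* Sketch of how a caller is expected to consume the tri-state result
 * above; illustrative only, the real dispatch lives in the per-family
 * receive paths (e.g. tcp_v4_rcv()).  The key contract: TCP_TW_ACK and
 * TCP_TW_RST leave the tw reference held for the caller to answer and
 * then drop, while TCP_TW_SUCCESS means the helper already dropped it.
 */
static inline void tcp_tw_status_demo(enum tcp_tw_status status)
{
	switch (status) {
	case TCP_TW_ACK:	/* answer with an ACK, then inet_twsk_put() */
		break;
	case TCP_TW_RST:	/* answer with a RST, then inet_twsk_put() */
		break;
	case TCP_TW_SYN:	/* re-steer this SYN to a listening socket */
		break;
	case TCP_TW_SUCCESS:	/* nothing to send; just free the skb */
		break;
	}
}
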
/*
 * * Main purpose of TIME-WAIT state is to close the connection gracefully,
 *   when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
 *   (and, probably, a tail of data) and one or more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow the loss of one
 *   (or more) segments sent by the peer and of our ACKs. This time may be
 *   calculated from RTO.
 * * When a TIME-WAIT socket receives RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 * When you compare it to RFCs, please, read section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means, that strictly speaking we must
 * spinlock it. I do not want! Well, probability of misbehaviour
 * is ridiculously low and, seems, we could use some mb() tricks
 * to avoid misread sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 *      connection to be larger than the largest sequence
	 *      number it used on the previous connection incarnation,
	 *      and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 *      to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (twsk_net(tw)->ipv4.sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		} else {
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		}

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.
	 *
	 * All the segments are ACKed immediately.
	 *
	 * The only exception is a new SYN. We accept it, if it is
	 * not an old duplicate and we are not in danger of being killed
	 * by delayed old duplicates. The RFC's check (that the SYN carries
	 * a newer sequence number) only works at rates <40Mbit/sec.
	 * However, if PAWS works, it is reliable AND, even more,
	 * we may even relax the silly seq space cutoff.
	 *
	 * RED-PEN: we violate the main RFC requirement: if this SYN turns
	 * out to be an old duplicate (i.e. we receive RST in reply to
	 * SYN-ACK), we must return the socket to time-wait state. It is
	 * not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with random sequence number <rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
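
/* Worked sketch (not part of the upstream file) of the TCP_TW_SYN rule
 * above: a new SYN may reuse the tuple when its sequence number is past
 * rcv_nxt, or when its timestamp is strictly newer than tw_ts_recent,
 * per RFC 1122's reopen clause.  The ISN handed to the new incarnation
 * is bumped past anything the old one could legitimately have sent.
 */
static inline u32 tw_reopen_isn_demo(u32 tw_snd_nxt)
{
	u32 isn = tw_snd_nxt + 65535 + 2;	/* clear the old sequence space */

	if (isn == 0)		/* 0 means "no TIME-WAIT ISN" to the input path */
		isn++;
	return isn;
}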

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_timewait_sock *tw;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	tw = inet_twsk_alloc(sk, tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_mark		= sk->sk_mark;
		tw->tw_priority		= sk->sk_priority;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;
		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_txhash = sk->sk_txhash;
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			tcptw->tw_md5_key = NULL;
			if (static_branch_unlikely(&tcp_md5_needed)) {
				struct tcp_md5sig_key *key;

				key = tp->af_specific->md5_lookup(sk, sk);
				if (key) {
					tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
					BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
				}
			}
		} while (0);
#endif

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* tw_timer is pinned, so we need to make sure BH are disabled
		 * in following section, otherwise timer handler could run before
		 * we complete the initialization.
		 */
		local_bh_disable();
		inet_twsk_schedule(tw, timeo);
		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
		local_bh_enable();
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);
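
/* Worked example, illustrative only: the timeout floor computed in
 * tcp_time_wait() above.  (rto << 2) - (rto >> 1) is 4*RTO - RTO/2,
 * i.e. 3.5*RTO, so a FIN-WAIT-2 bucket never expires before the peer
 * has had a fair chance to retransmit its FIN.
 */
static inline int tw_timeout_floor_demo(const struct inet_connection_sock *icsk)
{
	return (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);	/* 3.5 * RTO */
}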

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed)) {
		struct tcp_timewait_sock *twsk = tcp_twsk(sk);

		if (twsk->tw_md5_key)
			kfree_rcu(twsk->tw_md5_key, rcu);
	}
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

/* Warning : This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
				  mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
				  &req->rsk_rcv_wnd,
				  &req->rsk_window_clamp,
				  ireq->wscale_ok,
				  &rcv_wscale,
				  rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);
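
/* Illustrative sketch of the clamping rule in tcp_openreq_init_rwin():
 * once the application has pinned the receive buffer via SO_RCVBUF
 * (SOCK_RCVBUF_LOCK), the window clamp may never promise more than
 * tcp_full_space() says the buffer can actually hold.
 */
static inline u32 rwin_clamp_demo(u32 window_clamp, int full_space,
				  bool rcvbuf_locked)
{
	if (rcvbuf_locked &&
	    (window_clamp > (u32)full_space || window_clamp == 0))
		window_clamp = full_space;
	return window_clamp;
}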

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could save lots of memory writes here: tp of the listening
 * socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	struct tcp_sock *oldtp, *newtp;
	u32 seq;

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now setup tcp_sock */
	newtp->pred_flags = 0;

	seq = treq->rcv_isn + 1;
	newtp->rcv_wup = seq;
	WRITE_ONCE(newtp->copied_seq, seq);
	WRITE_ONCE(newtp->rcv_nxt, seq);
	newtp->segs_in = 1;

	seq = treq->snt_isn + 1;
	newtp->snd_sml = newtp->snd_una = seq;
	WRITE_ONCE(newtp->snd_nxt, seq);
	newtp->snd_up = seq;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = treq->txhash;
	newtp->total_retrans = req->num_retrans;

	tcp_init_xmit_timers(newsk);
	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(newsk,
					       keepalive_time_when(newtp));

	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;

	if (newtp->rx_opt.tstamp_ok) {
		newtp->rx_opt.ts_recent = req->ts_recent;
		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
	if (req->num_timeout) {
		newtp->undo_marker = treq->snt_isn;
		newtp->retrans_stamp = div_u64(treq->snt_synack,
					       USEC_PER_SEC / TCP_TS_HZ);
	}
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;	/*XXX*/
	if (newtp->af_specific->md5_lookup(sk, newsk))
		newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

	tcp_bpf_clone(sk, newsk);

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
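
/* Illustrative sketch of how the child's initial send window is derived
 * in tcp_create_openreq_child() above: the handshake-completing ACK
 * carries a raw 16-bit window which, if window scaling was negotiated,
 * is shifted by the peer's scale factor.  A raw value of 512 with
 * snd_wscale == 7 therefore yields a 65536-byte send window.
 */
static inline u32 child_snd_wnd_demo(__be16 raw_window, u8 snd_wscale)
{
	return (u32)ntohs(raw_window) << snd_wscale;
}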

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not required,
			 * it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = ktime_get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe the SYN-RECV state. All the description
		 * is wrong, we cannot believe it and should
		 * rely only on common sense and implementation
		 * experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK, similar to
		 * the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&
		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
				       TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further reproduces section "SEGMENT ARRIVES"
	 * for state SYN-RECEIVED of RFC793.
	 * It is broken; however, the only case where it does not work
	 * is when SYNs are crossed.
	 *
	 * You would think that SYN crossing is impossible here, since
	 * we should have a SYN_SENT socket (from connect()) on our end,
	 * but this is not true if the crossed SYNs were sent to both
	 * ends by a malicious third party. We must defend against this,
	 * and to do that we first verify the ACK (as per RFC793, page
	 * 36) and reset if it is invalid. Is this a true full defense?
	 * To convince ourselves, let us consider a way in which the ACK
	 * test can still pass in this 'malicious crossed SYNs' case.
	 * Malicious sender sends identical SYNs (and thus identical sequence
	 * numbers) to both A and B:
	 *
	 *	A: gets SYN, seq=7
	 *	B: gets SYN, seq=7
	 *
	 * By our good fortune, both A and B select the same initial
	 * send sequence number of seven :-)
	 *
	 *	A: sends SYN|ACK, seq=7, ack_seq=8
	 *	B: sends SYN|ACK, seq=7, ack_seq=8
	 *
	 * So we are now A eating this SYN|ACK, ACK test passes. So
	 * does sequence test, SYN is truncated, and thus we consider
	 * it a bare ACK.
	 *
	 * If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	 * bare ACK. Otherwise, we create an established connection. Both
	 * ends (listening sockets) accept the new incoming connection and try
	 * to talk to each other. 8-)
	 *
	 * Note: This case is both harmless, and rare. The possibility is about
	 * the same as us discovering intelligent life on another planet
	 * tomorrow.
	 *
	 * But generally, we should (the RFC lies!) accept an ACK
	 * from a SYNACK both here and in tcp_rcv_state_process().
	 * tcp_rcv_state_process() does not, hence, we do not too.
	 *
	 * Note that the case is absolutely generic:
	 * we cannot optimize anything here without
	 * violating protocol. All the checks must be made
	 * before attempt to create socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would be not so bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		 * at tcp_rsk(req)->rcv_isn + 1.
		 */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, ACK is valid, create big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it will be dropped after
	 * socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	if (own_req && rsk_drop_req(req)) {
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
		return child;
	}

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (sk != req->rsk_listener)
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

	if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk, skb);
	}
	if (!fastopen) {
		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

		if (unlinked)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
		*req_stolen = !unlinked;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
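
/* Illustrative sketch of the PAWS stamp estimate in tcp_check_req():
 * the request sock never records when ts_recent was learned, so the
 * code above reconstructs it as "roughly when the last SYNACK went
 * out", i.e. now minus the exponentially backed-off retransmission
 * interval (TCP_TIMEOUT_INIT doubled num_timeout times).
 */
static inline time64_t req_ts_recent_stamp_demo(u8 num_timeout)
{
	return ktime_get_seconds() -
	       ((TCP_TIMEOUT_INIT / HZ) << num_timeout);
}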

/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
	__releases(&((child)->sk_lock.slock))
{
	int ret = 0;
	int state = child->sk_state;

	/* record NAPI ID of child */
	sk_mark_napi_id(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do lookup
		 * in main socket hash table and lock on listening
		 * socket does not protect us more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);