// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

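/* RFC 793 "first check sequence number" acceptability test: accept the
 * segment if it starts at the left edge of the receive window, if any
 * part of it overlaps [s_win, e_win), or if it is a zero-length segment
 * sitting exactly at the right edge.
 */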
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

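/* Advance tw_rcv_nxt. With TCP-AO enabled, a wrap of the 32-bit receive
 * sequence space must also bump the receive-side sequence number
 * extension (SNE) used for signing; that is what the seq < tw_rcv_nxt
 * test below detects.
 */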
static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq)
{
#ifdef CONFIG_TCP_AO
	struct tcp_ao_info *ao;

	ao = rcu_dereference(tcptw->ao_info);
	if (unlikely(ao && seq < tcptw->tw_rcv_nxt))
		WRITE_ONCE(ao->rcv_sne, ao->rcv_sne + 1);
#endif
	tcptw->tw_rcv_nxt = seq;
}

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting its FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   That is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow losing one (or
 *   more) segments sent by the peer and our ACKs. This time may be
 *   calculated from the RTO.
 * * When a TIME-WAIT socket receives a RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * A second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 * When you compare it to the RFCs, please, read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc. --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
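/* Return values (a summary, based on how the input path consumes them):
 *   TCP_TW_SUCCESS - segment handled; the tw reference was already released
 *   TCP_TW_RST     - caller should answer with a RST
 *   TCP_TW_ACK     - caller should answer with an ACK and release the bucket
 *   TCP_TW_SYN     - acceptable new SYN; reopen the connection via a listener
 */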
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate = TCP_TIME_WAIT;
		twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 *      connection to be larger than the largest sequence
	 *      number it used on the previous connection incarnation,
	 *      and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 *      to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In-window segment: it may only be a reset or a bare ACK. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		} else {
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		}

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.
	 *
	 * All such segments are ACKed immediately.
	 *
	 * The only exception is a new SYN. We accept it, if it is
	 * not an old duplicate and we are not in danger of being killed
	 * by delayed old duplicates. The RFC-mandated check (that the SYN
	 * carries a newer sequence number) only works at rates <40Mbit/sec.
	 * However, if PAWS works, it is reliable, and moreover,
	 * we may even relax the silly seq space cutoff.
	 *
	 * RED-PEN: we violate the main RFC requirement: if this SYN turns out
	 * to be an old duplicate (i.e. we receive a RST in reply to the
	 * SYN-ACK), we must return the socket to time-wait state. It is not
	 * good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is ACKless SYN it may be both old duplicate
		 * and new good SYN with random sequence number <rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
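
/* A minimal sketch of how an input-path caller dispatches the result,
 * modeled on tcp_v4_rcv() (assumed shape, not a verbatim copy):
 *
 *	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
 *	case TCP_TW_SYN:
 *		// look up a listener and process skb as a fresh SYN
 *		break;
 *	case TCP_TW_ACK:
 *		tcp_v4_timewait_ack(sk, skb);	// sends ACK and puts tw
 *		break;
 *	case TCP_TW_RST:
 *		// send a RST, then inet_twsk_deschedule_put()
 *		break;
 *	case TCP_TW_SUCCESS:
 *		break;				// tw reference already dropped
 *	}
 */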

static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
{
#ifdef CONFIG_TCP_MD5SIG
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;

	/*
	 * The timewait bucket does not have the key DB from the
	 * sock structure. We just make a quick copy of the
	 * md5 key being used (if indeed we are using one)
	 * so the timewait ack generating code has the key.
	 */
	tcptw->tw_md5_key = NULL;
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return;

	key = tp->af_specific->md5_lookup(sk, sk);
	if (key) {
		tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
		if (!tcptw->tw_md5_key)
			return;
		if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
			goto out_free;
		tcp_md5_add_sigpool();
	}
	return;
out_free:
	WARN_ON_ONCE(1);
	kfree(tcptw->tw_md5_key);
	tcptw->tw_md5_key = NULL;
#endif
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_timewait_sock *tw;

	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
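		/* Timeout floor of 3.5 * RTO: enough to cover a retransmitted
		 * peer FIN plus our lost ACKs (see the TIME-WAIT comment
		 * above tcp_timewait_state_process()).
		 */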
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);

		tw->tw_transparent	= inet_test_bit(TRANSPARENT, sk);
		tw->tw_mark		= sk->sk_mark;
		tw->tw_priority		= READ_ONCE(sk->sk_priority);
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tw->tw_usec_ts		= tp->tcp_usec_ts;
		tcptw->tw_last_oow_ack_time = 0;
		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
		tw->tw_txhash		= sk->sk_txhash;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

		tcp_time_wait_init(sk, tcptw);
		tcp_ao_time_wait(tcptw, tp);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* tw_timer is pinned, so we need to make sure BH are disabled
		 * in following section, otherwise timer handler could run before
		 * we complete the initialization.
		 */
		local_bh_disable();
		inet_twsk_schedule(tw, timeo);
		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
		local_bh_enable();
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up. We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);

#ifdef CONFIG_TCP_MD5SIG
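/* RCU callback: free the MD5 key copied in tcp_time_wait_init() and drop
 * the static-key and signing-pool references taken there.
 */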
static void tcp_md5_twsk_free_rcu(struct rcu_head *head)
{
	struct tcp_md5sig_key *key;

	key = container_of(head, struct tcp_md5sig_key, rcu);
	kfree(key);
	static_branch_slow_dec_deferred(&tcp_md5_needed);
	tcp_md5_release_sigpool();
}
#endif

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed.key)) {
		struct tcp_timewait_sock *twsk = tcp_twsk(sk);

		if (twsk->tw_md5_key)
			call_rcu(&twsk->tw_md5_key->rcu, tcp_md5_twsk_free_rcu);
	}
#endif
	tcp_ao_destroy_sock(sk, true);
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

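/* Purge TIME-WAIT and NEW_SYN_RECV sockets for every netns on the exit
 * list. A per-netns ehash must be walked for each net; the shared global
 * ehash only needs one walk for the whole batch, and only if some netns
 * still holds tw references.
 */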
void tcp_twsk_purge(struct list_head *net_exit_list, int family)
{
	bool purged_once = false;
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
		} else if (!purged_once) {
			/* The last refcount is decremented in tcp_sk_exit_batch() */
			if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1)
				continue;

			inet_twsk_purge(&tcp_hashinfo, family);
			purged_once = true;
		}
	}
}
EXPORT_SYMBOL_GPL(tcp_twsk_purge);

/* Warning : This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

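	/* A BPF sockops program may supply the initial receive window;
	 * if it does, make sure full_space is large enough to carry it.
	 */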
	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

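/* Pick the congestion control for a freshly cloned child socket: prefer a
 * module pinned by the route (RTAX_CC_ALGO), else keep the listener's
 * explicitly configured choice if its module is still available, else
 * fall back to the system default.
 */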
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

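/* SMC: the child may only keep the SYN-SMC capability if the listener had
 * it and the peer requested it in the SYN.
 */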
static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of the memory writes here, since the tp of
 * the listening socket already contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	const struct tcp_sock *oldtp;
	struct tcp_sock *newtp;
	u32 seq;
#ifdef CONFIG_TCP_AO
	struct tcp_ao_key *ao_key;
#endif

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now setup tcp_sock */
	newtp->pred_flags = 0;

	seq = treq->rcv_isn + 1;
	newtp->rcv_wup = seq;
	WRITE_ONCE(newtp->copied_seq, seq);
	WRITE_ONCE(newtp->rcv_nxt, seq);
	newtp->segs_in = 1;

	seq = treq->snt_isn + 1;
	newtp->snd_sml = newtp->snd_una = seq;
	WRITE_ONCE(newtp->snd_nxt, seq);
	newtp->snd_up = seq;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = READ_ONCE(treq->txhash);
	newtp->total_retrans = req->num_retrans;

	tcp_init_xmit_timers(newsk);
	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(newsk,
					       keepalive_time_when(newtp));

	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;

	if (newtp->rx_opt.tstamp_ok) {
		newtp->tcp_usec_ts = treq->req_usec_ts;
		newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->tcp_usec_ts = 0;
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
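	/* The SYNACK was retransmitted at least once: seed the RTO-recovery
	 * bookkeeping (undo marker, retransmit timestamp) so the child can
	 * detect and undo a spurious SYN timeout; tcp_usec_ts decides the
	 * unit retrans_stamp is kept in.
	 */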
	if (req->num_timeout) {
		newtp->total_rto = req->num_timeout;
		newtp->undo_marker = treq->snt_isn;
		if (newtp->tcp_usec_ts) {
			newtp->retrans_stamp = treq->snt_synack;
			newtp->total_rto_time = (u32)(tcp_clock_us() -
						      newtp->retrans_stamp) / USEC_PER_MSEC;
		} else {
			newtp->retrans_stamp = div_u64(treq->snt_synack,
						       USEC_PER_SEC / TCP_TS_HZ);
			newtp->total_rto_time = tcp_clock_ms() -
						newtp->retrans_stamp;
		}
		newtp->total_rto_recoveries = 1;
	}
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;	/*XXX*/
#endif
#ifdef CONFIG_TCP_AO
	newtp->ao_info = NULL;
	ao_key = treq->af_specific->ao_lookup(sk, req,
					      tcp_rsk(req)->ao_keyid, -1);
	if (ao_key)
		newtp->tcp_header_len += tcp_ao_len_aligned(ao_key);
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

	newtp->bpf_chg_cc_inprogress = 0;
	tcp_bpf_clone(sk, newsk);

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ACK
 * validation inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 *
 * Note: If @fastopen is true, this can be called from process context.
 *       Otherwise, this is from BH context.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not required;
			 * it can be estimated (approximately) from other data.
			 */
			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe the SYN-RECV state. All the description
		 * is wrong, we cannot trust it and should
		 * rely only on common sense and implementation
		 * experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK, similar to
		 * the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&
		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += reqsk_timeout(req, TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further reproduces the section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however; the only case where it does not work
	   is when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   The malicious sender sends identical SYNs (and thus identical
	   sequence numbers) to both A and B:

	   A: gets SYN, seq=7
	   B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

	   A: sends SYN|ACK, seq=7, ack_seq=8
	   B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, the ACK test passes. So
	   does the sequence test, the SYN is truncated, and thus we consider
	   it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK. Otherwise, we create an established connection. Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless and rare. The probability is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be such a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	/* TODO: We probably should defer ts_recent change once
	 * we take ownership of @req.
	 */
	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set. If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, the ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it is dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	if (own_req && rsk_drop_req(req)) {
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
		return child;
	}

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (sk != req->rsk_listener)
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk, skb);
	}
	if (!fastopen) {
		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

		if (unlinked)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
		*req_stolen = !unlinked;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is acquired, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
	__releases(&((child)->sk_lock.slock))
{
	int ret = 0;
	int state = child->sk_state;

	/* record sk_napi_id and sk_rx_queue_mapping of child. */
	sk_mark_napi_id_set(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);