// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

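/* The helper below accepts a segment [seq, end_seq) if it overlaps the
 * receive window [s_win, e_win); a zero-length segment (a bare ACK, where
 * seq == end_seq) is also accepted when it sits exactly on a window edge.
 * For example, with s_win = 100 and e_win = 200, a bare ACK with seq == 100
 * is in window, as is data spanning 90..110.
 */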
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK.  Note: we do not put the bucket;
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

54/*
55 * * Main purpose of TIME-WAIT state is to close connection gracefully,
56 * when one of ends sits in LAST-ACK or CLOSING retransmitting FIN
57 * (and, probably, tail of data) and one or more our ACKs are lost.
58 * * What is TIME-WAIT timeout? It is associated with maximal packet
59 * lifetime in the internet, which results in wrong conclusion, that
60 * it is set to catch "old duplicate segments" wandering out of their path.
61 * It is not quite correct. This timeout is calculated so that it exceeds
62 * maximal retransmission timeout enough to allow to lose one (or more)
63 * segments sent by peer and our ACKs. This time may be calculated from RTO.
64 * * When TIME-WAIT socket receives RST, it means that another end
65 * finally closed and we are allowed to kill TIME-WAIT too.
66 * * Second purpose of TIME-WAIT is catching old duplicate segments.
67 * Well, certainly it is pure paranoia, but if we load TIME-WAIT
68 * with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
69 * * If we invented some more clever way to catch duplicates
70 * (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
71 *
72 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
73 * When you compare it to RFCs, please, read section SEGMENT ARRIVES
74 * from the very beginning.
75 *
76 * NOTE. With recycling (and later with fin-wait-2) TW bucket
77 * is _not_ stateless. It means, that strictly speaking we must
78 * spinlock it. I do not want! Well, probability of misbehaviour
79 * is ridiculously low and, seems, we could use some mb() tricks
80 * to avoid misread sequence numbers, states etc. --ANK
81 *
82 * We don't need to initialize tmp_out.sack_ok as we don't use the results
83 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent = tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after a half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
			tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 * connection to be larger than the largest sequence
	 * number it used on the previous connection incarnation,
	 * and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 * to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment; it may only be a reset or a bare ACK. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		} else {
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		}

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All the segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check, that it carries a
	   newer sequence number, works at rates < 40 Mbit/sec.
	   However, if PAWS works, it is reliable AND, even more,
	   we may even relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive an RST in reply to
	   our SYN-ACK), we must return the socket to the time-wait state.
	   That is not good, but not fatal yet.
	   */

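	/* TIME-WAIT reuse: accept a new SYN if its sequence number is past
	 * rcv_nxt, or (with timestamps) if its tsval is newer than the last
	 * one we saw.  The ISN chosen below jumps past tw_snd_nxt by more
	 * than a maximal unscaled window (65535), satisfying condition (1)
	 * of the RFC 1122 text quoted above.
	 */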
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN, it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

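/* Copy what the time-wait ACK path needs out of the full socket: take a
 * private copy of the MD5 key (if any) for the tw bucket, and pin the
 * tcp_md5_needed static key so the fast path keeps checking for MD5 while
 * this tw sock holds a key.  The matching decrement is in
 * tcp_twsk_destructor().
 */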
static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
{
#ifdef CONFIG_TCP_MD5SIG
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;

	/*
	 * The timewait bucket does not have the key DB from the
	 * sock structure. We just make a quick copy of the
	 * md5 key being used (if indeed we are using one)
	 * so the timewait ack generating code has the key.
	 */
	tcptw->tw_md5_key = NULL;
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return;

	key = tp->af_specific->md5_lookup(sk, sk);
	if (key) {
		tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
		if (!tcptw->tw_md5_key)
			return;
		if (!tcp_alloc_md5sig_pool())
			goto out_free;
		if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
			goto out_free;
	}
	return;
out_free:
	WARN_ON_ONCE(1);
	kfree(tcptw->tw_md5_key);
	tcptw->tw_md5_key = NULL;
#endif
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_timewait_sock *tw;

	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
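		/* (rto << 2) - (rto >> 1) below is 3.5 * RTO: long enough,
		 * per the TIME-WAIT notes at the top of this file, for the
		 * peer to retransmit its FIN and for our ACKs to be lost a
		 * couple of times before the timer fires.
		 */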
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent = inet->transparent;
		tw->tw_mark = sk->sk_mark;
		tw->tw_priority = sk->sk_priority;
		tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt = tp->rcv_nxt;
		tcptw->tw_snd_nxt = tp->snd_nxt;
		tcptw->tw_rcv_wnd = tcp_receive_window(tp);
		tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset = tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;
		tcptw->tw_tx_delay = tp->tcp_tx_delay;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_txhash = sk->sk_txhash;
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

		tcp_time_wait_init(sk, tcptw);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* tw_timer is pinned, so we need to make sure BHs are disabled
		 * in the following section, otherwise the timer handler could
		 * run before we complete the initialization.
		 */
		local_bh_disable();
		inet_twsk_schedule(tw, timeo);
		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
		local_bh_enable();
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed.key)) {
		struct tcp_timewait_sock *twsk = tcp_twsk(sk);

		if (twsk->tw_md5_key) {
			kfree_rcu(twsk->tw_md5_key, rcu);
			static_branch_slow_dec_deferred(&tcp_md5_needed);
		}
	}
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

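/* Purge time-wait sockets when a batch of netns goes away.  With a
 * per-netns ehash we always sweep that netns' hash; with the shared
 * global hash we can skip a netns whose tw_refcount is still 1 (no tw
 * socks left beyond the base reference) and sweep tcp_hashinfo at most
 * once for the whole exit batch.
 */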
void tcp_twsk_purge(struct list_head *net_exit_list, int family)
{
	bool purged_once = false;
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
		} else if (!purged_once) {
			/* The last refcount is decremented in tcp_sk_exit_batch() */
			if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1)
				continue;

			inet_twsk_purge(&tcp_hashinfo, family);
			purged_once = true;
		}
	}
}
EXPORT_SYMBOL_GPL(tcp_twsk_purge);

/* Warning: This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

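/* Pick the congestion control module for the passively opened socket:
 * prefer an algorithm pinned on the route (RTAX_CC_ALGO metric), then one
 * the listener selected via setsockopt(TCP_CONGESTION), and fall back to
 * the system default otherwise.
 */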
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could save lots of memory writes here. tp of the listening
 * socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	struct tcp_sock *oldtp, *newtp;
	u32 seq;

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

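	/* Note: the WRITE_ONCE() stores below pair with READ_ONCE() in
	 * lockless readers (e.g. the diag and poll paths), which may look
	 * at these sequence numbers without holding the socket lock.
	 */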
	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now setup tcp_sock */
	newtp->pred_flags = 0;

	seq = treq->rcv_isn + 1;
	newtp->rcv_wup = seq;
	WRITE_ONCE(newtp->copied_seq, seq);
	WRITE_ONCE(newtp->rcv_nxt, seq);
	newtp->segs_in = 1;

	seq = treq->snt_isn + 1;
	newtp->snd_sml = newtp->snd_una = seq;
	WRITE_ONCE(newtp->snd_nxt, seq);
	newtp->snd_up = seq;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = treq->txhash;
	newtp->total_retrans = req->num_retrans;

	tcp_init_xmit_timers(newsk);
	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(newsk,
					       keepalive_time_when(newtp));

	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;

	if (newtp->rx_opt.tstamp_ok) {
		newtp->rx_opt.ts_recent = req->ts_recent;
		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
	if (req->num_timeout) {
		newtp->undo_marker = treq->snt_isn;
		newtp->retrans_stamp = div_u64(treq->snt_synack,
					       USEC_PER_SEC / TCP_TS_HZ);
	}
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;	/*XXX*/
	if (treq->af_specific->req_md5_lookup(sk, req_to_sk(req)))
		newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

	newtp->bpf_chg_cc_inprogress = 0;
	tcp_bpf_clone(sk, newsk);

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for a pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe the SYN-RECV state. All the description
		 * is wrong; we cannot believe it and should rely only
		 * on common sense and implementation experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet,
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK, similar
		 * to the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&

		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += reqsk_timeout(req, TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further reproduces the section "SEGMENT ARRIVES"
	   for the SYN-RECEIVED state of RFC793.
	   It is broken, however: the only case in which it does not
	   work is when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   The malicious sender sends identical SYNs (and thus identical
	   sequence numbers) to both A and B:

	   A: gets SYN, seq=7
	   B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

	   A: sends SYN|ACK, seq=7, ack_seq=8
	   B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, the ACK test passes. So
	   does the sequence test, the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop
	   this bare ACK. Otherwise, we create an established connection.
	   Both ends (listening sockets) accept the new incoming
	   connection and try to talk to each other. 8-)

	   Note: This case is both harmless and rare. The probability is
	   about the same as us discovering intelligent life on another
	   planet tomorrow.

	   But generally, we should (RFC lies!) accept the ACK from the
	   SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating protocol. All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by the listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK is not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, the ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it is dropped after the
	 * socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	if (own_req && rsk_drop_req(req)) {
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
		return child;
	}

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (sk != req->rsk_listener)
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to an outside attack aimed at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk, skb);
	}
	if (!fastopen) {
		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

		if (unlinked)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
		*req_stolen = !unlinked;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where, after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
	__releases(&((child)->sk_lock.slock))
{
	int ret = 0;
	int state = child->sk_state;

	/* record sk_napi_id and sk_rx_queue_mapping of child. */
	sk_mark_napi_id_set(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do a lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us any more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);