net/ipv4/tcp_minisocks.c
v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
  4 *		operating system.  INET is implemented using the  BSD Socket
  5 *		interface as the means of communication with the user level.
  6 *
  7 *		Implementation of the Transmission Control Protocol(TCP).
  8 *
  9 * Authors:	Ross Biro
 10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 11 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 12 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 13 *		Florian La Roche, <flla@stud.uni-sb.de>
 14 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 15 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 16 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 17 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 18 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 19 *		Jorge Cwik, <jorge@laser.satlink.net>
 20 */
 21
 22#include <linux/mm.h>
 23#include <linux/module.h>
 24#include <linux/slab.h>
 25#include <linux/sysctl.h>
 26#include <linux/workqueue.h>
 27#include <linux/static_key.h>
 28#include <net/tcp.h>
 29#include <net/inet_common.h>
 30#include <net/xfrm.h>
 31#include <net/busy_poll.h>
 32
 33static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
 34{
 35	if (seq == s_win)
 36		return true;
 37	if (after(end_seq, s_win) && before(seq, e_win))
 38		return true;
 39	return seq == e_win && seq == end_seq;
 40}
 41
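Editor's note: the window check above is wraparound-safe because before() and after() compare 32-bit sequence numbers via signed subtraction rather than plain "<" (their definitions are in include/net/tcp.h). A self-contained userspace sketch of the same logic, exercising a segment that straddles the 2^32 wrap:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef int32_t s32;

/* Same definitions as before()/after() in include/net/tcp.h. */
static bool before(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

int main(void)
{
	u32 rcv_nxt = 0xfffffff0u;	/* window straddles the 2^32 wrap */
	u32 rcv_wnd = 0x1000u;

	/* Segment [0xfffffff8, 0x00000008) overlaps the window: prints 1. */
	printf("%d\n", tcp_in_window(0xfffffff8u, 0x00000008u,
				     rcv_nxt, rcv_nxt + rcv_wnd));
	return 0;
}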
 42static enum tcp_tw_status
 43tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
 44				  const struct sk_buff *skb, int mib_idx)
 45{
 46	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
 47
 48	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
 49				  &tcptw->tw_last_oow_ack_time)) {
 50		/* Send ACK. Note, we do not put the bucket,
 51		 * it will be released by caller.
 52		 */
 53		return TCP_TW_ACK;
 54	}
 55
 56	/* We are rate-limiting, so just release the tw sock and drop skb. */
 57	inet_twsk_put(tw);
 58	return TCP_TW_SUCCESS;
 59}
 60
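Editor's note: tcp_oow_rate_limited() throttles replies to out-of-window segments to one per net.ipv4.tcp_invalid_ratelimit interval, using the tw_last_oow_ack_time stamp passed in above. A generic userspace model of that throttle; the function name and the millisecond interface are illustrative, not the kernel's API:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Allow one event per interval; *last_ms remembers the last allowed one. */
static bool rate_limited(uint64_t now_ms, uint64_t *last_ms,
			 uint64_t interval_ms)
{
	if (*last_ms && now_ms - *last_ms < interval_ms)
		return true;		/* suppress this reply */
	*last_ms = now_ms;
	return false;			/* allowed: record the send time */
}

int main(void)
{
	uint64_t last = 0;

	assert(!rate_limited(1000, &last, 500));	/* first ACK goes out */
	assert(rate_limited(1200, &last, 500));		/* too soon: dropped */
	assert(!rate_limited(1600, &last, 500));	/* interval elapsed */
	return 0;
}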
 61/*
 62 * * The main purpose of TIME-WAIT state is to close the connection gracefully,
 63 *   when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
 64 *   (and, probably, a tail of data) and one or more of our ACKs are lost.
 65 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 66 *   lifetime in the internet, which leads to the wrong conclusion that
 67 *   it is set to catch "old duplicate segments" wandering out of their path.
 68 *   That is not quite correct. This timeout is calculated so that it exceeds
 69 *   the maximal retransmission timeout by enough to allow the loss of one
 70 *   (or more) segments sent by the peer and of our ACKs. It may be calculated from the RTO.
 71 * * When TIME-WAIT socket receives RST, it means that another end
 72 *   finally closed and we are allowed to kill TIME-WAIT too.
 73 * * Second purpose of TIME-WAIT is catching old duplicate segments.
 74 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 75 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 76 * * If we invented some more clever way to catch duplicates
 77 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 78 *
 79 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 80 * When you compare it to RFCs, please, read section SEGMENT ARRIVES
 81 * from the very beginning.
 82 *
 83 * NOTE. With recycling (and later with fin-wait-2) TW bucket
 84 * is _not_ stateless. It means, that strictly speaking we must
 85 * spinlock it. I do not want! Well, probability of misbehaviour
 86 * is ridiculously low and, seems, we could use some mb() tricks
 87 * to avoid misread sequence numbers, states etc.  --ANK
 88 *
 89 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 90 */
 91enum tcp_tw_status
 92tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 93			   const struct tcphdr *th)
 94{
 95	struct tcp_options_received tmp_opt;
 96	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
 97	bool paws_reject = false;
 98
 99	tmp_opt.saw_tstamp = 0;
100	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
101		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);
102
103		if (tmp_opt.saw_tstamp) {
104			if (tmp_opt.rcv_tsecr)
105				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
106			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
107			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
108			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
109		}
110	}
111
112	if (tw->tw_substate == TCP_FIN_WAIT2) {
113		/* Just repeat all the checks of tcp_rcv_state_process() */
114
115		/* Out of window, send ACK */
116		if (paws_reject ||
117		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
118				   tcptw->tw_rcv_nxt,
119				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
120			return tcp_timewait_check_oow_rate_limit(
121				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);
122
123		if (th->rst)
124			goto kill;
125
126		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
127			return TCP_TW_RST;
128
129		/* Dup ACK? */
130		if (!th->ack ||
131		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
132		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
133			inet_twsk_put(tw);
134			return TCP_TW_SUCCESS;
135		}
136
137		/* New data or FIN. If new data arrive after half-duplex close,
138		 * reset.
139		 */
140		if (!th->fin ||
141		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
142			return TCP_TW_RST;
143
144		/* FIN arrived, enter true time-wait state. */
145		tw->tw_substate	  = TCP_TIME_WAIT;
146		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
147		if (tmp_opt.saw_tstamp) {
148			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
149			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
150		}
151
152		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
153		return TCP_TW_ACK;
154	}
155
156	/*
157	 *	Now real TIME-WAIT state.
158	 *
159	 *	RFC 1122:
160	 *	"When a connection is [...] on TIME-WAIT state [...]
161	 *	[a TCP] MAY accept a new SYN from the remote TCP to
162	 *	reopen the connection directly, if it:
163	 *
164	 *	(1)  assigns its initial sequence number for the new
165	 *	connection to be larger than the largest sequence
166	 *	number it used on the previous connection incarnation,
167	 *	and
168	 *
169	 *	(2)  returns to TIME-WAIT state if the SYN turns out
170	 *	to be an old duplicate".
171	 */
172
173	if (!paws_reject &&
174	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
175	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
176		/* In window segment, it may be only reset or bare ack. */
177
178		if (th->rst) {
179			/* This is TIME_WAIT assassination, in two flavors.
180			 * Oh well... nobody has a sufficient solution to this
181			 * protocol bug yet.
182			 */
183			if (twsk_net(tw)->ipv4.sysctl_tcp_rfc1337 == 0) {
184kill:
185				inet_twsk_deschedule_put(tw);
186				return TCP_TW_SUCCESS;
187			}
188		} else {
189			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
190		}
191
192		if (tmp_opt.saw_tstamp) {
193			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
194			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
195		}
196
197		inet_twsk_put(tw);
198		return TCP_TW_SUCCESS;
199	}
200
201	/* Out of window segment.
202
203	   All the segments are ACKed immediately.
204
205	   The only exception is a new SYN. We accept it, if it is
206	   not an old duplicate and we are not in danger of being killed
207	   by delayed old duplicates. The RFC check (that it carries a
208	   newer sequence number) works only at rates <40Mbit/sec.
209	   However, if PAWS works, it is reliable and, even more,
210	   we may even relax the silly seq space cutoff.
211
212	   RED-PEN: we violate the main RFC requirement: if this SYN turns out
213	   to be an old duplicate (i.e. we receive a RST in reply to the SYN-ACK),
214	   we must return the socket to time-wait state. It is not good,
215	   but not fatal yet.
216	 */
217
218	if (th->syn && !th->rst && !th->ack && !paws_reject &&
219	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
220	     (tmp_opt.saw_tstamp &&
221	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
222		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
223		if (isn == 0)
224			isn++;
225		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
226		return TCP_TW_SYN;
227	}
228
229	if (paws_reject)
230		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
231
232	if (!th->rst) {
233		/* In this case we must reset the TIMEWAIT timer.
234		 *
235		 * If it is an ACKless SYN it may be either an old duplicate
236		 * or a new good SYN with a random sequence number <rcv_nxt.
237		 * Do not reschedule in the latter case.
238		 */
239		if (paws_reject || th->ack)
240			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
241
242		return tcp_timewait_check_oow_rate_limit(
243			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
244	}
245	inet_twsk_put(tw);
246	return TCP_TW_SUCCESS;
247}
248EXPORT_SYMBOL(tcp_timewait_state_process);
249
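Editor's note: the four tcp_tw_status verdicts encode who owns the timewait reference. TCP_TW_ACK and TCP_TW_RST leave it with the caller, which sends the reply and then puts the reference, while TCP_TW_SUCCESS means the helper already dropped it. A compilable sketch of a consumer, loosely modeled on the do_time_wait dispatch in tcp_v4_rcv(); the handler bodies here are stubs:

#include <stdio.h>

enum tcp_tw_status {
	TCP_TW_SUCCESS,		/* helper already put the tw reference */
	TCP_TW_RST,		/* caller sends RST, deschedules and puts */
	TCP_TW_ACK,		/* caller sends ACK, then puts */
	TCP_TW_SYN,		/* acceptable new SYN: hand off to a listener */
};

static void do_time_wait(enum tcp_tw_status verdict)
{
	switch (verdict) {
	case TCP_TW_SYN:
		puts("look up a listener and reopen the connection");
		break;
	case TCP_TW_ACK:
		puts("send ACK, put timewait ref");
		break;
	case TCP_TW_RST:
		puts("send RST, deschedule and put timewait ref");
		break;
	case TCP_TW_SUCCESS:
		puts("nothing left to do; drop the skb");
		break;
	}
}

int main(void)
{
	do_time_wait(TCP_TW_ACK);
	return 0;
}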
250/*
251 * Move a socket to time-wait or dead fin-wait-2 state.
252 */
253void tcp_time_wait(struct sock *sk, int state, int timeo)
254{
255	const struct inet_connection_sock *icsk = inet_csk(sk);
256	const struct tcp_sock *tp = tcp_sk(sk);
257	struct inet_timewait_sock *tw;
258	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
259
260	tw = inet_twsk_alloc(sk, tcp_death_row, state);
261
262	if (tw) {
263		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
264		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
265		struct inet_sock *inet = inet_sk(sk);
266
267		tw->tw_transparent	= inet->transparent;
268		tw->tw_mark		= sk->sk_mark;
269		tw->tw_priority		= sk->sk_priority;
270		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
271		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
272		tcptw->tw_snd_nxt	= tp->snd_nxt;
273		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
274		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
275		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
276		tcptw->tw_ts_offset	= tp->tsoffset;
277		tcptw->tw_last_oow_ack_time = 0;
278		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
279#if IS_ENABLED(CONFIG_IPV6)
280		if (tw->tw_family == PF_INET6) {
281			struct ipv6_pinfo *np = inet6_sk(sk);
282
283			tw->tw_v6_daddr = sk->sk_v6_daddr;
284			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
285			tw->tw_tclass = np->tclass;
286			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
287			tw->tw_txhash = sk->sk_txhash;
288			tw->tw_ipv6only = sk->sk_ipv6only;
289		}
290#endif
291
292#ifdef CONFIG_TCP_MD5SIG
293		/*
294		 * The timewait bucket does not have the key DB from the
295		 * sock structure. We just make a quick copy of the
296		 * md5 key being used (if indeed we are using one)
297		 * so the timewait ack generating code has the key.
298		 */
299		do {
300			tcptw->tw_md5_key = NULL;
301			if (static_branch_unlikely(&tcp_md5_needed)) {
302				struct tcp_md5sig_key *key;
303
304				key = tp->af_specific->md5_lookup(sk, sk);
305				if (key) {
306					tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
307					BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
308				}
309			}
310		} while (0);
311#endif
312
313		/* Get the TIME_WAIT timeout firing. */
314		if (timeo < rto)
315			timeo = rto;
316
317		if (state == TCP_TIME_WAIT)
318			timeo = TCP_TIMEWAIT_LEN;
319
320		/* tw_timer is pinned, so we need to make sure BH are disabled
321		 * in following section, otherwise timer handler could run before
322		 * we complete the initialization.
323		 */
324		local_bh_disable();
325		inet_twsk_schedule(tw, timeo);
326		/* Linkage updates.
327		 * Note that access to tw after this point is illegal.
328		 */
329		inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
330		local_bh_enable();
331	} else {
332		/* Sorry, if we're out of memory, just CLOSE this
333		 * socket up.  We've got bigger problems than
334		 * non-graceful socket closings.
335		 */
336		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
337	}
338
339	tcp_update_metrics(sk);
340	tcp_done(sk);
341}
342EXPORT_SYMBOL(tcp_time_wait);
343
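Editor's note: the rto local in tcp_time_wait() computes 3.5 times the retransmission timeout with two shifts: (rto << 2) - (rto >> 1) is 4*RTO - RTO/2. The dead FIN-WAIT-2 timeout is floored at this value so a peer still retransmitting its FIN gets a few chances before the bucket expires. A quick check of the identity:

#include <assert.h>

int main(void)
{
	const int rto = 200;			/* e.g. jiffies */
	const int timeo = (rto << 2) - (rto >> 1);

	assert(timeo == 4 * rto - rto / 2);	/* == 3.5 * RTO */
	assert(timeo == 700);
	return 0;
}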
344void tcp_twsk_destructor(struct sock *sk)
345{
346#ifdef CONFIG_TCP_MD5SIG
347	if (static_branch_unlikely(&tcp_md5_needed)) {
348		struct tcp_timewait_sock *twsk = tcp_twsk(sk);
349
350		if (twsk->tw_md5_key)
351			kfree_rcu(twsk->tw_md5_key, rcu);
352	}
353#endif
354}
355EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
356
357/* Warning : This function is called without sk_listener being locked.
358 * Be sure to read socket fields once, as their value could change under us.
359 */
360void tcp_openreq_init_rwin(struct request_sock *req,
361			   const struct sock *sk_listener,
362			   const struct dst_entry *dst)
363{
364	struct inet_request_sock *ireq = inet_rsk(req);
365	const struct tcp_sock *tp = tcp_sk(sk_listener);
366	int full_space = tcp_full_space(sk_listener);
367	u32 window_clamp;
368	__u8 rcv_wscale;
369	u32 rcv_wnd;
370	int mss;
371
372	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
373	window_clamp = READ_ONCE(tp->window_clamp);
374	/* Set this up on the first call only */
375	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);
376
377	/* limit the window selection if the user enforce a smaller rx buffer */
378	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
379	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
380		req->rsk_window_clamp = full_space;
381
382	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
383	if (rcv_wnd == 0)
384		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
385	else if (full_space < rcv_wnd * mss)
386		full_space = rcv_wnd * mss;
387
388	/* tcp_full_space because it is guaranteed to be the first packet */
389	tcp_select_initial_window(sk_listener, full_space,
390		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
391		&req->rsk_rcv_wnd,
392		&req->rsk_window_clamp,
393		ireq->wscale_ok,
394		&rcv_wscale,
395		rcv_wnd);
396	ireq->rcv_wscale = rcv_wscale;
397}
398EXPORT_SYMBOL(tcp_openreq_init_rwin);
399
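Editor's note: the "window_clamp ? : dst_metric(dst, RTAX_WINDOW)" expression above uses the GNU C conditional with an omitted middle operand: "a ?: b" evaluates a once and yields it if nonzero, otherwise b. A minimal demonstration (GCC/Clang extension; the helper name is illustrative):

#include <assert.h>

static unsigned int clamp_or_default(unsigned int clamp, unsigned int dflt)
{
	return clamp ? : dflt;	/* GNU "elvis" extension */
}

int main(void)
{
	assert(clamp_or_default(0, 65535) == 65535);	/* fall back */
	assert(clamp_or_default(4096, 65535) == 4096);	/* keep the clamp */
	return 0;
}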
400static void tcp_ecn_openreq_child(struct tcp_sock *tp,
401				  const struct request_sock *req)
402{
403	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
404}
405
406void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
407{
408	struct inet_connection_sock *icsk = inet_csk(sk);
409	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
410	bool ca_got_dst = false;
411
412	if (ca_key != TCP_CA_UNSPEC) {
413		const struct tcp_congestion_ops *ca;
414
415		rcu_read_lock();
416		ca = tcp_ca_find_key(ca_key);
417		if (likely(ca && try_module_get(ca->owner))) {
418			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
419			icsk->icsk_ca_ops = ca;
420			ca_got_dst = true;
421		}
422		rcu_read_unlock();
423	}
424
425	/* If no valid choice made yet, assign current system default ca. */
426	if (!ca_got_dst &&
427	    (!icsk->icsk_ca_setsockopt ||
428	     !try_module_get(icsk->icsk_ca_ops->owner)))
429		tcp_assign_congestion_control(sk);
430
431	tcp_set_ca_state(sk, TCP_CA_Open);
432}
433EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
434
435static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
436				    struct request_sock *req,
437				    struct tcp_sock *newtp)
438{
439#if IS_ENABLED(CONFIG_SMC)
440	struct inet_request_sock *ireq;
441
442	if (static_branch_unlikely(&tcp_have_smc)) {
443		ireq = inet_rsk(req);
444		if (oldtp->syn_smc && !ireq->smc_ok)
445			newtp->syn_smc = 0;
446	}
447#endif
448}
449
450/* This is not only more efficient than what we used to do, it eliminates
451 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
452 *
453 * Actually, we could avoid lots of memory writes here. tp of the listening
454 * socket contains all necessary default parameters.
455 */
456struct sock *tcp_create_openreq_child(const struct sock *sk,
457				      struct request_sock *req,
458				      struct sk_buff *skb)
459{
460	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
461	const struct inet_request_sock *ireq = inet_rsk(req);
462	struct tcp_request_sock *treq = tcp_rsk(req);
463	struct inet_connection_sock *newicsk;
464	struct tcp_sock *oldtp, *newtp;
465	u32 seq;
466
467	if (!newsk)
468		return NULL;
469
470	newicsk = inet_csk(newsk);
471	newtp = tcp_sk(newsk);
472	oldtp = tcp_sk(sk);
473
474	smc_check_reset_syn_req(oldtp, req, newtp);
475
476	/* Now setup tcp_sock */
477	newtp->pred_flags = 0;
478
479	seq = treq->rcv_isn + 1;
480	newtp->rcv_wup = seq;
481	WRITE_ONCE(newtp->copied_seq, seq);
482	WRITE_ONCE(newtp->rcv_nxt, seq);
483	newtp->segs_in = 1;
484
485	seq = treq->snt_isn + 1;
486	newtp->snd_sml = newtp->snd_una = seq;
487	WRITE_ONCE(newtp->snd_nxt, seq);
488	newtp->snd_up = seq;
489
490	INIT_LIST_HEAD(&newtp->tsq_node);
491	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);
492
493	tcp_init_wl(newtp, treq->rcv_isn);
494
495	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
496	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;
497
498	newtp->lsndtime = tcp_jiffies32;
499	newsk->sk_txhash = treq->txhash;
500	newtp->total_retrans = req->num_retrans;
501
502	tcp_init_xmit_timers(newsk);
503	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);
504
505	if (sock_flag(newsk, SOCK_KEEPOPEN))
506		inet_csk_reset_keepalive_timer(newsk,
507					       keepalive_time_when(newtp));
508
509	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
510	newtp->rx_opt.sack_ok = ireq->sack_ok;
511	newtp->window_clamp = req->rsk_window_clamp;
512	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
513	newtp->rcv_wnd = req->rsk_rcv_wnd;
514	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
515	if (newtp->rx_opt.wscale_ok) {
516		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
517		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
518	} else {
519		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
520		newtp->window_clamp = min(newtp->window_clamp, 65535U);
521	}
522	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
523	newtp->max_window = newtp->snd_wnd;
524
525	if (newtp->rx_opt.tstamp_ok) {
526		newtp->rx_opt.ts_recent = req->ts_recent;
527		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
528		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
529	} else {
530		newtp->rx_opt.ts_recent_stamp = 0;
531		newtp->tcp_header_len = sizeof(struct tcphdr);
532	}
533	if (req->num_timeout) {
534		newtp->undo_marker = treq->snt_isn;
535		newtp->retrans_stamp = div_u64(treq->snt_synack,
536					       USEC_PER_SEC / TCP_TS_HZ);
537	}
538	newtp->tsoffset = treq->ts_off;
539#ifdef CONFIG_TCP_MD5SIG
540	newtp->md5sig_info = NULL;	/*XXX*/
541	if (newtp->af_specific->md5_lookup(sk, newsk))
542		newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
543#endif
544	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
545		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
546	newtp->rx_opt.mss_clamp = req->mss;
547	tcp_ecn_openreq_child(newtp, req);
548	newtp->fastopen_req = NULL;
549	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);
550
551	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
552
553	return newsk;
554}
555EXPORT_SYMBOL(tcp_create_openreq_child);
556
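Editor's note: one detail of the child setup worth spelling out: the initial send window is the raw 16-bit window field from the ACK shifted by the scale the peer announced in its SYN, and when wscale_ok is clear the clamp is capped at 65535. A worked example of the shift:

#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t wire_window = htons(512);	/* window field on the wire */
	unsigned int snd_wscale = 7;		/* from the peer's SYN option */

	/* newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << snd_wscale */
	uint32_t snd_wnd = (uint32_t)ntohs(wire_window) << snd_wscale;

	assert(snd_wnd == 512u << 7);		/* 65536 bytes */
	return 0;
}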
557/*
558 * Process an incoming packet for SYN_RECV sockets represented as a
559 * request_sock. Normally sk is the listener socket but for TFO it
560 * points to the child socket.
561 *
562 * XXX (TFO) - The current impl contains a special check for ack
563 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
564 *
565 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
566 */
567
568struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
569			   struct request_sock *req,
570			   bool fastopen, bool *req_stolen)
571{
572	struct tcp_options_received tmp_opt;
573	struct sock *child;
574	const struct tcphdr *th = tcp_hdr(skb);
575	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
576	bool paws_reject = false;
577	bool own_req;
578
579	tmp_opt.saw_tstamp = 0;
580	if (th->doff > (sizeof(struct tcphdr)>>2)) {
581		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);
582
583		if (tmp_opt.saw_tstamp) {
584			tmp_opt.ts_recent = req->ts_recent;
585			if (tmp_opt.rcv_tsecr)
586				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
587			/* We do not store true stamp, but it is not required,
588			 * it can be estimated (approximately)
589			 * from another data.
590			 */
591			tmp_opt.ts_recent_stamp = ktime_get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
592			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
593		}
594	}
595
596	/* Check for pure retransmitted SYN. */
597	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
598	    flg == TCP_FLAG_SYN &&
599	    !paws_reject) {
600		/*
601		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
602		 * this case on figure 6 and figure 8, but formal
603		 * protocol description says NOTHING.
604		 * To be more exact, it says that we should send ACK,
605		 * because this segment (at least, if it has no data)
606		 * is out of window.
607		 *
608		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
609		 *  describe SYN-RECV state. All the description
610		 *  is wrong, we cannot believe to it and should
611		 *  rely only on common sense and implementation
612		 *  experience.
613		 *
614		 * Enforce "SYN-ACK" according to figure 8, figure 6
615		 * of RFC793, fixed by RFC1122.
616		 *
617		 * Note that even if there is new data in the SYN packet
618		 * they will be thrown away too.
619		 *
620		 * Reset timer after retransmitting SYNACK, similar to
621		 * the idea of fast retransmit in recovery.
622		 */
623		if (!tcp_oow_rate_limited(sock_net(sk), skb,
624					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
625					  &tcp_rsk(req)->last_oow_ack_time) &&
626
627		    !inet_rtx_syn_ack(sk, req)) {
628			unsigned long expires = jiffies;
629
630			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
631				       TCP_RTO_MAX);
632			if (!fastopen)
633				mod_timer_pending(&req->rsk_timer, expires);
634			else
635				req->rsk_timer.expires = expires;
636		}
637		return NULL;
638	}
639
640	/* Further reproduces section "SEGMENT ARRIVES"
641	   for state SYN-RECEIVED of RFC793.
642	   It is broken; however, it fails only
643	   when SYNs are crossed.
644
645	   You would think that SYN crossing is impossible here, since
646	   we should have a SYN_SENT socket (from connect()) on our end,
647	   but this is not true if the crossed SYNs were sent to both
648	   ends by a malicious third party.  We must defend against this,
649	   and to do that we first verify the ACK (as per RFC793, page
650	   36) and reset if it is invalid.  Is this a true full defense?
651	   To convince ourselves, let us consider a way in which the ACK
652	   test can still pass in this 'malicious crossed SYNs' case.
653	   Malicious sender sends identical SYNs (and thus identical sequence
654	   numbers) to both A and B:
655
656		A: gets SYN, seq=7
657		B: gets SYN, seq=7
658
659	   By our good fortune, both A and B select the same initial
660	   send sequence number of seven :-)
661
662		A: sends SYN|ACK, seq=7, ack_seq=8
663		B: sends SYN|ACK, seq=7, ack_seq=8
664
665	   So we are now A eating this SYN|ACK, ACK test passes.  So
666	   does sequence test, SYN is truncated, and thus we consider
667	   it a bare ACK.
668
669	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
670	   bare ACK.  Otherwise, we create an established connection.  Both
671	   ends (listening sockets) accept the new incoming connection and try
672	   to talk to each other. 8-)
673
674	   Note: This case is both harmless and rare.  The possibility is about the
675	   same as us discovering intelligent life on another planet tomorrow.
676
677	   But generally, we should (the RFC lies!) accept an ACK
678	   on a SYNACK both here and in tcp_rcv_state_process().
679	   tcp_rcv_state_process() does not, hence we do not either.
680
681	   Note that the case is absolutely generic:
682	   we cannot optimize anything here without
683	   violating protocol. All the checks must be made
684	   before attempt to create socket.
685	 */
686
687	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
688	 *                  and the incoming segment acknowledges something not yet
689	 *                  sent (the segment carries an unacceptable ACK) ...
690	 *                  a reset is sent."
691	 *
692	 * Invalid ACK: reset will be sent by listening socket.
693	 * Note that the ACK validity check for a Fast Open socket is done
694	 * elsewhere and is checked directly against the child socket rather
695	 * than req because user data may have been sent out.
696	 */
697	if ((flg & TCP_FLAG_ACK) && !fastopen &&
698	    (TCP_SKB_CB(skb)->ack_seq !=
699	     tcp_rsk(req)->snt_isn + 1))
700		return sk;
701
702	/* Also, it would be not so bad idea to check rcv_tsecr, which
703	 * is essentially ACK extension and too early or too late values
704	 * should cause reset in unsynchronized states.
705	 */
706
707	/* RFC793: "first check sequence number". */
708
709	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
710					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
711		/* Out of window: send ACK and drop. */
712		if (!(flg & TCP_FLAG_RST) &&
713		    !tcp_oow_rate_limited(sock_net(sk), skb,
714					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
715					  &tcp_rsk(req)->last_oow_ack_time))
716			req->rsk_ops->send_ack(sk, skb, req);
717		if (paws_reject)
718			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
719		return NULL;
720	}
721
722	/* In sequence, PAWS is OK. */
723
724	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
725		req->ts_recent = tmp_opt.rcv_tsval;
726
727	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
728		/* Truncate SYN, it is out of window starting
729		   at tcp_rsk(req)->rcv_isn + 1. */
730		flg &= ~TCP_FLAG_SYN;
731	}
732
733	/* RFC793: "second check the RST bit" and
734	 *	   "fourth, check the SYN bit"
735	 */
736	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
737		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
738		goto embryonic_reset;
739	}
740
741	/* ACK sequence verified above, just make sure ACK is
742	 * set.  If ACK not set, just silently drop the packet.
743	 *
744	 * XXX (TFO) - if we ever allow "data after SYN", the
745	 * following check needs to be removed.
746	 */
747	if (!(flg & TCP_FLAG_ACK))
748		return NULL;
749
750	/* For Fast Open no more processing is needed (sk is the
751	 * child socket).
752	 */
753	if (fastopen)
754		return sk;
755
756	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
757	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
758	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
759		inet_rsk(req)->acked = 1;
760		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
761		return NULL;
762	}
763
764	/* OK, ACK is valid, create big socket and
765	 * feed this segment to it. It will repeat all
766	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
767	 * ESTABLISHED STATE. If it will be dropped after
768	 * socket is created, wait for troubles.
769	 */
770	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
771							 req, &own_req);
772	if (!child)
773		goto listen_overflow;
774
775	sock_rps_save_rxhash(child, skb);
776	tcp_synack_rtt_meas(child, req);
777	*req_stolen = !own_req;
778	return inet_csk_complete_hashdance(sk, child, req, own_req);
779
780listen_overflow:
781	if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
782		inet_rsk(req)->acked = 1;
783		return NULL;
784	}
785
786embryonic_reset:
787	if (!(flg & TCP_FLAG_RST)) {
788		/* Received a bad SYN pkt - for TFO We try not to reset
789		 * the local connection unless it's really necessary to
790		 * avoid becoming vulnerable to outside attack aiming at
791		 * resetting legit local connections.
792		 */
793		req->rsk_ops->send_reset(sk, skb);
794	} else if (fastopen) { /* received a valid RST pkt */
795		reqsk_fastopen_remove(sk, req, true);
796		tcp_reset(sk);
797	}
798	if (!fastopen) {
799		inet_csk_reqsk_queue_drop(sk, req);
800		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
801	}
802	return NULL;
803}
804EXPORT_SYMBOL(tcp_check_req);
805
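Editor's note: the ts_recent_stamp estimate in tcp_check_req() assumes the peer's timestamp was last seen one exponentially backed-off SYN-ACK timeout ago: (TCP_TIMEOUT_INIT/HZ) << num_timeout seconds in this version (v6.9 derives the same figure through reqsk_timeout()). With the stock one-second TCP_TIMEOUT_INIT:

#include <assert.h>

int main(void)
{
	const unsigned int timeout_init_sec = 1;	/* TCP_TIMEOUT_INIT / HZ */
	unsigned int num_timeout = 3;			/* three SYN-ACK rexmits */

	/* Backoff doubles per retransmission: 1s, 2s, 4s, 8s, ... */
	assert((timeout_init_sec << num_timeout) == 8);
	return 0;
}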
806/*
807 * Queue segment on the new socket if the new socket is active,
808 * otherwise we just shortcircuit this and continue with
809 * the new socket.
810 *
811 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
812 * when entering. But other states are possible due to a race condition
813 * where after __inet_lookup_established() fails but before the listener
814 * lock is obtained, other packets cause the same connection to
815 * be created.
816 */
817
818int tcp_child_process(struct sock *parent, struct sock *child,
819		      struct sk_buff *skb)
820{
821	int ret = 0;
822	int state = child->sk_state;
823
824	/* record NAPI ID of child */
825	sk_mark_napi_id(child, skb);
826
827	tcp_segs_in(tcp_sk(child), skb);
828	if (!sock_owned_by_user(child)) {
829		ret = tcp_rcv_state_process(child, skb);
830		/* Wakeup parent, send SIGIO */
831		if (state == TCP_SYN_RECV && child->sk_state != state)
832			parent->sk_data_ready(parent);
833	} else {
834		/* Alas, it is possible again, because we do lookup
835		 * in main socket hash table and lock on listening
836		 * socket does not protect us more.
837		 */
838		__sk_add_backlog(child, skb);
839	}
840
841	bh_unlock_sock(child);
842	sock_put(child);
843	return ret;
844}
845EXPORT_SYMBOL(tcp_child_process);
v6.9.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
  4 *		operating system.  INET is implemented using the  BSD Socket
  5 *		interface as the means of communication with the user level.
  6 *
  7 *		Implementation of the Transmission Control Protocol(TCP).
  8 *
  9 * Authors:	Ross Biro
 10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 11 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 12 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 13 *		Florian La Roche, <flla@stud.uni-sb.de>
 14 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 15 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 16 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 17 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 18 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 19 *		Jorge Cwik, <jorge@laser.satlink.net>
 20 */
 21
 22#include <net/tcp.h>
 23#include <net/xfrm.h>
 24#include <net/busy_poll.h>
 25
 26static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
 27{
 28	if (seq == s_win)
 29		return true;
 30	if (after(end_seq, s_win) && before(seq, e_win))
 31		return true;
 32	return seq == e_win && seq == end_seq;
 33}
 34
 35static enum tcp_tw_status
 36tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
 37				  const struct sk_buff *skb, int mib_idx)
 38{
 39	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
 40
 41	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
 42				  &tcptw->tw_last_oow_ack_time)) {
 43		/* Send ACK. Note, we do not put the bucket,
 44		 * it will be released by caller.
 45		 */
 46		return TCP_TW_ACK;
 47	}
 48
 49	/* We are rate-limiting, so just release the tw sock and drop skb. */
 50	inet_twsk_put(tw);
 51	return TCP_TW_SUCCESS;
 52}
 53
 54static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq)
 55{
 56#ifdef CONFIG_TCP_AO
 57	struct tcp_ao_info *ao;
 58
 59	ao = rcu_dereference(tcptw->ao_info);
 60	if (unlikely(ao && seq < tcptw->tw_rcv_nxt))
 61		WRITE_ONCE(ao->rcv_sne, ao->rcv_sne + 1);
 62#endif
 63	tcptw->tw_rcv_nxt = seq;
 64}
 65
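Editor's note: twsk_rcv_nxt_update() is new in this version. TCP-AO (RFC 5925) computes MACs over a logical 64-bit sequence number whose upper half is the sequence number extension (SNE), so the SNE must be bumped whenever rcv_nxt moves numerically backwards, i.e. the 32-bit space wrapped. A userspace model of the same bookkeeping:

#include <assert.h>
#include <stdint.h>

struct ao_state {
	uint32_t rcv_sne;	/* upper 32 bits of the logical counter */
	uint32_t rcv_nxt;	/* lower 32 bits, as seen on the wire */
};

static void rcv_nxt_update(struct ao_state *ao, uint32_t seq)
{
	if (seq < ao->rcv_nxt)	/* wrapped past 2^32 - 1 */
		ao->rcv_sne++;
	ao->rcv_nxt = seq;
}

int main(void)
{
	struct ao_state ao = { .rcv_sne = 0, .rcv_nxt = 0xfffffff0u };

	rcv_nxt_update(&ao, 0x00000010u);	/* FIN lands past the wrap */
	assert(ao.rcv_sne == 1 && ao.rcv_nxt == 0x10);
	return 0;
}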
 66/*
 67 * * The main purpose of TIME-WAIT state is to close the connection gracefully,
 68 *   when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
 69 *   (and, probably, a tail of data) and one or more of our ACKs are lost.
 70 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 71 *   lifetime in the internet, which leads to the wrong conclusion that
 72 *   it is set to catch "old duplicate segments" wandering out of their path.
 73 *   That is not quite correct. This timeout is calculated so that it exceeds
 74 *   the maximal retransmission timeout by enough to allow the loss of one
 75 *   (or more) segments sent by the peer and of our ACKs. It may be calculated from the RTO.
 76 * * When TIME-WAIT socket receives RST, it means that another end
 77 *   finally closed and we are allowed to kill TIME-WAIT too.
 78 * * Second purpose of TIME-WAIT is catching old duplicate segments.
 79 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 80 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 81 * * If we invented some more clever way to catch duplicates
 82 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 83 *
 84 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 85 * When you compare it to RFCs, please, read section SEGMENT ARRIVES
 86 * from the very beginning.
 87 *
 88 * NOTE. With recycling (and later with fin-wait-2) TW bucket
 89 * is _not_ stateless. It means, that strictly speaking we must
 90 * spinlock it. I do not want! Well, probability of misbehaviour
 91 * is ridiculously low and, seems, we could use some mb() tricks
 92 * to avoid misread sequence numbers, states etc.  --ANK
 93 *
 94 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 95 */
 96enum tcp_tw_status
 97tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 98			   const struct tcphdr *th)
 99{
100	struct tcp_options_received tmp_opt;
101	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
102	bool paws_reject = false;
103
104	tmp_opt.saw_tstamp = 0;
105	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
106		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);
107
108		if (tmp_opt.saw_tstamp) {
109			if (tmp_opt.rcv_tsecr)
110				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
111			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
112			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
113			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
114		}
115	}
116
117	if (tw->tw_substate == TCP_FIN_WAIT2) {
118		/* Just repeat all the checks of tcp_rcv_state_process() */
119
120		/* Out of window, send ACK */
121		if (paws_reject ||
122		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
123				   tcptw->tw_rcv_nxt,
124				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
125			return tcp_timewait_check_oow_rate_limit(
126				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);
127
128		if (th->rst)
129			goto kill;
130
131		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
132			return TCP_TW_RST;
133
134		/* Dup ACK? */
135		if (!th->ack ||
136		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
137		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
138			inet_twsk_put(tw);
139			return TCP_TW_SUCCESS;
140		}
141
142		/* New data or FIN. If new data arrive after half-duplex close,
143		 * reset.
144		 */
145		if (!th->fin ||
146		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
147			return TCP_TW_RST;
148
149		/* FIN arrived, enter true time-wait state. */
150		tw->tw_substate	  = TCP_TIME_WAIT;
151		twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq);
152
153		if (tmp_opt.saw_tstamp) {
154			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
155			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
156		}
157
158		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
159		return TCP_TW_ACK;
160	}
161
162	/*
163	 *	Now real TIME-WAIT state.
164	 *
165	 *	RFC 1122:
166	 *	"When a connection is [...] on TIME-WAIT state [...]
167	 *	[a TCP] MAY accept a new SYN from the remote TCP to
168	 *	reopen the connection directly, if it:
169	 *
170	 *	(1)  assigns its initial sequence number for the new
171	 *	connection to be larger than the largest sequence
172	 *	number it used on the previous connection incarnation,
173	 *	and
174	 *
175	 *	(2)  returns to TIME-WAIT state if the SYN turns out
176	 *	to be an old duplicate".
177	 */
178
179	if (!paws_reject &&
180	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
181	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
182		/* In window segment, it may be only reset or bare ack. */
183
184		if (th->rst) {
185			/* This is TIME_WAIT assassination, in two flavors.
186			 * Oh well... nobody has a sufficient solution to this
187			 * protocol bug yet.
188			 */
189			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
190kill:
191				inet_twsk_deschedule_put(tw);
192				return TCP_TW_SUCCESS;
193			}
194		} else {
195			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
196		}
197
198		if (tmp_opt.saw_tstamp) {
199			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
200			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
201		}
202
203		inet_twsk_put(tw);
204		return TCP_TW_SUCCESS;
205	}
206
207	/* Out of window segment.
208
209	   All the segments are ACKed immediately.
210
211	   The only exception is a new SYN. We accept it, if it is
212	   not an old duplicate and we are not in danger of being killed
213	   by delayed old duplicates. The RFC check (that it carries a
214	   newer sequence number) works only at rates <40Mbit/sec.
215	   However, if PAWS works, it is reliable and, even more,
216	   we may even relax the silly seq space cutoff.
217
218	   RED-PEN: we violate the main RFC requirement: if this SYN turns out
219	   to be an old duplicate (i.e. we receive a RST in reply to the SYN-ACK),
220	   we must return the socket to time-wait state. It is not good,
221	   but not fatal yet.
222	 */
223
224	if (th->syn && !th->rst && !th->ack && !paws_reject &&
225	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
226	     (tmp_opt.saw_tstamp &&
227	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
228		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
229		if (isn == 0)
230			isn++;
231		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
232		return TCP_TW_SYN;
233	}
234
235	if (paws_reject)
236		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
237
238	if (!th->rst) {
239		/* In this case we must reset the TIMEWAIT timer.
240		 *
241		 * If it is an ACKless SYN it may be either an old duplicate
242		 * or a new good SYN with a random sequence number <rcv_nxt.
243		 * Do not reschedule in the latter case.
244		 */
245		if (paws_reject || th->ack)
246			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
247
248		return tcp_timewait_check_oow_rate_limit(
249			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
250	}
251	inet_twsk_put(tw);
252	return TCP_TW_SUCCESS;
253}
254EXPORT_SYMBOL(tcp_timewait_state_process);
255
256static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
257{
258#ifdef CONFIG_TCP_MD5SIG
259	const struct tcp_sock *tp = tcp_sk(sk);
260	struct tcp_md5sig_key *key;
261
262	/*
263	 * The timewait bucket does not have the key DB from the
264	 * sock structure. We just make a quick copy of the
265	 * md5 key being used (if indeed we are using one)
266	 * so the timewait ack generating code has the key.
267	 */
268	tcptw->tw_md5_key = NULL;
269	if (!static_branch_unlikely(&tcp_md5_needed.key))
270		return;
271
272	key = tp->af_specific->md5_lookup(sk, sk);
273	if (key) {
274		tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
275		if (!tcptw->tw_md5_key)
276			return;
277		if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
278			goto out_free;
279		tcp_md5_add_sigpool();
280	}
281	return;
282out_free:
283	WARN_ON_ONCE(1);
284	kfree(tcptw->tw_md5_key);
285	tcptw->tw_md5_key = NULL;
286#endif
287}
288
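Editor's note: relative to v5.4's inline do { } while (0) block, tcp_time_wait_init() also pins the tcp_md5_needed static branch so the decrement in the destructor stays balanced, and it rolls the key copy back if the pin fails. A generic userspace rendering of that take-or-roll-back shape; try_take_ref() is a purely illustrative stand-in for static_key_fast_inc_not_disabled():

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static bool try_take_ref(void)
{
	return true;	/* stand-in: pretend the refcount bump succeeded */
}

static void put_ref(void)
{
}

/* Duplicate a key and take a reference, or fail cleanly with NULL. */
static char *dup_key_with_ref(const char *key)
{
	char *copy = strdup(key);	/* kmemdup() analogue */

	if (!copy)
		return NULL;
	if (!try_take_ref()) {		/* roll the allocation back */
		free(copy);
		return NULL;
	}
	return copy;
}

int main(void)
{
	char *k = dup_key_with_ref("md5-key");

	if (k) {
		/* ... use the copy ... */
		free(k);
		put_ref();	/* mirrors tcp_md5_twsk_free_rcu() */
	}
	return 0;
}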
289/*
290 * Move a socket to time-wait or dead fin-wait-2 state.
291 */
292void tcp_time_wait(struct sock *sk, int state, int timeo)
293{
294	const struct inet_connection_sock *icsk = inet_csk(sk);
295	struct tcp_sock *tp = tcp_sk(sk);
296	struct net *net = sock_net(sk);
297	struct inet_timewait_sock *tw;
298
299	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);
300
301	if (tw) {
302		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
303		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
304
305		tw->tw_transparent	= inet_test_bit(TRANSPARENT, sk);
306		tw->tw_mark		= sk->sk_mark;
307		tw->tw_priority		= READ_ONCE(sk->sk_priority);
308		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
309		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
310		tcptw->tw_snd_nxt	= tp->snd_nxt;
311		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
312		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
313		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
314		tcptw->tw_ts_offset	= tp->tsoffset;
315		tw->tw_usec_ts		= tp->tcp_usec_ts;
316		tcptw->tw_last_oow_ack_time = 0;
317		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
318		tw->tw_txhash		= sk->sk_txhash;
319#if IS_ENABLED(CONFIG_IPV6)
320		if (tw->tw_family == PF_INET6) {
321			struct ipv6_pinfo *np = inet6_sk(sk);
322
323			tw->tw_v6_daddr = sk->sk_v6_daddr;
324			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
325			tw->tw_tclass = np->tclass;
326			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
327			tw->tw_ipv6only = sk->sk_ipv6only;
328		}
329#endif
330
331		tcp_time_wait_init(sk, tcptw);
332		tcp_ao_time_wait(tcptw, tp);
333
334		/* Get the TIME_WAIT timeout firing. */
335		if (timeo < rto)
336			timeo = rto;
337
338		if (state == TCP_TIME_WAIT)
339			timeo = TCP_TIMEWAIT_LEN;
340
341		/* tw_timer is pinned, so we need to make sure BH are disabled
342		 * in following section, otherwise timer handler could run before
343		 * we complete the initialization.
344		 */
345		local_bh_disable();
346		inet_twsk_schedule(tw, timeo);
347		/* Linkage updates.
348		 * Note that access to tw after this point is illegal.
349		 */
350		inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
351		local_bh_enable();
352	} else {
353		/* Sorry, if we're out of memory, just CLOSE this
354		 * socket up.  We've got bigger problems than
355		 * non-graceful socket closings.
356		 */
357		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
358	}
359
360	tcp_update_metrics(sk);
361	tcp_done(sk);
362}
363EXPORT_SYMBOL(tcp_time_wait);
364
365#ifdef CONFIG_TCP_MD5SIG
366static void tcp_md5_twsk_free_rcu(struct rcu_head *head)
367{
368	struct tcp_md5sig_key *key;
369
370	key = container_of(head, struct tcp_md5sig_key, rcu);
371	kfree(key);
372	static_branch_slow_dec_deferred(&tcp_md5_needed);
373	tcp_md5_release_sigpool();
374}
375#endif
376
377void tcp_twsk_destructor(struct sock *sk)
378{
379#ifdef CONFIG_TCP_MD5SIG
380	if (static_branch_unlikely(&tcp_md5_needed.key)) {
381		struct tcp_timewait_sock *twsk = tcp_twsk(sk);
382
383		if (twsk->tw_md5_key)
384			call_rcu(&twsk->tw_md5_key->rcu, tcp_md5_twsk_free_rcu);
385	}
386#endif
387	tcp_ao_destroy_sock(sk, true);
388}
389EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
390
391void tcp_twsk_purge(struct list_head *net_exit_list, int family)
392{
393	bool purged_once = false;
394	struct net *net;
395
396	list_for_each_entry(net, net_exit_list, exit_list) {
397		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
398			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
399			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
400		} else if (!purged_once) {
401			inet_twsk_purge(&tcp_hashinfo, family);
402			purged_once = true;
403		}
404	}
405}
406EXPORT_SYMBOL_GPL(tcp_twsk_purge);
407
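Editor's note: tcp_twsk_purge() distinguishes the two ehash configurations: a netns with its own pernet hash table must always be purged, while any number of netns sharing the global tcp_hashinfo need only one pass. A compilable sketch of that purge-shared-once loop; the struct and names are stand-ins, not kernel types:

#include <stdbool.h>
#include <stdio.h>

struct netns {
	const char *name;
	bool own_hash;		/* analogue of hashinfo->pernet */
};

static void purge(const char *what)
{
	printf("purge %s\n", what);
}

static void purge_all(const struct netns *nets, int n)
{
	bool purged_shared = false;
	int i;

	for (i = 0; i < n; i++) {
		if (nets[i].own_hash) {
			purge(nets[i].name);	/* per-netns table */
		} else if (!purged_shared) {
			purge("shared table");	/* once for everyone */
			purged_shared = true;
		}
	}
}

int main(void)
{
	const struct netns nets[] = {
		{ "netA", true }, { "netB", false }, { "netC", false },
	};

	purge_all(nets, 3);	/* purges netA, then the shared table once */
	return 0;
}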
408/* Warning : This function is called without sk_listener being locked.
409 * Be sure to read socket fields once, as their value could change under us.
410 */
411void tcp_openreq_init_rwin(struct request_sock *req,
412			   const struct sock *sk_listener,
413			   const struct dst_entry *dst)
414{
415	struct inet_request_sock *ireq = inet_rsk(req);
416	const struct tcp_sock *tp = tcp_sk(sk_listener);
417	int full_space = tcp_full_space(sk_listener);
418	u32 window_clamp;
419	__u8 rcv_wscale;
420	u32 rcv_wnd;
421	int mss;
422
423	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
424	window_clamp = READ_ONCE(tp->window_clamp);
425	/* Set this up on the first call only */
426	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);
427
428	/* limit the window selection if the user enforce a smaller rx buffer */
429	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
430	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
431		req->rsk_window_clamp = full_space;
432
433	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
434	if (rcv_wnd == 0)
435		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
436	else if (full_space < rcv_wnd * mss)
437		full_space = rcv_wnd * mss;
438
439	/* tcp_full_space because it is guaranteed to be the first packet */
440	tcp_select_initial_window(sk_listener, full_space,
441		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
442		&req->rsk_rcv_wnd,
443		&req->rsk_window_clamp,
444		ireq->wscale_ok,
445		&rcv_wscale,
446		rcv_wnd);
447	ireq->rcv_wscale = rcv_wscale;
448}
449EXPORT_SYMBOL(tcp_openreq_init_rwin);
450
451static void tcp_ecn_openreq_child(struct tcp_sock *tp,
452				  const struct request_sock *req)
453{
454	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
455}
456
457void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
458{
459	struct inet_connection_sock *icsk = inet_csk(sk);
460	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
461	bool ca_got_dst = false;
462
463	if (ca_key != TCP_CA_UNSPEC) {
464		const struct tcp_congestion_ops *ca;
465
466		rcu_read_lock();
467		ca = tcp_ca_find_key(ca_key);
468		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
469			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
470			icsk->icsk_ca_ops = ca;
471			ca_got_dst = true;
472		}
473		rcu_read_unlock();
474	}
475
476	/* If no valid choice made yet, assign current system default ca. */
477	if (!ca_got_dst &&
478	    (!icsk->icsk_ca_setsockopt ||
479	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
480		tcp_assign_congestion_control(sk);
481
482	tcp_set_ca_state(sk, TCP_CA_Open);
483}
484EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
485
486static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
487				    struct request_sock *req,
488				    struct tcp_sock *newtp)
489{
490#if IS_ENABLED(CONFIG_SMC)
491	struct inet_request_sock *ireq;
492
493	if (static_branch_unlikely(&tcp_have_smc)) {
494		ireq = inet_rsk(req);
495		if (oldtp->syn_smc && !ireq->smc_ok)
496			newtp->syn_smc = 0;
497	}
498#endif
499}
500
501/* This is not only more efficient than what we used to do, it eliminates
502 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
503 *
504 * Actually, we could avoid lots of memory writes here. tp of the listening
505 * socket contains all necessary default parameters.
506 */
507struct sock *tcp_create_openreq_child(const struct sock *sk,
508				      struct request_sock *req,
509				      struct sk_buff *skb)
510{
511	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
512	const struct inet_request_sock *ireq = inet_rsk(req);
513	struct tcp_request_sock *treq = tcp_rsk(req);
514	struct inet_connection_sock *newicsk;
515	const struct tcp_sock *oldtp;
516	struct tcp_sock *newtp;
517	u32 seq;
518#ifdef CONFIG_TCP_AO
519	struct tcp_ao_key *ao_key;
520#endif
521
522	if (!newsk)
523		return NULL;
524
525	newicsk = inet_csk(newsk);
526	newtp = tcp_sk(newsk);
527	oldtp = tcp_sk(sk);
528
529	smc_check_reset_syn_req(oldtp, req, newtp);
530
531	/* Now setup tcp_sock */
532	newtp->pred_flags = 0;
533
534	seq = treq->rcv_isn + 1;
535	newtp->rcv_wup = seq;
536	WRITE_ONCE(newtp->copied_seq, seq);
537	WRITE_ONCE(newtp->rcv_nxt, seq);
538	newtp->segs_in = 1;
539
540	seq = treq->snt_isn + 1;
541	newtp->snd_sml = newtp->snd_una = seq;
542	WRITE_ONCE(newtp->snd_nxt, seq);
543	newtp->snd_up = seq;
544
545	INIT_LIST_HEAD(&newtp->tsq_node);
546	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);
547
548	tcp_init_wl(newtp, treq->rcv_isn);
549
550	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
551	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;
552
553	newtp->lsndtime = tcp_jiffies32;
554	newsk->sk_txhash = READ_ONCE(treq->txhash);
555	newtp->total_retrans = req->num_retrans;
556
557	tcp_init_xmit_timers(newsk);
558	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);
559
560	if (sock_flag(newsk, SOCK_KEEPOPEN))
561		inet_csk_reset_keepalive_timer(newsk,
562					       keepalive_time_when(newtp));
563
564	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
565	newtp->rx_opt.sack_ok = ireq->sack_ok;
566	newtp->window_clamp = req->rsk_window_clamp;
567	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
568	newtp->rcv_wnd = req->rsk_rcv_wnd;
569	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
570	if (newtp->rx_opt.wscale_ok) {
571		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
572		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
573	} else {
574		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
575		newtp->window_clamp = min(newtp->window_clamp, 65535U);
576	}
577	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
578	newtp->max_window = newtp->snd_wnd;
579
580	if (newtp->rx_opt.tstamp_ok) {
581		newtp->tcp_usec_ts = treq->req_usec_ts;
582		newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
583		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
584		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
585	} else {
586		newtp->tcp_usec_ts = 0;
587		newtp->rx_opt.ts_recent_stamp = 0;
588		newtp->tcp_header_len = sizeof(struct tcphdr);
589	}
590	if (req->num_timeout) {
591		newtp->total_rto = req->num_timeout;
592		newtp->undo_marker = treq->snt_isn;
593		if (newtp->tcp_usec_ts) {
594			newtp->retrans_stamp = treq->snt_synack;
595			newtp->total_rto_time = (u32)(tcp_clock_us() -
596						      newtp->retrans_stamp) / USEC_PER_MSEC;
597		} else {
598			newtp->retrans_stamp = div_u64(treq->snt_synack,
599						       USEC_PER_SEC / TCP_TS_HZ);
600			newtp->total_rto_time = tcp_clock_ms() -
601						newtp->retrans_stamp;
602		}
603		newtp->total_rto_recoveries = 1;
604	}
605	newtp->tsoffset = treq->ts_off;
606#ifdef CONFIG_TCP_MD5SIG
607	newtp->md5sig_info = NULL;	/*XXX*/
608#endif
609#ifdef CONFIG_TCP_AO
610	newtp->ao_info = NULL;
611	ao_key = treq->af_specific->ao_lookup(sk, req,
612				tcp_rsk(req)->ao_keyid, -1);
613	if (ao_key)
614		newtp->tcp_header_len += tcp_ao_len_aligned(ao_key);
615#endif
616	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
617		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
618	newtp->rx_opt.mss_clamp = req->mss;
619	tcp_ecn_openreq_child(newtp, req);
620	newtp->fastopen_req = NULL;
621	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);
622
623	newtp->bpf_chg_cc_inprogress = 0;
624	tcp_bpf_clone(sk, newsk);
625
626	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
627
628	return newsk;
629}
630EXPORT_SYMBOL(tcp_create_openreq_child);
631
632/*
633 * Process an incoming packet for SYN_RECV sockets represented as a
634 * request_sock. Normally sk is the listener socket but for TFO it
635 * points to the child socket.
636 *
637 * XXX (TFO) - The current impl contains a special check for ack
638 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
639 *
640 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
641 *
642 * Note: If @fastopen is true, this can be called from process context.
643 *       Otherwise, this is from BH context.
644 */
645
646struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
647			   struct request_sock *req,
648			   bool fastopen, bool *req_stolen)
649{
650	struct tcp_options_received tmp_opt;
651	struct sock *child;
652	const struct tcphdr *th = tcp_hdr(skb);
653	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
654	bool paws_reject = false;
655	bool own_req;
656
657	tmp_opt.saw_tstamp = 0;
658	if (th->doff > (sizeof(struct tcphdr)>>2)) {
659		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);
660
661		if (tmp_opt.saw_tstamp) {
662			tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
663			if (tmp_opt.rcv_tsecr)
664				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
665			/* We do not store true stamp, but it is not required,
666			 * it can be estimated (approximately)
667			 * from another data.
668			 */
669			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
670			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
671		}
672	}
673
674	/* Check for pure retransmitted SYN. */
675	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
676	    flg == TCP_FLAG_SYN &&
677	    !paws_reject) {
678		/*
679		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
680		 * this case on figure 6 and figure 8, but formal
681		 * protocol description says NOTHING.
682		 * To be more exact, it says that we should send ACK,
683		 * because this segment (at least, if it has no data)
684		 * is out of window.
685		 *
686		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
687		 *  describe SYN-RECV state. All the description
688		 *  is wrong, we cannot believe to it and should
689		 *  rely only on common sense and implementation
690		 *  experience.
691		 *
692		 * Enforce "SYN-ACK" according to figure 8, figure 6
693		 * of RFC793, fixed by RFC1122.
694		 *
695		 * Note that even if there is new data in the SYN packet
696		 * they will be thrown away too.
697		 *
698		 * Reset timer after retransmitting SYNACK, similar to
699		 * the idea of fast retransmit in recovery.
700		 */
701		if (!tcp_oow_rate_limited(sock_net(sk), skb,
702					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
703					  &tcp_rsk(req)->last_oow_ack_time) &&
704
705		    !inet_rtx_syn_ack(sk, req)) {
706			unsigned long expires = jiffies;
707
708			expires += reqsk_timeout(req, TCP_RTO_MAX);
709			if (!fastopen)
710				mod_timer_pending(&req->rsk_timer, expires);
711			else
712				req->rsk_timer.expires = expires;
713		}
714		return NULL;
715	}
716
717	/* Further reproduces section "SEGMENT ARRIVES"
718	   for state SYN-RECEIVED of RFC793.
719	   It is broken; however, it fails only
720	   when SYNs are crossed.
721
722	   You would think that SYN crossing is impossible here, since
723	   we should have a SYN_SENT socket (from connect()) on our end,
724	   but this is not true if the crossed SYNs were sent to both
725	   ends by a malicious third party.  We must defend against this,
726	   and to do that we first verify the ACK (as per RFC793, page
727	   36) and reset if it is invalid.  Is this a true full defense?
728	   To convince ourselves, let us consider a way in which the ACK
729	   test can still pass in this 'malicious crossed SYNs' case.
730	   Malicious sender sends identical SYNs (and thus identical sequence
731	   numbers) to both A and B:
732
733		A: gets SYN, seq=7
734		B: gets SYN, seq=7
735
736	   By our good fortune, both A and B select the same initial
737	   send sequence number of seven :-)
738
739		A: sends SYN|ACK, seq=7, ack_seq=8
740		B: sends SYN|ACK, seq=7, ack_seq=8
741
742	   So we are now A eating this SYN|ACK, ACK test passes.  So
743	   does sequence test, SYN is truncated, and thus we consider
744	   it a bare ACK.
745
746	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
747	   bare ACK.  Otherwise, we create an established connection.  Both
748	   ends (listening sockets) accept the new incoming connection and try
749	   to talk to each other. 8-)
750
751	   Note: This case is both harmless and rare.  The possibility is about the
752	   same as us discovering intelligent life on another planet tomorrow.
753
754	   But generally, we should (the RFC lies!) accept an ACK
755	   on a SYNACK both here and in tcp_rcv_state_process().
756	   tcp_rcv_state_process() does not, hence we do not either.
757
758	   Note that the case is absolutely generic:
759	   we cannot optimize anything here without
760	   violating protocol. All the checks must be made
761	   before attempt to create socket.
762	 */
763
764	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
765	 *                  and the incoming segment acknowledges something not yet
766	 *                  sent (the segment carries an unacceptable ACK) ...
767	 *                  a reset is sent."
768	 *
769	 * Invalid ACK: reset will be sent by listening socket.
770	 * Note that the ACK validity check for a Fast Open socket is done
771	 * elsewhere and is checked directly against the child socket rather
772	 * than req because user data may have been sent out.
773	 */
774	if ((flg & TCP_FLAG_ACK) && !fastopen &&
775	    (TCP_SKB_CB(skb)->ack_seq !=
776	     tcp_rsk(req)->snt_isn + 1))
777		return sk;
778
779	/* Also, it would be not so bad idea to check rcv_tsecr, which
780	 * is essentially ACK extension and too early or too late values
781	 * should cause reset in unsynchronized states.
782	 */
783
784	/* RFC793: "first check sequence number". */
785
786	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
787					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
788		/* Out of window: send ACK and drop. */
789		if (!(flg & TCP_FLAG_RST) &&
790		    !tcp_oow_rate_limited(sock_net(sk), skb,
791					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
792					  &tcp_rsk(req)->last_oow_ack_time))
793			req->rsk_ops->send_ack(sk, skb, req);
794		if (paws_reject)
795			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
796		return NULL;
797	}
798
799	/* In sequence, PAWS is OK. */
800
801	/* TODO: We probably should defer ts_recent change once
802	 * we take ownership of @req.
803	 */
804	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
805		WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);
806
807	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
808		/* Truncate SYN, it is out of window starting
809		   at tcp_rsk(req)->rcv_isn + 1. */
810		flg &= ~TCP_FLAG_SYN;
811	}
812
813	/* RFC793: "second check the RST bit" and
814	 *	   "fourth, check the SYN bit"
815	 */
816	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
817		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
818		goto embryonic_reset;
819	}
820
821	/* ACK sequence verified above, just make sure ACK is
822	 * set.  If ACK not set, just silently drop the packet.
823	 *
824	 * XXX (TFO) - if we ever allow "data after SYN", the
825	 * following check needs to be removed.
826	 */
827	if (!(flg & TCP_FLAG_ACK))
828		return NULL;
829
830	/* For Fast Open no more processing is needed (sk is the
831	 * child socket).
832	 */
833	if (fastopen)
834		return sk;
835
836	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
837	if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&
838	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
839		inet_rsk(req)->acked = 1;
840		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
841		return NULL;
842	}
843
844	/* OK, ACK is valid, create big socket and
845	 * feed this segment to it. It will repeat all
846	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
847	 * ESTABLISHED STATE. If it will be dropped after
848	 * socket is created, wait for troubles.
849	 */
850	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
851							 req, &own_req);
852	if (!child)
853		goto listen_overflow;
854
855	if (own_req && rsk_drop_req(req)) {
856		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
857		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
858		return child;
859	}
860
861	sock_rps_save_rxhash(child, skb);
862	tcp_synack_rtt_meas(child, req);
863	*req_stolen = !own_req;
864	return inet_csk_complete_hashdance(sk, child, req, own_req);
865
866listen_overflow:
867	if (sk != req->rsk_listener)
868		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
869
870	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
871		inet_rsk(req)->acked = 1;
872		return NULL;
873	}
874
875embryonic_reset:
876	if (!(flg & TCP_FLAG_RST)) {
877		/* Received a bad SYN pkt - for TFO We try not to reset
878		 * the local connection unless it's really necessary to
879		 * avoid becoming vulnerable to outside attack aiming at
880		 * resetting legit local connections.
881		 */
882		req->rsk_ops->send_reset(sk, skb);
883	} else if (fastopen) { /* received a valid RST pkt */
884		reqsk_fastopen_remove(sk, req, true);
885		tcp_reset(sk, skb);
886	}
887	if (!fastopen) {
888		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);
889
890		if (unlinked)
891			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
892		*req_stolen = !unlinked;
893	}
894	return NULL;
895}
896EXPORT_SYMBOL(tcp_check_req);
897
898/*
899 * Queue segment on the new socket if the new socket is active,
900 * otherwise we just shortcircuit this and continue with
901 * the new socket.
902 *
903 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
904 * when entering. But other states are possible due to a race condition
905 * where after __inet_lookup_established() fails but before the listener
906 * lock is obtained, other packets cause the same connection to
907 * be created.
908 */
909
910enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
911				       struct sk_buff *skb)
912	__releases(&((child)->sk_lock.slock))
913{
914	enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
915	int state = child->sk_state;
916
917	/* record sk_napi_id and sk_rx_queue_mapping of child. */
918	sk_mark_napi_id_set(child, skb);
919
920	tcp_segs_in(tcp_sk(child), skb);
921	if (!sock_owned_by_user(child)) {
922		reason = tcp_rcv_state_process(child, skb);
923		/* Wakeup parent, send SIGIO */
924		if (state == TCP_SYN_RECV && child->sk_state != state)
925			parent->sk_data_ready(parent);
926	} else {
927		/* Alas, it is possible again, because we do lookup
928		 * in main socket hash table and lock on listening
929		 * socket does not protect us more.
930		 */
931		__sk_add_backlog(child, skb);
932	}
933
934	bh_unlock_sock(child);
935	sock_put(child);
936	return reason;
937}
938EXPORT_SYMBOL(tcp_child_process);
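Editor's note: compared with the v5.4 version, which returned a bare int, tcp_child_process() now propagates an skb_drop_reason so callers can pass precise verdicts to kfree_skb_reason() and drop-monitoring tooling. A minimal model of the consumer side; the two enum values mirror real kernel drop reasons, but the handler itself is a stub:

#include <stdio.h>

enum skb_drop_reason {
	SKB_NOT_DROPPED_YET,		/* packet was consumed, not dropped */
	SKB_DROP_REASON_TCP_FLAGS,	/* one of many concrete reasons */
};

static void rcv_finish(enum skb_drop_reason reason)
{
	if (reason == SKB_NOT_DROPPED_YET)
		puts("skb consumed by the child socket");
	else
		printf("kfree_skb_reason(skb, %d)\n", reason);
}

int main(void)
{
	rcv_finish(SKB_NOT_DROPPED_YET);
	return 0;
}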