net/ipv4/tcp_minisocks.c
v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
  4 *		operating system.  INET is implemented using the  BSD Socket
  5 *		interface as the means of communication with the user level.
  6 *
  7 *		Implementation of the Transmission Control Protocol(TCP).
  8 *
  9 * Authors:	Ross Biro
 10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 11 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 12 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 13 *		Florian La Roche, <flla@stud.uni-sb.de>
 14 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 15 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 16 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 17 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 18 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 19 *		Jorge Cwik, <jorge@laser.satlink.net>
 20 */
 21
 22#include <net/tcp.h>
 23#include <net/xfrm.h>
 24#include <net/busy_poll.h>
 25
 26static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
 27{
 28	if (seq == s_win)
 29		return true;
 30	if (after(end_seq, s_win) && before(seq, e_win))
 31		return true;
 32	return seq == e_win && seq == end_seq;
 33}
 34
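tcp_in_window() accepts a segment if any part of [seq, end_seq] overlaps the receive window [s_win, e_win), with two edge cases: seq == s_win admits zero-window probes, and an empty segment sitting exactly at the right edge is also accepted. A minimal userspace sketch of the same check, with the kernel's wraparound-safe before()/after() comparisons reimplemented locally (illustration only, not part of this file):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe sequence comparisons, in the style of include/net/tcp.h. */
static bool seq_before(uint32_t s1, uint32_t s2) { return (int32_t)(s1 - s2) < 0; }
static bool seq_after(uint32_t s1, uint32_t s2) { return seq_before(s2, s1); }

static bool in_window(uint32_t seq, uint32_t end_seq, uint32_t s_win, uint32_t e_win)
{
	if (seq == s_win)		/* zero-window probe / exact window start */
		return true;
	if (seq_after(end_seq, s_win) && seq_before(seq, e_win))
		return true;		/* some overlap with the window */
	return seq == e_win && seq == end_seq;	/* empty segment at the right edge */
}

int main(void)
{
	/* A segment spanning the 2^32 wrap is still "in window". */
	uint32_t rcv_nxt = 0xfffffff0u, wnd = 0x1000;

	printf("%d\n", in_window(0xfffffff8u, 0x00000008u, rcv_nxt, rcv_nxt + wnd)); /* 1 */
	printf("%d\n", in_window(0x00002000u, 0x00002010u, rcv_nxt, rcv_nxt + wnd)); /* 0 */
	return 0;
}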
 35static enum tcp_tw_status
 36tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
 37				  const struct sk_buff *skb, int mib_idx)
 38{
 39	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
 40
 41	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
 42				  &tcptw->tw_last_oow_ack_time)) {
 43		/* Send ACK. Note, we do not put the bucket,
 44		 * it will be released by caller.
 45		 */
 46		return TCP_TW_ACK;
 47	}
 48
 49	/* We are rate-limiting, so just release the tw sock and drop skb. */
 50	inet_twsk_put(tw);
 51	return TCP_TW_SUCCESS;
 52}
 53
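tcp_oow_rate_limited() is the out-of-window ACK throttle: it returns true when a duplicate ACK was answered recently enough that this one should be suppressed, based on the per-connection tw_last_oow_ack_time and the net.ipv4.tcp_invalid_ratelimit sysctl (500 ms by default, per the sysctl documentation). A stripped-down model of that decision, with the sysctl plumbing and MIB accounting omitted (a sketch, not the kernel's implementation):

#include <stdbool.h>
#include <stdint.h>

/* Allow at most one out-of-window ACK per ratelimit_ms milliseconds,
 * tracking the time of the last one in a per-connection field.
 */
static bool oow_rate_limited(uint32_t now_ms, uint32_t *last_oow_ms,
			     uint32_t ratelimit_ms)
{
	if (*last_oow_ms && now_ms - *last_oow_ms < ratelimit_ms)
		return true;		/* suppress: we answered too recently */
	*last_oow_ms = now_ms;		/* remember when we last answered */
	return false;			/* fine to send the ACK */
}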
 54/*
 55 * * The main purpose of the TIME-WAIT state is to close the connection gracefully
 56 *   when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
 57 *   (and, probably, a tail of data) and one or more of our ACKs are lost.
 58 * * What is the TIME-WAIT timeout? It is associated with the maximal packet
 59 *   lifetime in the internet, which leads to the wrong conclusion that
 60 *   it is set to catch "old duplicate segments" wandering out of their path.
 61 *   That is not quite correct. This timeout is calculated so that it exceeds
 62 *   the maximal retransmission timeout by enough to allow losing one (or more)
 63 *   segments sent by the peer and our ACKs. This time may be calculated from the RTO.
 64 * * When a TIME-WAIT socket receives RST, it means that the other end
 65 *   finally closed and we are allowed to kill TIME-WAIT too.
 66 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 67 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 68 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 69 * * If we invented some more clever way to catch duplicates
 70 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 71 *
 72 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 73 * When you compare it to RFCs, please, read section SEGMENT ARRIVES
 74 * from the very beginning.
 75 *
 76 * NOTE. With recycling (and later with fin-wait-2) TW bucket
 77 * is _not_ stateless. This means that, strictly speaking, we must
 78 * spinlock it. I do not want to! Well, the probability of misbehaviour
 79 * is ridiculously low and, it seems, we could use some mb() tricks
 80 * to avoid misread sequence numbers, states etc.  --ANK
 81 *
 82 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 83 */
 84enum tcp_tw_status
 85tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 86			   const struct tcphdr *th)
 87{
 88	struct tcp_options_received tmp_opt;
 89	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
 90	bool paws_reject = false;
 91
 92	tmp_opt.saw_tstamp = 0;
 93	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
 94		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);
 95
 96		if (tmp_opt.saw_tstamp) {
 97			if (tmp_opt.rcv_tsecr)
 98				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
 99			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
100			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
101			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
102		}
103	}
104
105	if (tw->tw_substate == TCP_FIN_WAIT2) {
106		/* Just repeat all the checks of tcp_rcv_state_process() */
107
108		/* Out of window, send ACK */
109		if (paws_reject ||
110		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
111				   tcptw->tw_rcv_nxt,
112				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
113			return tcp_timewait_check_oow_rate_limit(
114				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);
115
116		if (th->rst)
117			goto kill;
118
119		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
120			return TCP_TW_RST;
121
122		/* Dup ACK? */
123		if (!th->ack ||
124		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
125		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
126			inet_twsk_put(tw);
127			return TCP_TW_SUCCESS;
128		}
129
130		/* New data or FIN. If new data arrive after half-duplex close,
131		 * reset.
132		 */
133		if (!th->fin ||
134		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
135			return TCP_TW_RST;
136
137		/* FIN arrived, enter true time-wait state. */
138		tw->tw_substate	  = TCP_TIME_WAIT;
139		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
140		if (tmp_opt.saw_tstamp) {
141			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
142			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
143		}
144
145		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
146		return TCP_TW_ACK;
147	}
148
149	/*
150	 *	Now real TIME-WAIT state.
151	 *
152	 *	RFC 1122:
153	 *	"When a connection is [...] on TIME-WAIT state [...]
154	 *	[a TCP] MAY accept a new SYN from the remote TCP to
155	 *	reopen the connection directly, if it:
156	 *
157	 *	(1)  assigns its initial sequence number for the new
158	 *	connection to be larger than the largest sequence
159	 *	number it used on the previous connection incarnation,
160	 *	and
161	 *
162	 *	(2)  returns to TIME-WAIT state if the SYN turns out
163	 *	to be an old duplicate".
164	 */
165
166	if (!paws_reject &&
167	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
168	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
169		/* In window segment, it may be only reset or bare ack. */
170
171		if (th->rst) {
172			/* This is TIME_WAIT assassination, in two flavors.
173			 * Oh well... nobody has a sufficient solution to this
174			 * protocol bug yet.
175			 */
176			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
177kill:
178				inet_twsk_deschedule_put(tw);
179				return TCP_TW_SUCCESS;
180			}
181		} else {
182			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
183		}
184
185		if (tmp_opt.saw_tstamp) {
186			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
187			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
188		}
189
190		inet_twsk_put(tw);
191		return TCP_TW_SUCCESS;
192	}
193
194	/* Out of window segment.
195
196	   All the segments are ACKed immediately.
197
198	   The only exception is a new SYN. We accept it, if it is
199	   not an old duplicate and we are not in danger of being killed
200	   by delayed old duplicates. The RFC check (that it carries a
201	   newer sequence number) only works at rates <40Mbit/sec.
202	   However, if PAWS works, it is reliable AND, even more,
203	   we may relax the silly seq space cutoff.
204
205	   RED-PEN: we violate the main RFC requirement: if this SYN turns out
206	   to be an old duplicate (i.e. we receive RST in reply to SYN-ACK),
207	   we must return the socket to time-wait state. That is not good,
208	   but not fatal yet.
209	 */
210
211	if (th->syn && !th->rst && !th->ack && !paws_reject &&
212	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
213	     (tmp_opt.saw_tstamp &&
214	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
215		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
216		if (isn == 0)
217			isn++;
218		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
219		return TCP_TW_SYN;
220	}
221
222	if (paws_reject)
223		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
224
225	if (!th->rst) {
226		/* In this case we must reset the TIMEWAIT timer.
227		 *
228		 * If it is an ACKless SYN it may be both an old duplicate
229		 * and a new good SYN with a random sequence number <rcv_nxt.
230		 * Do not reschedule in the latter case.
231		 */
232		if (paws_reject || th->ack)
233			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
234
235		return tcp_timewait_check_oow_rate_limit(
236			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
237	}
238	inet_twsk_put(tw);
239	return TCP_TW_SUCCESS;
240}
241EXPORT_SYMBOL(tcp_timewait_state_process);
242
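The TCP_TW_SYN branch above realizes RFC 1122 condition (1): the ISN handed to the new incarnation is pushed past anything the old one could have sent, by adding the maximum unscaled window (65535) plus 2 to the old snd_nxt; 0 is then skipped, apparently because tcp_tw_isn == 0 is used to mean "no TIME-WAIT ISN". A small standalone illustration (the starting snd_nxt is a made-up value):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical: the old incarnation ended just below the 2^32 wrap. */
	uint32_t tw_snd_nxt = 0xfffffff0u;
	uint32_t isn = tw_snd_nxt + 65535 + 2;	/* wraps to 0x0000fff1 */

	if (isn == 0)		/* 0 is reserved ("no TIME-WAIT ISN") */
		isn++;

	/* Modulo 2^32, 0x0000fff1 is still "after" 0xfffffff0. */
	printf("new isn = 0x%08x\n", isn);
	return 0;
}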
243static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
244{
245#ifdef CONFIG_TCP_MD5SIG
246	const struct tcp_sock *tp = tcp_sk(sk);
247	struct tcp_md5sig_key *key;
248
249	/*
250	 * The timewait bucket does not have the key DB from the
251	 * sock structure. We just make a quick copy of the
252	 * md5 key being used (if indeed we are using one)
253	 * so the timewait ack generating code has the key.
254	 */
255	tcptw->tw_md5_key = NULL;
256	if (!static_branch_unlikely(&tcp_md5_needed.key))
257		return;
258
259	key = tp->af_specific->md5_lookup(sk, sk);
260	if (key) {
261		tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
262		if (!tcptw->tw_md5_key)
263			return;
264		if (!tcp_alloc_md5sig_pool())
265			goto out_free;
266		if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
267			goto out_free;
268	}
269	return;
270out_free:
271	WARN_ON_ONCE(1);
272	kfree(tcptw->tw_md5_key);
273	tcptw->tw_md5_key = NULL;
274#endif
275}
276
277/*
278 * Move a socket to time-wait or dead fin-wait-2 state.
279 */
280void tcp_time_wait(struct sock *sk, int state, int timeo)
281{
282	const struct inet_connection_sock *icsk = inet_csk(sk);
283	const struct tcp_sock *tp = tcp_sk(sk);
284	struct net *net = sock_net(sk);
285	struct inet_timewait_sock *tw;
286
287	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);
288
289	if (tw) {
290		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
291		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
292		struct inet_sock *inet = inet_sk(sk);
293
294		tw->tw_transparent	= inet->transparent;
295		tw->tw_mark		= sk->sk_mark;
296		tw->tw_priority		= sk->sk_priority;
297		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
298		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
299		tcptw->tw_snd_nxt	= tp->snd_nxt;
300		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
301		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
302		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
303		tcptw->tw_ts_offset	= tp->tsoffset;
304		tcptw->tw_last_oow_ack_time = 0;
305		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
306#if IS_ENABLED(CONFIG_IPV6)
307		if (tw->tw_family == PF_INET6) {
308			struct ipv6_pinfo *np = inet6_sk(sk);
309
310			tw->tw_v6_daddr = sk->sk_v6_daddr;
311			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
312			tw->tw_tclass = np->tclass;
313			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
314			tw->tw_txhash = sk->sk_txhash;
315			tw->tw_ipv6only = sk->sk_ipv6only;
316		}
317#endif
318
319		tcp_time_wait_init(sk, tcptw);
320
321		/* Get the TIME_WAIT timeout firing. */
322		if (timeo < rto)
323			timeo = rto;
324
325		if (state == TCP_TIME_WAIT)
326			timeo = TCP_TIMEWAIT_LEN;
327
328		/* tw_timer is pinned, so we need to make sure BH are disabled
329		 * in following section, otherwise timer handler could run before
330		 * we complete the initialization.
331		 */
332		local_bh_disable();
333		inet_twsk_schedule(tw, timeo);
334		/* Linkage updates.
335		 * Note that access to tw after this point is illegal.
336		 */
337		inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
338		local_bh_enable();
339	} else {
340		/* Sorry, if we're out of memory, just CLOSE this
341		 * socket up.  We've got bigger problems than
342		 * non-graceful socket closings.
343		 */
344		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
345	}
346
347	tcp_update_metrics(sk);
348	tcp_done(sk);
349}
350EXPORT_SYMBOL(tcp_time_wait);
351
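A note on the shift expression above: (icsk_rto << 2) - (icsk_rto >> 1) is 4*RTO - RTO/2, i.e. 3.5*RTO, computed without a multiply. The dead FIN-WAIT-2 timeout is clamped to at least that, giving the peer several chances to retransmit its FIN, while a true TIME-WAIT always waits the full TCP_TIMEWAIT_LEN (60 seconds). The same arithmetic in a standalone sketch (times in milliseconds here; the kernel works in jiffies):

#include <stdio.h>

#define TCP_TIMEWAIT_LEN_MS (60 * 1000)	/* 60 s, in ms for this sketch */

int main(void)
{
	int rto = 200;				/* hypothetical RTO, ms */
	int rto35 = (rto << 2) - (rto >> 1);	/* 4*RTO - RTO/2 = 3.5*RTO = 700 */
	int timeo = 500;			/* caller-requested FIN-WAIT-2 timeout */

	if (timeo < rto35)			/* survive a lost FIN plus lost ACKs */
		timeo = rto35;

	printf("fin-wait-2: %d ms, time-wait: %d ms\n", timeo, TCP_TIMEWAIT_LEN_MS);
	return 0;
}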
352void tcp_twsk_destructor(struct sock *sk)
353{
354#ifdef CONFIG_TCP_MD5SIG
355	if (static_branch_unlikely(&tcp_md5_needed.key)) {
356		struct tcp_timewait_sock *twsk = tcp_twsk(sk);
357
358		if (twsk->tw_md5_key) {
359			kfree_rcu(twsk->tw_md5_key, rcu);
360			static_branch_slow_dec_deferred(&tcp_md5_needed);
361		}
362	}
363#endif
364}
365EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
366
367void tcp_twsk_purge(struct list_head *net_exit_list, int family)
368{
369	bool purged_once = false;
370	struct net *net;
371
372	list_for_each_entry(net, net_exit_list, exit_list) {
373		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
374			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
375			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
376		} else if (!purged_once) {
377			/* The last refcount is decremented in tcp_sk_exit_batch() */
378			if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1)
379				continue;
380
381			inet_twsk_purge(&tcp_hashinfo, family);
382			purged_once = true;
383		}
384	}
385}
386EXPORT_SYMBOL_GPL(tcp_twsk_purge);
387
388/* Warning: This function is called without sk_listener being locked.
389 * Be sure to read socket fields once, as their value could change under us.
390 */
391void tcp_openreq_init_rwin(struct request_sock *req,
392			   const struct sock *sk_listener,
393			   const struct dst_entry *dst)
394{
395	struct inet_request_sock *ireq = inet_rsk(req);
396	const struct tcp_sock *tp = tcp_sk(sk_listener);
397	int full_space = tcp_full_space(sk_listener);
398	u32 window_clamp;
399	__u8 rcv_wscale;
400	u32 rcv_wnd;
401	int mss;
402
403	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
404	window_clamp = READ_ONCE(tp->window_clamp);
405	/* Set this up on the first call only */
406	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);
407
408	/* limit the window selection if the user enforces a smaller rx buffer */
409	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
410	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
411		req->rsk_window_clamp = full_space;
412
413	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
414	if (rcv_wnd == 0)
415		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
416	else if (full_space < rcv_wnd * mss)
417		full_space = rcv_wnd * mss;
418
419	/* tcp_full_space because it is guaranteed to be the first packet */
420	tcp_select_initial_window(sk_listener, full_space,
421		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
422		&req->rsk_rcv_wnd,
423		&req->rsk_window_clamp,
424		ireq->wscale_ok,
425		&rcv_wscale,
426		rcv_wnd);
427	ireq->rcv_wscale = rcv_wscale;
428}
429EXPORT_SYMBOL(tcp_openreq_init_rwin);
430
431static void tcp_ecn_openreq_child(struct tcp_sock *tp,
432				  const struct request_sock *req)
433{
434	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
435}
436
437void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
438{
439	struct inet_connection_sock *icsk = inet_csk(sk);
440	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
441	bool ca_got_dst = false;
442
443	if (ca_key != TCP_CA_UNSPEC) {
444		const struct tcp_congestion_ops *ca;
445
446		rcu_read_lock();
447		ca = tcp_ca_find_key(ca_key);
448		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
449			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
450			icsk->icsk_ca_ops = ca;
451			ca_got_dst = true;
452		}
453		rcu_read_unlock();
454	}
455
456	/* If no valid choice made yet, assign current system default ca. */
457	if (!ca_got_dst &&
458	    (!icsk->icsk_ca_setsockopt ||
459	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
460		tcp_assign_congestion_control(sk);
461
462	tcp_set_ca_state(sk, TCP_CA_Open);
463}
464EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
465
466static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
467				    struct request_sock *req,
468				    struct tcp_sock *newtp)
469{
470#if IS_ENABLED(CONFIG_SMC)
471	struct inet_request_sock *ireq;
472
473	if (static_branch_unlikely(&tcp_have_smc)) {
474		ireq = inet_rsk(req);
475		if (oldtp->syn_smc && !ireq->smc_ok)
476			newtp->syn_smc = 0;
477	}
478#endif
479}
480
481/* This is not only more efficient than what we used to do, it eliminates
482 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
483 *
484 * Actually, we could avoid lots of memory writes here. tp of the listening
485 * socket contains all the necessary default parameters.
486 */
487struct sock *tcp_create_openreq_child(const struct sock *sk,
488				      struct request_sock *req,
489				      struct sk_buff *skb)
490{
491	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
492	const struct inet_request_sock *ireq = inet_rsk(req);
493	struct tcp_request_sock *treq = tcp_rsk(req);
494	struct inet_connection_sock *newicsk;
495	struct tcp_sock *oldtp, *newtp;
496	u32 seq;
497
498	if (!newsk)
499		return NULL;
500
501	newicsk = inet_csk(newsk);
502	newtp = tcp_sk(newsk);
503	oldtp = tcp_sk(sk);
504
505	smc_check_reset_syn_req(oldtp, req, newtp);
506
507	/* Now setup tcp_sock */
508	newtp->pred_flags = 0;
509
510	seq = treq->rcv_isn + 1;
511	newtp->rcv_wup = seq;
512	WRITE_ONCE(newtp->copied_seq, seq);
513	WRITE_ONCE(newtp->rcv_nxt, seq);
514	newtp->segs_in = 1;
515
516	seq = treq->snt_isn + 1;
517	newtp->snd_sml = newtp->snd_una = seq;
518	WRITE_ONCE(newtp->snd_nxt, seq);
519	newtp->snd_up = seq;
520
521	INIT_LIST_HEAD(&newtp->tsq_node);
522	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);
523
524	tcp_init_wl(newtp, treq->rcv_isn);
525
526	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
527	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;
528
529	newtp->lsndtime = tcp_jiffies32;
530	newsk->sk_txhash = treq->txhash;
531	newtp->total_retrans = req->num_retrans;
532
533	tcp_init_xmit_timers(newsk);
534	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);
535
536	if (sock_flag(newsk, SOCK_KEEPOPEN))
537		inet_csk_reset_keepalive_timer(newsk,
538					       keepalive_time_when(newtp));
539
540	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
541	newtp->rx_opt.sack_ok = ireq->sack_ok;
542	newtp->window_clamp = req->rsk_window_clamp;
543	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
544	newtp->rcv_wnd = req->rsk_rcv_wnd;
545	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
546	if (newtp->rx_opt.wscale_ok) {
547		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
548		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
549	} else {
550		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
551		newtp->window_clamp = min(newtp->window_clamp, 65535U);
552	}
553	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
554	newtp->max_window = newtp->snd_wnd;
555
556	if (newtp->rx_opt.tstamp_ok) {
557		newtp->rx_opt.ts_recent = req->ts_recent;
558		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
559		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
560	} else {
561		newtp->rx_opt.ts_recent_stamp = 0;
562		newtp->tcp_header_len = sizeof(struct tcphdr);
563	}
564	if (req->num_timeout) {
565		newtp->undo_marker = treq->snt_isn;
566		newtp->retrans_stamp = div_u64(treq->snt_synack,
567					       USEC_PER_SEC / TCP_TS_HZ);
568	}
569	newtp->tsoffset = treq->ts_off;
570#ifdef CONFIG_TCP_MD5SIG
571	newtp->md5sig_info = NULL;	/*XXX*/
572	if (treq->af_specific->req_md5_lookup(sk, req_to_sk(req)))
573		newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
574#endif
575	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
576		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
577	newtp->rx_opt.mss_clamp = req->mss;
578	tcp_ecn_openreq_child(newtp, req);
579	newtp->fastopen_req = NULL;
580	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);
581
582	newtp->bpf_chg_cc_inprogress = 0;
583	tcp_bpf_clone(sk, newsk);
584
585	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
586
587	return newsk;
588}
589EXPORT_SYMBOL(tcp_create_openreq_child);
590
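One conversion above is easy to misread: treq->snt_synack is stored in microseconds, while retrans_stamp is in TCP timestamp ticks, so div_u64(treq->snt_synack, USEC_PER_SEC / TCP_TS_HZ) divides by the number of microseconds per tick. Assuming the millisecond timestamp clock these kernels use (TCP_TS_HZ == 1000), that is simply a division by 1000:

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC	1000000ULL
#define TCP_TS_HZ	1000ULL		/* assumed: TCP timestamp ticks per second */

int main(void)
{
	uint64_t snt_synack_us = 1234567890ULL;	/* hypothetical SYN-ACK send time, usec */
	uint32_t ticks = (uint32_t)(snt_synack_us / (USEC_PER_SEC / TCP_TS_HZ));

	printf("retrans_stamp = %u ticks (ms resolution)\n", ticks);	/* 1234567 */
	return 0;
}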
591/*
592 * Process an incoming packet for SYN_RECV sockets represented as a
593 * request_sock. Normally sk is the listener socket but for TFO it
594 * points to the child socket.
595 *
596 * XXX (TFO) - The current impl contains a special check for ack
597 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
598 *
599 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
600 */
601
602struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
603			   struct request_sock *req,
604			   bool fastopen, bool *req_stolen)
605{
606	struct tcp_options_received tmp_opt;
607	struct sock *child;
608	const struct tcphdr *th = tcp_hdr(skb);
609	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
610	bool paws_reject = false;
611	bool own_req;
612
613	tmp_opt.saw_tstamp = 0;
614	if (th->doff > (sizeof(struct tcphdr)>>2)) {
615		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);
616
617		if (tmp_opt.saw_tstamp) {
618			tmp_opt.ts_recent = req->ts_recent;
619			if (tmp_opt.rcv_tsecr)
620				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
621			/* We do not store the true stamp, but it is not required;
622			 * it can be estimated (approximately)
623			 * from other data.
624			 */
625			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
626			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
627		}
628	}
629
630	/* Check for pure retransmitted SYN. */
631	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
632	    flg == TCP_FLAG_SYN &&
633	    !paws_reject) {
634		/*
635		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
636		 * this case on figure 6 and figure 8, but formal
637		 * protocol description says NOTHING.
638		 * To be more exact, it says that we should send ACK,
639		 * because this segment (at least, if it has no data)
640		 * is out of window.
641		 *
642		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
643		 *  describe SYN-RECV state. All the description
644	 *  is wrong; we cannot believe it and should
645		 *  rely only on common sense and implementation
646		 *  experience.
647		 *
648		 * Enforce "SYN-ACK" according to figure 8, figure 6
649		 * of RFC793, fixed by RFC1122.
650		 *
651		 * Note that even if there is new data in the SYN packet
652		 * they will be thrown away too.
653		 *
654		 * Reset timer after retransmitting SYNACK, similar to
655		 * the idea of fast retransmit in recovery.
656		 */
657		if (!tcp_oow_rate_limited(sock_net(sk), skb,
658					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
659					  &tcp_rsk(req)->last_oow_ack_time) &&
660
661		    !inet_rtx_syn_ack(sk, req)) {
662			unsigned long expires = jiffies;
663
664			expires += reqsk_timeout(req, TCP_RTO_MAX);
665			if (!fastopen)
666				mod_timer_pending(&req->rsk_timer, expires);
667			else
668				req->rsk_timer.expires = expires;
669		}
670		return NULL;
671	}
672
673	/* This further reproduces the section "SEGMENT ARRIVES"
674	   for state SYN-RECEIVED of RFC793.
675	   It is broken, however: it fails only
676	   when SYNs are crossed.
677
678	   You would think that SYN crossing is impossible here, since
679	   we should have a SYN_SENT socket (from connect()) on our end,
680	   but this is not true if the crossed SYNs were sent to both
681	   ends by a malicious third party.  We must defend against this,
682	   and to do that we first verify the ACK (as per RFC793, page
683	   36) and reset if it is invalid.  Is this a true full defense?
684	   To convince ourselves, let us consider a way in which the ACK
685	   test can still pass in this 'malicious crossed SYNs' case.
686	   Malicious sender sends identical SYNs (and thus identical sequence
687	   numbers) to both A and B:
688
689		A: gets SYN, seq=7
690		B: gets SYN, seq=7
691
692	   By our good fortune, both A and B select the same initial
693	   send sequence number of seven :-)
694
695		A: sends SYN|ACK, seq=7, ack_seq=8
696		B: sends SYN|ACK, seq=7, ack_seq=8
697
698	   So we are now A eating this SYN|ACK, ACK test passes.  So
699	   does sequence test, SYN is truncated, and thus we consider
700	   it a bare ACK.
701
702	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
703	   bare ACK.  Otherwise, we create an established connection.  Both
704	   ends (listening sockets) accept the new incoming connection and try
705	   to talk to each other. 8-)
706
707	   Note: This case is both harmless and rare.  The probability is about the
708	   same as us discovering intelligent life on another planet tomorrow.
709
710	   But generally, we should (the RFC lies!) accept an ACK
711	   from a SYNACK both here and in tcp_rcv_state_process().
712	   tcp_rcv_state_process() does not, hence we do not either.
713
714	   Note that the case is absolutely generic:
715	   we cannot optimize anything here without
716	   violating the protocol. All the checks must be made
717	   before an attempt to create the socket.
718	 */
719
720	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
721	 *                  and the incoming segment acknowledges something not yet
722	 *                  sent (the segment carries an unacceptable ACK) ...
723	 *                  a reset is sent."
724	 *
725	 * Invalid ACK: reset will be sent by listening socket.
726	 * Note that the ACK validity check for a Fast Open socket is done
727	 * elsewhere and is checked directly against the child socket rather
728	 * than req because user data may have been sent out.
729	 */
730	if ((flg & TCP_FLAG_ACK) && !fastopen &&
731	    (TCP_SKB_CB(skb)->ack_seq !=
732	     tcp_rsk(req)->snt_isn + 1))
733		return sk;
734
735	/* Also, it would not be a bad idea to check rcv_tsecr, which
736	 * is essentially ACK extension and too early or too late values
737	 * should cause reset in unsynchronized states.
738	 */
739
740	/* RFC793: "first check sequence number". */
741
742	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
743					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
744		/* Out of window: send ACK and drop. */
745		if (!(flg & TCP_FLAG_RST) &&
746		    !tcp_oow_rate_limited(sock_net(sk), skb,
747					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
748					  &tcp_rsk(req)->last_oow_ack_time))
749			req->rsk_ops->send_ack(sk, skb, req);
750		if (paws_reject)
751			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
752		return NULL;
753	}
754
755	/* In sequence, PAWS is OK. */
756
757	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
758		req->ts_recent = tmp_opt.rcv_tsval;
759
760	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
761		/* Truncate SYN, it is out of window starting
762		   at tcp_rsk(req)->rcv_isn + 1. */
763		flg &= ~TCP_FLAG_SYN;
764	}
765
766	/* RFC793: "second check the RST bit" and
767	 *	   "fourth, check the SYN bit"
768	 */
769	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
770		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
771		goto embryonic_reset;
772	}
773
774	/* ACK sequence verified above, just make sure ACK is
775	 * set.  If ACK not set, just silently drop the packet.
776	 *
777	 * XXX (TFO) - if we ever allow "data after SYN", the
778	 * following check needs to be removed.
779	 */
780	if (!(flg & TCP_FLAG_ACK))
781		return NULL;
782
783	/* For Fast Open no more processing is needed (sk is the
784	 * child socket).
785	 */
786	if (fastopen)
787		return sk;
788
789	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
790	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
791	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
792		inet_rsk(req)->acked = 1;
793		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
794		return NULL;
795	}
796
797	/* OK, ACK is valid, create big socket and
798	 * feed this segment to it. It will repeat all
799	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
800	 * ESTABLISHED STATE. If it will be dropped after
801	 * socket is created, wait for troubles.
802	 */
803	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
804							 req, &own_req);
805	if (!child)
806		goto listen_overflow;
807
808	if (own_req && rsk_drop_req(req)) {
809		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
810		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
811		return child;
812	}
813
814	sock_rps_save_rxhash(child, skb);
815	tcp_synack_rtt_meas(child, req);
816	*req_stolen = !own_req;
817	return inet_csk_complete_hashdance(sk, child, req, own_req);
818
819listen_overflow:
820	if (sk != req->rsk_listener)
821		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
822
823	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
824		inet_rsk(req)->acked = 1;
825		return NULL;
826	}
827
828embryonic_reset:
829	if (!(flg & TCP_FLAG_RST)) {
830		/* Received a bad SYN pkt - for TFO we try not to reset
831		 * the local connection unless it's really necessary to
832		 * avoid becoming vulnerable to outside attack aiming at
833		 * resetting legit local connections.
834		 */
835		req->rsk_ops->send_reset(sk, skb);
836	} else if (fastopen) { /* received a valid RST pkt */
837		reqsk_fastopen_remove(sk, req, true);
838		tcp_reset(sk, skb);
839	}
840	if (!fastopen) {
841		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);
842
843		if (unlinked)
844			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
845		*req_stolen = !unlinked;
846	}
847	return NULL;
848}
849EXPORT_SYMBOL(tcp_check_req);
850
851/*
852 * Queue segment on the new socket if the new socket is active,
852 * otherwise we just short-circuit this and continue with
854 * the new socket.
855 *
856 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
857 * when entering. But other states are possible due to a race condition
858 * where after __inet_lookup_established() fails but before the listener
859 * lock is obtained, other packets cause the same connection to
860 * be created.
861 */
862
863int tcp_child_process(struct sock *parent, struct sock *child,
864		      struct sk_buff *skb)
865	__releases(&((child)->sk_lock.slock))
866{
867	int ret = 0;
868	int state = child->sk_state;
869
870	/* record sk_napi_id and sk_rx_queue_mapping of child. */
871	sk_mark_napi_id_set(child, skb);
872
873	tcp_segs_in(tcp_sk(child), skb);
874	if (!sock_owned_by_user(child)) {
875		ret = tcp_rcv_state_process(child, skb);
876		/* Wakeup parent, send SIGIO */
877		if (state == TCP_SYN_RECV && child->sk_state != state)
878			parent->sk_data_ready(parent);
879	} else {
880		/* Alas, it is possible again, because we do lookup
881		 * in main socket hash table and lock on listening
882		 * socket does not protect us more.
883		 */
884		__sk_add_backlog(child, skb);
885	}
886
887	bh_unlock_sock(child);
888	sock_put(child);
889	return ret;
890}
891EXPORT_SYMBOL(tcp_child_process);
v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
  4 *		operating system.  INET is implemented using the  BSD Socket
  5 *		interface as the means of communication with the user level.
  6 *
  7 *		Implementation of the Transmission Control Protocol(TCP).
  8 *
  9 * Authors:	Ross Biro
 10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 11 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 12 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 13 *		Florian La Roche, <flla@stud.uni-sb.de>
 14 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 15 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 16 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 17 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 18 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 19 *		Jorge Cwik, <jorge@laser.satlink.net>
 20 */
 21
 22#include <net/tcp.h>
 23#include <net/xfrm.h>
 24#include <net/busy_poll.h>
 25
 26static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
 27{
 28	if (seq == s_win)
 29		return true;
 30	if (after(end_seq, s_win) && before(seq, e_win))
 31		return true;
 32	return seq == e_win && seq == end_seq;
 33}
 34
 35static enum tcp_tw_status
 36tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
 37				  const struct sk_buff *skb, int mib_idx)
 38{
 39	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
 40
 41	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
 42				  &tcptw->tw_last_oow_ack_time)) {
 43		/* Send ACK. Note, we do not put the bucket,
 44		 * it will be released by caller.
 45		 */
 46		return TCP_TW_ACK;
 47	}
 48
 49	/* We are rate-limiting, so just release the tw sock and drop skb. */
 50	inet_twsk_put(tw);
 51	return TCP_TW_SUCCESS;
 52}
 53
 54static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq)
 55{
 56#ifdef CONFIG_TCP_AO
 57	struct tcp_ao_info *ao;
 58
 59	ao = rcu_dereference(tcptw->ao_info);
 60	if (unlikely(ao && seq < tcptw->tw_rcv_nxt))
 61		WRITE_ONCE(ao->rcv_sne, ao->rcv_sne + 1);
 62#endif
 63	tcptw->tw_rcv_nxt = seq;
 64}
 65
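This helper is new in v6.8 and supports TCP-AO (RFC 5925): AO MACs cover a 64-bit extended sequence number whose upper half, the SNE, must be incremented each time the 32-bit sequence space wraps, and a wrap is detected here by the new rcv_nxt comparing numerically smaller than the previous one. A minimal standalone model of that bookkeeping (illustration only, not part of this file):

#include <stdint.h>

struct sne_state {
	uint32_t rcv_nxt;	/* last accepted 32-bit receive sequence */
	uint32_t rcv_sne;	/* upper 32 bits of the AO extended sequence number */
};

/* Advance rcv_nxt, bumping the SNE whenever the 32-bit space wraps. */
static void sne_rcv_nxt_update(struct sne_state *s, uint32_t new_seq)
{
	if (new_seq < s->rcv_nxt)	/* e.g. 0xffffff00 -> 0x00000010: wrapped */
		s->rcv_sne++;
	s->rcv_nxt = new_seq;
}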
 66/*
 67 * * The main purpose of the TIME-WAIT state is to close the connection gracefully
 68 *   when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
 69 *   (and, probably, a tail of data) and one or more of our ACKs are lost.
 70 * * What is the TIME-WAIT timeout? It is associated with the maximal packet
 71 *   lifetime in the internet, which leads to the wrong conclusion that
 72 *   it is set to catch "old duplicate segments" wandering out of their path.
 73 *   That is not quite correct. This timeout is calculated so that it exceeds
 74 *   the maximal retransmission timeout by enough to allow losing one (or more)
 75 *   segments sent by the peer and our ACKs. This time may be calculated from the RTO.
 76 * * When a TIME-WAIT socket receives RST, it means that the other end
 77 *   finally closed and we are allowed to kill TIME-WAIT too.
 78 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 79 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 80 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 81 * * If we invented some more clever way to catch duplicates
 82 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 83 *
 84 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 85 * When you compare it to RFCs, please, read section SEGMENT ARRIVES
 86 * from the very beginning.
 87 *
 88 * NOTE. With recycling (and later with fin-wait-2) TW bucket
 89 * is _not_ stateless. This means that, strictly speaking, we must
 90 * spinlock it. I do not want to! Well, the probability of misbehaviour
 91 * is ridiculously low and, it seems, we could use some mb() tricks
 92 * to avoid misread sequence numbers, states etc.  --ANK
 93 *
 94 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 95 */
 96enum tcp_tw_status
 97tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 98			   const struct tcphdr *th)
 99{
100	struct tcp_options_received tmp_opt;
101	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
102	bool paws_reject = false;
103
104	tmp_opt.saw_tstamp = 0;
105	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
106		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);
107
108		if (tmp_opt.saw_tstamp) {
109			if (tmp_opt.rcv_tsecr)
110				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
111			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
112			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
113			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
114		}
115	}
116
117	if (tw->tw_substate == TCP_FIN_WAIT2) {
118		/* Just repeat all the checks of tcp_rcv_state_process() */
119
120		/* Out of window, send ACK */
121		if (paws_reject ||
122		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
123				   tcptw->tw_rcv_nxt,
124				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
125			return tcp_timewait_check_oow_rate_limit(
126				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);
127
128		if (th->rst)
129			goto kill;
130
131		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
132			return TCP_TW_RST;
133
134		/* Dup ACK? */
135		if (!th->ack ||
136		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
137		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
138			inet_twsk_put(tw);
139			return TCP_TW_SUCCESS;
140		}
141
142		/* New data or FIN. If new data arrive after half-duplex close,
143		 * reset.
144		 */
145		if (!th->fin ||
146		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
147			return TCP_TW_RST;
148
149		/* FIN arrived, enter true time-wait state. */
150		tw->tw_substate	  = TCP_TIME_WAIT;
151		twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq);
152
153		if (tmp_opt.saw_tstamp) {
154			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
155			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
156		}
157
158		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
159		return TCP_TW_ACK;
160	}
161
162	/*
163	 *	Now real TIME-WAIT state.
164	 *
165	 *	RFC 1122:
166	 *	"When a connection is [...] on TIME-WAIT state [...]
167	 *	[a TCP] MAY accept a new SYN from the remote TCP to
168	 *	reopen the connection directly, if it:
169	 *
170	 *	(1)  assigns its initial sequence number for the new
171	 *	connection to be larger than the largest sequence
172	 *	number it used on the previous connection incarnation,
173	 *	and
174	 *
175	 *	(2)  returns to TIME-WAIT state if the SYN turns out
176	 *	to be an old duplicate".
177	 */
178
179	if (!paws_reject &&
180	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
181	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
182		/* In window segment, it may be only reset or bare ack. */
183
184		if (th->rst) {
185			/* This is TIME_WAIT assassination, in two flavors.
186			 * Oh well... nobody has a sufficient solution to this
187			 * protocol bug yet.
188			 */
189			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
190kill:
191				inet_twsk_deschedule_put(tw);
192				return TCP_TW_SUCCESS;
193			}
194		} else {
195			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
196		}
197
198		if (tmp_opt.saw_tstamp) {
199			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
200			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
201		}
202
203		inet_twsk_put(tw);
204		return TCP_TW_SUCCESS;
205	}
206
207	/* Out of window segment.
208
209	   All the segments are ACKed immediately.
210
211	   The only exception is a new SYN. We accept it, if it is
212	   not an old duplicate and we are not in danger of being killed
213	   by delayed old duplicates. The RFC check (that it carries a
214	   newer sequence number) only works at rates <40Mbit/sec.
215	   However, if PAWS works, it is reliable AND, even more,
216	   we may relax the silly seq space cutoff.
217
218	   RED-PEN: we violate the main RFC requirement: if this SYN turns out
219	   to be an old duplicate (i.e. we receive RST in reply to SYN-ACK),
220	   we must return the socket to time-wait state. That is not good,
221	   but not fatal yet.
222	 */
223
224	if (th->syn && !th->rst && !th->ack && !paws_reject &&
225	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
226	     (tmp_opt.saw_tstamp &&
227	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
228		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
229		if (isn == 0)
230			isn++;
231		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
232		return TCP_TW_SYN;
233	}
234
235	if (paws_reject)
236		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
237
238	if (!th->rst) {
239		/* In this case we must reset the TIMEWAIT timer.
240		 *
241		 * If it is an ACKless SYN it may be both an old duplicate
242		 * and a new good SYN with a random sequence number <rcv_nxt.
243		 * Do not reschedule in the latter case.
244		 */
245		if (paws_reject || th->ack)
246			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
247
248		return tcp_timewait_check_oow_rate_limit(
249			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
250	}
251	inet_twsk_put(tw);
252	return TCP_TW_SUCCESS;
253}
254EXPORT_SYMBOL(tcp_timewait_state_process);
255
256static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
257{
258#ifdef CONFIG_TCP_MD5SIG
259	const struct tcp_sock *tp = tcp_sk(sk);
260	struct tcp_md5sig_key *key;
261
262	/*
263	 * The timewait bucket does not have the key DB from the
264	 * sock structure. We just make a quick copy of the
265	 * md5 key being used (if indeed we are using one)
266	 * so the timewait ack generating code has the key.
267	 */
268	tcptw->tw_md5_key = NULL;
269	if (!static_branch_unlikely(&tcp_md5_needed.key))
270		return;
271
272	key = tp->af_specific->md5_lookup(sk, sk);
273	if (key) {
274		tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
275		if (!tcptw->tw_md5_key)
276			return;
277		if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
278			goto out_free;
279		tcp_md5_add_sigpool();
280	}
281	return;
282out_free:
283	WARN_ON_ONCE(1);
284	kfree(tcptw->tw_md5_key);
285	tcptw->tw_md5_key = NULL;
286#endif
287}
288
289/*
290 * Move a socket to time-wait or dead fin-wait-2 state.
291 */
292void tcp_time_wait(struct sock *sk, int state, int timeo)
293{
294	const struct inet_connection_sock *icsk = inet_csk(sk);
295	struct tcp_sock *tp = tcp_sk(sk);
296	struct net *net = sock_net(sk);
297	struct inet_timewait_sock *tw;
298
299	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);
300
301	if (tw) {
302		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
303		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
304
305		tw->tw_transparent	= inet_test_bit(TRANSPARENT, sk);
306		tw->tw_mark		= sk->sk_mark;
307		tw->tw_priority		= READ_ONCE(sk->sk_priority);
308		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
309		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
310		tcptw->tw_snd_nxt	= tp->snd_nxt;
311		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
312		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
313		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
314		tcptw->tw_ts_offset	= tp->tsoffset;
315		tw->tw_usec_ts		= tp->tcp_usec_ts;
316		tcptw->tw_last_oow_ack_time = 0;
317		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
318		tw->tw_txhash		= sk->sk_txhash;
319#if IS_ENABLED(CONFIG_IPV6)
320		if (tw->tw_family == PF_INET6) {
321			struct ipv6_pinfo *np = inet6_sk(sk);
322
323			tw->tw_v6_daddr = sk->sk_v6_daddr;
324			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
325			tw->tw_tclass = np->tclass;
326			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
327			tw->tw_ipv6only = sk->sk_ipv6only;
328		}
329#endif
330
331		tcp_time_wait_init(sk, tcptw);
332		tcp_ao_time_wait(tcptw, tp);
333
334		/* Get the TIME_WAIT timeout firing. */
335		if (timeo < rto)
336			timeo = rto;
337
338		if (state == TCP_TIME_WAIT)
339			timeo = TCP_TIMEWAIT_LEN;
340
341		/* tw_timer is pinned, so we need to make sure BH are disabled
342		 * in following section, otherwise timer handler could run before
343		 * we complete the initialization.
344		 */
345		local_bh_disable();
346		inet_twsk_schedule(tw, timeo);
347		/* Linkage updates.
348		 * Note that access to tw after this point is illegal.
349		 */
350		inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
351		local_bh_enable();
352	} else {
353		/* Sorry, if we're out of memory, just CLOSE this
354		 * socket up.  We've got bigger problems than
355		 * non-graceful socket closings.
356		 */
357		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
358	}
359
360	tcp_update_metrics(sk);
361	tcp_done(sk);
362}
363EXPORT_SYMBOL(tcp_time_wait);
364
365#ifdef CONFIG_TCP_MD5SIG
366static void tcp_md5_twsk_free_rcu(struct rcu_head *head)
367{
368	struct tcp_md5sig_key *key;
369
370	key = container_of(head, struct tcp_md5sig_key, rcu);
371	kfree(key);
372	static_branch_slow_dec_deferred(&tcp_md5_needed);
373	tcp_md5_release_sigpool();
374}
375#endif
376
377void tcp_twsk_destructor(struct sock *sk)
378{
379#ifdef CONFIG_TCP_MD5SIG
380	if (static_branch_unlikely(&tcp_md5_needed.key)) {
381		struct tcp_timewait_sock *twsk = tcp_twsk(sk);
382
383		if (twsk->tw_md5_key)
384			call_rcu(&twsk->tw_md5_key->rcu, tcp_md5_twsk_free_rcu);
385	}
386#endif
387	tcp_ao_destroy_sock(sk, true);
388}
389EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
390
391void tcp_twsk_purge(struct list_head *net_exit_list, int family)
392{
393	bool purged_once = false;
394	struct net *net;
395
396	list_for_each_entry(net, net_exit_list, exit_list) {
397		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
398			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
399			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
400		} else if (!purged_once) {
401			/* The last refcount is decremented in tcp_sk_exit_batch() */
402			if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1)
403				continue;
404
405			inet_twsk_purge(&tcp_hashinfo, family);
406			purged_once = true;
407		}
408	}
409}
410EXPORT_SYMBOL_GPL(tcp_twsk_purge);
411
412/* Warning: This function is called without sk_listener being locked.
413 * Be sure to read socket fields once, as their value could change under us.
414 */
415void tcp_openreq_init_rwin(struct request_sock *req,
416			   const struct sock *sk_listener,
417			   const struct dst_entry *dst)
418{
419	struct inet_request_sock *ireq = inet_rsk(req);
420	const struct tcp_sock *tp = tcp_sk(sk_listener);
421	int full_space = tcp_full_space(sk_listener);
422	u32 window_clamp;
423	__u8 rcv_wscale;
424	u32 rcv_wnd;
425	int mss;
426
427	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
428	window_clamp = READ_ONCE(tp->window_clamp);
429	/* Set this up on the first call only */
430	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);
431
432	/* limit the window selection if the user enforces a smaller rx buffer */
433	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
434	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
435		req->rsk_window_clamp = full_space;
436
437	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
438	if (rcv_wnd == 0)
439		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
440	else if (full_space < rcv_wnd * mss)
441		full_space = rcv_wnd * mss;
442
443	/* tcp_full_space because it is guaranteed to be the first packet */
444	tcp_select_initial_window(sk_listener, full_space,
445		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
446		&req->rsk_rcv_wnd,
447		&req->rsk_window_clamp,
448		ireq->wscale_ok,
449		&rcv_wscale,
450		rcv_wnd);
451	ireq->rcv_wscale = rcv_wscale;
452}
453EXPORT_SYMBOL(tcp_openreq_init_rwin);
454
455static void tcp_ecn_openreq_child(struct tcp_sock *tp,
456				  const struct request_sock *req)
457{
458	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
459}
460
461void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
462{
463	struct inet_connection_sock *icsk = inet_csk(sk);
464	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
465	bool ca_got_dst = false;
466
467	if (ca_key != TCP_CA_UNSPEC) {
468		const struct tcp_congestion_ops *ca;
469
470		rcu_read_lock();
471		ca = tcp_ca_find_key(ca_key);
472		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
473			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
474			icsk->icsk_ca_ops = ca;
475			ca_got_dst = true;
476		}
477		rcu_read_unlock();
478	}
479
480	/* If no valid choice made yet, assign current system default ca. */
481	if (!ca_got_dst &&
482	    (!icsk->icsk_ca_setsockopt ||
483	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
484		tcp_assign_congestion_control(sk);
485
486	tcp_set_ca_state(sk, TCP_CA_Open);
487}
488EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
489
490static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
491				    struct request_sock *req,
492				    struct tcp_sock *newtp)
493{
494#if IS_ENABLED(CONFIG_SMC)
495	struct inet_request_sock *ireq;
496
497	if (static_branch_unlikely(&tcp_have_smc)) {
498		ireq = inet_rsk(req);
499		if (oldtp->syn_smc && !ireq->smc_ok)
500			newtp->syn_smc = 0;
501	}
502#endif
503}
504
505/* This is not only more efficient than what we used to do, it eliminates
506 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
507 *
508 * Actually, we could avoid lots of memory writes here. tp of the listening
509 * socket contains all the necessary default parameters.
510 */
511struct sock *tcp_create_openreq_child(const struct sock *sk,
512				      struct request_sock *req,
513				      struct sk_buff *skb)
514{
515	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
516	const struct inet_request_sock *ireq = inet_rsk(req);
517	struct tcp_request_sock *treq = tcp_rsk(req);
518	struct inet_connection_sock *newicsk;
519	const struct tcp_sock *oldtp;
520	struct tcp_sock *newtp;
521	u32 seq;
522#ifdef CONFIG_TCP_AO
523	struct tcp_ao_key *ao_key;
524#endif
525
526	if (!newsk)
527		return NULL;
528
529	newicsk = inet_csk(newsk);
530	newtp = tcp_sk(newsk);
531	oldtp = tcp_sk(sk);
532
533	smc_check_reset_syn_req(oldtp, req, newtp);
534
535	/* Now setup tcp_sock */
536	newtp->pred_flags = 0;
537
538	seq = treq->rcv_isn + 1;
539	newtp->rcv_wup = seq;
540	WRITE_ONCE(newtp->copied_seq, seq);
541	WRITE_ONCE(newtp->rcv_nxt, seq);
542	newtp->segs_in = 1;
543
544	seq = treq->snt_isn + 1;
545	newtp->snd_sml = newtp->snd_una = seq;
546	WRITE_ONCE(newtp->snd_nxt, seq);
547	newtp->snd_up = seq;
548
549	INIT_LIST_HEAD(&newtp->tsq_node);
550	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);
551
552	tcp_init_wl(newtp, treq->rcv_isn);
553
554	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
555	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;
556
557	newtp->lsndtime = tcp_jiffies32;
558	newsk->sk_txhash = READ_ONCE(treq->txhash);
559	newtp->total_retrans = req->num_retrans;
560
561	tcp_init_xmit_timers(newsk);
562	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);
563
564	if (sock_flag(newsk, SOCK_KEEPOPEN))
565		inet_csk_reset_keepalive_timer(newsk,
566					       keepalive_time_when(newtp));
567
568	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
569	newtp->rx_opt.sack_ok = ireq->sack_ok;
570	newtp->window_clamp = req->rsk_window_clamp;
571	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
572	newtp->rcv_wnd = req->rsk_rcv_wnd;
573	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
574	if (newtp->rx_opt.wscale_ok) {
575		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
576		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
577	} else {
578		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
579		newtp->window_clamp = min(newtp->window_clamp, 65535U);
580	}
581	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
582	newtp->max_window = newtp->snd_wnd;
583
584	if (newtp->rx_opt.tstamp_ok) {
585		newtp->tcp_usec_ts = treq->req_usec_ts;
586		newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
587		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
588		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
589	} else {
590		newtp->tcp_usec_ts = 0;
591		newtp->rx_opt.ts_recent_stamp = 0;
592		newtp->tcp_header_len = sizeof(struct tcphdr);
593	}
594	if (req->num_timeout) {
595		newtp->total_rto = req->num_timeout;
596		newtp->undo_marker = treq->snt_isn;
597		if (newtp->tcp_usec_ts) {
598			newtp->retrans_stamp = treq->snt_synack;
599			newtp->total_rto_time = (u32)(tcp_clock_us() -
600						      newtp->retrans_stamp) / USEC_PER_MSEC;
601		} else {
602			newtp->retrans_stamp = div_u64(treq->snt_synack,
603						       USEC_PER_SEC / TCP_TS_HZ);
604			newtp->total_rto_time = tcp_clock_ms() -
605						newtp->retrans_stamp;
606		}
607		newtp->total_rto_recoveries = 1;
608	}
609	newtp->tsoffset = treq->ts_off;
610#ifdef CONFIG_TCP_MD5SIG
611	newtp->md5sig_info = NULL;	/*XXX*/
612#endif
613#ifdef CONFIG_TCP_AO
614	newtp->ao_info = NULL;
615	ao_key = treq->af_specific->ao_lookup(sk, req,
616				tcp_rsk(req)->ao_keyid, -1);
617	if (ao_key)
618		newtp->tcp_header_len += tcp_ao_len_aligned(ao_key);
619#endif
620	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
621		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
622	newtp->rx_opt.mss_clamp = req->mss;
623	tcp_ecn_openreq_child(newtp, req);
624	newtp->fastopen_req = NULL;
625	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);
626
627	newtp->bpf_chg_cc_inprogress = 0;
628	tcp_bpf_clone(sk, newsk);
629
630	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
631
632	return newsk;
633}
634EXPORT_SYMBOL(tcp_create_openreq_child);
635
636/*
637 * Process an incoming packet for SYN_RECV sockets represented as a
638 * request_sock. Normally sk is the listener socket but for TFO it
639 * points to the child socket.
640 *
641 * XXX (TFO) - The current impl contains a special check for ack
642 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
643 *
644 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
645 *
646 * Note: If @fastopen is true, this can be called from process context.
647 *       Otherwise, this is from BH context.
648 */
649
650struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
651			   struct request_sock *req,
652			   bool fastopen, bool *req_stolen)
653{
654	struct tcp_options_received tmp_opt;
655	struct sock *child;
656	const struct tcphdr *th = tcp_hdr(skb);
657	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
658	bool paws_reject = false;
659	bool own_req;
660
661	tmp_opt.saw_tstamp = 0;
662	if (th->doff > (sizeof(struct tcphdr)>>2)) {
663		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);
664
665		if (tmp_opt.saw_tstamp) {
666			tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
667			if (tmp_opt.rcv_tsecr)
668				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
669			/* We do not store the true stamp, but it is not required;
670			 * it can be estimated (approximately)
671			 * from other data.
672			 */
673			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
674			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
675		}
676	}
677
678	/* Check for pure retransmitted SYN. */
679	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
680	    flg == TCP_FLAG_SYN &&
681	    !paws_reject) {
682		/*
683		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
684		 * this case on figure 6 and figure 8, but formal
685		 * protocol description says NOTHING.
686		 * To be more exact, it says that we should send ACK,
687		 * because this segment (at least, if it has no data)
688		 * is out of window.
689		 *
690		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
691		 *  describe SYN-RECV state. All the description
692	 *  is wrong; we cannot believe it and should
693		 *  rely only on common sense and implementation
694		 *  experience.
695		 *
696		 * Enforce "SYN-ACK" according to figure 8, figure 6
697		 * of RFC793, fixed by RFC1122.
698		 *
699		 * Note that even if there is new data in the SYN packet
700		 * they will be thrown away too.
701		 *
702		 * Reset timer after retransmitting SYNACK, similar to
703		 * the idea of fast retransmit in recovery.
704		 */
705		if (!tcp_oow_rate_limited(sock_net(sk), skb,
706					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
707					  &tcp_rsk(req)->last_oow_ack_time) &&
708
709		    !inet_rtx_syn_ack(sk, req)) {
710			unsigned long expires = jiffies;
711
712			expires += reqsk_timeout(req, TCP_RTO_MAX);
713			if (!fastopen)
714				mod_timer_pending(&req->rsk_timer, expires);
715			else
716				req->rsk_timer.expires = expires;
717		}
718		return NULL;
719	}
720
721	/* This further reproduces the section "SEGMENT ARRIVES"
722	   for state SYN-RECEIVED of RFC793.
723	   It is broken, however: it fails only
724	   when SYNs are crossed.
725
726	   You would think that SYN crossing is impossible here, since
727	   we should have a SYN_SENT socket (from connect()) on our end,
728	   but this is not true if the crossed SYNs were sent to both
729	   ends by a malicious third party.  We must defend against this,
730	   and to do that we first verify the ACK (as per RFC793, page
731	   36) and reset if it is invalid.  Is this a true full defense?
732	   To convince ourselves, let us consider a way in which the ACK
733	   test can still pass in this 'malicious crossed SYNs' case.
734	   Malicious sender sends identical SYNs (and thus identical sequence
735	   numbers) to both A and B:
736
737		A: gets SYN, seq=7
738		B: gets SYN, seq=7
739
740	   By our good fortune, both A and B select the same initial
741	   send sequence number of seven :-)
742
743		A: sends SYN|ACK, seq=7, ack_seq=8
744		B: sends SYN|ACK, seq=7, ack_seq=8
745
746	   So we are now A eating this SYN|ACK, ACK test passes.  So
747	   does sequence test, SYN is truncated, and thus we consider
748	   it a bare ACK.
749
750	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
751	   bare ACK.  Otherwise, we create an established connection.  Both
752	   ends (listening sockets) accept the new incoming connection and try
753	   to talk to each other. 8-)
754
755	   Note: This case is both harmless and rare.  The probability is about the
756	   same as us discovering intelligent life on another planet tomorrow.
757
758	   But generally, we should (the RFC lies!) accept an ACK
759	   from a SYNACK both here and in tcp_rcv_state_process().
760	   tcp_rcv_state_process() does not, hence we do not either.
761
762	   Note that the case is absolutely generic:
763	   we cannot optimize anything here without
764	   violating the protocol. All the checks must be made
765	   before an attempt to create the socket.
766	 */
767
768	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
769	 *                  and the incoming segment acknowledges something not yet
770	 *                  sent (the segment carries an unacceptable ACK) ...
771	 *                  a reset is sent."
772	 *
773	 * Invalid ACK: reset will be sent by listening socket.
774	 * Note that the ACK validity check for a Fast Open socket is done
775	 * elsewhere and is checked directly against the child socket rather
776	 * than req because user data may have been sent out.
777	 */
778	if ((flg & TCP_FLAG_ACK) && !fastopen &&
779	    (TCP_SKB_CB(skb)->ack_seq !=
780	     tcp_rsk(req)->snt_isn + 1))
781		return sk;
782
783	/* Also, it would not be a bad idea to check rcv_tsecr, which
784	 * is essentially ACK extension and too early or too late values
785	 * should cause reset in unsynchronized states.
786	 */
787
788	/* RFC793: "first check sequence number". */
789
790	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
791					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
792		/* Out of window: send ACK and drop. */
793		if (!(flg & TCP_FLAG_RST) &&
794		    !tcp_oow_rate_limited(sock_net(sk), skb,
795					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
796					  &tcp_rsk(req)->last_oow_ack_time))
797			req->rsk_ops->send_ack(sk, skb, req);
798		if (paws_reject)
799			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
800		return NULL;
801	}
802
803	/* In sequence, PAWS is OK. */
804
805	/* TODO: We probably should defer ts_recent change once
806	 * we take ownership of @req.
807	 */
808	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
809		WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);
810
811	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
812		/* Truncate SYN, it is out of window starting
813		   at tcp_rsk(req)->rcv_isn + 1. */
814		flg &= ~TCP_FLAG_SYN;
815	}
816
817	/* RFC793: "second check the RST bit" and
818	 *	   "fourth, check the SYN bit"
819	 */
820	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
821		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
822		goto embryonic_reset;
823	}
824
825	/* ACK sequence verified above, just make sure ACK is
826	 * set.  If ACK not set, just silently drop the packet.
827	 *
828	 * XXX (TFO) - if we ever allow "data after SYN", the
829	 * following check needs to be removed.
830	 */
831	if (!(flg & TCP_FLAG_ACK))
832		return NULL;
833
834	/* For Fast Open no more processing is needed (sk is the
835	 * child socket).
836	 */
837	if (fastopen)
838		return sk;
839
840	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
841	if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&
842	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
843		inet_rsk(req)->acked = 1;
844		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
845		return NULL;
846	}
847
848	/* OK, ACK is valid, create big socket and
849	 * feed this segment to it. It will repeat all
850	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
851	 * ESTABLISHED STATE. If it will be dropped after
852	 * socket is created, wait for troubles.
853	 */
854	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
855							 req, &own_req);
856	if (!child)
857		goto listen_overflow;
858
859	if (own_req && rsk_drop_req(req)) {
860		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
861		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
862		return child;
863	}
864
865	sock_rps_save_rxhash(child, skb);
866	tcp_synack_rtt_meas(child, req);
867	*req_stolen = !own_req;
868	return inet_csk_complete_hashdance(sk, child, req, own_req);
869
870listen_overflow:
871	if (sk != req->rsk_listener)
872		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
873
874	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
875		inet_rsk(req)->acked = 1;
876		return NULL;
877	}
878
879embryonic_reset:
880	if (!(flg & TCP_FLAG_RST)) {
881		/* Received a bad SYN pkt - for TFO we try not to reset
882		 * the local connection unless it's really necessary to
883		 * avoid becoming vulnerable to outside attack aiming at
884		 * resetting legit local connections.
885		 */
886		req->rsk_ops->send_reset(sk, skb);
887	} else if (fastopen) { /* received a valid RST pkt */
888		reqsk_fastopen_remove(sk, req, true);
889		tcp_reset(sk, skb);
890	}
891	if (!fastopen) {
892		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);
893
894		if (unlinked)
895			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
896		*req_stolen = !unlinked;
897	}
898	return NULL;
899}
900EXPORT_SYMBOL(tcp_check_req);
901
902/*
903 * Queue segment on the new socket if the new socket is active,
904 * otherwise we just short-circuit this and continue with
905 * the new socket.
906 *
907 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
908 * when entering. But other states are possible due to a race condition
909 * where after __inet_lookup_established() fails but before the listener
910 * lock is obtained, other packets cause the same connection to
911 * be created.
912 */
913
914int tcp_child_process(struct sock *parent, struct sock *child,
915		      struct sk_buff *skb)
916	__releases(&((child)->sk_lock.slock))
917{
918	int ret = 0;
919	int state = child->sk_state;
920
921	/* record sk_napi_id and sk_rx_queue_mapping of child. */
922	sk_mark_napi_id_set(child, skb);
923
924	tcp_segs_in(tcp_sk(child), skb);
925	if (!sock_owned_by_user(child)) {
926		ret = tcp_rcv_state_process(child, skb);
927		/* Wakeup parent, send SIGIO */
928		if (state == TCP_SYN_RECV && child->sk_state != state)
929			parent->sk_data_ready(parent);
930	} else {
931		/* Alas, it is possible again, because we do lookup
932		 * in main socket hash table and lock on listening
933		 * socket does not protect us more.
934		 */
935		__sk_add_backlog(child, skb);
936	}
937
938	bh_unlock_sock(child);
939	sock_put(child);
940	return ret;
941}
942EXPORT_SYMBOL(tcp_child_process);