v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>
#include <net/rstreason.h>

static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}
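
/* Editor's note: an illustrative aside, not part of the original file.
 * tcp_in_window() accepts a segment when any part of [seq, end_seq]
 * overlaps the receive window [s_win, e_win), relying on the wrap-safe
 * before()/after() comparisons from <net/tcp.h>. A minimal userspace
 * sketch of the same modular arithmetic, assuming the kernel's
 * definitions of before()/after():
 */
#if 0	/* standalone sketch, compiled out */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* wrap-safe sequence comparisons, as defined in <net/tcp.h> */
static bool before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static bool after(uint32_t a, uint32_t b) { return before(b, a); }

int main(void)
{
	/* a 200-byte window that straddles the 2^32 wrap point */
	uint32_t s_win = 0xffffff9cu, e_win = s_win + 200;

	/* seq 50 is numerically tiny yet logically inside the window */
	printf("%d\n", after(50 + 10, s_win) && before(50, e_win)); /* 1 */
	return 0;
}
#endif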

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq,
				u32 rcv_nxt)
{
#ifdef CONFIG_TCP_AO
	struct tcp_ao_info *ao;

	ao = rcu_dereference(tcptw->ao_info);
	if (unlikely(ao && seq < rcv_nxt))
		WRITE_ONCE(ao->rcv_sne, ao->rcv_sne + 1);
#endif
	WRITE_ONCE(tcptw->tw_rcv_nxt, seq);
}
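
/* Editor's note: an illustrative aside, not part of the original file.
 * With TCP-AO, the 32-bit sequence space is logically widened by a
 * Sequence Number Extension (SNE, RFC 5925): when rcv_nxt moves to a
 * numerically smaller value, the 32-bit space has wrapped and rcv_sne
 * is bumped, exactly as in twsk_rcv_nxt_update() above. A hedged
 * userspace sketch of the same bookkeeping (names are hypothetical):
 */
#if 0	/* standalone sketch, compiled out */
#include <stdint.h>

struct seq_state { uint32_t sne, rcv_nxt; };

static void advance_rcv_nxt(struct seq_state *st, uint32_t new_seq)
{
	if (new_seq < st->rcv_nxt)	/* wrapped past 2^32 */
		st->sne++;
	st->rcv_nxt = new_seq;
}

/* 64-bit extended sequence number: SNE forms the upper half */
static uint64_t extended_seq(const struct seq_state *st)
{
	return ((uint64_t)st->sne << 32) | st->rcv_nxt;
}
#endif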

/*
 * * The main purpose of the TIME-WAIT state is to close a connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal
 *   packet lifetime in the internet, which leads to the wrong conclusion
 *   that it is set to catch "old duplicate segments" wandering out of
 *   their path. It is not quite correct. This timeout is calculated so
 *   that it exceeds the maximal retransmission timeout by enough to allow
 *   the loss of one (or more) segments sent by the peer and of our ACKs.
 *   This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. That means, strictly speaking, that we must
 * spinlock it. I do not want! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th, u32 *tw_isn)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	u32 rcv_nxt = READ_ONCE(tcptw->tw_rcv_nxt);
	struct tcp_options_received tmp_opt;
	bool paws_reject = false;
	int ts_recent_stamp;

	tmp_opt.saw_tstamp = 0;
	ts_recent_stamp = READ_ONCE(tcptw->tw_ts_recent_stamp);
	if (th->doff > (sizeof(*th) >> 2) && ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= READ_ONCE(tcptw->tw_ts_recent);
			tmp_opt.ts_recent_stamp	= ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (READ_ONCE(tw->tw_substate) == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   rcv_nxt,
				   rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after a half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		WRITE_ONCE(tw->tw_substate, TCP_TIME_WAIT);
		twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq,
				    rcv_nxt);

		if (tmp_opt.saw_tstamp) {
			WRITE_ONCE(tcptw->tw_ts_recent_stamp,
				  ktime_get_seconds());
			WRITE_ONCE(tcptw->tw_ts_recent,
				   tmp_opt.rcv_tsval);
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 *	Now the real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment may only be a reset or a bare ACK. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		} else {
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		}

		if (tmp_opt.saw_tstamp) {
			WRITE_ONCE(tcptw->tw_ts_recent,
				   tmp_opt.rcv_tsval);
			WRITE_ONCE(tcptw->tw_ts_recent_stamp,
				   ktime_get_seconds());
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All such segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check, that it carries a
	   newer sequence number, works at rates <40Mbit/sec.
	   However, if PAWS works, it is reliable AND, even more,
	   we may relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive an RST in reply to
	   our SYN-ACK), we must return the socket to time-wait state.
	   It is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(READ_ONCE(tcptw->tw_ts_recent) - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		*tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
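
/* Editor's note: an illustrative aside, not part of the original file.
 * The paws_reject decision above rests on PAWS (RFC 7323): a segment
 * whose timestamp is older than the last value recorded in ts_recent
 * is presumed to be an old duplicate. A simplified sketch of the core
 * comparison, assuming the kernel's signed-difference idiom and a
 * replay window of one tick; the real tcp_paws_reject() additionally
 * special-cases RSTs and stale (roughly 24-day-old) ts_recent values:
 */
#if 0	/* standalone sketch, compiled out */
#include <stdbool.h>
#include <stdint.h>

#define PAWS_WINDOW 1	/* assumed replay window, in timestamp ticks */

static bool paws_ok(uint32_t ts_recent, uint32_t rcv_tsval)
{
	/* accept unless rcv_tsval is older than ts_recent - PAWS_WINDOW */
	return (int32_t)(ts_recent - rcv_tsval) <= PAWS_WINDOW;
}
#endif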

static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
{
#ifdef CONFIG_TCP_MD5SIG
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;

	/*
	 * The timewait bucket does not have the key DB from the
	 * sock structure. We just make a quick copy of the
	 * md5 key being used (if indeed we are using one)
	 * so the timewait ack generating code has the key.
	 */
	tcptw->tw_md5_key = NULL;
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return;

	key = tp->af_specific->md5_lookup(sk, sk);
	if (key) {
		tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
		if (!tcptw->tw_md5_key)
			return;
		if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
			goto out_free;
		tcp_md5_add_sigpool();
	}
	return;
out_free:
	WARN_ON_ONCE(1);
	kfree(tcptw->tw_md5_key);
	tcptw->tw_md5_key = NULL;
#endif
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_timewait_sock *tw;

	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);

		tw->tw_transparent	= inet_test_bit(TRANSPARENT, sk);
		tw->tw_mark		= sk->sk_mark;
		tw->tw_priority		= READ_ONCE(sk->sk_priority);
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tw->tw_usec_ts		= tp->tcp_usec_ts;
		tcptw->tw_last_oow_ack_time = 0;
		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
		tw->tw_txhash		= sk->sk_txhash;
		tw->tw_tx_queue_mapping = sk->sk_tx_queue_mapping;
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
		tw->tw_rx_queue_mapping = sk->sk_rx_queue_mapping;
#endif
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

		tcp_time_wait_init(sk, tcptw);
		tcp_ao_time_wait(tcptw, tp);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance_schedule(tw, sk, net->ipv4.tcp_death_row.hashinfo, timeo);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);
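
/* Editor's note: an illustrative aside, not part of the original file.
 * The rto constant in tcp_time_wait() is 3.5 * icsk_rto computed with
 * shifts only: (rto << 2) - (rto >> 1) == 4*rto - rto/2. A sketch:
 */
#if 0	/* standalone sketch, compiled out */
#include <stdio.h>

int main(void)
{
	int icsk_rto = 200;	/* assumed RTO of 200 ms */
	int tw_timeout = (icsk_rto << 2) - (icsk_rto >> 1);

	printf("%d\n", tw_timeout);	/* 700 == 3.5 * 200 */
	return 0;
}
#endif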

#ifdef CONFIG_TCP_MD5SIG
static void tcp_md5_twsk_free_rcu(struct rcu_head *head)
{
	struct tcp_md5sig_key *key;

	key = container_of(head, struct tcp_md5sig_key, rcu);
	kfree(key);
	static_branch_slow_dec_deferred(&tcp_md5_needed);
	tcp_md5_release_sigpool();
}
#endif

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed.key)) {
		struct tcp_timewait_sock *twsk = tcp_twsk(sk);

		if (twsk->tw_md5_key)
			call_rcu(&twsk->tw_md5_key->rcu, tcp_md5_twsk_free_rcu);
	}
#endif
	tcp_ao_destroy_sock(sk, true);
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

void tcp_twsk_purge(struct list_head *net_exit_list)
{
	bool purged_once = false;
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo);
		} else if (!purged_once) {
			inet_twsk_purge(&tcp_hashinfo);
			purged_once = true;
		}
	}
}

/* Warning: This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);
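
/* Editor's note: an illustrative aside, not part of the original file.
 * The "window_clamp ? : dst_metric(...)" expression above uses the GNU
 * "a ?: b" extension, which yields a when a is nonzero and b otherwise,
 * so a zero per-socket clamp falls back to the route metric. Equivalent
 * portable form (names and values are hypothetical):
 */
#if 0	/* standalone sketch, compiled out */
static unsigned int pick_clamp(unsigned int sock_clamp, unsigned int route_metric)
{
	/* same as: return sock_clamp ?: route_metric; */
	return sock_clamp ? sock_clamp : route_metric;
}
#endif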

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here: the tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	const struct tcp_sock *oldtp;
	struct tcp_sock *newtp;
	u32 seq;

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now setup tcp_sock */
	newtp->pred_flags = 0;

	seq = treq->rcv_isn + 1;
	newtp->rcv_wup = seq;
	WRITE_ONCE(newtp->copied_seq, seq);
	WRITE_ONCE(newtp->rcv_nxt, seq);
	newtp->segs_in = 1;

	seq = treq->snt_isn + 1;
	newtp->snd_sml = newtp->snd_una = seq;
	WRITE_ONCE(newtp->snd_nxt, seq);
	newtp->snd_up = seq;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = READ_ONCE(treq->txhash);
	newtp->total_retrans = req->num_retrans;

	tcp_init_xmit_timers(newsk);
	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(newsk,
					       keepalive_time_when(newtp));

	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;

	if (newtp->rx_opt.tstamp_ok) {
		newtp->tcp_usec_ts = treq->req_usec_ts;
		newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->tcp_usec_ts = 0;
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
	if (req->num_timeout) {
		newtp->total_rto = req->num_timeout;
		newtp->undo_marker = treq->snt_isn;
		if (newtp->tcp_usec_ts) {
			newtp->retrans_stamp = treq->snt_synack;
			newtp->total_rto_time = (u32)(tcp_clock_us() -
						      newtp->retrans_stamp) / USEC_PER_MSEC;
		} else {
			newtp->retrans_stamp = div_u64(treq->snt_synack,
						       USEC_PER_SEC / TCP_TS_HZ);
			newtp->total_rto_time = tcp_clock_ms() -
						newtp->retrans_stamp;
		}
		newtp->total_rto_recoveries = 1;
	}
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;	/*XXX*/
#endif
#ifdef CONFIG_TCP_AO
	newtp->ao_info = NULL;

	if (tcp_rsk_used_ao(req)) {
		struct tcp_ao_key *ao_key;

		ao_key = treq->af_specific->ao_lookup(sk, req, tcp_rsk(req)->ao_keyid, -1);
		if (ao_key)
			newtp->tcp_header_len += tcp_ao_len_aligned(ao_key);
	}
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

	newtp->bpf_chg_cc_inprogress = 0;
	tcp_bpf_clone(sk, newsk);

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	xa_init_flags(&newsk->sk_user_frags, XA_FLAGS_ALLOC1);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
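
/* Editor's note: an illustrative aside, not part of the original file.
 * The snd_wnd assignment in tcp_create_openreq_child() left-shifts the
 * peer's 16-bit window field by the negotiated send scale (RFC 7323);
 * with the maximum scale of 14 a 64 KB field can advertise close to
 * 1 GiB. A sketch with an assumed scale of 7:
 */
#if 0	/* standalone sketch, compiled out */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t raw_window = 65535;	/* window field from the TCP header */
	int snd_wscale = 7;		/* assumed scale from the handshake */

	printf("%u\n", (uint32_t)raw_window << snd_wscale);	/* 8388480 */
	return 0;
}
#endif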

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 *
 * Note: If @fastopen is true, this can be called from process context.
 *       Otherwise, this is from BH context.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe the SYN-RECV state. All the description
		 *  is wrong; we cannot believe it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting SYNACK, similar to
		 * the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&

		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += reqsk_timeout(req, TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* The following reproduces the section "SEGMENT ARRIVES"
	   for the SYN-RECEIVED state of RFC793.
	   It is broken, however; it fails only
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   The malicious sender sends identical SYNs (and thus identical
	   sequence numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, the ACK test passes.  So
	   does the sequence test, the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK.  Otherwise, we create an established connection.  Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless and rare.  The probability is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating protocol. All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *                  and the incoming segment acknowledges something not yet
	 *                  sent (the segment carries an unacceptable ACK) ...
	 *                  a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt,
					  tcp_rsk(req)->rcv_nxt +
					  tcp_synack_window(req))) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, ACK is valid, create big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it will be dropped after
	 * socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	if (own_req && tmp_opt.saw_tstamp &&
	    !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		tcp_sk(child)->rx_opt.ts_recent = tmp_opt.rcv_tsval;

	if (own_req && rsk_drop_req(req)) {
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
		return child;
	}

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (sk != req->rsk_listener)
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb, SK_RST_REASON_INVALID_SYN);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk, skb);
	}
	if (!fastopen) {
		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

		if (unlinked)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
		*req_stolen = !unlinked;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
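
/* Editor's note: an illustrative aside, not part of the original file.
 * The "unacceptable ACK" test in tcp_check_req() reduces to: in
 * SYN-RECV the only acceptable ack_seq is snt_isn + 1, because our
 * SYN-ACK consumed exactly one sequence number. Sketch:
 */
#if 0	/* standalone sketch, compiled out */
#include <stdbool.h>
#include <stdint.h>

static bool ack_acceptable(uint32_t ack_seq, uint32_t snt_isn)
{
	return ack_seq == snt_isn + 1;	/* anything else draws a reset */
}
#endif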

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
				       struct sk_buff *skb)
	__releases(&((child)->sk_lock.slock))
{
	enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
	int state = child->sk_state;

	/* record sk_napi_id and sk_rx_queue_mapping of child. */
	sk_mark_napi_id_set(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		reason = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do lookup
		 * in main socket hash table and lock on listening
		 * socket does not protect us more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return reason;
}
EXPORT_SYMBOL(tcp_child_process);
v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
  4 *		operating system.  INET is implemented using the  BSD Socket
  5 *		interface as the means of communication with the user level.
  6 *
  7 *		Implementation of the Transmission Control Protocol(TCP).
  8 *
  9 * Authors:	Ross Biro
 10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 11 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 12 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 13 *		Florian La Roche, <flla@stud.uni-sb.de>
 14 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 15 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 16 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 17 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 18 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 19 *		Jorge Cwik, <jorge@laser.satlink.net>
 20 */
 21
 22#include <net/tcp.h>
 23#include <net/xfrm.h>
 24#include <net/busy_poll.h>
 
 25
 26static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
 27{
 28	if (seq == s_win)
 29		return true;
 30	if (after(end_seq, s_win) && before(seq, e_win))
 31		return true;
 32	return seq == e_win && seq == end_seq;
 33}
 34
 35static enum tcp_tw_status
 36tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
 37				  const struct sk_buff *skb, int mib_idx)
 38{
 39	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
 40
 41	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
 42				  &tcptw->tw_last_oow_ack_time)) {
 43		/* Send ACK. Note, we do not put the bucket,
 44		 * it will be released by caller.
 45		 */
 46		return TCP_TW_ACK;
 47	}
 48
 49	/* We are rate-limiting, so just release the tw sock and drop skb. */
 50	inet_twsk_put(tw);
 51	return TCP_TW_SUCCESS;
 52}
 53
 54static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq)
 
 55{
 56#ifdef CONFIG_TCP_AO
 57	struct tcp_ao_info *ao;
 58
 59	ao = rcu_dereference(tcptw->ao_info);
 60	if (unlikely(ao && seq < tcptw->tw_rcv_nxt))
 61		WRITE_ONCE(ao->rcv_sne, ao->rcv_sne + 1);
 62#endif
 63	tcptw->tw_rcv_nxt = seq;
 64}
 65
 66/*
 67 * * Main purpose of TIME-WAIT state is to close connection gracefully,
 68 *   when one of ends sits in LAST-ACK or CLOSING retransmitting FIN
 69 *   (and, probably, tail of data) and one or more our ACKs are lost.
 70 * * What is TIME-WAIT timeout? It is associated with maximal packet
 71 *   lifetime in the internet, which results in wrong conclusion, that
 72 *   it is set to catch "old duplicate segments" wandering out of their path.
 73 *   It is not quite correct. This timeout is calculated so that it exceeds
 74 *   maximal retransmission timeout enough to allow to lose one (or more)
 75 *   segments sent by peer and our ACKs. This time may be calculated from RTO.
 76 * * When TIME-WAIT socket receives RST, it means that another end
 77 *   finally closed and we are allowed to kill TIME-WAIT too.
 78 * * Second purpose of TIME-WAIT is catching old duplicate segments.
 79 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 80 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 81 * * If we invented some more clever way to catch duplicates
 82 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 83 *
 84 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 85 * When you compare it to RFCs, please, read section SEGMENT ARRIVES
 86 * from the very beginning.
 87 *
 88 * NOTE. With recycling (and later with fin-wait-2) TW bucket
 89 * is _not_ stateless. It means, that strictly speaking we must
 90 * spinlock it. I do not want! Well, probability of misbehaviour
 91 * is ridiculously low and, seems, we could use some mb() tricks
 92 * to avoid misread sequence numbers, states etc.  --ANK
 93 *
 94 * We don't need to initialize tmp_out.sack_ok as we don't use the results
 95 */
 96enum tcp_tw_status
 97tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 98			   const struct tcphdr *th)
 99{
 
 
100	struct tcp_options_received tmp_opt;
101	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
102	bool paws_reject = false;
 
103
104	tmp_opt.saw_tstamp = 0;
105	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
 
106		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);
107
108		if (tmp_opt.saw_tstamp) {
109			if (tmp_opt.rcv_tsecr)
110				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
111			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
112			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
113			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
114		}
115	}
116
117	if (tw->tw_substate == TCP_FIN_WAIT2) {
118		/* Just repeat all the checks of tcp_rcv_state_process() */
119
120		/* Out of window, send ACK */
121		if (paws_reject ||
122		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
123				   tcptw->tw_rcv_nxt,
124				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
125			return tcp_timewait_check_oow_rate_limit(
126				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);
127
128		if (th->rst)
129			goto kill;
130
131		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
132			return TCP_TW_RST;
133
134		/* Dup ACK? */
135		if (!th->ack ||
136		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
137		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
138			inet_twsk_put(tw);
139			return TCP_TW_SUCCESS;
140		}
141
142		/* New data or FIN. If new data arrive after half-duplex close,
143		 * reset.
144		 */
145		if (!th->fin ||
146		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
147			return TCP_TW_RST;
148
149		/* FIN arrived, enter true time-wait state. */
150		tw->tw_substate	  = TCP_TIME_WAIT;
151		twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq);
 
152
153		if (tmp_opt.saw_tstamp) {
154			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
155			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
 
 
156		}
157
158		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
159		return TCP_TW_ACK;
160	}
161
162	/*
163	 *	Now real TIME-WAIT state.
164	 *
165	 *	RFC 1122:
166	 *	"When a connection is [...] on TIME-WAIT state [...]
167	 *	[a TCP] MAY accept a new SYN from the remote TCP to
168	 *	reopen the connection directly, if it:
169	 *
170	 *	(1)  assigns its initial sequence number for the new
171	 *	connection to be larger than the largest sequence
172	 *	number it used on the previous connection incarnation,
173	 *	and
174	 *
175	 *	(2)  returns to TIME-WAIT state if the SYN turns out
176	 *	to be an old duplicate".
177	 */
178
179	if (!paws_reject &&
180	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
181	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
182		/* In window segment, it may be only reset or bare ack. */
183
184		if (th->rst) {
185			/* This is TIME_WAIT assassination, in two flavors.
186			 * Oh well... nobody has a sufficient solution to this
187			 * protocol bug yet.
188			 */
189			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
190kill:
191				inet_twsk_deschedule_put(tw);
192				return TCP_TW_SUCCESS;
193			}
194		} else {
195			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
196		}
197
198		if (tmp_opt.saw_tstamp) {
199			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
200			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
 
 
201		}
202
203		inet_twsk_put(tw);
204		return TCP_TW_SUCCESS;
205	}
206
207	/* Out of window segment.
208
209	   All the segments are ACKed immediately.
210
211	   The only exception is new SYN. We accept it, if it is
212	   not old duplicate and we are not in danger to be killed
213	   by delayed old duplicates. RFC check is that it has
214	   newer sequence number works at rates <40Mbit/sec.
215	   However, if paws works, it is reliable AND even more,
216	   we even may relax silly seq space cutoff.
217
218	   RED-PEN: we violate main RFC requirement, if this SYN will appear
219	   old duplicate (i.e. we receive RST in reply to SYN-ACK),
220	   we must return socket to time-wait state. It is not good,
221	   but not fatal yet.
222	 */
223
224	if (th->syn && !th->rst && !th->ack && !paws_reject &&
225	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
226	     (tmp_opt.saw_tstamp &&
227	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
228		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
229		if (isn == 0)
230			isn++;
231		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
232		return TCP_TW_SYN;
233	}
234
235	if (paws_reject)
236		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
237
238	if (!th->rst) {
239		/* In this case we must reset the TIMEWAIT timer.
240		 *
241		 * If it is ACKless SYN it may be both old duplicate
242		 * and new good SYN with random sequence number <rcv_nxt.
243		 * Do not reschedule in the last case.
244		 */
245		if (paws_reject || th->ack)
246			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
247
248		return tcp_timewait_check_oow_rate_limit(
249			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
250	}
251	inet_twsk_put(tw);
252	return TCP_TW_SUCCESS;
253}
254EXPORT_SYMBOL(tcp_timewait_state_process);
255
256static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
257{
258#ifdef CONFIG_TCP_MD5SIG
259	const struct tcp_sock *tp = tcp_sk(sk);
260	struct tcp_md5sig_key *key;
261
262	/*
263	 * The timewait bucket does not have the key DB from the
264	 * sock structure. We just make a quick copy of the
265	 * md5 key being used (if indeed we are using one)
266	 * so the timewait ack generating code has the key.
267	 */
268	tcptw->tw_md5_key = NULL;
269	if (!static_branch_unlikely(&tcp_md5_needed.key))
270		return;
271
272	key = tp->af_specific->md5_lookup(sk, sk);
273	if (key) {
274		tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
275		if (!tcptw->tw_md5_key)
276			return;
277		if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
278			goto out_free;
279		tcp_md5_add_sigpool();
280	}
281	return;
282out_free:
283	WARN_ON_ONCE(1);
284	kfree(tcptw->tw_md5_key);
285	tcptw->tw_md5_key = NULL;
286#endif
287}
288
289/*
290 * Move a socket to time-wait or dead fin-wait-2 state.
291 */
292void tcp_time_wait(struct sock *sk, int state, int timeo)
293{
294	const struct inet_connection_sock *icsk = inet_csk(sk);
295	struct tcp_sock *tp = tcp_sk(sk);
296	struct net *net = sock_net(sk);
297	struct inet_timewait_sock *tw;
298
299	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);
300
301	if (tw) {
302		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
303		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
304
305		tw->tw_transparent	= inet_test_bit(TRANSPARENT, sk);
306		tw->tw_mark		= sk->sk_mark;
307		tw->tw_priority		= READ_ONCE(sk->sk_priority);
308		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
309		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
310		tcptw->tw_snd_nxt	= tp->snd_nxt;
311		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
312		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
313		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
314		tcptw->tw_ts_offset	= tp->tsoffset;
315		tw->tw_usec_ts		= tp->tcp_usec_ts;
316		tcptw->tw_last_oow_ack_time = 0;
317		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
318		tw->tw_txhash		= sk->sk_txhash;
 
 
 
 
319#if IS_ENABLED(CONFIG_IPV6)
320		if (tw->tw_family == PF_INET6) {
321			struct ipv6_pinfo *np = inet6_sk(sk);
322
323			tw->tw_v6_daddr = sk->sk_v6_daddr;
324			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
325			tw->tw_tclass = np->tclass;
326			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
327			tw->tw_ipv6only = sk->sk_ipv6only;
328		}
329#endif
330
331		tcp_time_wait_init(sk, tcptw);
332		tcp_ao_time_wait(tcptw, tp);
333
334		/* Get the TIME_WAIT timeout firing. */
335		if (timeo < rto)
336			timeo = rto;
337
338		if (state == TCP_TIME_WAIT)
339			timeo = TCP_TIMEWAIT_LEN;
340
341		/* tw_timer is pinned, so we need to make sure BH are disabled
342		 * in following section, otherwise timer handler could run before
343		 * we complete the initialization.
344		 */
345		local_bh_disable();
346		inet_twsk_schedule(tw, timeo);
347		/* Linkage updates.
348		 * Note that access to tw after this point is illegal.
349		 */
350		inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
351		local_bh_enable();
352	} else {
353		/* Sorry, if we're out of memory, just CLOSE this
354		 * socket up.  We've got bigger problems than
355		 * non-graceful socket closings.
356		 */
357		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
358	}
359
360	tcp_update_metrics(sk);
361	tcp_done(sk);
362}
363EXPORT_SYMBOL(tcp_time_wait);
364
365#ifdef CONFIG_TCP_MD5SIG
366static void tcp_md5_twsk_free_rcu(struct rcu_head *head)
367{
368	struct tcp_md5sig_key *key;
369
370	key = container_of(head, struct tcp_md5sig_key, rcu);
371	kfree(key);
372	static_branch_slow_dec_deferred(&tcp_md5_needed);
373	tcp_md5_release_sigpool();
374}
375#endif
376
377void tcp_twsk_destructor(struct sock *sk)
378{
379#ifdef CONFIG_TCP_MD5SIG
380	if (static_branch_unlikely(&tcp_md5_needed.key)) {
381		struct tcp_timewait_sock *twsk = tcp_twsk(sk);
382
383		if (twsk->tw_md5_key)
384			call_rcu(&twsk->tw_md5_key->rcu, tcp_md5_twsk_free_rcu);
385	}
386#endif
387	tcp_ao_destroy_sock(sk, true);
388}
389EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
390
391void tcp_twsk_purge(struct list_head *net_exit_list, int family)
392{
393	bool purged_once = false;
394	struct net *net;
395
396	list_for_each_entry(net, net_exit_list, exit_list) {
397		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
398			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
399			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
400		} else if (!purged_once) {
401			/* The last refcount is decremented in tcp_sk_exit_batch() */
402			if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1)
403				continue;
404
405			inet_twsk_purge(&tcp_hashinfo, family);
406			purged_once = true;
407		}
408	}
409}
410EXPORT_SYMBOL_GPL(tcp_twsk_purge);
411
412/* Warning : This function is called without sk_listener being locked.
413 * Be sure to read socket fields once, as their value could change under us.
414 */
415void tcp_openreq_init_rwin(struct request_sock *req,
416			   const struct sock *sk_listener,
417			   const struct dst_entry *dst)
418{
419	struct inet_request_sock *ireq = inet_rsk(req);
420	const struct tcp_sock *tp = tcp_sk(sk_listener);
421	int full_space = tcp_full_space(sk_listener);
422	u32 window_clamp;
423	__u8 rcv_wscale;
424	u32 rcv_wnd;
425	int mss;
426
427	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
428	window_clamp = READ_ONCE(tp->window_clamp);
429	/* Set this up on the first call only */
430	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);
431
432	/* limit the window selection if the user enforce a smaller rx buffer */
433	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
434	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
435		req->rsk_window_clamp = full_space;
436
437	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
438	if (rcv_wnd == 0)
439		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
440	else if (full_space < rcv_wnd * mss)
441		full_space = rcv_wnd * mss;
442
443	/* tcp_full_space because it is guaranteed to be the first packet */
444	tcp_select_initial_window(sk_listener, full_space,
445		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
446		&req->rsk_rcv_wnd,
447		&req->rsk_window_clamp,
448		ireq->wscale_ok,
449		&rcv_wscale,
450		rcv_wnd);
451	ireq->rcv_wscale = rcv_wscale;
452}
453EXPORT_SYMBOL(tcp_openreq_init_rwin);
454
455static void tcp_ecn_openreq_child(struct tcp_sock *tp,
456				  const struct request_sock *req)
457{
458	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
459}
460
461void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
462{
463	struct inet_connection_sock *icsk = inet_csk(sk);
464	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
465	bool ca_got_dst = false;
466
467	if (ca_key != TCP_CA_UNSPEC) {
468		const struct tcp_congestion_ops *ca;
469
470		rcu_read_lock();
471		ca = tcp_ca_find_key(ca_key);
472		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
473			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
474			icsk->icsk_ca_ops = ca;
475			ca_got_dst = true;
476		}
477		rcu_read_unlock();
478	}
479
480	/* If no valid choice made yet, assign current system default ca. */
481	if (!ca_got_dst &&
482	    (!icsk->icsk_ca_setsockopt ||
483	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
484		tcp_assign_congestion_control(sk);
485
486	tcp_set_ca_state(sk, TCP_CA_Open);
487}
488EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
489
490static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
491				    struct request_sock *req,
492				    struct tcp_sock *newtp)
493{
494#if IS_ENABLED(CONFIG_SMC)
495	struct inet_request_sock *ireq;
496
497	if (static_branch_unlikely(&tcp_have_smc)) {
498		ireq = inet_rsk(req);
499		if (oldtp->syn_smc && !ireq->smc_ok)
500			newtp->syn_smc = 0;
501	}
502#endif
503}
504
505/* This is not only more efficient than what we used to do, it eliminates
506 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
507 *
508 * Actually, we could lots of memory writes here. tp of listening
509 * socket contains all necessary default parameters.
510 */
511struct sock *tcp_create_openreq_child(const struct sock *sk,
512				      struct request_sock *req,
513				      struct sk_buff *skb)
514{
515	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
516	const struct inet_request_sock *ireq = inet_rsk(req);
517	struct tcp_request_sock *treq = tcp_rsk(req);
518	struct inet_connection_sock *newicsk;
519	const struct tcp_sock *oldtp;
520	struct tcp_sock *newtp;
521	u32 seq;
522#ifdef CONFIG_TCP_AO
523	struct tcp_ao_key *ao_key;
524#endif
525
526	if (!newsk)
527		return NULL;
528
529	newicsk = inet_csk(newsk);
530	newtp = tcp_sk(newsk);
531	oldtp = tcp_sk(sk);
532
533	smc_check_reset_syn_req(oldtp, req, newtp);
534
535	/* Now setup tcp_sock */
536	newtp->pred_flags = 0;
537
538	seq = treq->rcv_isn + 1;
539	newtp->rcv_wup = seq;
540	WRITE_ONCE(newtp->copied_seq, seq);
541	WRITE_ONCE(newtp->rcv_nxt, seq);
542	newtp->segs_in = 1;
543
544	seq = treq->snt_isn + 1;
545	newtp->snd_sml = newtp->snd_una = seq;
546	WRITE_ONCE(newtp->snd_nxt, seq);
547	newtp->snd_up = seq;
548
549	INIT_LIST_HEAD(&newtp->tsq_node);
550	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);
551
552	tcp_init_wl(newtp, treq->rcv_isn);
553
554	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
555	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;
556
557	newtp->lsndtime = tcp_jiffies32;
558	newsk->sk_txhash = READ_ONCE(treq->txhash);
559	newtp->total_retrans = req->num_retrans;
560
561	tcp_init_xmit_timers(newsk);
562	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);
563
564	if (sock_flag(newsk, SOCK_KEEPOPEN))
565		inet_csk_reset_keepalive_timer(newsk,
566					       keepalive_time_when(newtp));
567
568	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
569	newtp->rx_opt.sack_ok = ireq->sack_ok;
570	newtp->window_clamp = req->rsk_window_clamp;
571	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
572	newtp->rcv_wnd = req->rsk_rcv_wnd;
573	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
574	if (newtp->rx_opt.wscale_ok) {
575		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
576		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
577	} else {
578		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
579		newtp->window_clamp = min(newtp->window_clamp, 65535U);
580	}
581	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
582	newtp->max_window = newtp->snd_wnd;
583
584	if (newtp->rx_opt.tstamp_ok) {
585		newtp->tcp_usec_ts = treq->req_usec_ts;
586		newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
587		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
588		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
589	} else {
590		newtp->tcp_usec_ts = 0;
591		newtp->rx_opt.ts_recent_stamp = 0;
592		newtp->tcp_header_len = sizeof(struct tcphdr);
593	}
594	if (req->num_timeout) {
595		newtp->total_rto = req->num_timeout;
596		newtp->undo_marker = treq->snt_isn;
597		if (newtp->tcp_usec_ts) {
598			newtp->retrans_stamp = treq->snt_synack;
599			newtp->total_rto_time = (u32)(tcp_clock_us() -
600						      newtp->retrans_stamp) / USEC_PER_MSEC;
601		} else {
602			newtp->retrans_stamp = div_u64(treq->snt_synack,
603						       USEC_PER_SEC / TCP_TS_HZ);
604			newtp->total_rto_time = tcp_clock_ms() -
605						newtp->retrans_stamp;
606		}
607		newtp->total_rto_recoveries = 1;
608	}
609	newtp->tsoffset = treq->ts_off;
610#ifdef CONFIG_TCP_MD5SIG
611	newtp->md5sig_info = NULL;	/*XXX*/
612#endif
613#ifdef CONFIG_TCP_AO
614	newtp->ao_info = NULL;
615	ao_key = treq->af_specific->ao_lookup(sk, req,
616				tcp_rsk(req)->ao_keyid, -1);
617	if (ao_key)
618		newtp->tcp_header_len += tcp_ao_len_aligned(ao_key);
 
 
 
 
619 #endif
620	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
621		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
622	newtp->rx_opt.mss_clamp = req->mss;
623	tcp_ecn_openreq_child(newtp, req);
624	newtp->fastopen_req = NULL;
625	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);
626
627	newtp->bpf_chg_cc_inprogress = 0;
628	tcp_bpf_clone(sk, newsk);
629
630	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
631
 
 
632	return newsk;
633}
634EXPORT_SYMBOL(tcp_create_openreq_child);
635
636/*
637 * Process an incoming packet for SYN_RECV sockets represented as a
638 * request_sock. Normally sk is the listener socket but for TFO it
639 * points to the child socket.
640 *
641 * XXX (TFO) - The current impl contains a special check for ack
642 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
643 *
644 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
645 *
646 * Note: If @fastopen is true, this can be called from process context.
647 *       Otherwise, this is from BH context.
648 */
649
650struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
651			   struct request_sock *req,
652			   bool fastopen, bool *req_stolen)
653{
654	struct tcp_options_received tmp_opt;
655	struct sock *child;
656	const struct tcphdr *th = tcp_hdr(skb);
657	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
658	bool paws_reject = false;
659	bool own_req;
660
661	tmp_opt.saw_tstamp = 0;
662	if (th->doff > (sizeof(struct tcphdr)>>2)) {
663		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);
664
665		if (tmp_opt.saw_tstamp) {
666			tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
667			if (tmp_opt.rcv_tsecr)
668				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
669			/* We do not store true stamp, but it is not required,
670			 * it can be estimated (approximately)
671			 * from another data.
672			 */
673			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
674			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
675		}
676	}
677
678	/* Check for pure retransmitted SYN. */
679	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
680	    flg == TCP_FLAG_SYN &&
681	    !paws_reject) {
682		/*
683		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
684		 * this case on figure 6 and figure 8, but formal
685		 * protocol description says NOTHING.
686		 * To be more exact, it says that we should send ACK,
687		 * because this segment (at least, if it has no data)
688		 * is out of window.
689		 *
690		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
691		 *  describe SYN-RECV state. All the description
692		 *  is wrong, we cannot believe to it and should
693		 *  rely only on common sense and implementation
694		 *  experience.
695		 *
696		 * Enforce "SYN-ACK" according to figure 8, figure 6
697		 * of RFC793, fixed by RFC1122.
698		 *
699		 * Note that even if there is new data in the SYN packet
700		 * they will be thrown away too.
701		 *
702		 * Reset timer after retransmitting SYNACK, similar to
703		 * the idea of fast retransmit in recovery.
704		 */
705		if (!tcp_oow_rate_limited(sock_net(sk), skb,
706					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
707					  &tcp_rsk(req)->last_oow_ack_time) &&
708
709		    !inet_rtx_syn_ack(sk, req)) {
710			unsigned long expires = jiffies;
711
712			expires += reqsk_timeout(req, TCP_RTO_MAX);
713			if (!fastopen)
714				mod_timer_pending(&req->rsk_timer, expires);
715			else
716				req->rsk_timer.expires = expires;
717		}
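		/* A worked example of the timer arithmetic above, assuming
		 * req->timeout started at the usual TCP_TIMEOUT_INIT of 1 s:
		 * after one SYNACK retransmission (num_timeout == 1),
		 * reqsk_timeout() yields min(1 s << 1, TCP_RTO_MAX) = 2 s, so
		 * the request timer is pushed out two seconds from now,
		 * mirroring the exponential SYNACK backoff.
		 */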
718		return NULL;
719	}
720
721	/* What follows reproduces the section "SEGMENT ARRIVES"
722	   for the SYN-RECEIVED state of RFC793.
723	   That description is broken; however, it fails only
724	   when SYNs are crossed.
725
726	   You would think that SYN crossing is impossible here, since
727	   we should have a SYN_SENT socket (from connect()) on our end,
728	   but this is not true if the crossed SYNs were sent to both
729	   ends by a malicious third party.  We must defend against this,
730	   and to do that we first verify the ACK (as per RFC793, page
731	   36) and reset if it is invalid.  Is this a true full defense?
732	   To convince ourselves, let us consider a way in which the ACK
733	   test can still pass in this 'malicious crossed SYNs' case.
734	   Malicious sender sends identical SYNs (and thus identical sequence
735	   numbers) to both A and B:
736
737		A: gets SYN, seq=7
738		B: gets SYN, seq=7
739
740	   By our good fortune, both A and B select the same initial
741	   send sequence number of seven :-)
742
743		A: sends SYN|ACK, seq=7, ack_seq=8
744		B: sends SYN|ACK, seq=7, ack_seq=8
745
746	   So now we are A, eating this SYN|ACK: the ACK test passes.  So
747	   does the sequence test; the SYN is truncated, and thus we consider
748	   it a bare ACK.
749
750	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
751	   bare ACK.  Otherwise, we create an established connection.  Both
752	   ends (listening sockets) accept the new incoming connection and try
753	   to talk to each other. 8-)
754
755	   Note: This case is both harmless and rare.  The probability is about
756	   the same as our discovering intelligent life on another planet tomorrow.
757
758	   But generally, we should (the RFC lies!) accept the ACK
759	   from a SYNACK both here and in tcp_rcv_state_process().
760	   tcp_rcv_state_process() does not, hence we do not either.
761
762	   Note that the case is absolutely generic:
763	   we cannot optimize anything here without
764	   violating the protocol. All the checks must be made
765	   before any attempt to create a socket.
766	 */
767
768	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
769	 *                  and the incoming segment acknowledges something not yet
770	 *                  sent (the segment carries an unacceptable ACK) ...
771	 *                  a reset is sent."
772	 *
773	 * Invalid ACK: reset will be sent by listening socket.
774	 * Note that the ACK validity check for a Fast Open socket is done
775	 * elsewhere and is checked directly against the child socket rather
776	 * than req because user data may have been sent out.
777	 */
778	if ((flg & TCP_FLAG_ACK) && !fastopen &&
779	    (TCP_SKB_CB(skb)->ack_seq !=
780	     tcp_rsk(req)->snt_isn + 1))
781		return sk;
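	/* For illustration: if our SYNACK carried snt_isn == 7 (as in the
	 * crossed-SYN story above), the only acceptable ack_seq here is 8
	 * (snt_isn + 1); anything else is handed back to the listener, which
	 * answers with a reset.
	 */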
782
783	/* Also, it would not be a bad idea to check rcv_tsecr, which
784	 * is essentially an ACK extension; too-early or too-late values
785	 * should cause a reset in unsynchronized states.
786	 */
787
788	/* RFC793: "first check sequence number". */
789
790	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
791					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
792		/* Out of window: send ACK and drop. */
793		if (!(flg & TCP_FLAG_RST) &&
794		    !tcp_oow_rate_limited(sock_net(sk), skb,
795					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
796					  &tcp_rsk(req)->last_oow_ack_time))
797			req->rsk_ops->send_ack(sk, skb, req);
798		if (paws_reject)
799			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
800		return NULL;
801	}
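	/* For illustration, with rcv_nxt == 100 and rsk_rcv_wnd == 1000 the
	 * acceptable window is [100, 1100): a segment with seq == 100, or one
	 * overlapping that range, passes tcp_in_window(); seq == 1500 (or any
	 * PAWS failure) takes the out-of-window branch above and is answered
	 * with at most a rate-limited ACK.
	 */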
802
803	/* In sequence, PAWS is OK. */
804
805	/* TODO: We probably should defer ts_recent change once
806	 * we take ownership of @req.
807	 */
808	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
809		WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);
810
811	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
812		/* Truncate the SYN; it is out of window starting
813		   at tcp_rsk(req)->rcv_isn + 1. */
814		flg &= ~TCP_FLAG_SYN;
815	}
816
817	/* RFC793: "second check the RST bit" and
818	 *	   "fourth, check the SYN bit"
819	 */
820	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
821		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
822		goto embryonic_reset;
823	}
824
825	/* The ACK sequence was verified above; just make sure the ACK
826	 * bit is set.  If it is not set, silently drop the packet.
827	 *
828	 * XXX (TFO) - if we ever allow "data after SYN", the
829	 * following check needs to be removed.
830	 */
831	if (!(flg & TCP_FLAG_ACK))
832		return NULL;
833
834	/* For Fast Open no more processing is needed (sk is the
835	 * child socket).
836	 */
837	if (fastopen)
838		return sk;
839
840	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
841	if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&
842	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
843		inet_rsk(req)->acked = 1;
844		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
845		return NULL;
846	}
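	/* For context, a minimal userspace sketch (illustrative, not from this
	 * file) that arms the defer-accept logic above; the kernel converts
	 * the seconds into the retransmission-round count kept in
	 * rskq_defer_accept and compared against req->num_timeout:
	 *
	 *	int secs = 5;
	 *	setsockopt(listen_fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
	 *		   &secs, sizeof(secs));
	 */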
847
848	/* OK, the ACK is valid: create the full socket and
849	 * feed this segment to it. It will repeat all
850	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO THE
851	 * ESTABLISHED STATE. If it is dropped after the
852	 * socket is created, expect trouble.
853	 */
854	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
855							 req, &own_req);
856	if (!child)
857		goto listen_overflow;
858		goto listen_overflow;
859	if (own_req && rsk_drop_req(req)) {
860		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
861		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
862		return child;
863	}
864
865	sock_rps_save_rxhash(child, skb);
866	tcp_synack_rtt_meas(child, req);
867	*req_stolen = !own_req;
868	return inet_csk_complete_hashdance(sk, child, req, own_req);
869
870listen_overflow:
871	if (sk != req->rsk_listener)
872		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
873
874	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
875		inet_rsk(req)->acked = 1;
876		return NULL;
877	}
878
879embryonic_reset:
880	if (!(flg & TCP_FLAG_RST)) {
881		/* Received a bad SYN pkt - for TFO we try not to reset
882		 * the local connection unless it's really necessary, to
883		 * avoid becoming vulnerable to an outside attack aiming at
884		 * resetting legit local connections.
885		 */
886		req->rsk_ops->send_reset(sk, skb);
887	} else if (fastopen) { /* received a valid RST pkt */
888		reqsk_fastopen_remove(sk, req, true);
889		tcp_reset(sk, skb);
890	}
891	if (!fastopen) {
892		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);
893
894		if (unlinked)
895			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
896		*req_stolen = !unlinked;
897	}
898	return NULL;
899}
900EXPORT_SYMBOL(tcp_check_req);
901
902/*
903 * Queue segment on the new socket if the new socket is active,
904 * otherwise we just short-circuit this and continue with
905 * the new socket.
906 *
907 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
908 * when entering. But other states are possible due to a race condition
909 * where, after __inet_lookup_established() fails but before the listener
910 * lock is obtained, other packets cause the same connection to
911 * be created.
912 */
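/* A minimal sketch (illustrative, not from this file) of the typical call
 * site in tcp_v4_do_rcv()/tcp_v6_do_rcv(), once a listener lookup has
 * produced a child socket nsk:
 *
 *	if (nsk != sk) {
 *		if (tcp_child_process(sk, nsk, skb))
 *			goto reset;	(the child asked for a RST)
 *		return 0;
 *	}
 */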
913
914int tcp_child_process(struct sock *parent, struct sock *child,
915		      struct sk_buff *skb)
916	__releases(&((child)->sk_lock.slock))
917{
918	int ret = 0;
919	int state = child->sk_state;
920
921	/* record sk_napi_id and sk_rx_queue_mapping of child. */
922	sk_mark_napi_id_set(child, skb);
923
924	tcp_segs_in(tcp_sk(child), skb);
925	if (!sock_owned_by_user(child)) {
926		ret = tcp_rcv_state_process(child, skb);
927		/* Wakeup parent, send SIGIO */
928		if (state == TCP_SYN_RECV && child->sk_state != state)
929			parent->sk_data_ready(parent);
930	} else {
931		/* Alas, it is possible again, because we do the lookup
932		 * in the main socket hash table and the lock on the
933		 * listening socket no longer protects us.
934		 */
935		__sk_add_backlog(child, skb);
936	}
937
938	bh_unlock_sock(child);
939	sock_put(child);
940	return ret;
941}
942EXPORT_SYMBOL(tcp_child_process);