net/ipv4/tcp_minisocks.c (Linux v6.13.7)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>
#include <net/rstreason.h>

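/* Acceptability test in the spirit of RFC 793's SEGMENT ARRIVES rules:
 * accept a segment starting exactly at the left edge of the window, any
 * segment overlapping (s_win, e_win), or a zero-length segment sitting
 * exactly at the right edge.
 */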
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

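/* Advance tw_rcv_nxt. With TCP-AO, a new value that is numerically smaller
 * than the old one means the 32-bit sequence space has wrapped, so the
 * receive-side Sequence Number Extension (SNE, RFC 5925) is bumped too.
 */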
static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq,
				u32 rcv_nxt)
{
#ifdef CONFIG_TCP_AO
	struct tcp_ao_info *ao;

	ao = rcu_dereference(tcptw->ao_info);
	if (unlikely(ao && seq < rcv_nxt))
		WRITE_ONCE(ao->rcv_sne, ao->rcv_sne + 1);
#endif
	WRITE_ONCE(tcptw->tw_rcv_nxt, seq);
}

/*
 * * The main purpose of TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow for the loss of
 *   one (or more) segments sent by the peer and of our ACKs. This time may
 *   be calculated from the RTO.
 * * When a TIME-WAIT socket receives RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * A second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misread sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
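/* Return contract (as visible below): TCP_TW_SUCCESS means the segment was
 * consumed and the tw reference has been dropped here; TCP_TW_ACK asks the
 * caller to send an ACK, keeping the tw reference for the caller to release;
 * TCP_TW_RST asks the caller to send a reset; TCP_TW_SYN means the SYN may
 * reopen the connection, with *tw_isn suggesting an initial sequence number.
 */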
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th, u32 *tw_isn)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	u32 rcv_nxt = READ_ONCE(tcptw->tw_rcv_nxt);
	struct tcp_options_received tmp_opt;
	bool paws_reject = false;
	int ts_recent_stamp;

	tmp_opt.saw_tstamp = 0;
	ts_recent_stamp = READ_ONCE(tcptw->tw_ts_recent_stamp);
	if (th->doff > (sizeof(*th) >> 2) && ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= READ_ONCE(tcptw->tw_ts_recent);
			tmp_opt.ts_recent_stamp	= ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (READ_ONCE(tw->tw_substate) == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   rcv_nxt,
				   rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		WRITE_ONCE(tw->tw_substate, TCP_TIME_WAIT);
		twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq,
				    rcv_nxt);

		if (tmp_opt.saw_tstamp) {
			WRITE_ONCE(tcptw->tw_ts_recent_stamp,
				  ktime_get_seconds());
			WRITE_ONCE(tcptw->tw_ts_recent,
				   tmp_opt.rcv_tsval);
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		} else {
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		}

		if (tmp_opt.saw_tstamp) {
			WRITE_ONCE(tcptw->tw_ts_recent,
				   tmp_opt.rcv_tsval);
			WRITE_ONCE(tcptw->tw_ts_recent_stamp,
				   ktime_get_seconds());
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All the segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check, that it carries a
	   newer sequence number, works at rates <40Mbit/sec.
	   However, if PAWS works, it is reliable AND, even more,
	   we may even relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive RST in reply to
	   SYN-ACK), we must return the socket to time-wait state. It is
	   not good, but not fatal yet.
	 */

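	/* A new SYN reopening the connection, per the RFC 1122 rule quoted
	 * above: the ISN offered via *tw_isn is pushed past tw_snd_nxt by
	 * more than a maximal unscaled window (65535), so it is larger than
	 * anything the previous incarnation could have used. Zero is skipped,
	 * as a zero tw_isn reads as "no TIME-WAIT ISN" downstream.
	 */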
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(READ_ONCE(tcptw->tw_ts_recent) - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		*tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is ACKless SYN it may be both old duplicate
		 * and new good SYN with random sequence number <rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
{
#ifdef CONFIG_TCP_MD5SIG
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;

	/*
	 * The timewait bucket does not have the key DB from the
	 * sock structure. We just make a quick copy of the
	 * md5 key being used (if indeed we are using one)
	 * so the timewait ack generating code has the key.
	 */
	tcptw->tw_md5_key = NULL;
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return;

	key = tp->af_specific->md5_lookup(sk, sk);
	if (key) {
		tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
		if (!tcptw->tw_md5_key)
			return;
		if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
			goto out_free;
		tcp_md5_add_sigpool();
	}
	return;
out_free:
	WARN_ON_ONCE(1);
	kfree(tcptw->tw_md5_key);
	tcptw->tw_md5_key = NULL;
#endif
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_timewait_sock *tw;

	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
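		/* Minimum timeout: 3.5 * RTO, per the TIME-WAIT commentary
		 * above, enough to survive a retransmitted FIN from the peer
		 * plus the loss of a few of our ACKs.
		 */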
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);

		tw->tw_transparent	= inet_test_bit(TRANSPARENT, sk);
		tw->tw_mark		= sk->sk_mark;
		tw->tw_priority		= READ_ONCE(sk->sk_priority);
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tw->tw_usec_ts		= tp->tcp_usec_ts;
		tcptw->tw_last_oow_ack_time = 0;
		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
		tw->tw_txhash		= sk->sk_txhash;
		tw->tw_tx_queue_mapping = sk->sk_tx_queue_mapping;
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
		tw->tw_rx_queue_mapping = sk->sk_rx_queue_mapping;
#endif
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

		tcp_time_wait_init(sk, tcptw);
		tcp_ao_time_wait(tcptw, tp);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance_schedule(tw, sk, net->ipv4.tcp_death_row.hashinfo, timeo);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);

#ifdef CONFIG_TCP_MD5SIG
static void tcp_md5_twsk_free_rcu(struct rcu_head *head)
{
	struct tcp_md5sig_key *key;

	key = container_of(head, struct tcp_md5sig_key, rcu);
	kfree(key);
	static_branch_slow_dec_deferred(&tcp_md5_needed);
	tcp_md5_release_sigpool();
}
#endif

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed.key)) {
		struct tcp_timewait_sock *twsk = tcp_twsk(sk);

		if (twsk->tw_md5_key)
			call_rcu(&twsk->tw_md5_key->rcu, tcp_md5_twsk_free_rcu);
	}
#endif
	tcp_ao_destroy_sock(sk, true);
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

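/* Called on netns teardown. A netns with its own (pernet) ehash must be
 * purged individually; netns sharing the global tcp_hashinfo only need a
 * single sweep for the whole exit list.
 */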
void tcp_twsk_purge(struct list_head *net_exit_list)
{
	bool purged_once = false;
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo);
		} else if (!purged_once) {
			inet_twsk_purge(&tcp_hashinfo);
			purged_once = true;
		}
	}
}

/* Warning: This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of the listening
 * socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	const struct tcp_sock *oldtp;
	struct tcp_sock *newtp;
	u32 seq;

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now setup tcp_sock */
	newtp->pred_flags = 0;

	seq = treq->rcv_isn + 1;
	newtp->rcv_wup = seq;
	WRITE_ONCE(newtp->copied_seq, seq);
	WRITE_ONCE(newtp->rcv_nxt, seq);
	newtp->segs_in = 1;

	seq = treq->snt_isn + 1;
	newtp->snd_sml = newtp->snd_una = seq;
	WRITE_ONCE(newtp->snd_nxt, seq);
	newtp->snd_up = seq;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = READ_ONCE(treq->txhash);
	newtp->total_retrans = req->num_retrans;

	tcp_init_xmit_timers(newsk);
	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(newsk,
					       keepalive_time_when(newtp));

	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;

	if (newtp->rx_opt.tstamp_ok) {
		newtp->tcp_usec_ts = treq->req_usec_ts;
		newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->tcp_usec_ts = 0;
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
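	/* The SYNACK timer expired at least once: seed the RTO-recovery
	 * bookkeeping on the child. snt_synack is kept in microseconds and
	 * is converted to whichever timestamp clock (usec or msec) the new
	 * socket uses.
	 */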
	if (req->num_timeout) {
		newtp->total_rto = req->num_timeout;
		newtp->undo_marker = treq->snt_isn;
		if (newtp->tcp_usec_ts) {
			newtp->retrans_stamp = treq->snt_synack;
			newtp->total_rto_time = (u32)(tcp_clock_us() -
						      newtp->retrans_stamp) / USEC_PER_MSEC;
		} else {
			newtp->retrans_stamp = div_u64(treq->snt_synack,
						       USEC_PER_SEC / TCP_TS_HZ);
			newtp->total_rto_time = tcp_clock_ms() -
						newtp->retrans_stamp;
		}
		newtp->total_rto_recoveries = 1;
	}
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;	/*XXX*/
#endif
#ifdef CONFIG_TCP_AO
	newtp->ao_info = NULL;

	if (tcp_rsk_used_ao(req)) {
		struct tcp_ao_key *ao_key;

		ao_key = treq->af_specific->ao_lookup(sk, req, tcp_rsk(req)->ao_keyid, -1);
		if (ao_key)
			newtp->tcp_header_len += tcp_ao_len_aligned(ao_key);
	}
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

	newtp->bpf_chg_cc_inprogress = 0;
	tcp_bpf_clone(sk, newsk);

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	xa_init_flags(&newsk->sk_user_frags, XA_FLAGS_ALLOC1);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 *
 * Note: If @fastopen is true, this can be called from process context.
 *       Otherwise, this is from BH context.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
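	/* Mask flg down to the RST/SYN/ACK bits so flag combinations can be
	 * tested in one comparison, e.g. flg == TCP_FLAG_SYN for a pure SYN.
	 */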
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not
			 * required, it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe the SYN-RECV state. All the description
		 *  is wrong, we cannot believe it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting SYNACK, similar to
		 * the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&
		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += reqsk_timeout(req, TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further reproduces the section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken; however, it only fails
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, ACK test passes.  So
	   does the sequence test, SYN is truncated, and thus we consider
	   it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK.  Otherwise, we create an established connection.  Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless and rare.  The possibility is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating protocol. All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *                  and the incoming segment acknowledges something not yet
	 *                  sent (the segment carries an unacceptable ACK) ...
	 *                  a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be such a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt,
					  tcp_rsk(req)->rcv_nxt +
					  tcp_synack_window(req))) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, ACK is valid, create big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it will be dropped after
	 * socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	if (own_req && tmp_opt.saw_tstamp &&
	    !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		tcp_sk(child)->rx_opt.ts_recent = tmp_opt.rcv_tsval;

	if (own_req && rsk_drop_req(req)) {
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
		return child;
	}

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (sk != req->rsk_listener)
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb, SK_RST_REASON_INVALID_SYN);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk, skb);
	}
	if (!fastopen) {
		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

		if (unlinked)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
		*req_stolen = !unlinked;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
				       struct sk_buff *skb)
	__releases(&((child)->sk_lock.slock))
{
	enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
	int state = child->sk_state;

	/* record sk_napi_id and sk_rx_queue_mapping of child. */
	sk_mark_napi_id_set(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		reason = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do lookup
		 * in main socket hash table and lock on listening
		 * socket does not protect us more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return reason;
}
EXPORT_SYMBOL(tcp_child_process);
v6.9.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
  4 *		operating system.  INET is implemented using the  BSD Socket
  5 *		interface as the means of communication with the user level.
  6 *
  7 *		Implementation of the Transmission Control Protocol(TCP).
  8 *
  9 * Authors:	Ross Biro
 10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 11 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 12 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 13 *		Florian La Roche, <flla@stud.uni-sb.de>
 14 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 15 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 16 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 17 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 18 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 19 *		Jorge Cwik, <jorge@laser.satlink.net>
 20 */
 21
 22#include <net/tcp.h>
 23#include <net/xfrm.h>
 24#include <net/busy_poll.h>
 
 25
 26static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
 27{
 28	if (seq == s_win)
 29		return true;
 30	if (after(end_seq, s_win) && before(seq, e_win))
 31		return true;
 32	return seq == e_win && seq == end_seq;
 33}
 34
 35static enum tcp_tw_status
 36tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
 37				  const struct sk_buff *skb, int mib_idx)
 38{
 39	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
 40
 41	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
 42				  &tcptw->tw_last_oow_ack_time)) {
 43		/* Send ACK. Note, we do not put the bucket,
 44		 * it will be released by caller.
 45		 */
 46		return TCP_TW_ACK;
 47	}
 48
 49	/* We are rate-limiting, so just release the tw sock and drop skb. */
 50	inet_twsk_put(tw);
 51	return TCP_TW_SUCCESS;
 52}
 53
 54static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq)
 
 55{
 56#ifdef CONFIG_TCP_AO
 57	struct tcp_ao_info *ao;
 58
 59	ao = rcu_dereference(tcptw->ao_info);
 60	if (unlikely(ao && seq < tcptw->tw_rcv_nxt))
 61		WRITE_ONCE(ao->rcv_sne, ao->rcv_sne + 1);
 62#endif
 63	tcptw->tw_rcv_nxt = seq;
 64}
 65
 66/*
 67 * * Main purpose of TIME-WAIT state is to close connection gracefully,
 68 *   when one of ends sits in LAST-ACK or CLOSING retransmitting FIN
 69 *   (and, probably, tail of data) and one or more our ACKs are lost.
 70 * * What is TIME-WAIT timeout? It is associated with maximal packet
 71 *   lifetime in the internet, which results in wrong conclusion, that
 72 *   it is set to catch "old duplicate segments" wandering out of their path.
 73 *   It is not quite correct. This timeout is calculated so that it exceeds
 74 *   maximal retransmission timeout enough to allow to lose one (or more)
 75 *   segments sent by peer and our ACKs. This time may be calculated from RTO.
 76 * * When TIME-WAIT socket receives RST, it means that another end
 77 *   finally closed and we are allowed to kill TIME-WAIT too.
 78 * * Second purpose of TIME-WAIT is catching old duplicate segments.
 79 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 80 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 81 * * If we invented some more clever way to catch duplicates
 82 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 83 *
 84 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 85 * When you compare it to RFCs, please, read section SEGMENT ARRIVES
 86 * from the very beginning.
 87 *
 88 * NOTE. With recycling (and later with fin-wait-2) TW bucket
 89 * is _not_ stateless. It means, that strictly speaking we must
 90 * spinlock it. I do not want! Well, probability of misbehaviour
 91 * is ridiculously low and, seems, we could use some mb() tricks
 92 * to avoid misread sequence numbers, states etc.  --ANK
 93 *
 94 * We don't need to initialize tmp_out.sack_ok as we don't use the results
 95 */
 96enum tcp_tw_status
 97tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 98			   const struct tcphdr *th)
 99{
 
 
100	struct tcp_options_received tmp_opt;
101	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
102	bool paws_reject = false;
 
103
104	tmp_opt.saw_tstamp = 0;
105	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
 
106		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);
107
108		if (tmp_opt.saw_tstamp) {
109			if (tmp_opt.rcv_tsecr)
110				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
111			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
112			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
113			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
114		}
115	}
116
117	if (tw->tw_substate == TCP_FIN_WAIT2) {
118		/* Just repeat all the checks of tcp_rcv_state_process() */
119
120		/* Out of window, send ACK */
121		if (paws_reject ||
122		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
123				   tcptw->tw_rcv_nxt,
124				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
125			return tcp_timewait_check_oow_rate_limit(
126				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);
127
128		if (th->rst)
129			goto kill;
130
131		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
132			return TCP_TW_RST;
133
134		/* Dup ACK? */
135		if (!th->ack ||
136		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
137		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
138			inet_twsk_put(tw);
139			return TCP_TW_SUCCESS;
140		}
141
142		/* New data or FIN. If new data arrive after half-duplex close,
143		 * reset.
144		 */
145		if (!th->fin ||
146		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
147			return TCP_TW_RST;
148
149		/* FIN arrived, enter true time-wait state. */
150		tw->tw_substate	  = TCP_TIME_WAIT;
151		twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq);
 
152
153		if (tmp_opt.saw_tstamp) {
154			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
155			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
 
 
156		}
157
158		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
159		return TCP_TW_ACK;
160	}
161
162	/*
163	 *	Now real TIME-WAIT state.
164	 *
165	 *	RFC 1122:
166	 *	"When a connection is [...] on TIME-WAIT state [...]
167	 *	[a TCP] MAY accept a new SYN from the remote TCP to
168	 *	reopen the connection directly, if it:
169	 *
170	 *	(1)  assigns its initial sequence number for the new
171	 *	connection to be larger than the largest sequence
172	 *	number it used on the previous connection incarnation,
173	 *	and
174	 *
175	 *	(2)  returns to TIME-WAIT state if the SYN turns out
176	 *	to be an old duplicate".
177	 */
178
179	if (!paws_reject &&
180	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
181	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
182		/* In window segment, it may be only reset or bare ack. */
183
184		if (th->rst) {
185			/* This is TIME_WAIT assassination, in two flavors.
186			 * Oh well... nobody has a sufficient solution to this
187			 * protocol bug yet.
188			 */
189			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
190kill:
191				inet_twsk_deschedule_put(tw);
192				return TCP_TW_SUCCESS;
193			}
194		} else {
195			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
196		}
197
198		if (tmp_opt.saw_tstamp) {
199			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
200			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
 
 
201		}
202
203		inet_twsk_put(tw);
204		return TCP_TW_SUCCESS;
205	}
206
207	/* Out of window segment.
208
209	   All the segments are ACKed immediately.
210
211	   The only exception is new SYN. We accept it, if it is
212	   not old duplicate and we are not in danger to be killed
213	   by delayed old duplicates. RFC check is that it has
214	   newer sequence number works at rates <40Mbit/sec.
215	   However, if paws works, it is reliable AND even more,
216	   we even may relax silly seq space cutoff.
217
218	   RED-PEN: we violate main RFC requirement, if this SYN will appear
219	   old duplicate (i.e. we receive RST in reply to SYN-ACK),
220	   we must return socket to time-wait state. It is not good,
221	   but not fatal yet.
222	 */
223
224	if (th->syn && !th->rst && !th->ack && !paws_reject &&
225	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
226	     (tmp_opt.saw_tstamp &&
227	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
228		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
229		if (isn == 0)
230			isn++;
231		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
232		return TCP_TW_SYN;
233	}
234
235	if (paws_reject)
236		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
237
238	if (!th->rst) {
239		/* In this case we must reset the TIMEWAIT timer.
240		 *
241		 * If it is ACKless SYN it may be both old duplicate
242		 * and new good SYN with random sequence number <rcv_nxt.
243		 * Do not reschedule in the last case.
244		 */
245		if (paws_reject || th->ack)
246			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
247
248		return tcp_timewait_check_oow_rate_limit(
249			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
250	}
251	inet_twsk_put(tw);
252	return TCP_TW_SUCCESS;
253}
254EXPORT_SYMBOL(tcp_timewait_state_process);
255
256static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
257{
258#ifdef CONFIG_TCP_MD5SIG
259	const struct tcp_sock *tp = tcp_sk(sk);
260	struct tcp_md5sig_key *key;
261
262	/*
263	 * The timewait bucket does not have the key DB from the
264	 * sock structure. We just make a quick copy of the
265	 * md5 key being used (if indeed we are using one)
266	 * so the timewait ack generating code has the key.
267	 */
268	tcptw->tw_md5_key = NULL;
269	if (!static_branch_unlikely(&tcp_md5_needed.key))
270		return;
271
272	key = tp->af_specific->md5_lookup(sk, sk);
273	if (key) {
274		tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
275		if (!tcptw->tw_md5_key)
276			return;
277		if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
278			goto out_free;
279		tcp_md5_add_sigpool();
280	}
281	return;
282out_free:
283	WARN_ON_ONCE(1);
284	kfree(tcptw->tw_md5_key);
285	tcptw->tw_md5_key = NULL;
286#endif
287}
288
289/*
290 * Move a socket to time-wait or dead fin-wait-2 state.
291 */
292void tcp_time_wait(struct sock *sk, int state, int timeo)
293{
294	const struct inet_connection_sock *icsk = inet_csk(sk);
295	struct tcp_sock *tp = tcp_sk(sk);
296	struct net *net = sock_net(sk);
297	struct inet_timewait_sock *tw;
298
299	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);
300
301	if (tw) {
302		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
303		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
304
305		tw->tw_transparent	= inet_test_bit(TRANSPARENT, sk);
306		tw->tw_mark		= sk->sk_mark;
307		tw->tw_priority		= READ_ONCE(sk->sk_priority);
308		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
309		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
310		tcptw->tw_snd_nxt	= tp->snd_nxt;
311		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
312		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
313		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
314		tcptw->tw_ts_offset	= tp->tsoffset;
315		tw->tw_usec_ts		= tp->tcp_usec_ts;
316		tcptw->tw_last_oow_ack_time = 0;
317		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
318		tw->tw_txhash		= sk->sk_txhash;
 
 
 
 
319#if IS_ENABLED(CONFIG_IPV6)
320		if (tw->tw_family == PF_INET6) {
321			struct ipv6_pinfo *np = inet6_sk(sk);
322
323			tw->tw_v6_daddr = sk->sk_v6_daddr;
324			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
325			tw->tw_tclass = np->tclass;
326			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
327			tw->tw_ipv6only = sk->sk_ipv6only;
328		}
329#endif
330
331		tcp_time_wait_init(sk, tcptw);
332		tcp_ao_time_wait(tcptw, tp);
333
334		/* Get the TIME_WAIT timeout firing. */
335		if (timeo < rto)
336			timeo = rto;
337
338		if (state == TCP_TIME_WAIT)
339			timeo = TCP_TIMEWAIT_LEN;
340
341		/* tw_timer is pinned, so we need to make sure BH are disabled
342		 * in following section, otherwise timer handler could run before
343		 * we complete the initialization.
344		 */
345		local_bh_disable();
346		inet_twsk_schedule(tw, timeo);
347		/* Linkage updates.
348		 * Note that access to tw after this point is illegal.
349		 */
350		inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
351		local_bh_enable();
352	} else {
353		/* Sorry, if we're out of memory, just CLOSE this
354		 * socket up.  We've got bigger problems than
355		 * non-graceful socket closings.
356		 */
357		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
358	}
359
360	tcp_update_metrics(sk);
361	tcp_done(sk);
362}
363EXPORT_SYMBOL(tcp_time_wait);
364
365#ifdef CONFIG_TCP_MD5SIG
366static void tcp_md5_twsk_free_rcu(struct rcu_head *head)
367{
368	struct tcp_md5sig_key *key;
369
370	key = container_of(head, struct tcp_md5sig_key, rcu);
371	kfree(key);
372	static_branch_slow_dec_deferred(&tcp_md5_needed);
373	tcp_md5_release_sigpool();
374}
375#endif
376
377void tcp_twsk_destructor(struct sock *sk)
378{
379#ifdef CONFIG_TCP_MD5SIG
380	if (static_branch_unlikely(&tcp_md5_needed.key)) {
381		struct tcp_timewait_sock *twsk = tcp_twsk(sk);
382
383		if (twsk->tw_md5_key)
384			call_rcu(&twsk->tw_md5_key->rcu, tcp_md5_twsk_free_rcu);
385	}
386#endif
387	tcp_ao_destroy_sock(sk, true);
388}
389EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
390
391void tcp_twsk_purge(struct list_head *net_exit_list, int family)
392{
393	bool purged_once = false;
394	struct net *net;
395
396	list_for_each_entry(net, net_exit_list, exit_list) {
397		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
398			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
399			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
400		} else if (!purged_once) {
401			inet_twsk_purge(&tcp_hashinfo, family);
402			purged_once = true;
403		}
404	}
405}
406EXPORT_SYMBOL_GPL(tcp_twsk_purge);
407
408/* Warning : This function is called without sk_listener being locked.
409 * Be sure to read socket fields once, as their value could change under us.
410 */
411void tcp_openreq_init_rwin(struct request_sock *req,
412			   const struct sock *sk_listener,
413			   const struct dst_entry *dst)
414{
415	struct inet_request_sock *ireq = inet_rsk(req);
416	const struct tcp_sock *tp = tcp_sk(sk_listener);
417	int full_space = tcp_full_space(sk_listener);
418	u32 window_clamp;
419	__u8 rcv_wscale;
420	u32 rcv_wnd;
421	int mss;
422
423	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
424	window_clamp = READ_ONCE(tp->window_clamp);
425	/* Set this up on the first call only */
426	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);
427
428	/* limit the window selection if the user enforce a smaller rx buffer */
429	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
430	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
431		req->rsk_window_clamp = full_space;
432
433	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
434	if (rcv_wnd == 0)
435		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
436	else if (full_space < rcv_wnd * mss)
437		full_space = rcv_wnd * mss;
438
439	/* tcp_full_space because it is guaranteed to be the first packet */
440	tcp_select_initial_window(sk_listener, full_space,
441		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
442		&req->rsk_rcv_wnd,
443		&req->rsk_window_clamp,
444		ireq->wscale_ok,
445		&rcv_wscale,
446		rcv_wnd);
447	ireq->rcv_wscale = rcv_wscale;
448}
449EXPORT_SYMBOL(tcp_openreq_init_rwin);
450
451static void tcp_ecn_openreq_child(struct tcp_sock *tp,
452				  const struct request_sock *req)
453{
454	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
455}
456
457void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
458{
459	struct inet_connection_sock *icsk = inet_csk(sk);
460	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
461	bool ca_got_dst = false;
462
463	if (ca_key != TCP_CA_UNSPEC) {
464		const struct tcp_congestion_ops *ca;
465
466		rcu_read_lock();
467		ca = tcp_ca_find_key(ca_key);
468		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
469			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
470			icsk->icsk_ca_ops = ca;
471			ca_got_dst = true;
472		}
473		rcu_read_unlock();
474	}
475
476	/* If no valid choice made yet, assign current system default ca. */
477	if (!ca_got_dst &&
478	    (!icsk->icsk_ca_setsockopt ||
479	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
480		tcp_assign_congestion_control(sk);
481
482	tcp_set_ca_state(sk, TCP_CA_Open);
483}
484EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
485
486static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
487				    struct request_sock *req,
488				    struct tcp_sock *newtp)
489{
490#if IS_ENABLED(CONFIG_SMC)
491	struct inet_request_sock *ireq;
492
493	if (static_branch_unlikely(&tcp_have_smc)) {
494		ireq = inet_rsk(req);
495		if (oldtp->syn_smc && !ireq->smc_ok)
496			newtp->syn_smc = 0;
497	}
498#endif
499}
500
501/* This is not only more efficient than what we used to do, it eliminates
502 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
503 *
504 * Actually, we could lots of memory writes here. tp of listening
505 * socket contains all necessary default parameters.
506 */
507struct sock *tcp_create_openreq_child(const struct sock *sk,
508				      struct request_sock *req,
509				      struct sk_buff *skb)
510{
511	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
512	const struct inet_request_sock *ireq = inet_rsk(req);
513	struct tcp_request_sock *treq = tcp_rsk(req);
514	struct inet_connection_sock *newicsk;
515	const struct tcp_sock *oldtp;
516	struct tcp_sock *newtp;
517	u32 seq;
518#ifdef CONFIG_TCP_AO
519	struct tcp_ao_key *ao_key;
520#endif
521
522	if (!newsk)
523		return NULL;
524
525	newicsk = inet_csk(newsk);
526	newtp = tcp_sk(newsk);
527	oldtp = tcp_sk(sk);
528
529	smc_check_reset_syn_req(oldtp, req, newtp);
530
531	/* Now setup tcp_sock */
532	newtp->pred_flags = 0;
533
534	seq = treq->rcv_isn + 1;
535	newtp->rcv_wup = seq;
536	WRITE_ONCE(newtp->copied_seq, seq);
537	WRITE_ONCE(newtp->rcv_nxt, seq);
538	newtp->segs_in = 1;
539
540	seq = treq->snt_isn + 1;
541	newtp->snd_sml = newtp->snd_una = seq;
542	WRITE_ONCE(newtp->snd_nxt, seq);
543	newtp->snd_up = seq;
544
545	INIT_LIST_HEAD(&newtp->tsq_node);
546	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);
547
548	tcp_init_wl(newtp, treq->rcv_isn);
549
550	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
551	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;
552
553	newtp->lsndtime = tcp_jiffies32;
554	newsk->sk_txhash = READ_ONCE(treq->txhash);
555	newtp->total_retrans = req->num_retrans;
556
557	tcp_init_xmit_timers(newsk);
558	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);
559
560	if (sock_flag(newsk, SOCK_KEEPOPEN))
561		inet_csk_reset_keepalive_timer(newsk,
562					       keepalive_time_when(newtp));
563
564	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
565	newtp->rx_opt.sack_ok = ireq->sack_ok;
566	newtp->window_clamp = req->rsk_window_clamp;
567	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
568	newtp->rcv_wnd = req->rsk_rcv_wnd;
569	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
570	if (newtp->rx_opt.wscale_ok) {
571		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
572		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
573	} else {
574		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
575		newtp->window_clamp = min(newtp->window_clamp, 65535U);
576	}
577	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
578	newtp->max_window = newtp->snd_wnd;
579
580	if (newtp->rx_opt.tstamp_ok) {
581		newtp->tcp_usec_ts = treq->req_usec_ts;
582		newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
583		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
584		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
585	} else {
586		newtp->tcp_usec_ts = 0;
587		newtp->rx_opt.ts_recent_stamp = 0;
588		newtp->tcp_header_len = sizeof(struct tcphdr);
589	}
590	if (req->num_timeout) {
591		newtp->total_rto = req->num_timeout;
592		newtp->undo_marker = treq->snt_isn;
593		if (newtp->tcp_usec_ts) {
594			newtp->retrans_stamp = treq->snt_synack;
595			newtp->total_rto_time = (u32)(tcp_clock_us() -
596						      newtp->retrans_stamp) / USEC_PER_MSEC;
597		} else {
598			newtp->retrans_stamp = div_u64(treq->snt_synack,
599						       USEC_PER_SEC / TCP_TS_HZ);
600			newtp->total_rto_time = tcp_clock_ms() -
601						newtp->retrans_stamp;
602		}
603		newtp->total_rto_recoveries = 1;
604	}
605	newtp->tsoffset = treq->ts_off;
606#ifdef CONFIG_TCP_MD5SIG
607	newtp->md5sig_info = NULL;	/*XXX*/
608#endif
609#ifdef CONFIG_TCP_AO
610	newtp->ao_info = NULL;
611	ao_key = treq->af_specific->ao_lookup(sk, req,
612				tcp_rsk(req)->ao_keyid, -1);
613	if (ao_key)
614		newtp->tcp_header_len += tcp_ao_len_aligned(ao_key);
 
 
 
 
615 #endif
616	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
617		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
618	newtp->rx_opt.mss_clamp = req->mss;
619	tcp_ecn_openreq_child(newtp, req);
620	newtp->fastopen_req = NULL;
621	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);
622
623	newtp->bpf_chg_cc_inprogress = 0;
624	tcp_bpf_clone(sk, newsk);
625
626	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
627
 
 
628	return newsk;
629}
630EXPORT_SYMBOL(tcp_create_openreq_child);
631
632/*
633 * Process an incoming packet for SYN_RECV sockets represented as a
634 * request_sock. Normally sk is the listener socket but for TFO it
635 * points to the child socket.
636 *
637 * XXX (TFO) - The current impl contains a special check for ack
638 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
639 *
640 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
641 *
642 * Note: If @fastopen is true, this can be called from process context.
643 *       Otherwise, this is from BH context.
644 */
645
646struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
647			   struct request_sock *req,
648			   bool fastopen, bool *req_stolen)
649{
650	struct tcp_options_received tmp_opt;
651	struct sock *child;
652	const struct tcphdr *th = tcp_hdr(skb);
653	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
654	bool paws_reject = false;
655	bool own_req;
656
657	tmp_opt.saw_tstamp = 0;
658	if (th->doff > (sizeof(struct tcphdr)>>2)) {
659		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);
660
661		if (tmp_opt.saw_tstamp) {
662			tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
663			if (tmp_opt.rcv_tsecr)
664				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
665			/* We do not store true stamp, but it is not required,
666			 * it can be estimated (approximately)
667			 * from another data.
668			 */
669			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
670			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
671		}
672	}
673
674	/* Check for pure retransmitted SYN. */
675	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
676	    flg == TCP_FLAG_SYN &&
677	    !paws_reject) {
678		/*
679		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
680		 * this case on figure 6 and figure 8, but formal
681		 * protocol description says NOTHING.
682		 * To be more exact, it says that we should send ACK,
683		 * because this segment (at least, if it has no data)
684		 * is out of window.
685		 *
686		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
687		 *  describe SYN-RECV state. All the description
688		 *  is wrong, we cannot believe to it and should
689		 *  rely only on common sense and implementation
690		 *  experience.
691		 *
692		 * Enforce "SYN-ACK" according to figure 8, figure 6
693		 * of RFC793, fixed by RFC1122.
694		 *
695		 * Note that even if there is new data in the SYN packet
696		 * they will be thrown away too.
697		 *
698		 * Reset timer after retransmitting SYNACK, similar to
699		 * the idea of fast retransmit in recovery.
700		 */
701		if (!tcp_oow_rate_limited(sock_net(sk), skb,
702					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
703					  &tcp_rsk(req)->last_oow_ack_time) &&
704
705		    !inet_rtx_syn_ack(sk, req)) {
706			unsigned long expires = jiffies;
707
708			expires += reqsk_timeout(req, TCP_RTO_MAX);
709			if (!fastopen)
710				mod_timer_pending(&req->rsk_timer, expires);
711			else
712				req->rsk_timer.expires = expires;
713		}
714		return NULL;
715	}
716
717	/* Further reproduces section "SEGMENT ARRIVES"
718	   for state SYN-RECEIVED of RFC793.
719	   It is broken; however, it fails only
720	   when SYNs are crossed.
721
722	   You would think that SYN crossing is impossible here, since
723	   we should have a SYN_SENT socket (from connect()) on our end,
724	   but this is not true if the crossed SYNs were sent to both
725	   ends by a malicious third party.  We must defend against this,
726	   and to do that we first verify the ACK (as per RFC793, page
727	   36) and reset if it is invalid.  Is this a true full defense?
728	   To convince ourselves, let us consider a way in which the ACK
729	   test can still pass in this 'malicious crossed SYNs' case.
730	   Malicious sender sends identical SYNs (and thus identical sequence
731	   numbers) to both A and B:
732
733		A: gets SYN, seq=7
734		B: gets SYN, seq=7
735
736	   By our good fortune, both A and B select the same initial
737	   send sequence number of seven :-)
738
739		A: sends SYN|ACK, seq=7, ack_seq=8
740		B: sends SYN|ACK, seq=7, ack_seq=8
741
742	   So we are now A, eating this SYN|ACK; the ACK test passes.  So
743	   does the sequence test; the SYN is truncated, and thus we consider
744	   it a bare ACK.
745
746	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
747	   bare ACK.  Otherwise, we create an established connection.  Both
748	   ends (listening sockets) accept the new incoming connection and try
749	   to talk to each other. 8-)
750
751	   Note: This case is both harmless and rare.  The probability is about the
752	   same as us discovering intelligent life on another planet tomorrow.
753
754	   But generally, we should (the RFC lies!) accept an ACK
755	   for a SYNACK both here and in tcp_rcv_state_process().
756	   tcp_rcv_state_process() does not, hence we do not either.
757
758	   Note that the case is absolutely generic:
759	   we cannot optimize anything here without
760	   violating the protocol. All the checks must be made
761	   before attempting to create the socket.
762	 */
763
764	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
765	 *                  and the incoming segment acknowledges something not yet
766	 *                  sent (the segment carries an unacceptable ACK) ...
767	 *                  a reset is sent."
768	 *
769	 * Invalid ACK: reset will be sent by listening socket.
770	 * Note that the ACK validity check for a Fast Open socket is done
771	 * elsewhere and is checked directly against the child socket rather
772	 * than req because user data may have been sent out.
773	 */
774	if ((flg & TCP_FLAG_ACK) && !fastopen &&
775	    (TCP_SKB_CB(skb)->ack_seq !=
776	     tcp_rsk(req)->snt_isn + 1))
777		return sk;
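	/* Illustration with made-up numbers: if our SYNACK carried
	 * snt_isn == 1000, the only acceptable ack_seq here is 1001.
	 * A segment with ack_seq == 5000 acknowledges data we never
	 * sent, so it is handed back to the listener, which replies
	 * with a reset as RFC793 requires.
	 */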
778
779	/* Also, it would not be a bad idea to check rcv_tsecr, which
780	 * is essentially an ACK extension; too-early or too-late values
781	 * should cause a reset in unsynchronized states.
782	 */
783
784	/* RFC793: "first check sequence number". */
785
786	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
787					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
788		/* Out of window: send ACK and drop. */
789		if (!(flg & TCP_FLAG_RST) &&
790		    !tcp_oow_rate_limited(sock_net(sk), skb,
791					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
792					  &tcp_rsk(req)->last_oow_ack_time))
793			req->rsk_ops->send_ack(sk, skb, req);
794		if (paws_reject)
795			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
796		return NULL;
797	}
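	/* Example with illustrative numbers: with rcv_nxt == 100 and
	 * rsk_rcv_wnd == 50, a segment must overlap [100, 150) to pass
	 * tcp_in_window() above. A stale retransmit with
	 * seq == end_seq == 90 fails the test, so we re-ACK (subject to
	 * the rate limit) rather than silently dropping, letting the
	 * peer resynchronize.
	 */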
798
799	/* In sequence, PAWS is OK. */
800
801	/* TODO: We probably should defer ts_recent change once
802	 * we take ownership of @req.
803	 */
804	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
805		WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);
806
807	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
808		/* Truncate SYN, it is out of window starting
809		   at tcp_rsk(req)->rcv_isn + 1. */
810		flg &= ~TCP_FLAG_SYN;
811	}
812
813	/* RFC793: "second check the RST bit" and
814	 *	   "fourth, check the SYN bit"
815	 */
816	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
817		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
818		goto embryonic_reset;
819	}
820
821	/* ACK sequence verified above, just make sure ACK is
822	 * set.  If ACK not set, just silently drop the packet.
823	 *
824	 * XXX (TFO) - if we ever allow "data after SYN", the
825	 * following check needs to be removed.
826	 */
827	if (!(flg & TCP_FLAG_ACK))
828		return NULL;
829
830	/* For Fast Open no more processing is needed (sk is the
831	 * child socket).
832	 */
833	if (fastopen)
834		return sk;
835
836	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
837	if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&
838	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
839		inet_rsk(req)->acked = 1;
840		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
841		return NULL;
842	}
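	/* For reference, applications enable this behaviour from
	 * userspace (illustrative snippet, not kernel code):
	 *
	 *	int secs = 5;
	 *	setsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
	 *		   &secs, sizeof(secs));
	 *
	 * The listener then keeps the request queued until data arrives
	 * (or the deferral period, converted to SYNACK retransmission
	 * rounds, expires), so accept() tends to return connections
	 * that are immediately readable.
	 */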
843
844	/* OK, ACK is valid, create big socket and
845	 * feed this segment to it. It will repeat all
846	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
847	 * ESTABLISHED STATE. If it is dropped after the
848	 * socket is created, expect trouble.
849	 */
850	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
851							 req, &own_req);
852	if (!child)
853		goto listen_overflow;
854
855	if (own_req && rsk_drop_req(req)) {
856		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
857		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
858		return child;
859	}
860
861	sock_rps_save_rxhash(child, skb);
862	tcp_synack_rtt_meas(child, req);
863	*req_stolen = !own_req;
864	return inet_csk_complete_hashdance(sk, child, req, own_req);
865
866listen_overflow:
867	if (sk != req->rsk_listener)
868		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
869
870	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
871		inet_rsk(req)->acked = 1;
872		return NULL;
873	}
874
875embryonic_reset:
876	if (!(flg & TCP_FLAG_RST)) {
877		/* Received a bad SYN pkt - for TFO we try not to reset
878		 * the local connection unless it's really necessary, to
879		 * avoid becoming vulnerable to an outside attack aiming at
880		 * resetting legit local connections.
881		 */
882		req->rsk_ops->send_reset(sk, skb);
883	} else if (fastopen) { /* received a valid RST pkt */
884		reqsk_fastopen_remove(sk, req, true);
885		tcp_reset(sk, skb);
886	}
887	if (!fastopen) {
888		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);
889
890		if (unlinked)
891			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
892		*req_stolen = !unlinked;
893	}
894	return NULL;
895}
896EXPORT_SYMBOL(tcp_check_req);
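
/* For illustration only: a condensed sketch, loosely modelled on the
 * TCP_NEW_SYN_RECV handling in tcp_v4_rcv(), of how a receive path hands
 * a request_sock to tcp_check_req(). The function name and the simplified
 * locking/refcounting are assumptions for the example.
 */
static struct sock *example_handle_new_syn_recv(struct sock *sk,
						struct sk_buff *skb)
{
	struct request_sock *req = inet_reqsk(sk);
	struct sock *listener = req->rsk_listener;
	bool req_stolen = false;
	struct sock *nsk;

	sock_hold(listener);
	nsk = tcp_check_req(listener, skb, req, false, &req_stolen);
	if (!nsk) {
		/* Drop our lookup reference. If req_stolen, another CPU
		 * took ownership and a real caller would redo the socket
		 * lookup to deliver skb to the freshly created child.
		 */
		reqsk_put(req);
		sock_put(listener);
		return NULL;
	}
	if (nsk == listener) {
		/* Invalid ACK: the listener will answer with a reset. */
		reqsk_put(req);
		return listener;
	}
	/* Child created; the request's bookkeeping was completed inside
	 * tcp_check_req() via inet_csk_complete_hashdance(). The skb
	 * would now be fed to the child with tcp_child_process().
	 */
	return nsk;
}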
897
898/*
899 * Queue segment on the new socket if the new socket is active,
900 * otherwise we just short-circuit this and continue with
901 * the new socket.
902 *
903 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
904 * when entering. But other states are possible due to a race condition
905 * where, after __inet_lookup_established() fails but before the listener
906 * lock is obtained, other packets cause the same connection to
907 * be created.
908 */
909
910enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
911				       struct sk_buff *skb)
912	__releases(&((child)->sk_lock.slock))
913{
914	enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
915	int state = child->sk_state;
916
917	/* record sk_napi_id and sk_rx_queue_mapping of child. */
918	sk_mark_napi_id_set(child, skb);
919
920	tcp_segs_in(tcp_sk(child), skb);
921	if (!sock_owned_by_user(child)) {
922		reason = tcp_rcv_state_process(child, skb);
923		/* Wakeup parent, send SIGIO */
924		if (state == TCP_SYN_RECV && child->sk_state != state)
925			parent->sk_data_ready(parent);
926	} else {
927		/* Alas, it is possible again, because we do the lookup
928		 * in the main socket hash table, and the lock on the
929		 * listening socket does not protect us anymore.
930		 */
931		__sk_add_backlog(child, skb);
932	}
933
934	bh_unlock_sock(child);
935	sock_put(child);
936	return reason;
937}
938EXPORT_SYMBOL(tcp_child_process);
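
/* For illustration only: a minimal sketch of the caller's side of
 * tcp_child_process(), modelled on the pattern used by the IPv4 receive
 * path. The function name is an assumption for the example. The child
 * returned by tcp_check_req() is still locked (it was created via
 * sk_clone_lock()) and holds a reference, both of which
 * tcp_child_process() releases.
 */
static enum skb_drop_reason example_deliver_to_child(struct sock *parent,
						     struct sock *child,
						     struct sk_buff *skb)
{
	/* No new socket: let the caller keep processing on the parent. */
	if (child == parent)
		return SKB_NOT_DROPPED_YET;

	/* Unlocks child->sk_lock.slock and drops the child reference;
	 * a non-zero drop reason tells the caller to send a reset.
	 */
	return tcp_child_process(parent, child, skb);
}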