v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
  4 *		operating system.  INET is implemented using the  BSD Socket
  5 *		interface as the means of communication with the user level.
  6 *
  7 *		Implementation of the Transmission Control Protocol(TCP).
  8 *
  9 * Authors:	Ross Biro
 10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 11 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 12 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 13 *		Florian La Roche, <flla@stud.uni-sb.de>
 14 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 15 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 16 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 17 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 18 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 19 *		Jorge Cwik, <jorge@laser.satlink.net>
 20 */
 21
 22#include <linux/module.h>
 23#include <linux/gfp.h>
 24#include <net/tcp.h>
 25#include <net/rstreason.h>
 26
 27static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
 28{
 29	const struct inet_connection_sock *icsk = inet_csk(sk);
 30	const struct tcp_sock *tp = tcp_sk(sk);
 31	u32 elapsed, user_timeout;
 32	s32 remaining;
 33
 34	user_timeout = READ_ONCE(icsk->icsk_user_timeout);
 35	if (!user_timeout)
 36		return icsk->icsk_rto;
 37
 38	elapsed = tcp_time_stamp_ts(tp) - tp->retrans_stamp;
 39	if (tp->tcp_usec_ts)
 40		elapsed /= USEC_PER_MSEC;
 41
 42	remaining = user_timeout - elapsed;
 43	if (remaining <= 0)
 44		return 1; /* user timeout has passed; fire ASAP */
 45
 46	return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
 47}
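/* Worked example for the clamp above (numbers assumed for illustration):
 * with TCP_USER_TIMEOUT = 5000 ms and 4800 ms already elapsed since
 * retrans_stamp, remaining = 200 ms, so the timer is rearmed for
 * min(icsk_rto, msecs_to_jiffies(200)). Once the deadline has fully
 * passed, the function returns 1 jiffy so the expiry is noticed ASAP.
 */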
 48
 49u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
 50{
 51	const struct inet_connection_sock *icsk = inet_csk(sk);
 52	u32 remaining, user_timeout;
 53	s32 elapsed;
 54
 55	user_timeout = READ_ONCE(icsk->icsk_user_timeout);
 56	if (!user_timeout || !icsk->icsk_probes_tstamp)
 57		return when;
 58
 59	elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
 60	if (unlikely(elapsed < 0))
 61		elapsed = 0;
 62	remaining = msecs_to_jiffies(user_timeout) - elapsed;
 63	remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN);
 64
 65	return min_t(u32, remaining, when);
 66}
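/* Same idea for zero-window probes (illustrative numbers): with
 * TCP_USER_TIMEOUT = 10000 ms and ~4 s of probing already elapsed,
 * the next probe fires in min(when, ~6 s), but never sooner than
 * TCP_TIMEOUT_MIN.
 */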
 67
 68/**
 69 *  tcp_write_err() - close socket and save error info
 70 *  @sk:  The socket the error has appeared on.
 71 *
 72 *  Returns: Nothing (void)
 73 */
 74
 75static void tcp_write_err(struct sock *sk)
 76{
 77	tcp_done_with_error(sk, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT);
 78	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
 79}
 80
 81/**
 82 *  tcp_out_of_resources() - Close socket if out of resources
 83 *  @sk:        pointer to current socket
 84 *  @do_reset:  send a last packet with reset flag
 85 *
 86 *  Do not allow orphaned sockets to eat all our resources.
 87 *  This is a direct violation of the TCP specs, but it is required
 88 *  to prevent DoS attacks. It is called when a retransmission timeout
 89 *  or zero probe timeout occurs on an orphaned socket.
 90 *
 91 *  Also close if our net namespace is exiting; in that case there is no
 92 *  hope of ever communicating again since all netns interfaces are already
 93 *  down (or about to be down), and we need to release our dst references,
 94 *  which have been moved to the netns loopback interface, so the namespace
 95 *  can finish exiting.  This condition is only possible if we are a kernel
 96 *  socket, as those do not hold references to the namespace.
 97 *
 98 *  The criteria are still not confirmed experimentally and may change.
 99 *  We kill the socket if:
100 *  1. The number of orphaned sockets exceeds an administratively
101 *     configured limit.
102 *  2. We are under strong memory pressure.
103 *  3. Our net namespace is exiting.
104 */
105static int tcp_out_of_resources(struct sock *sk, bool do_reset)
106{
107	struct tcp_sock *tp = tcp_sk(sk);
108	int shift = 0;
109
110	/* If the peer does not open its window for a long time, or did not
111	 * transmit anything for a long time, penalize it. */
112	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
113		shift++;
114
115	/* If some dubious ICMP arrived, penalize even more. */
116	if (READ_ONCE(sk->sk_err_soft))
117		shift++;
118
119	if (tcp_check_oom(sk, shift)) {
120		/* Catch exceptional cases when the connection requires a reset.
121		 *      1. The last segment was sent recently. */
122		if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
123		    /*  2. Window is closed. */
124		    (!tp->snd_wnd && !tp->packets_out))
125			do_reset = true;
126		if (do_reset)
127			tcp_send_active_reset(sk, GFP_ATOMIC,
128					      SK_RST_REASON_TCP_ABORT_ON_MEMORY);
129		tcp_done(sk);
130		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
131		return 1;
132	}
133
134	if (!check_net(sock_net(sk))) {
135		/* Not possible to send reset; just close */
136		tcp_done(sk);
137		return 1;
138	}
139
140	return 0;
141}
142
143/**
144 *  tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
145 *  @sk:    Pointer to the current socket.
146 *  @alive: bool, socket alive state
147 */
148static int tcp_orphan_retries(struct sock *sk, bool alive)
149{
150	int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */
151
152	/* We know from an ICMP that something is wrong. */
153	if (READ_ONCE(sk->sk_err_soft) && !alive)
154		retries = 0;
155
156	/* However, if socket sent something recently, select some safe
157	 * number of retries. 8 corresponds to >100 seconds with minimal
158	 * RTO of 200msec. */
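	/* A quick check of the ">100 seconds" claim, using the model of
	 * tcp_model_timeout() below: with rto_base = 200 ms and boundary = 8,
	 * linear_backoff_thresh = ilog2(120000/200) = 9 >= 8, so
	 * timeout = ((2 << 8) - 1) * 200 ms = 511 * 200 ms = 102.2 s.
	 */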
159	if (retries == 0 && alive)
160		retries = 8;
161	return retries;
162}
163
164static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
165{
166	const struct net *net = sock_net(sk);
167	int mss;
168
169	/* Black hole detection */
170	if (!READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing))
171		return;
172
173	if (!icsk->icsk_mtup.enabled) {
174		icsk->icsk_mtup.enabled = 1;
175		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
176	} else {
177		mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
178		mss = min(READ_ONCE(net->ipv4.sysctl_tcp_base_mss), mss);
179		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_mtu_probe_floor));
180		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_min_snd_mss));
181		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
182	}
183	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
184}
185
186static unsigned int tcp_model_timeout(struct sock *sk,
187				      unsigned int boundary,
188				      unsigned int rto_base)
189{
190	unsigned int linear_backoff_thresh, timeout;
191
192	linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);
193	if (boundary <= linear_backoff_thresh)
194		timeout = ((2 << boundary) - 1) * rto_base;
195	else
196		timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
197			(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
198	return jiffies_to_msecs(timeout);
199}
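/* Example of the two branches above, assuming the conventional 120 s
 * TCP_RTO_MAX: with rto_base = 200 ms, linear_backoff_thresh =
 * ilog2(600) = 9. For boundary = 6 (exponential range):
 * ((2 << 6) - 1) * 200 ms = 25.4 s. For boundary = 15 (past the
 * threshold): ((2 << 9) - 1) * 200 ms + (15 - 9) * 120 s = 924.6 s.
 */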
200/**
201 *  retransmits_timed_out() - returns true if this connection has timed out
202 *  @sk:       The current socket
203 *  @boundary: max number of retransmissions
204 *  @timeout:  A custom timeout value.
205 *             If set to 0 the default timeout is calculated and used,
206 *             based on TCP_RTO_MIN and the number of unsuccessful retransmits.
207 *
208 * The default "timeout" value this function can calculate and use
209 * is equivalent to the timeout of a TCP Connection
210 * after "boundary" unsuccessful, exponentially backed-off
211 * retransmissions with an initial RTO of TCP_RTO_MIN.
212 */
213static bool retransmits_timed_out(struct sock *sk,
214				  unsigned int boundary,
215				  unsigned int timeout)
216{
217	struct tcp_sock *tp = tcp_sk(sk);
218	unsigned int start_ts, delta;
219
220	if (!inet_csk(sk)->icsk_retransmits)
221		return false;
222
223	start_ts = tp->retrans_stamp;
224	if (likely(timeout == 0)) {
225		unsigned int rto_base = TCP_RTO_MIN;
226
227		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
228			rto_base = tcp_timeout_init(sk);
229		timeout = tcp_model_timeout(sk, boundary, rto_base);
230	}
231
232	if (tp->tcp_usec_ts) {
233		/* delta may be off by up to a jiffy due to timer granularity. */
234		delta = tp->tcp_mstamp - start_ts + jiffies_to_usecs(1);
235		return (s32)(delta - timeout * USEC_PER_MSEC) >= 0;
236	}
237	return (s32)(tcp_time_stamp_ts(tp) - start_ts - timeout) >= 0;
238}
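/* Note on units above: @timeout stays in milliseconds. In the
 * tcp_usec_ts branch, delta and the deadline are compared in
 * microseconds, and one jiffy of slack is added to delta so that
 * timer granularity cannot make an already-expired deadline look
 * unexpired.
 */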
239
240/* A write timeout has occurred. Process the after effects. */
241static int tcp_write_timeout(struct sock *sk)
242{
243	struct inet_connection_sock *icsk = inet_csk(sk);
244	struct tcp_sock *tp = tcp_sk(sk);
245	struct net *net = sock_net(sk);
246	bool expired = false, do_reset;
247	int retry_until, max_retransmits;
248
249	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
250		if (icsk->icsk_retransmits)
251			__dst_negative_advice(sk);
252		/* Paired with WRITE_ONCE() in tcp_sock_set_syncnt() */
253		retry_until = READ_ONCE(icsk->icsk_syn_retries) ? :
254			READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);
255
256		max_retransmits = retry_until;
257		if (sk->sk_state == TCP_SYN_SENT)
258			max_retransmits += READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts);
259
260		expired = icsk->icsk_retransmits >= max_retransmits;
261	} else {
262		if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) {
263			/* Black hole detection */
264			tcp_mtu_probing(icsk, sk);
265
266			__dst_negative_advice(sk);
267		}
268
269		retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2);
270		if (sock_flag(sk, SOCK_DEAD)) {
271			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;
272
273			retry_until = tcp_orphan_retries(sk, alive);
274			do_reset = alive ||
275				!retransmits_timed_out(sk, retry_until, 0);
276
277			if (tcp_out_of_resources(sk, do_reset))
278				return 1;
279		}
280	}
281	if (!expired)
282		expired = retransmits_timed_out(sk, retry_until,
283						READ_ONCE(icsk->icsk_user_timeout));
284	tcp_fastopen_active_detect_blackhole(sk, expired);
285	mptcp_active_detect_blackhole(sk, expired);
286
287	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
288		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
289				  icsk->icsk_retransmits,
290				  icsk->icsk_rto, (int)expired);
291
292	if (expired) {
293		/* Has it gone just too far? */
294		tcp_write_err(sk);
295		return 1;
296	}
297
298	if (sk_rethink_txhash(sk)) {
299		tp->timeout_rehash++;
300		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);
301	}
302
303	return 0;
304}
305
306/* Called with BH disabled */
307void tcp_delack_timer_handler(struct sock *sk)
308{
309	struct inet_connection_sock *icsk = inet_csk(sk);
310	struct tcp_sock *tp = tcp_sk(sk);
311
312	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
313		return;
314
315	/* Handling the sack compression case */
316	if (tp->compressed_ack) {
317		tcp_mstamp_refresh(tp);
318		tcp_sack_compress_send_ack(sk);
319		return;
320	}
321
322	if (!(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
323		return;
324
325	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
326		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
327		return;
328	}
329	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
330
331	if (inet_csk_ack_scheduled(sk)) {
332		if (!inet_csk_in_pingpong_mode(sk)) {
333			/* Delayed ACK missed: inflate ATO. */
334			icsk->icsk_ack.ato = min_t(u32, icsk->icsk_ack.ato << 1, icsk->icsk_rto);
335		} else {
336			/* Delayed ACK missed: leave pingpong mode and
337			 * deflate ATO.
338			 */
339			inet_csk_exit_pingpong_mode(sk);
340			icsk->icsk_ack.ato      = TCP_ATO_MIN;
341		}
342		tcp_mstamp_refresh(tp);
343		tcp_send_ack(sk);
344		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
345	}
346}
347
348
349/**
350 *  tcp_delack_timer() - The TCP delayed ACK timeout handler
351 *  @t:  Pointer to the timer. (gets cast to struct sock *)
352 *
353 *  This function gets (indirectly) called when the kernel timer for a TCP packet
354 *  of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
355 *
356 *  Returns: Nothing (void)
357 */
358static void tcp_delack_timer(struct timer_list *t)
359{
360	struct inet_connection_sock *icsk =
361			from_timer(icsk, t, icsk_delack_timer);
362	struct sock *sk = &icsk->icsk_inet.sk;
363
364	/* Avoid taking socket spinlock if there is no ACK to send.
365	 * The compressed_ack check is racy, but a separate hrtimer
366	 * will take care of it eventually.
367	 */
368	if (!(smp_load_acquire(&icsk->icsk_ack.pending) & ICSK_ACK_TIMER) &&
369	    !READ_ONCE(tcp_sk(sk)->compressed_ack))
370		goto out;
371
372	bh_lock_sock(sk);
373	if (!sock_owned_by_user(sk)) {
374		tcp_delack_timer_handler(sk);
375	} else {
376		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
377		/* delegate our work to tcp_release_cb() */
378		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
379			sock_hold(sk);
380	}
381	bh_unlock_sock(sk);
382out:
383	sock_put(sk);
384}
385
386static void tcp_probe_timer(struct sock *sk)
387{
388	struct inet_connection_sock *icsk = inet_csk(sk);
389	struct sk_buff *skb = tcp_send_head(sk);
390	struct tcp_sock *tp = tcp_sk(sk);
391	int max_probes;
392
393	if (tp->packets_out || !skb) {
394		icsk->icsk_probes_out = 0;
395		icsk->icsk_probes_tstamp = 0;
396		return;
397	}
398
399	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
400	 * long as the receiver continues to respond to probes. We support this by
401	 * default and reset icsk_probes_out with incoming ACKs. But if the
402	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
403	 * kill the socket when the retry count and the time exceed the
404	 * corresponding system limit. We also implement similar policy when
405	 * we use RTO to probe window in tcp_retransmit_timer().
406	 */
407	if (!icsk->icsk_probes_tstamp) {
408		icsk->icsk_probes_tstamp = tcp_jiffies32;
409	} else {
410		u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);
411
412		if (user_timeout &&
413		    (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
414		     msecs_to_jiffies(user_timeout))
415			goto abort;
416	}
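	/* The (s32) cast above keeps the comparison safe across
	 * tcp_jiffies32 wraparound: e.g. tstamp = 0xffffff00 and
	 * now = 0x00000100 still yield a small positive elapsed
	 * time of 0x200 jiffies.
	 */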
417	max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2);
418	if (sock_flag(sk, SOCK_DEAD)) {
419		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
420
421		max_probes = tcp_orphan_retries(sk, alive);
422		if (!alive && icsk->icsk_backoff >= max_probes)
423			goto abort;
424		if (tcp_out_of_resources(sk, true))
425			return;
426	}
427
428	if (icsk->icsk_probes_out >= max_probes) {
429abort:		tcp_write_err(sk);
430	} else {
431		/* Only send another probe if we didn't close things up. */
432		tcp_send_probe0(sk);
433	}
434}
435
436static void tcp_update_rto_stats(struct sock *sk)
437{
438	struct inet_connection_sock *icsk = inet_csk(sk);
439	struct tcp_sock *tp = tcp_sk(sk);
440
441	if (!icsk->icsk_retransmits) {
442		tp->total_rto_recoveries++;
443		tp->rto_stamp = tcp_time_stamp_ms(tp);
444	}
445	icsk->icsk_retransmits++;
446	tp->total_rto++;
447}
448
449/*
450 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
451 *	sk here is the child socket, not the parent (listener) socket.
452 */
453static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
454{
455	struct inet_connection_sock *icsk = inet_csk(sk);
456	struct tcp_sock *tp = tcp_sk(sk);
457	int max_retries;
458
459	req->rsk_ops->syn_ack_timeout(req);
460
461	/* Add one more retry for fastopen.
462	 * Paired with WRITE_ONCE() in tcp_sock_set_syncnt()
463	 */
464	max_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
465		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_synack_retries) + 1;
466
467	if (req->num_timeout >= max_retries) {
468		tcp_write_err(sk);
469		return;
470	}
471	/* Lower cwnd after certain SYNACK timeout like tcp_init_transfer() */
472	if (icsk->icsk_retransmits == 1)
473		tcp_enter_loss(sk);
474	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
475	 * returned from rtx_syn_ack() to make it more persistent like
476	 * regular retransmit because if the child socket has been accepted
477	 * it's not good to give up too easily.
478	 */
479	inet_rtx_syn_ack(sk, req);
480	req->num_timeout++;
481	tcp_update_rto_stats(sk);
482	if (!tp->retrans_stamp)
483		tp->retrans_stamp = tcp_time_stamp_ts(tp);
484	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
485			  req->timeout << req->num_timeout, TCP_RTO_MAX);
486}
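/* The rearm above doubles per attempt: if req->timeout starts at,
 * say, TCP_TIMEOUT_INIT (1 s), the SYNACK is retried after 2 s, 4 s,
 * 8 s, ... (req->timeout << req->num_timeout), capped by TCP_RTO_MAX.
 */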
487
488static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
489				     const struct sk_buff *skb,
490				     u32 rtx_delta)
491{
492	const struct inet_connection_sock *icsk = inet_csk(sk);
493	u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);
494	const struct tcp_sock *tp = tcp_sk(sk);
495	int timeout = TCP_RTO_MAX * 2;
496	s32 rcv_delta;
497
498	if (user_timeout) {
499		/* If user application specified a TCP_USER_TIMEOUT,
500		 * it does not want win 0 packets to 'reset the timer'
501		 * while retransmits are not making progress.
502		 */
503		if (rtx_delta > user_timeout)
504			return true;
505		timeout = min_t(u32, timeout, msecs_to_jiffies(user_timeout));
506	}
507	/* Note: timer interrupt might have been delayed by at least one jiffy,
508	 * and tp->rcv_tstamp might very well have been written recently.
509	 * rcv_delta can thus be negative.
510	 */
511	rcv_delta = icsk->icsk_timeout - tp->rcv_tstamp;
512	if (rcv_delta <= timeout)
513		return false;
514
515	return msecs_to_jiffies(rtx_delta) > timeout;
516}
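/* Net effect of the checks above (illustrative): without a user
 * timeout, a zero-window retransmit is only declared dead once nothing
 * has been received for more than 2 * TCP_RTO_MAX (240 s with the
 * conventional 120 s maximum) *and* the retransmit itself has lasted
 * at least that long; TCP_USER_TIMEOUT tightens both bounds.
 */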
517
518/**
519 *  tcp_retransmit_timer() - The TCP retransmit timeout handler
520 *  @sk:  Pointer to the current socket.
521 *
522 *  This function gets called when the kernel timer for a TCP packet
523 *  of this socket expires.
524 *
525 *  It handles retransmission, timer adjustment and other necessary measures.
526 *
527 *  Returns: Nothing (void)
528 */
529void tcp_retransmit_timer(struct sock *sk)
530{
531	struct tcp_sock *tp = tcp_sk(sk);
532	struct net *net = sock_net(sk);
533	struct inet_connection_sock *icsk = inet_csk(sk);
534	struct request_sock *req;
535	struct sk_buff *skb;
536
537	req = rcu_dereference_protected(tp->fastopen_rsk,
538					lockdep_sock_is_held(sk));
539	if (req) {
540		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
541			     sk->sk_state != TCP_FIN_WAIT1);
542		tcp_fastopen_synack_timer(sk, req);
543		/* Before we receive ACK to our SYN-ACK don't retransmit
544		 * anything else (e.g., data or FIN segments).
545		 */
546		return;
547	}
548
549	if (!tp->packets_out)
550		return;
551
552	skb = tcp_rtx_queue_head(sk);
553	if (WARN_ON_ONCE(!skb))
554		return;
555
556	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
557	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
558		/* Receiver dastardly shrinks window. Our retransmits
559		 * become zero probes, but we should not time out this
560		 * connection. If the socket is an orphan, time it out;
561		 * we cannot allow such beasts to hang infinitely.
562		 */
563		struct inet_sock *inet = inet_sk(sk);
564		u32 rtx_delta;
565
566		rtx_delta = tcp_time_stamp_ts(tp) - (tp->retrans_stamp ?: 
567				tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb));
568		if (tp->tcp_usec_ts)
569			rtx_delta /= USEC_PER_MSEC;
570
571		if (sk->sk_family == AF_INET) {
572			net_dbg_ratelimited("Probing zero-window on %pI4:%u/%u, seq=%u:%u, recv %ums ago, lasting %ums\n",
573				&inet->inet_daddr, ntohs(inet->inet_dport),
574				inet->inet_num, tp->snd_una, tp->snd_nxt,
575				jiffies_to_msecs(jiffies - tp->rcv_tstamp),
576				rtx_delta);
577		}
578#if IS_ENABLED(CONFIG_IPV6)
579		else if (sk->sk_family == AF_INET6) {
580			net_dbg_ratelimited("Probing zero-window on %pI6:%u/%u, seq=%u:%u, recv %ums ago, lasting %ums\n",
581				&sk->sk_v6_daddr, ntohs(inet->inet_dport),
582				inet->inet_num, tp->snd_una, tp->snd_nxt,
583				jiffies_to_msecs(jiffies - tp->rcv_tstamp),
584				rtx_delta);
585		}
586#endif
587		if (tcp_rtx_probe0_timed_out(sk, skb, rtx_delta)) {
588			tcp_write_err(sk);
589			goto out;
590		}
591		tcp_enter_loss(sk);
592		tcp_retransmit_skb(sk, skb, 1);
593		__sk_dst_reset(sk);
594		goto out_reset_timer;
595	}
596
597	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
598	if (tcp_write_timeout(sk))
599		goto out;
600
601	if (icsk->icsk_retransmits == 0) {
602		int mib_idx = 0;
603
604		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
605			if (tcp_is_sack(tp))
606				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
607			else
608				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
609		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
610			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
611		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
612			   tp->sacked_out) {
613			if (tcp_is_sack(tp))
614				mib_idx = LINUX_MIB_TCPSACKFAILURES;
615			else
616				mib_idx = LINUX_MIB_TCPRENOFAILURES;
617		}
618		if (mib_idx)
619			__NET_INC_STATS(sock_net(sk), mib_idx);
620	}
621
622	tcp_enter_loss(sk);
623
624	tcp_update_rto_stats(sk);
625	if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
626		/* Retransmission failed because of local congestion;
627		 * let senders fight for local resources conservatively.
628		 */
629		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
630					  TCP_RESOURCE_PROBE_INTERVAL,
631					  TCP_RTO_MAX);
632		goto out;
633	}
634
635	/* Increase the timeout each time we retransmit.  Note that
636	 * we do not increase the rtt estimate.  rto is initialized
637	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
638	 * that doubling rto each time is the least we can get away with.
639	 * In KA9Q, Karn uses this for the first few times, and then
640	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
641	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
642	 * defined in the protocol as the maximum possible RTT.  I guess
643	 * we'll have to use something other than TCP to talk to the
644	 * University of Mars.
645	 *
646	 * PAWS allows us longer timeouts and large windows, so once
647	 * implemented ftp to mars will work nicely. We will have to fix
648	 * the 120 second clamps though!
649	 */
650
651out_reset_timer:
652	/* If the stream is thin, use linear timeouts. Since 'icsk_backoff' is
653	 * used to reset the timer, set it to 0. Recalculate 'icsk_rto' as this
654	 * might be increased if the stream oscillates between thin and thick;
655	 * the old value might already be too high compared to the value
656	 * set by 'tcp_set_rto' in tcp_input.c, which resets the rto without
657	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
658	 * exponential backoff behaviour, to avoid continuing to hammer
659	 * linear-timeout retransmissions into a black hole.
660	 */
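	/* For reference (assumed defaults): tcp_stream_is_thin() means
	 * fewer than 4 packets in flight outside initial slow start, and
	 * TCP_THIN_LINEAR_RETRIES is 6, so a thin stream gets up to six
	 * un-backed-off retransmissions before exponential backoff resumes.
	 */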
661	if (sk->sk_state == TCP_ESTABLISHED &&
662	    (tp->thin_lto || READ_ONCE(net->ipv4.sysctl_tcp_thin_linear_timeouts)) &&
663	    tcp_stream_is_thin(tp) &&
664	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
665		icsk->icsk_backoff = 0;
666		icsk->icsk_rto = clamp(__tcp_set_rto(tp),
667				       tcp_rto_min(sk),
668				       TCP_RTO_MAX);
669	} else if (sk->sk_state != TCP_SYN_SENT ||
670		   tp->total_rto >
671		   READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts)) {
672		/* Use normal (exponential) backoff unless linear timeouts are
673		 * activated.
674		 */
675		icsk->icsk_backoff++;
676		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
677	}
678	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
679				  tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
680	if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0))
681		__sk_dst_reset(sk);
682
683out:;
684}
685
686/* Called with bottom-half processing disabled.
687   Called by tcp_write_timer() */
688void tcp_write_timer_handler(struct sock *sk)
689{
690	struct inet_connection_sock *icsk = inet_csk(sk);
691	int event;
692
693	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
694	    !icsk->icsk_pending)
695		return;
696
697	if (time_after(icsk->icsk_timeout, jiffies)) {
698		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
699		return;
700	}
701
702	tcp_mstamp_refresh(tcp_sk(sk));
703	event = icsk->icsk_pending;
704
705	switch (event) {
706	case ICSK_TIME_REO_TIMEOUT:
707		tcp_rack_reo_timeout(sk);
708		break;
709	case ICSK_TIME_LOSS_PROBE:
710		tcp_send_loss_probe(sk);
711		break;
712	case ICSK_TIME_RETRANS:
713		smp_store_release(&icsk->icsk_pending, 0);
714		tcp_retransmit_timer(sk);
715		break;
716	case ICSK_TIME_PROBE0:
717		smp_store_release(&icsk->icsk_pending, 0);
718		tcp_probe_timer(sk);
719		break;
720	}
721}
722
723static void tcp_write_timer(struct timer_list *t)
724{
725	struct inet_connection_sock *icsk =
726			from_timer(icsk, t, icsk_retransmit_timer);
727	struct sock *sk = &icsk->icsk_inet.sk;
728
729	/* Avoid locking the socket when there is no pending event. */
730	if (!smp_load_acquire(&icsk->icsk_pending))
731		goto out;
732
733	bh_lock_sock(sk);
734	if (!sock_owned_by_user(sk)) {
735		tcp_write_timer_handler(sk);
736	} else {
737		/* delegate our work to tcp_release_cb() */
738		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
739			sock_hold(sk);
740	}
741	bh_unlock_sock(sk);
742out:
743	sock_put(sk);
744}
745
746void tcp_syn_ack_timeout(const struct request_sock *req)
747{
748	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);
749
750	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
751}
752EXPORT_SYMBOL(tcp_syn_ack_timeout);
753
754void tcp_set_keepalive(struct sock *sk, int val)
755{
756	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
757		return;
758
759	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
760		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
761	else if (!val)
762		inet_csk_delete_keepalive_timer(sk);
763}
764EXPORT_SYMBOL_GPL(tcp_set_keepalive);
765
766
767static void tcp_keepalive_timer (struct timer_list *t)
768{
769	struct sock *sk = from_timer(sk, t, sk_timer);
770	struct inet_connection_sock *icsk = inet_csk(sk);
771	struct tcp_sock *tp = tcp_sk(sk);
772	u32 elapsed;
773
774	/* Only process if socket is not in use. */
775	bh_lock_sock(sk);
776	if (sock_owned_by_user(sk)) {
777		/* Try again later. */
778		inet_csk_reset_keepalive_timer (sk, HZ/20);
779		goto out;
780	}
781
782	if (sk->sk_state == TCP_LISTEN) {
783		pr_err("Hmm... keepalive on a LISTEN ???\n");
784		goto out;
785	}
786
787	tcp_mstamp_refresh(tp);
788	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
789		if (READ_ONCE(tp->linger2) >= 0) {
790			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;
791
792			if (tmo > 0) {
793				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
794				goto out;
795			}
796		}
797		tcp_send_active_reset(sk, GFP_ATOMIC, SK_RST_REASON_TCP_STATE);
798		goto death;
799	}
800
801	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
802	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
803		goto out;
804
805	elapsed = keepalive_time_when(tp);
806
807	/* It is alive without keepalive 8) */
808	if (tp->packets_out || !tcp_write_queue_empty(sk))
809		goto resched;
810
811	elapsed = keepalive_time_elapsed(tp);
812
813	if (elapsed >= keepalive_time_when(tp)) {
814		u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);
815
816		/* If the TCP_USER_TIMEOUT option is enabled, use that
817		 * to determine when to timeout instead.
818		 */
819		if ((user_timeout != 0 &&
820		    elapsed >= msecs_to_jiffies(user_timeout) &&
821		    icsk->icsk_probes_out > 0) ||
822		    (user_timeout == 0 &&
823		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
824			tcp_send_active_reset(sk, GFP_ATOMIC,
825					      SK_RST_REASON_TCP_KEEPALIVE_TIMEOUT);
826			tcp_write_err(sk);
827			goto out;
828		}
829		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
830			icsk->icsk_probes_out++;
831			elapsed = keepalive_intvl_when(tp);
832		} else {
833			/* If keepalive was lost due to local congestion,
834			 * try harder.
835			 */
836			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
837		}
838	} else {
839		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
840		elapsed = keepalive_time_when(tp) - elapsed;
841	}
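	/* With the usual defaults (tcp_keepalive_time = 7200 s,
	 * tcp_keepalive_intvl = 75 s, tcp_keepalive_probes = 9), an idle
	 * connection is first probed after two hours and, absent any
	 * reply, reset roughly 9 * 75 s = 675 s later.
	 */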
842
843resched:
844	inet_csk_reset_keepalive_timer (sk, elapsed);
845	goto out;
846
847death:
848	tcp_done(sk);
849
850out:
851	bh_unlock_sock(sk);
852	sock_put(sk);
853}
854
855static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
856{
857	struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
858	struct sock *sk = (struct sock *)tp;
859
860	bh_lock_sock(sk);
861	if (!sock_owned_by_user(sk)) {
862		if (tp->compressed_ack) {
863			/* Since we have to send one ack finally,
864			 * subtract one from tp->compressed_ack to keep
865			 * LINUX_MIB_TCPACKCOMPRESSED accurate.
866			 */
867			tp->compressed_ack--;
868			tcp_mstamp_refresh(tp);
869			tcp_send_ack(sk);
870		}
871	} else {
872		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
873				      &sk->sk_tsq_flags))
874			sock_hold(sk);
875	}
876	bh_unlock_sock(sk);
877
878	sock_put(sk);
879
880	return HRTIMER_NORESTART;
881}
882
883void tcp_init_xmit_timers(struct sock *sk)
884{
885	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
886				  &tcp_keepalive_timer);
887	hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
888		     HRTIMER_MODE_ABS_PINNED_SOFT);
889	tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;
890
891	hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC,
892		     HRTIMER_MODE_REL_PINNED_SOFT);
893	tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick;
894}
v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
  4 *		operating system.  INET is implemented using the  BSD Socket
  5 *		interface as the means of communication with the user level.
  6 *
  7 *		Implementation of the Transmission Control Protocol(TCP).
  8 *
  9 * Authors:	Ross Biro
 10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 11 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 12 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 13 *		Florian La Roche, <flla@stud.uni-sb.de>
 14 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 15 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 16 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 17 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 18 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 19 *		Jorge Cwik, <jorge@laser.satlink.net>
 20 */
 21
 22#include <linux/module.h>
 23#include <linux/gfp.h>
 24#include <net/tcp.h>
 25
 26static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
 27{
 28	struct inet_connection_sock *icsk = inet_csk(sk);
 29	u32 elapsed, start_ts;
 30	s32 remaining;
 31
 32	start_ts = tcp_sk(sk)->retrans_stamp;
 33	if (!icsk->icsk_user_timeout)
 34		return icsk->icsk_rto;
 35	elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
 36	remaining = icsk->icsk_user_timeout - elapsed;
 37	if (remaining <= 0)
 38		return 1; /* user timeout has passed; fire ASAP */
 39
 40	return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
 41}
 42
 43/**
 44 *  tcp_write_err() - close socket and save error info
 45 *  @sk:  The socket the error has appeared on.
 46 *
 47 *  Returns: Nothing (void)
 48 */
 49
 50static void tcp_write_err(struct sock *sk)
 51{
 52	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
 53	sk->sk_error_report(sk);
 54
 55	tcp_write_queue_purge(sk);
 56	tcp_done(sk);
 57	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
 58}
 59
 60/**
 61 *  tcp_out_of_resources() - Close socket if out of resources
 62 *  @sk:        pointer to current socket
 63 *  @do_reset:  send a last packet with reset flag
 64 *
 65 *  Do not allow orphaned sockets to eat all our resources.
 66 *  This is a direct violation of the TCP specs, but it is required
 67 *  to prevent DoS attacks. It is called when a retransmission timeout
 68 *  or zero probe timeout occurs on an orphaned socket.
 69 *
 70 *  Also close if our net namespace is exiting; in that case there is no
 71 *  hope of ever communicating again since all netns interfaces are already
 72 *  down (or about to be down), and we need to release our dst references,
 73 *  which have been moved to the netns loopback interface, so the namespace
 74 *  can finish exiting.  This condition is only possible if we are a kernel
 75 *  socket, as those do not hold references to the namespace.
 76 *
 77 *  The criteria are still not confirmed experimentally and may change.
 78 *  We kill the socket if:
 79 *  1. The number of orphaned sockets exceeds an administratively
 80 *     configured limit.
 81 *  2. We are under strong memory pressure.
 82 *  3. Our net namespace is exiting.
 83 */
 84static int tcp_out_of_resources(struct sock *sk, bool do_reset)
 85{
 86	struct tcp_sock *tp = tcp_sk(sk);
 87	int shift = 0;
 88
 89	/* If the peer does not open its window for a long time, or did not
 90	 * transmit anything for a long time, penalize it. */
 91	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
 92		shift++;
 93
 94	/* If some dubious ICMP arrived, penalize even more. */
 95	if (sk->sk_err_soft)
 96		shift++;
 97
 98	if (tcp_check_oom(sk, shift)) {
 99		/* Catch exceptional cases when the connection requires a reset.
100		 *      1. The last segment was sent recently. */
101		if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
102		    /*  2. Window is closed. */
103		    (!tp->snd_wnd && !tp->packets_out))
104			do_reset = true;
105		if (do_reset)
106			tcp_send_active_reset(sk, GFP_ATOMIC);
107		tcp_done(sk);
108		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
109		return 1;
110	}
111
112	if (!check_net(sock_net(sk))) {
113		/* Not possible to send reset; just close */
114		tcp_done(sk);
115		return 1;
116	}
117
118	return 0;
119}
120
121/**
122 *  tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
123 *  @sk:    Pointer to the current socket.
124 *  @alive: bool, socket alive state
125 */
126static int tcp_orphan_retries(struct sock *sk, bool alive)
127{
128	int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */
129
130	/* We know from an ICMP that something is wrong. */
131	if (sk->sk_err_soft && !alive)
132		retries = 0;
133
134	/* However, if socket sent something recently, select some safe
135	 * number of retries. 8 corresponds to >100 seconds with minimal
136	 * RTO of 200msec. */
137	if (retries == 0 && alive)
138		retries = 8;
139	return retries;
140}
141
142static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
143{
144	const struct net *net = sock_net(sk);
145	int mss;
146
147	/* Black hole detection */
148	if (!net->ipv4.sysctl_tcp_mtu_probing)
149		return;
150
151	if (!icsk->icsk_mtup.enabled) {
152		icsk->icsk_mtup.enabled = 1;
153		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
154	} else {
155		mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
156		mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
157		mss = max(mss, net->ipv4.sysctl_tcp_mtu_probe_floor);
158		mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);
159		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
160	}
161	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
162}
163
164static unsigned int tcp_model_timeout(struct sock *sk,
165				      unsigned int boundary,
166				      unsigned int rto_base)
167{
168	unsigned int linear_backoff_thresh, timeout;
169
170	linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);
171	if (boundary <= linear_backoff_thresh)
172		timeout = ((2 << boundary) - 1) * rto_base;
173	else
174		timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
175			(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
176	return jiffies_to_msecs(timeout);
177}
178/**
179 *  retransmits_timed_out() - returns true if this connection has timed out
180 *  @sk:       The current socket
181 *  @boundary: max number of retransmissions
182 *  @timeout:  A custom timeout value.
183 *             If set to 0 the default timeout is calculated and used,
184 *             based on TCP_RTO_MIN and the number of unsuccessful retransmits.
185 *
186 * The default "timeout" value this function can calculate and use
187 * is equivalent to the timeout of a TCP Connection
188 * after "boundary" unsuccessful, exponentially backed-off
189 * retransmissions with an initial RTO of TCP_RTO_MIN.
190 */
191static bool retransmits_timed_out(struct sock *sk,
192				  unsigned int boundary,
193				  unsigned int timeout)
194{
195	unsigned int start_ts;
196
197	if (!inet_csk(sk)->icsk_retransmits)
198		return false;
199
200	start_ts = tcp_sk(sk)->retrans_stamp;
201	if (likely(timeout == 0)) {
202		unsigned int rto_base = TCP_RTO_MIN;
203
204		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
205			rto_base = tcp_timeout_init(sk);
206		timeout = tcp_model_timeout(sk, boundary, rto_base);
207	}
208
209	return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
210}
211
212/* A write timeout has occurred. Process the after effects. */
213static int tcp_write_timeout(struct sock *sk)
214{
215	struct inet_connection_sock *icsk = inet_csk(sk);
216	struct tcp_sock *tp = tcp_sk(sk);
217	struct net *net = sock_net(sk);
218	bool expired = false, do_reset;
219	int retry_until;
220
221	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
222		if (icsk->icsk_retransmits) {
223			dst_negative_advice(sk);
224		} else {
225			sk_rethink_txhash(sk);
226		}
227		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
228		expired = icsk->icsk_retransmits >= retry_until;
229	} else {
230		if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
231			/* Black hole detection */
232			tcp_mtu_probing(icsk, sk);
233
234			dst_negative_advice(sk);
235		} else {
236			sk_rethink_txhash(sk);
237		}
238
239		retry_until = net->ipv4.sysctl_tcp_retries2;
240		if (sock_flag(sk, SOCK_DEAD)) {
241			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;
242
243			retry_until = tcp_orphan_retries(sk, alive);
244			do_reset = alive ||
245				!retransmits_timed_out(sk, retry_until, 0);
246
247			if (tcp_out_of_resources(sk, do_reset))
248				return 1;
249		}
250	}
251	if (!expired)
252		expired = retransmits_timed_out(sk, retry_until,
253						icsk->icsk_user_timeout);
254	tcp_fastopen_active_detect_blackhole(sk, expired);
255
256	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
257		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
258				  icsk->icsk_retransmits,
259				  icsk->icsk_rto, (int)expired);
260
261	if (expired) {
262		/* Has it gone just too far? */
263		tcp_write_err(sk);
264		return 1;
265	}
266
267	return 0;
268}
269
270/* Called with BH disabled */
271void tcp_delack_timer_handler(struct sock *sk)
272{
273	struct inet_connection_sock *icsk = inet_csk(sk);
274
275	sk_mem_reclaim_partial(sk);
276
277	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
278	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
279		goto out;
280
281	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
282		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
283		goto out;
284	}
285	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
286
287	if (inet_csk_ack_scheduled(sk)) {
288		if (!inet_csk_in_pingpong_mode(sk)) {
289			/* Delayed ACK missed: inflate ATO. */
290			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
291		} else {
292			/* Delayed ACK missed: leave pingpong mode and
293			 * deflate ATO.
294			 */
295			inet_csk_exit_pingpong_mode(sk);
296			icsk->icsk_ack.ato      = TCP_ATO_MIN;
297		}
298		tcp_mstamp_refresh(tcp_sk(sk));
299		tcp_send_ack(sk);
300		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
301	}
302
303out:
304	if (tcp_under_memory_pressure(sk))
305		sk_mem_reclaim(sk);
306}
307
308
309/**
310 *  tcp_delack_timer() - The TCP delayed ACK timeout handler
311 *  @t:  Pointer to the timer. (gets cast to struct sock *)
312 *
313 *  This function gets (indirectly) called when the kernel timer for a TCP packet
314 *  of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
315 *
316 *  Returns: Nothing (void)
317 */
318static void tcp_delack_timer(struct timer_list *t)
319{
320	struct inet_connection_sock *icsk =
321			from_timer(icsk, t, icsk_delack_timer);
322	struct sock *sk = &icsk->icsk_inet.sk;
323
324	bh_lock_sock(sk);
325	if (!sock_owned_by_user(sk)) {
326		tcp_delack_timer_handler(sk);
327	} else {
328		icsk->icsk_ack.blocked = 1;
329		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
330		/* delegate our work to tcp_release_cb() */
331		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
332			sock_hold(sk);
333	}
334	bh_unlock_sock(sk);
335	sock_put(sk);
336}
337
338static void tcp_probe_timer(struct sock *sk)
339{
340	struct inet_connection_sock *icsk = inet_csk(sk);
341	struct sk_buff *skb = tcp_send_head(sk);
342	struct tcp_sock *tp = tcp_sk(sk);
343	int max_probes;
344
345	if (tp->packets_out || !skb) {
346		icsk->icsk_probes_out = 0;
347		return;
348	}
349
350	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
351	 * long as the receiver continues to respond to probes. We support this by
352	 * default and reset icsk_probes_out with incoming ACKs. But if the
353	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
354	 * kill the socket when the retry count and the time exceed the
355	 * corresponding system limit. We also implement similar policy when
356	 * we use RTO to probe window in tcp_retransmit_timer().
357	 */
358	if (icsk->icsk_user_timeout) {
359		u32 elapsed = tcp_model_timeout(sk, icsk->icsk_probes_out,
360						tcp_probe0_base(sk));
361
362		if (elapsed >= icsk->icsk_user_timeout)
363			goto abort;
364	}
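	/* Unlike later kernels, which timestamp the probe sequence, this
	 * version models the elapsed time from icsk_probes_out: e.g. 3
	 * unanswered probes at the minimal 200 ms probe base model
	 * ((2 << 3) - 1) * 200 ms = 3 s of elapsed probing.
	 */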
365
366	max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
367	if (sock_flag(sk, SOCK_DEAD)) {
368		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
369
370		max_probes = tcp_orphan_retries(sk, alive);
371		if (!alive && icsk->icsk_backoff >= max_probes)
372			goto abort;
373		if (tcp_out_of_resources(sk, true))
374			return;
375	}
376
377	if (icsk->icsk_probes_out >= max_probes) {
378abort:		tcp_write_err(sk);
379	} else {
380		/* Only send another probe if we didn't close things up. */
381		tcp_send_probe0(sk);
382	}
383}
384
385/*
386 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
387 *	sk here is the child socket, not the parent (listener) socket.
388 */
389static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
390{
391	struct inet_connection_sock *icsk = inet_csk(sk);
392	int max_retries = icsk->icsk_syn_retries ? :
393	    sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
394	struct tcp_sock *tp = tcp_sk(sk);
395
396	req->rsk_ops->syn_ack_timeout(req);
397
398	if (req->num_timeout >= max_retries) {
399		tcp_write_err(sk);
400		return;
401	}
402	/* Lower cwnd after certain SYNACK timeout like tcp_init_transfer() */
403	if (icsk->icsk_retransmits == 1)
404		tcp_enter_loss(sk);
405	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
406	 * returned from rtx_syn_ack() to make it more persistent like
407	 * regular retransmit because if the child socket has been accepted
408	 * it's not good to give up too easily.
409	 */
410	inet_rtx_syn_ack(sk, req);
411	req->num_timeout++;
412	icsk->icsk_retransmits++;
413	if (!tp->retrans_stamp)
414		tp->retrans_stamp = tcp_time_stamp(tp);
415	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
416			  TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
417}
418
419
420/**
421 *  tcp_retransmit_timer() - The TCP retransmit timeout handler
422 *  @sk:  Pointer to the current socket.
423 *
424 *  This function gets called when the kernel timer for a TCP packet
425 *  of this socket expires.
426 *
427 *  It handles retransmission, timer adjustment and other necessary measures.
428 *
429 *  Returns: Nothing (void)
430 */
431void tcp_retransmit_timer(struct sock *sk)
432{
433	struct tcp_sock *tp = tcp_sk(sk);
434	struct net *net = sock_net(sk);
435	struct inet_connection_sock *icsk = inet_csk(sk);
436	struct request_sock *req;
437
438	req = rcu_dereference_protected(tp->fastopen_rsk,
439					lockdep_sock_is_held(sk));
440	if (req) {
441		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
442			     sk->sk_state != TCP_FIN_WAIT1);
443		tcp_fastopen_synack_timer(sk, req);
444		/* Before we receive ACK to our SYN-ACK don't retransmit
445		 * anything else (e.g., data or FIN segments).
446		 */
447		return;
448	}
449	if (!tp->packets_out || WARN_ON_ONCE(tcp_rtx_queue_empty(sk)))
450		return;
451
452	tp->tlp_high_seq = 0;
453
454	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
455	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
456		/* Receiver dastardly shrinks window. Our retransmits
457		 * become zero probes, but we should not time out this
458		 * connection. If the socket is an orphan, time it out;
459		 * we cannot allow such beasts to hang infinitely.
460		 */
461		struct inet_sock *inet = inet_sk(sk);
462		if (sk->sk_family == AF_INET) {
463			net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
464					    &inet->inet_daddr,
465					    ntohs(inet->inet_dport),
466					    inet->inet_num,
467					    tp->snd_una, tp->snd_nxt);
468		}
469#if IS_ENABLED(CONFIG_IPV6)
470		else if (sk->sk_family == AF_INET6) {
471			net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
472					    &sk->sk_v6_daddr,
473					    ntohs(inet->inet_dport),
474					    inet->inet_num,
475					    tp->snd_una, tp->snd_nxt);
476		}
477#endif
478		if (tcp_jiffies32 - tp->rcv_tstamp > TCP_RTO_MAX) {
479			tcp_write_err(sk);
480			goto out;
481		}
482		tcp_enter_loss(sk);
483		tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1);
484		__sk_dst_reset(sk);
485		goto out_reset_timer;
486	}
487
488	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
489	if (tcp_write_timeout(sk))
490		goto out;
491
492	if (icsk->icsk_retransmits == 0) {
493		int mib_idx = 0;
494
495		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
496			if (tcp_is_sack(tp))
497				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
498			else
499				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
500		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
501			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
502		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
503			   tp->sacked_out) {
504			if (tcp_is_sack(tp))
505				mib_idx = LINUX_MIB_TCPSACKFAILURES;
506			else
507				mib_idx = LINUX_MIB_TCPRENOFAILURES;
508		}
509		if (mib_idx)
510			__NET_INC_STATS(sock_net(sk), mib_idx);
511	}
512
513	tcp_enter_loss(sk);
514
515	icsk->icsk_retransmits++;
516	if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
517		/* Retransmission failed because of local congestion;
518		 * let senders fight for local resources conservatively.
519		 */
520		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
521					  TCP_RESOURCE_PROBE_INTERVAL,
522					  TCP_RTO_MAX);
523		goto out;
524	}
525
526	/* Increase the timeout each time we retransmit.  Note that
527	 * we do not increase the rtt estimate.  rto is initialized
528	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
529	 * that doubling rto each time is the least we can get away with.
530	 * In KA9Q, Karn uses this for the first few times, and then
531	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
532	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
533	 * defined in the protocol as the maximum possible RTT.  I guess
534	 * we'll have to use something other than TCP to talk to the
535	 * University of Mars.
536	 *
537	 * PAWS allows us longer timeouts and large windows, so once
538	 * implemented ftp to mars will work nicely. We will have to fix
539	 * the 120 second clamps though!
540	 */
541	icsk->icsk_backoff++;
542
543out_reset_timer:
544	/* If the stream is thin, use linear timeouts. Since 'icsk_backoff' is
545	 * used to reset the timer, set it to 0. Recalculate 'icsk_rto' as this
546	 * might be increased if the stream oscillates between thin and thick;
547	 * the old value might already be too high compared to the value
548	 * set by 'tcp_set_rto' in tcp_input.c, which resets the rto without
549	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
550	 * exponential backoff behaviour, to avoid continuing to hammer
551	 * linear-timeout retransmissions into a black hole.
552	 */
553	if (sk->sk_state == TCP_ESTABLISHED &&
554	    (tp->thin_lto || net->ipv4.sysctl_tcp_thin_linear_timeouts) &&
555	    tcp_stream_is_thin(tp) &&
556	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
557		icsk->icsk_backoff = 0;
558		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
559	} else {
560		/* Use normal (exponential) backoff */
561		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
562	}
563	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
564				  tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
565	if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0))
566		__sk_dst_reset(sk);
567
568out:;
569}
570
571/* Called with bottom-half processing disabled.
572   Called by tcp_write_timer() */
573void tcp_write_timer_handler(struct sock *sk)
574{
575	struct inet_connection_sock *icsk = inet_csk(sk);
576	int event;
577
578	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
579	    !icsk->icsk_pending)
580		goto out;
581
582	if (time_after(icsk->icsk_timeout, jiffies)) {
583		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
584		goto out;
585	}
586
587	tcp_mstamp_refresh(tcp_sk(sk));
588	event = icsk->icsk_pending;
589
590	switch (event) {
591	case ICSK_TIME_REO_TIMEOUT:
592		tcp_rack_reo_timeout(sk);
593		break;
594	case ICSK_TIME_LOSS_PROBE:
595		tcp_send_loss_probe(sk);
596		break;
597	case ICSK_TIME_RETRANS:
598		icsk->icsk_pending = 0;
599		tcp_retransmit_timer(sk);
600		break;
601	case ICSK_TIME_PROBE0:
602		icsk->icsk_pending = 0;
603		tcp_probe_timer(sk);
604		break;
605	}
606
607out:
608	sk_mem_reclaim(sk);
609}
610
611static void tcp_write_timer(struct timer_list *t)
612{
613	struct inet_connection_sock *icsk =
614			from_timer(icsk, t, icsk_retransmit_timer);
615	struct sock *sk = &icsk->icsk_inet.sk;
616
617	bh_lock_sock(sk);
618	if (!sock_owned_by_user(sk)) {
619		tcp_write_timer_handler(sk);
620	} else {
621		/* delegate our work to tcp_release_cb() */
622		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
623			sock_hold(sk);
624	}
625	bh_unlock_sock(sk);
626	sock_put(sk);
627}
628
629void tcp_syn_ack_timeout(const struct request_sock *req)
630{
631	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);
632
633	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
634}
635EXPORT_SYMBOL(tcp_syn_ack_timeout);
636
637void tcp_set_keepalive(struct sock *sk, int val)
638{
639	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
640		return;
641
642	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
643		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
644	else if (!val)
645		inet_csk_delete_keepalive_timer(sk);
646}
647EXPORT_SYMBOL_GPL(tcp_set_keepalive);
648
649
650static void tcp_keepalive_timer (struct timer_list *t)
651{
652	struct sock *sk = from_timer(sk, t, sk_timer);
653	struct inet_connection_sock *icsk = inet_csk(sk);
654	struct tcp_sock *tp = tcp_sk(sk);
655	u32 elapsed;
656
657	/* Only process if socket is not in use. */
658	bh_lock_sock(sk);
659	if (sock_owned_by_user(sk)) {
660		/* Try again later. */
661		inet_csk_reset_keepalive_timer (sk, HZ/20);
662		goto out;
663	}
664
665	if (sk->sk_state == TCP_LISTEN) {
666		pr_err("Hmm... keepalive on a LISTEN ???\n");
667		goto out;
668	}
669
670	tcp_mstamp_refresh(tp);
671	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
672		if (tp->linger2 >= 0) {
673			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;
674
675			if (tmo > 0) {
676				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
677				goto out;
678			}
679		}
680		tcp_send_active_reset(sk, GFP_ATOMIC);
681		goto death;
682	}
683
684	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
685	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
686		goto out;
687
688	elapsed = keepalive_time_when(tp);
689
690	/* It is alive without keepalive 8) */
691	if (tp->packets_out || !tcp_write_queue_empty(sk))
692		goto resched;
693
694	elapsed = keepalive_time_elapsed(tp);
695
696	if (elapsed >= keepalive_time_when(tp)) {
697		/* If the TCP_USER_TIMEOUT option is enabled, use that
698		 * to determine when to timeout instead.
699		 */
700		if ((icsk->icsk_user_timeout != 0 &&
701		    elapsed >= msecs_to_jiffies(icsk->icsk_user_timeout) &&
702		    icsk->icsk_probes_out > 0) ||
703		    (icsk->icsk_user_timeout == 0 &&
704		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
705			tcp_send_active_reset(sk, GFP_ATOMIC);
706			tcp_write_err(sk);
707			goto out;
708		}
709		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
710			icsk->icsk_probes_out++;
711			elapsed = keepalive_intvl_when(tp);
712		} else {
713			/* If keepalive was lost due to local congestion,
714			 * try harder.
715			 */
716			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
717		}
718	} else {
719		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
720		elapsed = keepalive_time_when(tp) - elapsed;
721	}
722
723	sk_mem_reclaim(sk);
724
725resched:
726	inet_csk_reset_keepalive_timer (sk, elapsed);
727	goto out;
728
729death:
730	tcp_done(sk);
731
732out:
733	bh_unlock_sock(sk);
734	sock_put(sk);
735}
736
737static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
738{
739	struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
740	struct sock *sk = (struct sock *)tp;
741
742	bh_lock_sock(sk);
743	if (!sock_owned_by_user(sk)) {
744		if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
745			tcp_send_ack(sk);
746	} else {
747		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
748				      &sk->sk_tsq_flags))
749			sock_hold(sk);
750	}
751	bh_unlock_sock(sk);
752
753	sock_put(sk);
754
755	return HRTIMER_NORESTART;
756}
757
758void tcp_init_xmit_timers(struct sock *sk)
759{
760	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
761				  &tcp_keepalive_timer);
762	hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
763		     HRTIMER_MODE_ABS_PINNED_SOFT);
764	tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;
765
766	hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC,
767		     HRTIMER_MODE_REL_PINNED_SOFT);
768	tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick;
769}