net/ipv4/tcp_timer.c (Linux v5.14.15)

// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 elapsed, start_ts;
	s32 remaining;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (!icsk->icsk_user_timeout)
		return icsk->icsk_rto;
	elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
	remaining = icsk->icsk_user_timeout - elapsed;
	if (remaining <= 0)
		return 1; /* user timeout has passed; fire ASAP */

	return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
}
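
/*
 * A minimal userspace sketch (not part of this file) of the knob the clamp
 * above honors: the TCP_USER_TIMEOUT socket option takes a value in
 * milliseconds and bounds how long written data may stay unacknowledged
 * before the kernel aborts the connection with ETIMEDOUT. Illustrative
 * only; assumes <netinet/tcp.h> exposes TCP_USER_TIMEOUT (Linux >= 2.6.37).
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <stdio.h>

static int set_user_timeout(int fd, unsigned int timeout_ms)
{
	/* Retransmission timers on this socket are then clamped so the
	 * connection errors out once timeout_ms elapse without an ACK. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
		       &timeout_ms, sizeof(timeout_ms)) < 0) {
		perror("setsockopt(TCP_USER_TIMEOUT)");
		return -1;
	}
	return 0;
}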

u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 remaining;
	s32 elapsed;

	if (!icsk->icsk_user_timeout || !icsk->icsk_probes_tstamp)
		return when;

	elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
	if (unlikely(elapsed < 0))
		elapsed = 0;
	remaining = msecs_to_jiffies(icsk->icsk_user_timeout) - elapsed;
	remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN);

	return min_t(u32, remaining, when);
}

/**
 *  tcp_write_err() - close socket and save error info
 *  @sk:  The socket the error has appeared on.
 *
 *  Returns: Nothing (void)
 */

static void tcp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk_error_report(sk);

	tcp_write_queue_purge(sk);
	tcp_done(sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/**
 *  tcp_out_of_resources() - Close socket if out of resources
 *  @sk:        pointer to current socket
 *  @do_reset:  send a last packet with reset flag
 *
 *  Do not allow orphaned sockets to eat all our resources.
 *  This is direct violation of TCP specs, but it is required
 *  to prevent DoS attacks. It is called when a retransmission timeout
 *  or zero probe timeout occurs on orphaned socket.
 *
 *  Also close if our net namespace is exiting; in that case there is no
 *  hope of ever communicating again since all netns interfaces are already
 *  down (or about to be down), and we need to release our dst references,
 *  which have been moved to the netns loopback interface, so the namespace
 *  can finish exiting.  This condition is only possible if we are a kernel
 *  socket, as those do not hold references to the namespace.
 *
 *  Criteria is still not confirmed experimentally and may change.
 *  We kill the socket, if:
 *  1. If number of orphaned sockets exceeds an administratively configured
 *     limit.
 *  2. If we have strong memory pressure.
 *  3. If our net namespace is exiting.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If peer does not open window for long time, or did not transmit
	 * anything for long time, penalize it. */
	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (sk->sk_err_soft)
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}

	if (!check_net(sock_net(sk))) {
		/* Not possible to send reset; just close */
		tcp_done(sk);
		return 1;
	}

	return 0;
}

/**
 *  tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
 *  @sk:    Pointer to the current socket.
 *  @alive: bool, socket alive state
 */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (sk->sk_err_soft && !alive)
		retries = 0;

	/* However, if socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with minimal
	 * RTO of 200msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}
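
/*
 * Sanity check of the "8 corresponds to >100 seconds" remark above: by the
 * closed form in tcp_model_timeout() below, 8 exponentially backed-off
 * retransmissions from a 200 msec base RTO consume
 * ((2 << 8) - 1) * 200 msec = 511 * 200 msec = 102.2 sec, just over the
 * promised 100 seconds.
 */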

static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	const struct net *net = sock_net(sk);
	int mss;

	/* Black hole detection */
	if (!net->ipv4.sysctl_tcp_mtu_probing)
		return;

	if (!icsk->icsk_mtup.enabled) {
		icsk->icsk_mtup.enabled = 1;
		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
	} else {
		mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
		mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
		mss = max(mss, net->ipv4.sysctl_tcp_mtu_probe_floor);
		mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);
		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
	}
	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
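
/*
 * A rough userspace model (illustrative, not kernel code) of one blackhole
 * probing step above, assuming the common sysctl defaults tcp_base_mss =
 * 1024 and tcp_mtu_probe_floor = tcp_min_snd_mss = 48: each write timeout
 * halves the current MSS, caps it at the base value, and never lets it
 * drop below the floor.
 */
static int mtu_probe_next_mss(int cur_mss)
{
	int mss = cur_mss >> 1;		/* halve, as the timeout path does */

	if (mss > 1024)			/* min(sysctl_tcp_base_mss, mss) */
		mss = 1024;
	if (mss < 48)			/* max() against the probe floor */
		mss = 48;
	return mss;
}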

static unsigned int tcp_model_timeout(struct sock *sk,
				      unsigned int boundary,
				      unsigned int rto_base)
{
	unsigned int linear_backoff_thresh, timeout;

	linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);
	if (boundary <= linear_backoff_thresh)
		timeout = ((2 << boundary) - 1) * rto_base;
	else
		timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
			(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	return jiffies_to_msecs(timeout);
}
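
/*
 * Self-contained sketch (userspace, illustrative) of the closed form above:
 * the total time consumed by "boundary" exponentially backed-off
 * retransmissions, with the per-attempt backoff capped at TCP_RTO_MAX.
 * Works in milliseconds rather than jiffies and assumes TCP_RTO_MAX = 120 s.
 * With boundary = 15 (the tcp_retries2 default) and a 200 msec base RTO it
 * yields 924600 msec, the familiar ~15.4 minute write timeout.
 */
#define EX_RTO_MAX_MS	(120 * 1000)

static unsigned int model_timeout_ms(unsigned int boundary,
				     unsigned int rto_base_ms)
{
	unsigned int thresh = 0;

	/* thresh = ilog2(TCP_RTO_MAX / rto_base): the number of doublings
	 * before the backoff saturates at TCP_RTO_MAX. */
	while ((1u << (thresh + 1)) <= EX_RTO_MAX_MS / rto_base_ms)
		thresh++;

	if (boundary <= thresh)
		return ((2 << boundary) - 1) * rto_base_ms;
	return ((2 << thresh) - 1) * rto_base_ms +
	       (boundary - thresh) * EX_RTO_MAX_MS;
}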

/**
 *  retransmits_timed_out() - returns true if this connection has timed out
 *  @sk:       The current socket
 *  @boundary: max number of retransmissions
 *  @timeout:  A custom timeout value.
 *             If set to 0 the default timeout is calculated and used.
 *             Using TCP_RTO_MIN and the number of unsuccessful retransmits.
 *
 * The default "timeout" value this function can calculate and use
 * is equivalent to the timeout of a TCP Connection
 * after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout)
{
	unsigned int start_ts;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (likely(timeout == 0)) {
		unsigned int rto_base = TCP_RTO_MIN;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			rto_base = tcp_timeout_init(sk);
		timeout = tcp_model_timeout(sk, boundary, rto_base);
	}

	return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
}

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	bool expired = false, do_reset;
	int retry_until;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits)
			__dst_negative_advice(sk);
		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
		expired = icsk->icsk_retransmits >= retry_until;
	} else {
		if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			__dst_negative_advice(sk);
		}

		retry_until = net->ipv4.sysctl_tcp_retries2;
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				!retransmits_timed_out(sk, retry_until, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}
	if (!expired)
		expired = retransmits_timed_out(sk, retry_until,
						icsk->icsk_user_timeout);
	tcp_fastopen_active_detect_blackhole(sk, expired);

	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
				  icsk->icsk_retransmits,
				  icsk->icsk_rto, (int)expired);

	if (expired) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}

	if (sk_rethink_txhash(sk)) {
		tp->timeout_rehash++;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);
	}

	return 0;
}

/* Called with BH disabled */
void tcp_delack_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_mem_reclaim_partial(sk);

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!inet_csk_in_pingpong_mode(sk)) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			inet_csk_exit_pingpong_mode(sk);
			icsk->icsk_ack.ato      = TCP_ATO_MIN;
		}
		tcp_mstamp_refresh(tcp_sk(sk));
		tcp_send_ack(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}

out:
	if (tcp_under_memory_pressure(sk))
		sk_mem_reclaim(sk);
}


/**
 *  tcp_delack_timer() - The TCP delayed ACK timeout handler
 *  @t:  Pointer to the timer. (gets casted to struct sock *)
 *
 *  This function gets (indirectly) called when the kernel timer for a TCP packet
 *  of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
 *
 *  Returns: Nothing (void)
 */
static void tcp_delack_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_delack_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb = tcp_send_head(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;

	if (tp->packets_out || !skb) {
		icsk->icsk_probes_out = 0;
		icsk->icsk_probes_tstamp = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond probes. We support this by
	 * default and reset icsk_probes_out with incoming ACKs. But if the
	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
	 * kill the socket when the retry count and the time exceeds the
	 * corresponding system limit. We also implement similar policy when
	 * we use RTO to probe window in tcp_retransmit_timer().
	 */
	if (!icsk->icsk_probes_tstamp)
		icsk->icsk_probes_tstamp = tcp_jiffies32;
	else if (icsk->icsk_user_timeout &&
		 (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
		 msecs_to_jiffies(icsk->icsk_user_timeout))
		goto abort;

	max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
	if (sock_flag(sk, SOCK_DEAD)) {
		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out >= max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}

/*
 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
 *	sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int max_retries = icsk->icsk_syn_retries ? :
	    sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
	struct tcp_sock *tp = tcp_sk(sk);

	req->rsk_ops->syn_ack_timeout(req);

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* Lower cwnd after certain SYNACK timeout like tcp_init_transfer() */
	if (icsk->icsk_retransmits == 1)
		tcp_enter_loss(sk);
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	icsk->icsk_retransmits++;
	if (!tp->retrans_stamp)
		tp->retrans_stamp = tcp_time_stamp(tp);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
			  TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}
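
/*
 * The SYN-ACKs retransmitted above exist only when the listener has TCP
 * Fast Open enabled. A server-side sketch (illustrative; assumes the
 * TCP_FASTOPEN socket option, the server bit (0x2) set in the
 * net.ipv4.tcp_fastopen sysctl, and the same <netinet/tcp.h> /
 * <sys/socket.h> headers as the TCP_USER_TIMEOUT sketch earlier):
 */
static int enable_fastopen_listener(int listen_fd)
{
	int qlen = 16;	/* max pending, not yet accepted TFO requests */

	return setsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN,
			  &qlen, sizeof(qlen));
}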


/**
 *  tcp_retransmit_timer() - The TCP retransmit timeout handler
 *  @sk:  Pointer to the current socket.
 *
 *  This function gets called when the kernel timer for a TCP packet
 *  of this socket expires.
 *
 *  It handles retransmission, timer adjustment and other necessary measures.
 *
 *  Returns: Nothing (void)
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *req;
	struct sk_buff *skb;

	req = rcu_dereference_protected(tp->fastopen_rsk,
					lockdep_sock_is_held(sk));
	if (req) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk, req);
		/* Before we receive ACK to our SYN-ACK don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}

	if (!tp->packets_out)
		return;

	skb = tcp_rtx_queue_head(sk);
	if (WARN_ON_ONCE(!skb))
		return;

	tp->tlp_high_seq = 0;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not timeout this
		 * connection. If the socket is an orphan, time it out,
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &inet->inet_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &sk->sk_v6_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#endif
		if (tcp_jiffies32 - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, skb, 1);
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx = 0;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		}
		if (mib_idx)
			__NET_INC_STATS(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	icsk->icsk_retransmits++;
	if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
		/* Retransmission failed because of local congestion,
		 * Let senders fight for local resources conservatively.
		 */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  TCP_RESOURCE_PROBE_INTERVAL,
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	icsk->icsk_backoff++;

out_reset_timer:
	/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
	 * might be increased if the stream oscillates between thin and thick,
	 * thus the old value might already be too high compared to the value
	 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
	 * exponential backoff behaviour to avoid continue hammering
	 * linear-timeout retransmissions into a black hole
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || net->ipv4.sysctl_tcp_thin_linear_timeouts) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
	} else {
		/* Use normal (exponential) backoff */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
	if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0))
		__sk_dst_reset(sk);

out:;
}
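
/*
 * The thin-stream branch in out_reset_timer above can also be requested per
 * socket (tp->thin_lto) instead of system-wide via the
 * tcp_thin_linear_timeouts sysctl. A sketch, assuming the headers expose
 * TCP_THIN_LINEAR_TIMEOUTS (Linux >= 2.6.34); same headers as the sketches
 * above:
 */
static int enable_thin_linear_timeouts(int fd)
{
	int one = 1;

	/* The first TCP_THIN_LINEAR_RETRIES (6) timeouts then back off
	 * linearly instead of exponentially, which helps sparse,
	 * latency-sensitive streams such as game or control traffic. */
	return setsockopt(fd, IPPROTO_TCP, TCP_THIN_LINEAR_TIMEOUTS,
			  &one, sizeof(one));
}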

/* Called with bottom-half processing disabled.
   Called by tcp_write_timer() */
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

	tcp_mstamp_refresh(tcp_sk(sk));
	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_REO_TIMEOUT:
		tcp_rack_reo_timeout(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}

out:
	sk_mem_reclaim(sk);
}

static void tcp_write_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}
EXPORT_SYMBOL_GPL(tcp_set_keepalive);
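
/*
 * Userspace view of the keepalive machinery implemented by
 * tcp_keepalive_timer() below: SO_KEEPALIVE arms the timer, and the
 * per-socket options override the net.ipv4.tcp_keepalive_* sysctls.
 * A sketch with illustrative values; same headers as the sketches above.
 */
static int enable_keepalive(int fd)
{
	int on = 1, idle = 60, intvl = 10, cnt = 5;

	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) < 0)
		return -1;
	/* Seconds of idle time before the first probe (keepalive_time_when()). */
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)) < 0)
		return -1;
	/* Seconds between unanswered probes (keepalive_intvl_when()). */
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)) < 0)
		return -1;
	/* Unanswered probes before the connection is reset (keepalive_probes()). */
	return setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
}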


static void tcp_keepalive_timer (struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer (sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	tcp_mstamp_refresh(tp);
	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || !tcp_write_queue_empty(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((icsk->icsk_user_timeout != 0 &&
		    elapsed >= msecs_to_jiffies(icsk->icsk_user_timeout) &&
		    icsk->icsk_probes_out > 0) ||
		    (icsk->icsk_user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer (sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
	struct sock *sk = (struct sock *)tp;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		if (tp->compressed_ack) {
			/* Since we have to send one ack finally,
			 * subtract one from tp->compressed_ack to keep
			 * LINUX_MIB_TCPACKCOMPRESSED accurate.
			 */
			tp->compressed_ack--;
			tcp_send_ack(sk);
		}
	} else {
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return HRTIMER_NORESTART;
}

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
	hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_PINNED_SOFT);
	tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;

	hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED_SOFT);
	tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick;
}