v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
  4 *		operating system.  INET is implemented using the  BSD Socket
  5 *		interface as the means of communication with the user level.
  6 *
  7 *		Implementation of the Transmission Control Protocol(TCP).
  8 *
  9 * Authors:	Ross Biro
 10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 11 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 12 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 13 *		Florian La Roche, <flla@stud.uni-sb.de>
 14 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 15 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 16 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 17 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 18 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 19 *		Jorge Cwik, <jorge@laser.satlink.net>
 20 */
 21
 22#include <linux/module.h>
 23#include <linux/gfp.h>
 24#include <net/tcp.h>
 25
 26static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
 27{
 28	struct inet_connection_sock *icsk = inet_csk(sk);
 29	u32 elapsed, start_ts;
 30	s32 remaining;
 31
 32	start_ts = tcp_sk(sk)->retrans_stamp;
 33	if (!icsk->icsk_user_timeout)
 34		return icsk->icsk_rto;
 35	elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
 36	remaining = icsk->icsk_user_timeout - elapsed;
 37	if (remaining <= 0)
 38		return 1; /* user timeout has passed; fire ASAP */
 39
 40	return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
 41}
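/*
 * Worked example (sketch): with TCP_USER_TIMEOUT set to 5000 ms and
 * 4900 ms already elapsed since retrans_stamp, remaining is 100 ms,
 * so the next RTO becomes min(icsk_rto, msecs_to_jiffies(100)); once
 * the budget is exhausted the function returns 1 jiffy so the timer
 * fires immediately and the write-timeout path can abort the flow.
 *
 * icsk_user_timeout is populated from user space roughly like this
 * (sketch; fd is assumed to be a connected TCP socket):
 *
 *	unsigned int tmo_ms = 5000;	// milliseconds
 *	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &tmo_ms, sizeof(tmo_ms));
 */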
 42
 43u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
 44{
 45	struct inet_connection_sock *icsk = inet_csk(sk);
 46	u32 remaining;
 47	s32 elapsed;
 48
 49	if (!icsk->icsk_user_timeout || !icsk->icsk_probes_tstamp)
 50		return when;
 51
 52	elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
 53	if (unlikely(elapsed < 0))
 54		elapsed = 0;
 55	remaining = msecs_to_jiffies(icsk->icsk_user_timeout) - elapsed;
 56	remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN);
 57
 58	return min_t(u32, remaining, when);
 59}
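/*
 * Example (sketch): if TCP_USER_TIMEOUT is 10000 ms and 9990 ms have
 * passed since the first unanswered zero-window probe, the remaining
 * budget is floored at TCP_TIMEOUT_MIN (2 jiffies) and then capped by
 * @when, so one final probe still goes out before the abort.
 */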
 60
 61/**
 62 *  tcp_write_err() - close socket and save error info
 63 *  @sk:  The socket the error has appeared on.
 64 *
 65 *  Returns: Nothing (void)
 66 */
 67
 68static void tcp_write_err(struct sock *sk)
 69{
 70	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
 71	sk_error_report(sk);
 72
 73	tcp_write_queue_purge(sk);
 74	tcp_done(sk);
 75	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
 76}
 77
 78/**
 79 *  tcp_out_of_resources() - Close socket if out of resources
 80 *  @sk:        pointer to current socket
 81 *  @do_reset:  send a last packet with reset flag
 82 *
 83 *  Do not allow orphaned sockets to eat all our resources.
 84 *  This is direct violation of TCP specs, but it is required
 85 *  to prevent DoS attacks. It is called when a retransmission timeout
 86 *  or zero probe timeout occurs on orphaned socket.
 87 *
 88 *  Also close if our net namespace is exiting; in that case there is no
 89 *  hope of ever communicating again since all netns interfaces are already
 90 *  down (or about to be down), and we need to release our dst references,
 91 *  which have been moved to the netns loopback interface, so the namespace
 92 *  can finish exiting.  This condition is only possible if we are a kernel
 93 *  socket, as those do not hold references to the namespace.
 94 *
 95 *  The criteria are still not confirmed experimentally and may change.
 96 *  We kill the socket if:
 97 *  1. The number of orphaned sockets exceeds an administratively configured
 98 *     limit.
 99 *  2. We are under strong memory pressure.
100 *  3. Our net namespace is exiting.
101 */
102static int tcp_out_of_resources(struct sock *sk, bool do_reset)
103{
104	struct tcp_sock *tp = tcp_sk(sk);
105	int shift = 0;
106
107	/* If peer does not open window for long time, or did not transmit
108	 * anything for long time, penalize it. */
109	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
110		shift++;
111
112	/* If some dubious ICMP arrived, penalize even more. */
113	if (sk->sk_err_soft)
114		shift++;
115
116	if (tcp_check_oom(sk, shift)) {
117		/* Catch exceptional cases, when connection requires reset.
118		 *      1. Last segment was sent recently. */
119		if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
120		    /*  2. Window is closed. */
121		    (!tp->snd_wnd && !tp->packets_out))
122			do_reset = true;
123		if (do_reset)
124			tcp_send_active_reset(sk, GFP_ATOMIC);
125		tcp_done(sk);
126		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
127		return 1;
128	}
129
130	if (!check_net(sock_net(sk))) {
131		/* Not possible to send reset; just close */
132		tcp_done(sk);
133		return 1;
134	}
135
136	return 0;
137}
138
139/**
140 *  tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
141 *  @sk:    Pointer to the current socket.
142 *  @alive: bool, socket alive state
143 */
144static int tcp_orphan_retries(struct sock *sk, bool alive)
145{
146	int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */
147
148	/* We know from an ICMP that something is wrong. */
149	if (sk->sk_err_soft && !alive)
150		retries = 0;
151
152	/* However, if socket sent something recently, select some safe
153	 * number of retries. 8 corresponds to >100 seconds with minimal
154	 * RTO of 200msec. */
155	if (retries == 0 && alive)
156		retries = 8;
157	return retries;
158}
159
160static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
161{
162	const struct net *net = sock_net(sk);
163	int mss;
164
165	/* Black hole detection */
166	if (!READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing))
167		return;
168
169	if (!icsk->icsk_mtup.enabled) {
170		icsk->icsk_mtup.enabled = 1;
171		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
172	} else {
173		mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
174		mss = min(READ_ONCE(net->ipv4.sysctl_tcp_base_mss), mss);
175		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_mtu_probe_floor));
176		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_min_snd_mss));
177		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
178	}
179	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
180}
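/*
 * Example (sketch, IPv4 without options): with icsk_mtup.search_low at
 * an MTU of 1500, the candidate MSS is tcp_mtu_to_mss(sk, 1500) / 2,
 * i.e. about 1460 / 2 = 730; after the base-MSS cap and the two floor
 * sysctls are applied, search_low drops to roughly an MTU of 770, so
 * each further timeout halves the black-hole search window again.
 */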
181
182static unsigned int tcp_model_timeout(struct sock *sk,
183				      unsigned int boundary,
184				      unsigned int rto_base)
185{
186	unsigned int linear_backoff_thresh, timeout;
187
188	linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);
189	if (boundary <= linear_backoff_thresh)
190		timeout = ((2 << boundary) - 1) * rto_base;
191	else
192		timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
193			(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
194	return jiffies_to_msecs(timeout);
195}
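/*
 * Worked example (sketch): with rto_base = TCP_RTO_MIN (200 ms) and
 * TCP_RTO_MAX = 120 s, linear_backoff_thresh = ilog2(600) = 9.  For
 * boundary = 15 (the default tcp_retries2), the modeled timeout is
 * ((2 << 9) - 1) * 200 ms + (15 - 9) * 120 s = 204.6 s + 720 s =
 * 924.6 s, i.e. a fully backed-off connection is declared dead after
 * roughly 15.5 minutes.
 */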
196/**
197 *  retransmits_timed_out() - returns true if this connection has timed out
198 *  @sk:       The current socket
199 *  @boundary: max number of retransmissions
200 *  @timeout:  A custom timeout value.
 201 *             If set to 0, the default timeout is calculated and used,
 202 *             using TCP_RTO_MIN and the number of unsuccessful retransmits.
203 *
204 * The default "timeout" value this function can calculate and use
205 * is equivalent to the timeout of a TCP Connection
206 * after "boundary" unsuccessful, exponentially backed-off
207 * retransmissions with an initial RTO of TCP_RTO_MIN.
208 */
209static bool retransmits_timed_out(struct sock *sk,
210				  unsigned int boundary,
211				  unsigned int timeout)
212{
213	unsigned int start_ts;
214
215	if (!inet_csk(sk)->icsk_retransmits)
216		return false;
217
218	start_ts = tcp_sk(sk)->retrans_stamp;
219	if (likely(timeout == 0)) {
220		unsigned int rto_base = TCP_RTO_MIN;
221
222		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
223			rto_base = tcp_timeout_init(sk);
224		timeout = tcp_model_timeout(sk, boundary, rto_base);
225	}
226
227	return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
228}
229
230/* A write timeout has occurred. Process the after effects. */
231static int tcp_write_timeout(struct sock *sk)
232{
233	struct inet_connection_sock *icsk = inet_csk(sk);
234	struct tcp_sock *tp = tcp_sk(sk);
235	struct net *net = sock_net(sk);
236	bool expired = false, do_reset;
237	int retry_until;
238
239	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
240		if (icsk->icsk_retransmits)
241			__dst_negative_advice(sk);
242		retry_until = icsk->icsk_syn_retries ? :
243			READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);
244		expired = icsk->icsk_retransmits >= retry_until;
245	} else {
246		if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) {
247			/* Black hole detection */
248			tcp_mtu_probing(icsk, sk);
249
250			__dst_negative_advice(sk);
251		}
252
253		retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2);
254		if (sock_flag(sk, SOCK_DEAD)) {
255			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;
256
257			retry_until = tcp_orphan_retries(sk, alive);
258			do_reset = alive ||
259				!retransmits_timed_out(sk, retry_until, 0);
260
261			if (tcp_out_of_resources(sk, do_reset))
262				return 1;
263		}
264	}
265	if (!expired)
266		expired = retransmits_timed_out(sk, retry_until,
267						icsk->icsk_user_timeout);
268	tcp_fastopen_active_detect_blackhole(sk, expired);
269
270	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
271		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
272				  icsk->icsk_retransmits,
273				  icsk->icsk_rto, (int)expired);
274
275	if (expired) {
276		/* Has it gone just too far? */
277		tcp_write_err(sk);
278		return 1;
279	}
280
281	if (sk_rethink_txhash(sk)) {
282		tp->timeout_rehash++;
283		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);
284	}
285
286	return 0;
287}
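/*
 * Example (sketch): in SYN_SENT with the default tcp_syn_retries of 6
 * and an initial RTO of TCP_TIMEOUT_INIT (1 s), the SYN is retried at
 * about t = 1, 3, 7, 15, 31 and 63 s; when the timer fires again at
 * t = 127 s, icsk_retransmits has reached retry_until, "expired" is
 * set, and tcp_write_err() aborts the connect attempt.
 */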
288
289/* Called with BH disabled */
290void tcp_delack_timer_handler(struct sock *sk)
291{
292	struct inet_connection_sock *icsk = inet_csk(sk);
293
294	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
295	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
296		return;
297
298	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
299		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
300		return;
301	}
302	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
303
304	if (inet_csk_ack_scheduled(sk)) {
305		if (!inet_csk_in_pingpong_mode(sk)) {
306			/* Delayed ACK missed: inflate ATO. */
307			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
308		} else {
309			/* Delayed ACK missed: leave pingpong mode and
310			 * deflate ATO.
311			 */
312			inet_csk_exit_pingpong_mode(sk);
313			icsk->icsk_ack.ato      = TCP_ATO_MIN;
314		}
315		tcp_mstamp_refresh(tcp_sk(sk));
316		tcp_send_ack(sk);
317		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
318	}
319}
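/*
 * Example (sketch): when a scheduled delayed ACK is missed, a socket
 * outside pingpong mode doubles ato (capped at icsk_rto) to be less
 * aggressive next time, while a pingpong-mode socket falls back to
 * immediate-ACK behaviour with ato = TCP_ATO_MIN (HZ / 25, i.e.
 * 40 ms).  Either way the overdue ACK is sent here via tcp_send_ack().
 */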
320
321
322/**
323 *  tcp_delack_timer() - The TCP delayed ACK timeout handler
 324 *  @t:  Pointer to the timer. (gets cast to struct sock *)
325 *
326 *  This function gets (indirectly) called when the kernel timer for a TCP packet
327 *  of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
328 *
329 *  Returns: Nothing (void)
330 */
331static void tcp_delack_timer(struct timer_list *t)
332{
333	struct inet_connection_sock *icsk =
334			from_timer(icsk, t, icsk_delack_timer);
335	struct sock *sk = &icsk->icsk_inet.sk;
336
337	bh_lock_sock(sk);
338	if (!sock_owned_by_user(sk)) {
339		tcp_delack_timer_handler(sk);
340	} else {
341		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
 342		/* delegate our work to tcp_release_cb() */
343		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
344			sock_hold(sk);
345	}
346	bh_unlock_sock(sk);
347	sock_put(sk);
348}
349
350static void tcp_probe_timer(struct sock *sk)
351{
352	struct inet_connection_sock *icsk = inet_csk(sk);
353	struct sk_buff *skb = tcp_send_head(sk);
354	struct tcp_sock *tp = tcp_sk(sk);
355	int max_probes;
356
357	if (tp->packets_out || !skb) {
358		icsk->icsk_probes_out = 0;
359		icsk->icsk_probes_tstamp = 0;
360		return;
361	}
362
363	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
364	 * long as the receiver continues to respond probes. We support this by
365	 * default and reset icsk_probes_out with incoming ACKs. But if the
366	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
367	 * kill the socket when the retry count and the time exceeds the
368	 * corresponding system limit. We also implement similar policy when
369	 * we use RTO to probe window in tcp_retransmit_timer().
370	 */
371	if (!icsk->icsk_probes_tstamp)
372		icsk->icsk_probes_tstamp = tcp_jiffies32;
373	else if (icsk->icsk_user_timeout &&
374		 (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
375		 msecs_to_jiffies(icsk->icsk_user_timeout))
376		goto abort;
377
378	max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2);
379	if (sock_flag(sk, SOCK_DEAD)) {
380		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
381
382		max_probes = tcp_orphan_retries(sk, alive);
383		if (!alive && icsk->icsk_backoff >= max_probes)
384			goto abort;
385		if (tcp_out_of_resources(sk, true))
386			return;
387	}
388
389	if (icsk->icsk_probes_out >= max_probes) {
390abort:		tcp_write_err(sk);
391	} else {
392		/* Only send another probe if we didn't close things up. */
393		tcp_send_probe0(sk);
394	}
395}
396
397/*
398 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
399 *	sk here is the child socket, not the parent (listener) socket.
400 */
401static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
402{
403	struct inet_connection_sock *icsk = inet_csk(sk);
404	struct tcp_sock *tp = tcp_sk(sk);
405	int max_retries;
406
407	req->rsk_ops->syn_ack_timeout(req);
408
409	/* add one more retry for fastopen */
410	max_retries = icsk->icsk_syn_retries ? :
411		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_synack_retries) + 1;
412
413	if (req->num_timeout >= max_retries) {
414		tcp_write_err(sk);
415		return;
416	}
417	/* Lower cwnd after certain SYNACK timeout like tcp_init_transfer() */
418	if (icsk->icsk_retransmits == 1)
419		tcp_enter_loss(sk);
420	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
421	 * returned from rtx_syn_ack() to make it more persistent like
422	 * regular retransmit because if the child socket has been accepted
423	 * it's not good to give up too easily.
424	 */
425	inet_rtx_syn_ack(sk, req);
426	req->num_timeout++;
427	icsk->icsk_retransmits++;
428	if (!tp->retrans_stamp)
429		tp->retrans_stamp = tcp_time_stamp(tp);
430	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
431			  req->timeout << req->num_timeout, TCP_RTO_MAX);
432}
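/*
 * Example (sketch): with req->timeout starting at TCP_TIMEOUT_INIT
 * (1 s), the rearm interval req->timeout << req->num_timeout yields
 * SYNACK retransmits after about 1, 2, 4, 8, ... seconds, and with
 * the default tcp_synack_retries of 5 the Fast Open child gets
 * max_retries = 5 + 1 = 6 attempts before tcp_write_err() fires.
 */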
433
434
435/**
436 *  tcp_retransmit_timer() - The TCP retransmit timeout handler
437 *  @sk:  Pointer to the current socket.
438 *
439 *  This function gets called when the kernel timer for a TCP packet
440 *  of this socket expires.
441 *
442 *  It handles retransmission, timer adjustment and other necessary measures.
443 *
444 *  Returns: Nothing (void)
445 */
446void tcp_retransmit_timer(struct sock *sk)
447{
448	struct tcp_sock *tp = tcp_sk(sk);
449	struct net *net = sock_net(sk);
450	struct inet_connection_sock *icsk = inet_csk(sk);
451	struct request_sock *req;
452	struct sk_buff *skb;
453
454	req = rcu_dereference_protected(tp->fastopen_rsk,
455					lockdep_sock_is_held(sk));
456	if (req) {
457		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
458			     sk->sk_state != TCP_FIN_WAIT1);
459		tcp_fastopen_synack_timer(sk, req);
460		/* Before we receive ACK to our SYN-ACK don't retransmit
461		 * anything else (e.g., data or FIN segments).
462		 */
463		return;
464	}
465
466	if (!tp->packets_out)
467		return;
468
469	skb = tcp_rtx_queue_head(sk);
470	if (WARN_ON_ONCE(!skb))
471		return;
472
473	tp->tlp_high_seq = 0;
474
475	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
476	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
477		/* Receiver dastardly shrinks window. Our retransmits
478		 * become zero probes, but we should not timeout this
479		 * connection. If the socket is an orphan, time it out,
480		 * we cannot allow such beasts to hang infinitely.
481		 */
482		struct inet_sock *inet = inet_sk(sk);
483		if (sk->sk_family == AF_INET) {
484			net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
485					    &inet->inet_daddr,
486					    ntohs(inet->inet_dport),
487					    inet->inet_num,
488					    tp->snd_una, tp->snd_nxt);
489		}
490#if IS_ENABLED(CONFIG_IPV6)
491		else if (sk->sk_family == AF_INET6) {
492			net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
493					    &sk->sk_v6_daddr,
494					    ntohs(inet->inet_dport),
495					    inet->inet_num,
496					    tp->snd_una, tp->snd_nxt);
497		}
498#endif
499		if (tcp_jiffies32 - tp->rcv_tstamp > TCP_RTO_MAX) {
500			tcp_write_err(sk);
501			goto out;
502		}
503		tcp_enter_loss(sk);
504		tcp_retransmit_skb(sk, skb, 1);
505		__sk_dst_reset(sk);
506		goto out_reset_timer;
507	}
508
509	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
510	if (tcp_write_timeout(sk))
511		goto out;
512
513	if (icsk->icsk_retransmits == 0) {
514		int mib_idx = 0;
515
516		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
517			if (tcp_is_sack(tp))
518				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
519			else
520				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
521		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
522			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
523		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
524			   tp->sacked_out) {
525			if (tcp_is_sack(tp))
526				mib_idx = LINUX_MIB_TCPSACKFAILURES;
527			else
528				mib_idx = LINUX_MIB_TCPRENOFAILURES;
529		}
530		if (mib_idx)
531			__NET_INC_STATS(sock_net(sk), mib_idx);
532	}
533
534	tcp_enter_loss(sk);
535
536	icsk->icsk_retransmits++;
537	if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
538		/* Retransmission failed because of local congestion,
539		 * Let senders fight for local resources conservatively.
540		 */
541		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
542					  TCP_RESOURCE_PROBE_INTERVAL,
543					  TCP_RTO_MAX);
544		goto out;
545	}
546
547	/* Increase the timeout each time we retransmit.  Note that
548	 * we do not increase the rtt estimate.  rto is initialized
549	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
550	 * that doubling rto each time is the least we can get away with.
551	 * In KA9Q, Karn uses this for the first few times, and then
552	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
553	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
554	 * defined in the protocol as the maximum possible RTT.  I guess
555	 * we'll have to use something other than TCP to talk to the
556	 * University of Mars.
557	 *
558	 * PAWS allows us longer timeouts and large windows, so once
559	 * implemented ftp to mars will work nicely. We will have to fix
560	 * the 120 second clamps though!
561	 */
562	icsk->icsk_backoff++;
563
564out_reset_timer:
565	/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
566	 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
567	 * might be increased if the stream oscillates between thin and thick,
568	 * thus the old value might already be too high compared to the value
569	 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
570	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
 571 * exponential backoff behaviour to avoid continuing to hammer
 572 * linear-timeout retransmissions into a black hole.
573	 */
574	if (sk->sk_state == TCP_ESTABLISHED &&
575	    (tp->thin_lto || READ_ONCE(net->ipv4.sysctl_tcp_thin_linear_timeouts)) &&
576	    tcp_stream_is_thin(tp) &&
577	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
578		icsk->icsk_backoff = 0;
579		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
580	} else {
581		/* Use normal (exponential) backoff */
582		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
583	}
584	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
585				  tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
586	if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0))
587		__sk_dst_reset(sk);
588
589out:;
590}
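/*
 * Example (sketch): each pass through out_reset_timer doubles the RTO,
 * so a flow that started at 1 s backs off through 2, 4, 8, ... s up to
 * TCP_RTO_MAX (120 s).  A thin stream (roughly, fewer than four
 * packets in flight) with thin_lto or the sysctl enabled instead
 * clears the backoff and recomputes the RTO from the current RTT
 * estimate for the first TCP_THIN_LINEAR_RETRIES (6) retransmissions.
 */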
591
592/* Called with bottom-half processing disabled.
593   Called by tcp_write_timer() */
594void tcp_write_timer_handler(struct sock *sk)
595{
596	struct inet_connection_sock *icsk = inet_csk(sk);
597	int event;
598
599	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
600	    !icsk->icsk_pending)
601		return;
602
603	if (time_after(icsk->icsk_timeout, jiffies)) {
604		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
605		return;
606	}
607
608	tcp_mstamp_refresh(tcp_sk(sk));
609	event = icsk->icsk_pending;
610
611	switch (event) {
612	case ICSK_TIME_REO_TIMEOUT:
613		tcp_rack_reo_timeout(sk);
614		break;
615	case ICSK_TIME_LOSS_PROBE:
616		tcp_send_loss_probe(sk);
617		break;
618	case ICSK_TIME_RETRANS:
619		icsk->icsk_pending = 0;
620		tcp_retransmit_timer(sk);
621		break;
622	case ICSK_TIME_PROBE0:
623		icsk->icsk_pending = 0;
624		tcp_probe_timer(sk);
625		break;
626	}
627}
628
629static void tcp_write_timer(struct timer_list *t)
630{
631	struct inet_connection_sock *icsk =
632			from_timer(icsk, t, icsk_retransmit_timer);
633	struct sock *sk = &icsk->icsk_inet.sk;
634
635	bh_lock_sock(sk);
636	if (!sock_owned_by_user(sk)) {
637		tcp_write_timer_handler(sk);
638	} else {
639		/* delegate our work to tcp_release_cb() */
640		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
641			sock_hold(sk);
642	}
643	bh_unlock_sock(sk);
644	sock_put(sk);
645}
646
647void tcp_syn_ack_timeout(const struct request_sock *req)
648{
649	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);
650
651	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
652}
653EXPORT_SYMBOL(tcp_syn_ack_timeout);
654
655void tcp_set_keepalive(struct sock *sk, int val)
656{
657	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
658		return;
659
660	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
661		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
662	else if (!val)
663		inet_csk_delete_keepalive_timer(sk);
664}
665EXPORT_SYMBOL_GPL(tcp_set_keepalive);
666
667
668static void tcp_keepalive_timer (struct timer_list *t)
669{
670	struct sock *sk = from_timer(sk, t, sk_timer);
671	struct inet_connection_sock *icsk = inet_csk(sk);
672	struct tcp_sock *tp = tcp_sk(sk);
673	u32 elapsed;
674
675	/* Only process if socket is not in use. */
676	bh_lock_sock(sk);
677	if (sock_owned_by_user(sk)) {
678		/* Try again later. */
679		inet_csk_reset_keepalive_timer (sk, HZ/20);
680		goto out;
681	}
682
683	if (sk->sk_state == TCP_LISTEN) {
684		pr_err("Hmm... keepalive on a LISTEN ???\n");
685		goto out;
686	}
687
688	tcp_mstamp_refresh(tp);
689	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
690		if (tp->linger2 >= 0) {
691			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;
692
693			if (tmo > 0) {
694				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
695				goto out;
696			}
697		}
698		tcp_send_active_reset(sk, GFP_ATOMIC);
699		goto death;
700	}
701
702	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
703	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
704		goto out;
705
706	elapsed = keepalive_time_when(tp);
707
708	/* It is alive without keepalive 8) */
709	if (tp->packets_out || !tcp_write_queue_empty(sk))
710		goto resched;
711
712	elapsed = keepalive_time_elapsed(tp);
713
714	if (elapsed >= keepalive_time_when(tp)) {
715		/* If the TCP_USER_TIMEOUT option is enabled, use that
716		 * to determine when to timeout instead.
717		 */
718		if ((icsk->icsk_user_timeout != 0 &&
719		    elapsed >= msecs_to_jiffies(icsk->icsk_user_timeout) &&
720		    icsk->icsk_probes_out > 0) ||
721		    (icsk->icsk_user_timeout == 0 &&
722		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
723			tcp_send_active_reset(sk, GFP_ATOMIC);
724			tcp_write_err(sk);
725			goto out;
726		}
727		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
728			icsk->icsk_probes_out++;
729			elapsed = keepalive_intvl_when(tp);
730		} else {
731			/* If keepalive was lost due to local congestion,
732			 * try harder.
733			 */
734			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
735		}
736	} else {
737		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
738		elapsed = keepalive_time_when(tp) - elapsed;
739	}
740
741resched:
742	inet_csk_reset_keepalive_timer (sk, elapsed);
743	goto out;
744
745death:
746	tcp_done(sk);
747
748out:
749	bh_unlock_sock(sk);
750	sock_put(sk);
751}
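/*
 * Example (sketch, default sysctls): keepalive probing starts after
 * tcp_keepalive_time (7200 s) of idleness and repeats every
 * tcp_keepalive_intvl (75 s); after tcp_keepalive_probes (9) unanswered
 * probes the peer is declared dead at 7200 + 9 * 75 = 7875 s.  The
 * per-socket knobs are SO_KEEPALIVE plus TCP_KEEPIDLE, TCP_KEEPINTVL
 * and TCP_KEEPCNT, e.g. (user space, fd being a TCP socket):
 *
 *	int on = 1, idle = 60, intvl = 10, cnt = 5;
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 */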
752
753static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
754{
755	struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
756	struct sock *sk = (struct sock *)tp;
757
758	bh_lock_sock(sk);
759	if (!sock_owned_by_user(sk)) {
760		if (tp->compressed_ack) {
761			/* Since we have to send one ack finally,
762			 * subtract one from tp->compressed_ack to keep
763			 * LINUX_MIB_TCPACKCOMPRESSED accurate.
764			 */
765			tp->compressed_ack--;
766			tcp_send_ack(sk);
767		}
768	} else {
769		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
770				      &sk->sk_tsq_flags))
771			sock_hold(sk);
772	}
773	bh_unlock_sock(sk);
774
775	sock_put(sk);
776
777	return HRTIMER_NORESTART;
778}
779
780void tcp_init_xmit_timers(struct sock *sk)
781{
782	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
783				  &tcp_keepalive_timer);
784	hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
785		     HRTIMER_MODE_ABS_PINNED_SOFT);
786	tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;
787
788	hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC,
789		     HRTIMER_MODE_REL_PINNED_SOFT);
790	tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick;
791}
v4.6
  1/*
  2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
  3 *		operating system.  INET is implemented using the  BSD Socket
  4 *		interface as the means of communication with the user level.
  5 *
  6 *		Implementation of the Transmission Control Protocol(TCP).
  7 *
  8 * Authors:	Ross Biro
  9 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 10 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 11 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 12 *		Florian La Roche, <flla@stud.uni-sb.de>
 13 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 14 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 15 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 16 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 17 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 18 *		Jorge Cwik, <jorge@laser.satlink.net>
 19 */
 20
 21#include <linux/module.h>
 22#include <linux/gfp.h>
 23#include <net/tcp.h>
 24
 25int sysctl_tcp_thin_linear_timeouts __read_mostly;
 26
 27static void tcp_write_err(struct sock *sk)
 28{
 29	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
 30	sk->sk_error_report(sk);
 31
 32	tcp_done(sk);
 33	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
 34}
 35
 36/* Do not allow orphaned sockets to eat all our resources.
 37 * This is direct violation of TCP specs, but it is required
 38 * to prevent DoS attacks. It is called when a retransmission timeout
 39 * or zero probe timeout occurs on orphaned socket.
 40 *
 41 * The criteria are still not confirmed experimentally and may change.
 42 * We kill the socket if:
 43 * 1. The number of orphaned sockets exceeds an administratively configured
 44 *    limit.
 45 * 2. We are under strong memory pressure.
 46 */
 47static int tcp_out_of_resources(struct sock *sk, bool do_reset)
 48{
 49	struct tcp_sock *tp = tcp_sk(sk);
 50	int shift = 0;
 51
 52	/* If peer does not open window for long time, or did not transmit
 53	 * anything for long time, penalize it. */
 54	if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
 55		shift++;
 56
 57	/* If some dubious ICMP arrived, penalize even more. */
 58	if (sk->sk_err_soft)
 59		shift++;
 60
 61	if (tcp_check_oom(sk, shift)) {
 62		/* Catch exceptional cases, when connection requires reset.
 63		 *      1. Last segment was sent recently. */
 64		if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
 65		    /*  2. Window is closed. */
 66		    (!tp->snd_wnd && !tp->packets_out))
 67			do_reset = true;
 68		if (do_reset)
 69			tcp_send_active_reset(sk, GFP_ATOMIC);
 70		tcp_done(sk);
 71		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
 72		return 1;
 73	}
 74	return 0;
 75}
 76
 77/* Calculate maximal number of retries on an orphaned socket. */
 78static int tcp_orphan_retries(struct sock *sk, bool alive)
 79{
 80	int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */
 81
 82	/* We know from an ICMP that something is wrong. */
 83	if (sk->sk_err_soft && !alive)
 84		retries = 0;
 85
 86	/* However, if socket sent something recently, select some safe
 87	 * number of retries. 8 corresponds to >100 seconds with minimal
 88	 * RTO of 200msec. */
 89	if (retries == 0 && alive)
 90		retries = 8;
 91	return retries;
 92}
 93
 94static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
 95{
 96	struct net *net = sock_net(sk);
 97
 98	/* Black hole detection */
 99	if (net->ipv4.sysctl_tcp_mtu_probing) {
100		if (!icsk->icsk_mtup.enabled) {
101			icsk->icsk_mtup.enabled = 1;
102			icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
103			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
104		} else {
105			struct net *net = sock_net(sk);
106			struct tcp_sock *tp = tcp_sk(sk);
107			int mss;
108
109			mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
110			mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
111			mss = max(mss, 68 - tp->tcp_header_len);
112			icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
113			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
114		}
115	}
116}
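/*
 * Note the older floor here: the halved MSS may not drop below
 * 68 - tp->tcp_header_len, i.e. the 68-byte minimum IPv4 MTU minus
 * the TCP header; the v6.2 code above replaces this constant with the
 * tcp_mtu_probe_floor and tcp_min_snd_mss sysctls.
 */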
117
118/* This function calculates a "timeout" which is equivalent to the timeout of a
119 * TCP connection after "boundary" unsuccessful, exponentially backed-off
120 * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if
121 * syn_set flag is set.
122 */
123static bool retransmits_timed_out(struct sock *sk,
124				  unsigned int boundary,
125				  unsigned int timeout,
126				  bool syn_set)
127{
128	unsigned int linear_backoff_thresh, start_ts;
129	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
130
131	if (!inet_csk(sk)->icsk_retransmits)
132		return false;
133
134	start_ts = tcp_sk(sk)->retrans_stamp;
135	if (unlikely(!start_ts))
136		start_ts = tcp_skb_timestamp(tcp_write_queue_head(sk));
137
138	if (likely(timeout == 0)) {
139		linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
140
141		if (boundary <= linear_backoff_thresh)
142			timeout = ((2 << boundary) - 1) * rto_base;
143		else
144			timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
145				(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
146	}
147	return (tcp_time_stamp - start_ts) >= timeout;
148}
149
150/* A write timeout has occurred. Process the after effects. */
151static int tcp_write_timeout(struct sock *sk)
152{
153	struct inet_connection_sock *icsk = inet_csk(sk);
154	struct tcp_sock *tp = tcp_sk(sk);
155	struct net *net = sock_net(sk);
156	int retry_until;
157	bool do_reset, syn_set = false;
158
159	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
160		if (icsk->icsk_retransmits) {
161			dst_negative_advice(sk);
162			if (tp->syn_fastopen || tp->syn_data)
163				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
164			if (tp->syn_data && icsk->icsk_retransmits == 1)
165				NET_INC_STATS_BH(sock_net(sk),
166						 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
167		}
168		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
169		syn_set = true;
170	} else {
171		if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0, 0)) {
172			/* Some middle-boxes may black-hole Fast Open _after_
173			 * the handshake. Therefore we conservatively disable
174			 * Fast Open on this path on recurring timeouts with
175			 * few or zero bytes acked after Fast Open.
176			 */
177			if (tp->syn_data_acked &&
178			    tp->bytes_acked <= tp->rx_opt.mss_clamp) {
179				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
180				if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1)
181					NET_INC_STATS_BH(sock_net(sk),
182							 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
183			}
184			/* Black hole detection */
185			tcp_mtu_probing(icsk, sk);
186
187			dst_negative_advice(sk);
188		}
189
190		retry_until = net->ipv4.sysctl_tcp_retries2;
191		if (sock_flag(sk, SOCK_DEAD)) {
192			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;
193
194			retry_until = tcp_orphan_retries(sk, alive);
195			do_reset = alive ||
196				!retransmits_timed_out(sk, retry_until, 0, 0);
197
198			if (tcp_out_of_resources(sk, do_reset))
199				return 1;
200		}
201	}
202
203	if (retransmits_timed_out(sk, retry_until,
204				  syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
205		/* Has it gone just too far? */
206		tcp_write_err(sk);
207		return 1;
208	}
209	return 0;
210}
211
212void tcp_delack_timer_handler(struct sock *sk)
213{
214	struct tcp_sock *tp = tcp_sk(sk);
215	struct inet_connection_sock *icsk = inet_csk(sk);
216
217	sk_mem_reclaim_partial(sk);
218
219	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
220		goto out;
221
222	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
223		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
224		goto out;
225	}
226	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
227
228	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
229		struct sk_buff *skb;
230
231		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
232
233		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
234			sk_backlog_rcv(sk, skb);
235
236		tp->ucopy.memory = 0;
237	}
238
239	if (inet_csk_ack_scheduled(sk)) {
240		if (!icsk->icsk_ack.pingpong) {
241			/* Delayed ACK missed: inflate ATO. */
242			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
243		} else {
244			/* Delayed ACK missed: leave pingpong mode and
245			 * deflate ATO.
246			 */
247			icsk->icsk_ack.pingpong = 0;
248			icsk->icsk_ack.ato      = TCP_ATO_MIN;
249		}
250		tcp_send_ack(sk);
251		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
252	}
253
254out:
255	if (tcp_under_memory_pressure(sk))
256		sk_mem_reclaim(sk);
257}
258
259static void tcp_delack_timer(unsigned long data)
260{
261	struct sock *sk = (struct sock *)data;
262
263	bh_lock_sock(sk);
264	if (!sock_owned_by_user(sk)) {
265		tcp_delack_timer_handler(sk);
266	} else {
267		inet_csk(sk)->icsk_ack.blocked = 1;
268		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
269		/* delegate our work to tcp_release_cb() */
270		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
271			sock_hold(sk);
272	}
273	bh_unlock_sock(sk);
274	sock_put(sk);
275}
276
277static void tcp_probe_timer(struct sock *sk)
278{
279	struct inet_connection_sock *icsk = inet_csk(sk);
280	struct tcp_sock *tp = tcp_sk(sk);
281	int max_probes;
282	u32 start_ts;
283
284	if (tp->packets_out || !tcp_send_head(sk)) {
285		icsk->icsk_probes_out = 0;
286		return;
287	}
288
289	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
290	 * long as the receiver continues to respond probes. We support this by
291	 * default and reset icsk_probes_out with incoming ACKs. But if the
292	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
293	 * kill the socket when the retry count and the time exceeds the
294	 * corresponding system limit. We also implement similar policy when
295	 * we use RTO to probe window in tcp_retransmit_timer().
296	 */
297	start_ts = tcp_skb_timestamp(tcp_send_head(sk));
298	if (!start_ts)
299		skb_mstamp_get(&tcp_send_head(sk)->skb_mstamp);
300	else if (icsk->icsk_user_timeout &&
301		 (s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout)
302		goto abort;
303
304	max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
305	if (sock_flag(sk, SOCK_DEAD)) {
306		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
307
308		max_probes = tcp_orphan_retries(sk, alive);
309		if (!alive && icsk->icsk_backoff >= max_probes)
310			goto abort;
311		if (tcp_out_of_resources(sk, true))
312			return;
313	}
314
315	if (icsk->icsk_probes_out > max_probes) {
316abort:		tcp_write_err(sk);
317	} else {
318		/* Only send another probe if we didn't close things up. */
319		tcp_send_probe0(sk);
320	}
321}
322
323/*
324 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
325 *	sk here is the child socket, not the parent (listener) socket.
326 */
327static void tcp_fastopen_synack_timer(struct sock *sk)
328{
329	struct inet_connection_sock *icsk = inet_csk(sk);
330	int max_retries = icsk->icsk_syn_retries ? :
331	    sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
332	struct request_sock *req;
333
334	req = tcp_sk(sk)->fastopen_rsk;
335	req->rsk_ops->syn_ack_timeout(req);
336
337	if (req->num_timeout >= max_retries) {
338		tcp_write_err(sk);
339		return;
340	}
341	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
342	 * returned from rtx_syn_ack() to make it more persistent like
343	 * regular retransmit because if the child socket has been accepted
344	 * it's not good to give up too easily.
345	 */
346	inet_rtx_syn_ack(sk, req);
347	req->num_timeout++;
348	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
349			  TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
350}
351
352/*
353 *	The TCP retransmit timer.
354 */
355
356void tcp_retransmit_timer(struct sock *sk)
357{
358	struct tcp_sock *tp = tcp_sk(sk);
359	struct net *net = sock_net(sk);
360	struct inet_connection_sock *icsk = inet_csk(sk);
361
362	if (tp->fastopen_rsk) {
363		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
364			     sk->sk_state != TCP_FIN_WAIT1);
365		tcp_fastopen_synack_timer(sk);
366		/* Before we receive ACK to our SYN-ACK don't retransmit
367		 * anything else (e.g., data or FIN segments).
368		 */
369		return;
370	}
371	if (!tp->packets_out)
372		goto out;
373
374	WARN_ON(tcp_write_queue_empty(sk));
375
376	tp->tlp_high_seq = 0;
377
378	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
379	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
380		/* Receiver dastardly shrinks window. Our retransmits
381		 * become zero probes, but we should not timeout this
382		 * connection. If the socket is an orphan, time it out,
383		 * we cannot allow such beasts to hang infinitely.
384		 */
385		struct inet_sock *inet = inet_sk(sk);
386		if (sk->sk_family == AF_INET) {
387			net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
388					    &inet->inet_daddr,
389					    ntohs(inet->inet_dport),
390					    inet->inet_num,
391					    tp->snd_una, tp->snd_nxt);
392		}
393#if IS_ENABLED(CONFIG_IPV6)
394		else if (sk->sk_family == AF_INET6) {
395			net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
396					    &sk->sk_v6_daddr,
397					    ntohs(inet->inet_dport),
398					    inet->inet_num,
399					    tp->snd_una, tp->snd_nxt);
400		}
401#endif
402		if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
403			tcp_write_err(sk);
404			goto out;
405		}
406		tcp_enter_loss(sk);
407		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
408		__sk_dst_reset(sk);
409		goto out_reset_timer;
410	}
411
412	if (tcp_write_timeout(sk))
413		goto out;
414
415	if (icsk->icsk_retransmits == 0) {
416		int mib_idx;
417
418		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
419			if (tcp_is_sack(tp))
420				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
421			else
422				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
423		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
424			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
425		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
426			   tp->sacked_out) {
427			if (tcp_is_sack(tp))
428				mib_idx = LINUX_MIB_TCPSACKFAILURES;
429			else
430				mib_idx = LINUX_MIB_TCPRENOFAILURES;
431		} else {
432			mib_idx = LINUX_MIB_TCPTIMEOUTS;
433		}
434		NET_INC_STATS_BH(sock_net(sk), mib_idx);
435	}
436
437	tcp_enter_loss(sk);
438
439	if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
440		/* Retransmission failed because of local congestion,
441		 * do not backoff.
442		 */
443		if (!icsk->icsk_retransmits)
444			icsk->icsk_retransmits = 1;
445		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
446					  min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
447					  TCP_RTO_MAX);
448		goto out;
449	}
450
451	/* Increase the timeout each time we retransmit.  Note that
452	 * we do not increase the rtt estimate.  rto is initialized
453	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
454	 * that doubling rto each time is the least we can get away with.
455	 * In KA9Q, Karn uses this for the first few times, and then
456	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
457	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
458	 * defined in the protocol as the maximum possible RTT.  I guess
459	 * we'll have to use something other than TCP to talk to the
460	 * University of Mars.
461	 *
462	 * PAWS allows us longer timeouts and large windows, so once
463	 * implemented ftp to mars will work nicely. We will have to fix
464	 * the 120 second clamps though!
465	 */
466	icsk->icsk_backoff++;
467	icsk->icsk_retransmits++;
468
469out_reset_timer:
470	/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
471	 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
472	 * might be increased if the stream oscillates between thin and thick,
473	 * thus the old value might already be too high compared to the value
474	 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
475	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
476 * exponential backoff behaviour to avoid continuing to hammer
477 * linear-timeout retransmissions into a black hole.
478	 */
479	if (sk->sk_state == TCP_ESTABLISHED &&
480	    (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
481	    tcp_stream_is_thin(tp) &&
482	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
483		icsk->icsk_backoff = 0;
484		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
485	} else {
486		/* Use normal (exponential) backoff */
487		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
488	}
489	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
490	if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0, 0))
491		__sk_dst_reset(sk);
492
493out:;
494}
495
496void tcp_write_timer_handler(struct sock *sk)
497{
498	struct inet_connection_sock *icsk = inet_csk(sk);
499	int event;
500
501	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
502		goto out;
503
504	if (time_after(icsk->icsk_timeout, jiffies)) {
505		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
506		goto out;
507	}
508
509	event = icsk->icsk_pending;
510
511	switch (event) {
512	case ICSK_TIME_EARLY_RETRANS:
513		tcp_resume_early_retransmit(sk);
514		break;
515	case ICSK_TIME_LOSS_PROBE:
516		tcp_send_loss_probe(sk);
517		break;
518	case ICSK_TIME_RETRANS:
519		icsk->icsk_pending = 0;
520		tcp_retransmit_timer(sk);
521		break;
522	case ICSK_TIME_PROBE0:
523		icsk->icsk_pending = 0;
524		tcp_probe_timer(sk);
525		break;
526	}
527
528out:
529	sk_mem_reclaim(sk);
530}
531
532static void tcp_write_timer(unsigned long data)
533{
534	struct sock *sk = (struct sock *)data;
535
536	bh_lock_sock(sk);
537	if (!sock_owned_by_user(sk)) {
538		tcp_write_timer_handler(sk);
539	} else {
540		/* delegate our work to tcp_release_cb() */
541		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
542			sock_hold(sk);
543	}
544	bh_unlock_sock(sk);
545	sock_put(sk);
546}
547
548void tcp_syn_ack_timeout(const struct request_sock *req)
549{
550	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);
551
552	NET_INC_STATS_BH(net, LINUX_MIB_TCPTIMEOUTS);
553}
554EXPORT_SYMBOL(tcp_syn_ack_timeout);
555
556void tcp_set_keepalive(struct sock *sk, int val)
557{
558	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
559		return;
560
561	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
562		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
563	else if (!val)
564		inet_csk_delete_keepalive_timer(sk);
565}
566
567
568static void tcp_keepalive_timer (unsigned long data)
569{
570	struct sock *sk = (struct sock *) data;
571	struct inet_connection_sock *icsk = inet_csk(sk);
572	struct tcp_sock *tp = tcp_sk(sk);
573	u32 elapsed;
574
575	/* Only process if socket is not in use. */
576	bh_lock_sock(sk);
577	if (sock_owned_by_user(sk)) {
578		/* Try again later. */
579		inet_csk_reset_keepalive_timer (sk, HZ/20);
580		goto out;
581	}
582
583	if (sk->sk_state == TCP_LISTEN) {
584		pr_err("Hmm... keepalive on a LISTEN ???\n");
585		goto out;
586	}
587
588	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
589		if (tp->linger2 >= 0) {
590			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;
591
592			if (tmo > 0) {
593				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
594				goto out;
595			}
596		}
597		tcp_send_active_reset(sk, GFP_ATOMIC);
598		goto death;
599	}
600
601	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
602		goto out;
603
604	elapsed = keepalive_time_when(tp);
605
606	/* It is alive without keepalive 8) */
607	if (tp->packets_out || tcp_send_head(sk))
608		goto resched;
609
610	elapsed = keepalive_time_elapsed(tp);
611
612	if (elapsed >= keepalive_time_when(tp)) {
613		/* If the TCP_USER_TIMEOUT option is enabled, use that
614		 * to determine when to timeout instead.
615		 */
616		if ((icsk->icsk_user_timeout != 0 &&
617		    elapsed >= icsk->icsk_user_timeout &&
618		    icsk->icsk_probes_out > 0) ||
619		    (icsk->icsk_user_timeout == 0 &&
620		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
621			tcp_send_active_reset(sk, GFP_ATOMIC);
622			tcp_write_err(sk);
623			goto out;
624		}
625		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
626			icsk->icsk_probes_out++;
627			elapsed = keepalive_intvl_when(tp);
628		} else {
629			/* If keepalive was lost due to local congestion,
630			 * try harder.
631			 */
632			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
633		}
634	} else {
635		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
636		elapsed = keepalive_time_when(tp) - elapsed;
637	}
638
639	sk_mem_reclaim(sk);
640
641resched:
642	inet_csk_reset_keepalive_timer (sk, elapsed);
643	goto out;
644
645death:
646	tcp_done(sk);
647
648out:
649	bh_unlock_sock(sk);
650	sock_put(sk);
651}
652
653void tcp_init_xmit_timers(struct sock *sk)
654{
655	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
656				  &tcp_keepalive_timer);
657}