// SPDX-License-Identifier: GPL-2.0
#include <linux/tcp.h>
#include <net/tcp.h>

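/* Mark @skb lost and, if it was a retransmission that has now been
 * lost again, remove it from retrans_out and count it toward the
 * LINUX_MIB_TCPLOSTRETRANSMIT statistic.
 */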
static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_skb_mark_lost_uncond_verify(tp, skb);
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
		/* Account for retransmits that are lost again */
		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= tcp_skb_pcount(skb);
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
			      tcp_skb_pcount(skb));
	}
}

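/* Returns true if the transmission stamped (t1, seq1) was sent after
 * (t2, seq2); the sequence numbers break ties between packets sent
 * within the same timestamp granule.
 */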
static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK,
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and is therefore robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
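/* Illustrative example (not from the draft): if P1 is sent at t=1ms,
 * P2 at t=2ms, and only P2 is SACKed, dupthresh still needs two more
 * SACKed packets before acting, whereas RACK marks P1 lost as soon as
 * more than rack.rtt_us + reo_wnd has elapsed since P1 was sent.
 */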
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 min_rtt = tcp_min_rtt(tp);
	struct sk_buff *skb, *n;
	u32 reo_wnd;

	*reo_timeout = 0;
	/* To be more reordering resilient, allow min_rtt/4 settling delay
	 * (lower-bounded to 1000us). We use min_rtt instead of the smoothed
	 * RTT because reordering is often a path property and less related
	 * to queuing or delayed ACKs.
	 */
	reo_wnd = 1000;
	if ((tp->rack.reord || inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery) &&
	    min_rtt != ~0U) {
		reo_wnd = max((min_rtt >> 2) * tp->rack.reo_wnd_steps, reo_wnd);
		reo_wnd = min(reo_wnd, tp->srtt_us >> 3);
	}

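	/* Example (illustrative numbers): with min_rtt = 40000us and
	 * reo_wnd_steps = 1, the settling window above is 10000us
	 * (capped at srtt); the 1000us floor wins only when min_rtt
	 * is below 4ms.
	 */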
	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
				 tcp_tsorted_anchor) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		s32 remaining;

		/* Skip ones marked lost but not yet retransmitted */
		if ((scb->sacked & TCPCB_LOST) &&
		    !(scb->sacked & TCPCB_SACKED_RETRANS))
			continue;

		if (!tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
					 tp->rack.end_seq, scb->end_seq))
			break;

		/* A packet is lost if it has not been s/acked beyond
		 * the recent RTT plus the reordering window.
		 */
		remaining = tp->rack.rtt_us + reo_wnd -
			    tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp);
		if (remaining <= 0) {
			tcp_rack_mark_skb_lost(sk, skb);
			list_del_init(&skb->tcp_tsorted_anchor);
		} else {
			/* Record maximum wait time */
			*reo_timeout = max_t(u32, *reo_timeout, remaining);
		}
	}
}

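/* Run RACK loss detection only if the RACK state has advanced since the
 * last scan; if some packets are still inside the reordering window,
 * arm the reordering timer to re-check them later.
 */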
void tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
	if (timeout) {
		timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
}

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt.
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      u64 xmit_time)
{
	u32 rtt_us;

	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or the prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., the retransmission is at
		 * least an RTT later).
		 */
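		/* Illustrative numbers: with min_rtt = 50000us, a sack
		 * arriving 10000us after a retransmission yields
		 * rtt_us = 10000 < min_rtt, so it most likely acks the
		 * original transmission; skip the update rather than
		 * corrupt rack.rtt_us with a bogus sample.
		 */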
		return;
	}
	tp->rack.advanced = 1;
	tp->rack.rtt_us = rtt_us;
	if (tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
				end_seq, tp->rack.end_seq)) {
		tp->rack.mstamp = xmit_time;
		tp->rack.end_seq = end_seq;
	}
}

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, prior_inflight;

	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &timeout);
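	/* Marking packets lost raises tp->lost_out (and may shrink
	 * retrans_out), so a drop in packets_in_flight() means the
	 * scan above found new losses to repair.
	 */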
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}

/* Updates RACK's reo_wnd based on DSACK and the number of recoveries.
 *
 * If a DSACK is received, increment reo_wnd by min_rtt/4 (upper-bounded
 * by srtt), since the spurious retransmission may have been due to a
 * reordering delay longer than reo_wnd.
 *
 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
 * successful recoveries (accounting for a full DSACK-based loss
 * recovery undo). After that, reset it to the default (min_rtt/4).
 *
 * reo_wnd is incremented at most once per RTT, so that the DSACK we
 * are reacting to is (approximately) due to a spurious retransmission
 * sent after reo_wnd was last updated.
 *
 * reo_wnd is tracked in steps of min_rtt/4 rather than as an absolute
 * value, to adapt to changes in the RTT.
 */
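/* Example (illustrative numbers): three DSACK-triggered increments take
 * reo_wnd_steps from 1 to 4, so tcp_rack_detect_loss() then waits a
 * full min_rtt (4 * min_rtt/4) before declaring loss; the steps fall
 * back to 1 once 16 recoveries complete without a fresh DSACK.
 */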
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND ||
	    !rs->prior_delivered)
		return;

	/* Disregard DSACK if an RTT has not passed since we adjusted reo_wnd */
	if (before(rs->prior_delivered, tp->rack.last_delivered))
		tp->rack.dsack_seen = 0;

	/* Adjust the reo_wnd if update is pending */
	if (tp->rack.dsack_seen) {
		tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
					       tp->rack.reo_wnd_steps + 1);
		tp->rack.dsack_seen = 0;
		tp->rack.last_delivered = tp->delivered;
		tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
	} else if (!tp->rack.reo_wnd_persist) {
		tp->rack.reo_wnd_steps = 1;
	}
}
204}