v5.14.15
// SPDX-License-Identifier: GPL-2.0
#include <linux/tcp.h>
#include <net/tcp.h>

static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

static u32 tcp_rack_reo_wnd(const struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->reord_seen) {
		/* If reordering has not been observed, be aggressive
		 * during recovery, or when starting recovery via the
		 * DUPACK threshold.
		 */
		if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)
			return 0;

		if (tp->sacked_out >= tp->reordering &&
		    !(sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_NO_DUPTHRESH))
			return 0;
	}

	/* To be more resilient to reordering, allow a settling delay of
	 * min_rtt/4. Use min_rtt instead of the smoothed RTT because
	 * reordering is often a path property and less related to queuing
	 * or delayed ACKs. Upon receiving DSACKs, linearly increase the
	 * window up to the smoothed RTT.
	 */
	return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps,
		   tp->srtt_us >> 3);
}

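/* Worked example (illustrative, not part of the original source): with
 * min_rtt = 40 ms and reo_wnd_steps = 1, the first term is
 * (40000 >> 2) * 1 = 10000 us. With srtt = 50 ms (tp->srtt_us stores
 * srtt << 3, so tp->srtt_us >> 3 recovers it), the result is
 * min(10000, 50000) = 10 ms, i.e. min_rtt/4. Each DSACK-driven step
 * adds another min_rtt/4, but the window never exceeds srtt.
 */
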
s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
{
	return tp->rack.rtt_us + reo_wnd -
	       tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
}

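/* Worked example (illustrative): with rack.rtt_us = 40000 and
 * reo_wnd = 10000, an skb sent 45000 us ago still has
 * 40000 + 10000 - 45000 = 5000 us remaining before it may be marked
 * lost; one sent 60000 us ago yields -10000 <= 0 and is marked lost
 * immediately by tcp_rack_detect_loss().
 */
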
/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Mark a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK,
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and is therefore robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects that some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb, *n;
	u32 reo_wnd;

	*reo_timeout = 0;
	reo_wnd = tcp_rack_reo_wnd(sk);
	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
				 tcp_tsorted_anchor) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		s32 remaining;

		/* Skip ones marked lost but not yet retransmitted */
		if ((scb->sacked & TCPCB_LOST) &&
		    !(scb->sacked & TCPCB_SACKED_RETRANS))
			continue;

		if (!tcp_rack_sent_after(tp->rack.mstamp,
					 tcp_skb_timestamp_us(skb),
					 tp->rack.end_seq, scb->end_seq))
			break;

		/* A packet is lost if it has not been s/acked beyond
		 * the recent RTT plus the reordering window.
		 */
		remaining = tcp_rack_skb_timeout(tp, skb, reo_wnd);
		if (remaining <= 0) {
			tcp_mark_skb_lost(sk, skb);
			list_del_init(&skb->tcp_tsorted_anchor);
		} else {
			/* Record maximum wait time */
			*reo_timeout = max_t(u32, *reo_timeout, remaining);
		}
	}
}

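/* Illustrative scenario (not part of the original source): P1 is sent
 * at t = 0 ms and P2 at t = 10 ms; a SACK for P2 arrives at t = 50 ms
 * with rack.rtt_us = 40 ms and reo_wnd = 10 ms. P1 was sent before the
 * most recently delivered packet, so the loop examines it: it has been
 * outstanding for 50 ms >= 40 + 10 ms and is marked lost. Had the path
 * merely reordered P1 and P2, P1's (s)ack would typically have arrived
 * within that 10 ms settling window.
 */
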
bool tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return false;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
	if (timeout) {
		timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
	return !!timeout;
}

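/* Note (illustrative): a nonzero timeout means some packet is not yet
 * lost but will be once the reordering window expires, so a REO_TIMEOUT
 * timer is armed; e.g. 5000 us of remaining wait becomes
 * usecs_to_jiffies(5000) + TCP_TIMEOUT_MIN jiffies before
 * tcp_rack_reo_timeout() re-runs the detection.
 */
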
/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      u64 xmit_time)
{
	u32 rtt_us;

	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or a prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., the retransmission is at
		 * least an RTT later).
		 */
		return;
	}
	tp->rack.advanced = 1;
	tp->rack.rtt_us = rtt_us;
	if (tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
				end_seq, tp->rack.end_seq)) {
		tp->rack.mstamp = xmit_time;
		tp->rack.end_seq = end_seq;
	}
}

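/* Worked example (illustrative): with min_rtt = 40 ms, a retransmit
 * sent at t = 100 ms that is (s)acked at t = 105 ms yields
 * rtt_us = 5 ms < min_rtt. That ack almost certainly refers to the
 * original transmission, so the ambiguous sample is discarded rather
 * than advancing the RACK state with a bogus 5 ms RTT.
 */
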
/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, prior_inflight;
	u32 lost = tp->lost;

	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &timeout);
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, tp->lost - lost, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}

/* Update RACK's reo_wnd based on DSACK and the number of recoveries.
 *
 * If a DSACK is received, increment reo_wnd by min_rtt/4 (upper bounded
 * by srtt), since there is a possibility that the spurious retransmission
 * was due to a reordering delay longer than reo_wnd.
 *
 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
 * successful recoveries (this accounts for a full DSACK-based loss
 * recovery undo). After that, reset it to the default (min_rtt/4).
 *
 * reo_wnd is incremented at most once per RTT, so that the DSACK we are
 * reacting to is (approximately) due to a spurious retransmission sent
 * after reo_wnd was last updated.
 *
 * reo_wnd is tracked in steps of min_rtt/4 rather than as an absolute
 * value, to account for changes in the RTT.
 */
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND ||
	    !rs->prior_delivered)
		return;

	/* Disregard the DSACK if an RTT has not passed since we adjusted reo_wnd */
	if (before(rs->prior_delivered, tp->rack.last_delivered))
		tp->rack.dsack_seen = 0;

	/* Adjust the reo_wnd if an update is pending */
	if (tp->rack.dsack_seen) {
		tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
					       tp->rack.reo_wnd_steps + 1);
		tp->rack.dsack_seen = 0;
		tp->rack.last_delivered = tp->delivered;
		tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
	} else if (!tp->rack.reo_wnd_persist) {
		tp->rack.reo_wnd_steps = 1;
	}
}

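/* Worked example (illustrative): starting from the default
 * reo_wnd_steps = 1, two DSACK-triggered increments give
 * reo_wnd_steps = 3, so tcp_rack_reo_wnd() allows 3 * min_rtt/4 of
 * settling delay (still capped by srtt). The value persists for 16
 * successful recoveries; only then does reo_wnd_steps fall back to 1.
 */
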
/* RFC6582 NewReno recovery for non-SACK connections. It simply retransmits
 * the next unacked packet upon receiving
 * a) three or more DUPACKs to start fast recovery
 * b) an ACK acknowledging new data during fast recovery.
 */
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced)
{
	const u8 state = inet_csk(sk)->icsk_ca_state;
	struct tcp_sock *tp = tcp_sk(sk);

	if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||
	    (state == TCP_CA_Recovery && snd_una_advanced)) {
		struct sk_buff *skb = tcp_rtx_queue_head(sk);
		u32 mss;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
			return;

		mss = tcp_skb_mss(skb);
		if (tcp_skb_pcount(skb) > 1 && skb->len > mss)
			tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
				     mss, mss, GFP_ATOMIC);

		tcp_mark_skb_lost(sk, skb);
	}
}
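
/* Note (illustrative): if the head of the retransmit queue covers
 * several MSS-sized segments, tcp_fragment() splits off the first
 * MSS-sized chunk so that only that one segment is marked lost (and
 * subsequently retransmitted), matching NewReno's pace of one packet
 * per qualifying ACK.
 */
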
v5.9
// SPDX-License-Identifier: GPL-2.0
#include <linux/tcp.h>
#include <net/tcp.h>

void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_skb_mark_lost_uncond_verify(tp, skb);
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
		/* Account for retransmits that are lost again */
		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= tcp_skb_pcount(skb);
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
			      tcp_skb_pcount(skb));
	}
}

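/* Worked example (illustrative): an skb with tcp_skb_pcount() == 2
 * that was retransmitted and is now deemed lost again has its
 * TCPCB_SACKED_RETRANS flag cleared, retrans_out reduced by 2, and 2
 * added to the TCPLostRetransmit SNMP counter.
 */
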
static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

static u32 tcp_rack_reo_wnd(const struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->reord_seen) {
		/* If reordering has not been observed, be aggressive
		 * during recovery, or when starting recovery via the
		 * DUPACK threshold.
		 */
		if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)
			return 0;

		if (tp->sacked_out >= tp->reordering &&
		    !(sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_NO_DUPTHRESH))
			return 0;
	}

	/* To be more resilient to reordering, allow a settling delay of
	 * min_rtt/4. Use min_rtt instead of the smoothed RTT because
	 * reordering is often a path property and less related to queuing
	 * or delayed ACKs. Upon receiving DSACKs, linearly increase the
	 * window up to the smoothed RTT.
	 */
	return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps,
		   tp->srtt_us >> 3);
}

s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
{
	return tp->rack.rtt_us + reo_wnd -
	       tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
}

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Mark a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK,
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and is therefore robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects that some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb, *n;
	u32 reo_wnd;

	*reo_timeout = 0;
	reo_wnd = tcp_rack_reo_wnd(sk);
	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
				 tcp_tsorted_anchor) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		s32 remaining;

		/* Skip ones marked lost but not yet retransmitted */
		if ((scb->sacked & TCPCB_LOST) &&
		    !(scb->sacked & TCPCB_SACKED_RETRANS))
			continue;

		if (!tcp_rack_sent_after(tp->rack.mstamp,
					 tcp_skb_timestamp_us(skb),
					 tp->rack.end_seq, scb->end_seq))
			break;

		/* A packet is lost if it has not been s/acked beyond
		 * the recent RTT plus the reordering window.
		 */
		remaining = tcp_rack_skb_timeout(tp, skb, reo_wnd);
		if (remaining <= 0) {
			tcp_mark_skb_lost(sk, skb);
			list_del_init(&skb->tcp_tsorted_anchor);
		} else {
			/* Record maximum wait time */
			*reo_timeout = max_t(u32, *reo_timeout, remaining);
		}
	}
}

void tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
	if (timeout) {
		timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
}

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      u64 xmit_time)
{
	u32 rtt_us;

	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or a prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., the retransmission is at
		 * least an RTT later).
		 */
		return;
	}
	tp->rack.advanced = 1;
	tp->rack.rtt_us = rtt_us;
	if (tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
				end_seq, tp->rack.end_seq)) {
		tp->rack.mstamp = xmit_time;
		tp->rack.end_seq = end_seq;
	}
}

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, prior_inflight;

	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &timeout);
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}

/* Update RACK's reo_wnd based on DSACK and the number of recoveries.
 *
 * If a DSACK is received, increment reo_wnd by min_rtt/4 (upper bounded
 * by srtt), since there is a possibility that the spurious retransmission
 * was due to a reordering delay longer than reo_wnd.
 *
 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
 * successful recoveries (this accounts for a full DSACK-based loss
 * recovery undo). After that, reset it to the default (min_rtt/4).
 *
 * reo_wnd is incremented at most once per RTT, so that the DSACK we are
 * reacting to is (approximately) due to a spurious retransmission sent
 * after reo_wnd was last updated.
 *
 * reo_wnd is tracked in steps of min_rtt/4 rather than as an absolute
 * value, to account for changes in the RTT.
 */
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND ||
	    !rs->prior_delivered)
		return;

	/* Disregard the DSACK if an RTT has not passed since we adjusted reo_wnd */
	if (before(rs->prior_delivered, tp->rack.last_delivered))
		tp->rack.dsack_seen = 0;

	/* Adjust the reo_wnd if an update is pending */
	if (tp->rack.dsack_seen) {
		tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
					       tp->rack.reo_wnd_steps + 1);
		tp->rack.dsack_seen = 0;
		tp->rack.last_delivered = tp->delivered;
		tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
	} else if (!tp->rack.reo_wnd_persist) {
		tp->rack.reo_wnd_steps = 1;
	}
}

/* RFC6582 NewReno recovery for non-SACK connections. It simply retransmits
 * the next unacked packet upon receiving
 * a) three or more DUPACKs to start fast recovery
 * b) an ACK acknowledging new data during fast recovery.
 */
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced)
{
	const u8 state = inet_csk(sk)->icsk_ca_state;
	struct tcp_sock *tp = tcp_sk(sk);

	if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||
	    (state == TCP_CA_Recovery && snd_una_advanced)) {
		struct sk_buff *skb = tcp_rtx_queue_head(sk);
		u32 mss;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
			return;

		mss = tcp_skb_mss(skb);
		if (tcp_skb_pcount(skb) > 1 && skb->len > mss)
			tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
				     mss, mss, GFP_ATOMIC);

		tcp_skb_mark_lost_uncond_verify(tp, skb);
	}
}