v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 *   YeAH TCP
 *
 * For further details look at:
 *   https://web.archive.org/web/20080316215752/http://wil.cs.caltech.edu/pfldnet2007/paper/YeAH_TCP.pdf
 *
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>

#include <net/tcp.h>

#include "tcp_vegas.h"

#define TCP_YEAH_ALPHA       80 /* number of packets queued at the bottleneck */
#define TCP_YEAH_GAMMA        1 /* fraction of queue to be removed per rtt */
#define TCP_YEAH_DELTA        3 /* log minimum fraction of cwnd to be removed on loss */
#define TCP_YEAH_EPSILON      1 /* log maximum fraction to be removed on early decongestion */
#define TCP_YEAH_PHY          8 /* maximum delta from base */
#define TCP_YEAH_RHO         16 /* minimum number of consecutive rtt to consider competition on loss */
#define TCP_YEAH_ZETA        50 /* minimum number of state switches to reset reno_count */
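
/* TCP_YEAH_DELTA and TCP_YEAH_EPSILON are log2 quantities: they are
 * applied below as right shifts, so "snd_cwnd >> TCP_YEAH_DELTA"
 * removes at least 1/8 of the window on loss and
 * "snd_cwnd >> TCP_YEAH_EPSILON" caps an early-decongestion cut at
 * half the window.
 */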

#define TCP_SCALABLE_AI_CNT	 100U

/* YeAH variables */
struct yeah {
	struct vegas vegas;	/* must be first */

	/* YeAH */
	u32 lastQ;		/* backlog (queue) estimate from the last Vegas sample */
	u32 doing_reno_now;	/* consecutive RTTs spent in the Reno branch */

	u32 reno_count;		/* estimated cwnd of a competing Reno flow */
	u32 fast_count;		/* consecutive RTTs spent in the fast branch */
};

static void tcp_yeah_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct yeah *yeah = inet_csk_ca(sk);

	tcp_vegas_init(sk);

	yeah->doing_reno_now = 0;
	yeah->lastQ = 0;

	yeah->reno_count = 2;

	/* Ensure the MD arithmetic works.  This is somewhat pedantic,
	 * since I don't think we will see a cwnd this large. :) */
	tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
}

static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct yeah *yeah = inet_csk_ca(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp)) {
		/* tcp_slow_start() returns the ACK credit left over once
		 * cwnd reaches ssthresh; if it consumed everything, skip
		 * congestion avoidance but still do the Vegas bookkeeping.
		 */
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			goto do_vegas;
	}

	if (!yeah->doing_reno_now) {
		/* Scalable: additive increase of one segment per at most
		 * min(cwnd, TCP_SCALABLE_AI_CNT) ACKed packets.
		 */
		tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
				  acked);
	} else {
		/* Reno: one segment per full window of ACKed packets. */
		tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
	}

	/* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
	 *
	 * These are so named because they represent the approximate values
	 * of snd_una and snd_nxt at the beginning of the current RTT. More
	 * precisely, they represent the amount of data sent during the RTT.
	 * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
	 * we will calculate that (v_beg_snd_nxt - v_vegas.beg_snd_una) outstanding
	 * bytes of data have been ACKed during the course of the RTT, giving
	 * an "actual" rate of:
	 *
	 *     (v_beg_snd_nxt - v_vegas.beg_snd_una) / (rtt duration)
	 *
	 * Unfortunately, v_vegas.beg_snd_una is not exactly equal to snd_una,
	 * because delayed ACKs can cover more than one segment, so they
	 * don't line up yeahly with the boundaries of RTTs.
	 *
	 * Another unfortunate fact of life is that delayed ACKs delay the
	 * advance of the left edge of our send window, so that the number
	 * of bytes we send in an RTT is often less than our cwnd will allow.
	 * So we keep track of our cwnd separately, in v_beg_snd_cwnd.
	 */
do_vegas:
	if (after(ack, yeah->vegas.beg_snd_nxt)) {
		/* We do the Vegas calculations only if we got enough RTT
		 * samples that we can be reasonably sure that we got
		 * at least one RTT sample that wasn't from a delayed ACK.
		 * If we only had 2 samples total,
		 * then that means we're getting only 1 ACK per RTT, which
		 * means they're almost certainly delayed ACKs.
		 * If we have 3 samples, we should be OK.
		 */

		if (yeah->vegas.cntRTT > 2) {
			u32 rtt, queue;
			u64 bw;

			/* We have enough RTT samples, so, using the Vegas
			 * algorithm, we determine if we should increase or
			 * decrease cwnd, and by how much.
			 */

			/* Pluck out the RTT we are using for the Vegas
			 * calculations. This is the min RTT seen during the
			 * last RTT. Taking the min filters out the effects
			 * of delayed ACKs, at the cost of noticing congestion
			 * a bit later.
			 */
			rtt = yeah->vegas.minRTT;

			/* Compute excess number of packets above bandwidth.
			 * Avoid doing full 64 bit divide.
			 */
			bw = tp->snd_cwnd;
			bw *= rtt - yeah->vegas.baseRTT;
			do_div(bw, rtt);
			queue = bw;
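
			/* Illustrative numbers (not from the source): with
			 * snd_cwnd = 100, minRTT = 110 ms and baseRTT = 100 ms,
			 * queue = 100 * (110 - 100) / 110 = 9 packets, below
			 * TCP_YEAH_ALPHA (80), and the extra 10 ms of delay is
			 * under baseRTT / TCP_YEAH_PHY = 12.5 ms, so this RTT
			 * lands in the "fast" branch below.
			 */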

			if (queue > TCP_YEAH_ALPHA ||
			    rtt - yeah->vegas.baseRTT > (yeah->vegas.baseRTT / TCP_YEAH_PHY)) {
				if (queue > TCP_YEAH_ALPHA &&
				    tp->snd_cwnd > yeah->reno_count) {
					u32 reduction = min(queue / TCP_YEAH_GAMMA,
							    tp->snd_cwnd >> TCP_YEAH_EPSILON);

					tp->snd_cwnd -= reduction;

					tp->snd_cwnd = max(tp->snd_cwnd,
							   yeah->reno_count);

					tp->snd_ssthresh = tp->snd_cwnd;
				}
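
				/* Illustrative numbers (not from the source):
				 * queue = 90 (> TCP_YEAH_ALPHA) with
				 * snd_cwnd = 200 and reno_count = 100 gives
				 * reduction = min(90 / 1, 200 >> 1) = 90, so
				 * cwnd drops to 110 and ssthresh follows it.
				 */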

				if (yeah->reno_count <= 2)
					yeah->reno_count = max(tp->snd_cwnd>>1, 2U);
				else
					yeah->reno_count++;

				yeah->doing_reno_now = min(yeah->doing_reno_now + 1,
							   0xffffffU);
			} else {
				yeah->fast_count++;

				if (yeah->fast_count > TCP_YEAH_ZETA) {
					yeah->reno_count = 2;
					yeah->fast_count = 0;
				}

				yeah->doing_reno_now = 0;
			}

			yeah->lastQ = queue;
		}

		/* Save the extent of the current window so we can use this
		 * at the end of the next RTT.
		 */
		yeah->vegas.beg_snd_una  = yeah->vegas.beg_snd_nxt;
		yeah->vegas.beg_snd_nxt  = tp->snd_nxt;
		yeah->vegas.beg_snd_cwnd = tp->snd_cwnd;

		/* Wipe the slate clean for the next RTT. */
		yeah->vegas.cntRTT = 0;
		yeah->vegas.minRTT = 0x7fffffff;
	}
}

static u32 tcp_yeah_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct yeah *yeah = inet_csk_ca(sk);
	u32 reduction;

	if (yeah->doing_reno_now < TCP_YEAH_RHO) {
		reduction = yeah->lastQ;

		reduction = min(reduction, max(tp->snd_cwnd>>1, 2U));

		reduction = max(reduction, tp->snd_cwnd >> TCP_YEAH_DELTA);
	} else
		reduction = max(tp->snd_cwnd>>1, 2U);

	yeah->fast_count = 0;
	yeah->reno_count = max(yeah->reno_count>>1, 2U);

	return max_t(int, tp->snd_cwnd - reduction, 2);
}
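
/* Illustrative numbers (not from the source): a loss with
 * doing_reno_now = 3 (< TCP_YEAH_RHO), lastQ = 10 and snd_cwnd = 80
 * gives reduction = max(min(10, 40), 80 >> 3) = 10, so ssthresh
 * becomes 70; once doing_reno_now reaches TCP_YEAH_RHO the window is
 * simply halved, to 40 here.
 */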

static struct tcp_congestion_ops tcp_yeah __read_mostly = {
	.init		= tcp_yeah_init,
	.ssthresh	= tcp_yeah_ssthresh,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.cong_avoid	= tcp_yeah_cong_avoid,
	.set_state	= tcp_vegas_state,
	.cwnd_event	= tcp_vegas_cwnd_event,
	.get_info	= tcp_vegas_get_info,
	.pkts_acked	= tcp_vegas_pkts_acked,

	.owner		= THIS_MODULE,
	.name		= "yeah",
};

static int __init tcp_yeah_register(void)
{
	BUG_ON(sizeof(struct yeah) > ICSK_CA_PRIV_SIZE);
	tcp_register_congestion_control(&tcp_yeah);
	return 0;
}

static void __exit tcp_yeah_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_yeah);
}

module_init(tcp_yeah_register);
module_exit(tcp_yeah_unregister);

MODULE_AUTHOR("Angelo P. Castellani");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("YeAH TCP");
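
The module registers its congestion control under the name "yeah". Once it is loaded, it can be made the system-wide default through the net.ipv4.tcp_congestion_control sysctl, or selected per socket with the standard TCP_CONGESTION socket option. Below is a minimal userspace sketch of the per-socket path (not part of the kernel listing above; error handling trimmed):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

int main(void)
{
	char name[16];
	socklen_t len = sizeof(name);
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	/* Ask the kernel to attach the "yeah" congestion control to this
	 * socket; this fails with ENOENT if tcp_yeah is not available. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
		       "yeah", strlen("yeah")) < 0) {
		perror("setsockopt(TCP_CONGESTION)");
		return 1;
	}

	/* Read the name back to confirm which algorithm is in effect. */
	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len) == 0)
		printf("congestion control: %.*s\n", (int)len, name);
	return 0;
}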
v3.5.6
 
/*
 *
 *   YeAH TCP
 *
 * For further details look at:
 *    http://wil.cs.caltech.edu/pfldnet2007/paper/YeAH_TCP.pdf
 *
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>

#include <net/tcp.h>

#include "tcp_vegas.h"

#define TCP_YEAH_ALPHA       80 //lin number of packets queued at the bottleneck
#define TCP_YEAH_GAMMA        1 //lin fraction of queue to be removed per rtt
#define TCP_YEAH_DELTA        3 //log minimum fraction of cwnd to be removed on loss
#define TCP_YEAH_EPSILON      1 //log maximum fraction to be removed on early decongestion
#define TCP_YEAH_PHY          8 //lin maximum delta from base
#define TCP_YEAH_RHO         16 //lin minimum number of consecutive rtt to consider competition on loss
#define TCP_YEAH_ZETA        50 //lin minimum number of state switches to reset reno_count

#define TCP_SCALABLE_AI_CNT	 100U

/* YeAH variables */
struct yeah {
	struct vegas vegas;	/* must be first */

	/* YeAH */
	u32 lastQ;
	u32 doing_reno_now;

	u32 reno_count;
	u32 fast_count;

	u32 pkts_acked;		/* ACKs credited per event, cached for the Scalable AI step */
};

static void tcp_yeah_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct yeah *yeah = inet_csk_ca(sk);

	tcp_vegas_init(sk);

	yeah->doing_reno_now = 0;
	yeah->lastQ = 0;

	yeah->reno_count = 2;

	/* Ensure the MD arithmetic works.  This is somewhat pedantic,
	 * since I don't think we will see a cwnd this large. :) */
	tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
}

static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct yeah *yeah = inet_csk_ca(sk);

	if (icsk->icsk_ca_state == TCP_CA_Open)
		yeah->pkts_acked = pkts_acked;

	tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us);
}
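
/* The value cached above is consumed as the additive-increase credit in
 * the Scalable branch of tcp_yeah_cong_avoid() below, then reset to 1.
 */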

static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct yeah *yeah = inet_csk_ca(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);
	else if (!yeah->doing_reno_now) {
		/* Scalable */

		tp->snd_cwnd_cnt += yeah->pkts_acked;
		if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)) {
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
			tp->snd_cwnd_cnt = 0;
		}

		yeah->pkts_acked = 1;

	} else {
		/* Reno */
		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
	}
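
	/* With snd_cwnd above TCP_SCALABLE_AI_CNT, the increment threshold
	 * is capped at 100, i.e. cwnd grows by one segment per 100 ACKed
	 * packets instead of one per window as in the Reno branch.
	 */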

	/* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
	 *
	 * These are so named because they represent the approximate values
	 * of snd_una and snd_nxt at the beginning of the current RTT. More
	 * precisely, they represent the amount of data sent during the RTT.
	 * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
	 * we will calculate that (v_beg_snd_nxt - v_vegas.beg_snd_una) outstanding
	 * bytes of data have been ACKed during the course of the RTT, giving
	 * an "actual" rate of:
	 *
	 *     (v_beg_snd_nxt - v_vegas.beg_snd_una) / (rtt duration)
	 *
	 * Unfortunately, v_vegas.beg_snd_una is not exactly equal to snd_una,
	 * because delayed ACKs can cover more than one segment, so they
	 * don't line up yeahly with the boundaries of RTTs.
	 *
	 * Another unfortunate fact of life is that delayed ACKs delay the
	 * advance of the left edge of our send window, so that the number
	 * of bytes we send in an RTT is often less than our cwnd will allow.
	 * So we keep track of our cwnd separately, in v_beg_snd_cwnd.
	 */

	if (after(ack, yeah->vegas.beg_snd_nxt)) {
		/* We do the Vegas calculations only if we got enough RTT
		 * samples that we can be reasonably sure that we got
		 * at least one RTT sample that wasn't from a delayed ACK.
		 * If we only had 2 samples total,
		 * then that means we're getting only 1 ACK per RTT, which
		 * means they're almost certainly delayed ACKs.
		 * If we have 3 samples, we should be OK.
		 */

		if (yeah->vegas.cntRTT > 2) {
			u32 rtt, queue;
			u64 bw;

			/* We have enough RTT samples, so, using the Vegas
			 * algorithm, we determine if we should increase or
			 * decrease cwnd, and by how much.
			 */

			/* Pluck out the RTT we are using for the Vegas
			 * calculations. This is the min RTT seen during the
			 * last RTT. Taking the min filters out the effects
			 * of delayed ACKs, at the cost of noticing congestion
			 * a bit later.
			 */
			rtt = yeah->vegas.minRTT;

			/* Compute excess number of packets above bandwidth.
			 * Avoid doing full 64 bit divide.
			 */
			bw = tp->snd_cwnd;
			bw *= rtt - yeah->vegas.baseRTT;
			do_div(bw, rtt);
			queue = bw;

			if (queue > TCP_YEAH_ALPHA ||
			    rtt - yeah->vegas.baseRTT > (yeah->vegas.baseRTT / TCP_YEAH_PHY)) {
				if (queue > TCP_YEAH_ALPHA &&
				    tp->snd_cwnd > yeah->reno_count) {
					u32 reduction = min(queue / TCP_YEAH_GAMMA,
							    tp->snd_cwnd >> TCP_YEAH_EPSILON);

					tp->snd_cwnd -= reduction;

					tp->snd_cwnd = max(tp->snd_cwnd,
							   yeah->reno_count);

					tp->snd_ssthresh = tp->snd_cwnd;
				}

				if (yeah->reno_count <= 2)
					yeah->reno_count = max(tp->snd_cwnd>>1, 2U);
				else
					yeah->reno_count++;

				yeah->doing_reno_now = min(yeah->doing_reno_now + 1,
							   0xffffffU);
			} else {
				yeah->fast_count++;

				if (yeah->fast_count > TCP_YEAH_ZETA) {
					yeah->reno_count = 2;
					yeah->fast_count = 0;
				}

				yeah->doing_reno_now = 0;
			}

			yeah->lastQ = queue;
		}

		/* Save the extent of the current window so we can use this
		 * at the end of the next RTT.
		 */
		yeah->vegas.beg_snd_una  = yeah->vegas.beg_snd_nxt;
		yeah->vegas.beg_snd_nxt  = tp->snd_nxt;
		yeah->vegas.beg_snd_cwnd = tp->snd_cwnd;

		/* Wipe the slate clean for the next RTT. */
		yeah->vegas.cntRTT = 0;
		yeah->vegas.minRTT = 0x7fffffff;
	}
}

static u32 tcp_yeah_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct yeah *yeah = inet_csk_ca(sk);
	u32 reduction;

	if (yeah->doing_reno_now < TCP_YEAH_RHO) {
		reduction = yeah->lastQ;

		reduction = min(reduction, max(tp->snd_cwnd>>1, 2U));

		reduction = max(reduction, tp->snd_cwnd >> TCP_YEAH_DELTA);
	} else
		reduction = max(tp->snd_cwnd>>1, 2U);

	yeah->fast_count = 0;
	yeah->reno_count = max(yeah->reno_count>>1, 2U);

	return tp->snd_cwnd - reduction;
}
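
/* Note that, unlike the v5.9 version above, the return value here is
 * not clamped: with snd_cwnd = 2 the reduction can also be 2, yielding
 * a ssthresh of 0. v5.9 bounds this with
 * max_t(int, snd_cwnd - reduction, 2).
 */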

static struct tcp_congestion_ops tcp_yeah __read_mostly = {
	.flags		= TCP_CONG_RTT_STAMP,
	.init		= tcp_yeah_init,
	.ssthresh	= tcp_yeah_ssthresh,
	.cong_avoid	= tcp_yeah_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
	.set_state	= tcp_vegas_state,
	.cwnd_event	= tcp_vegas_cwnd_event,
	.get_info	= tcp_vegas_get_info,
	.pkts_acked	= tcp_yeah_pkts_acked,

	.owner		= THIS_MODULE,
	.name		= "yeah",
};

static int __init tcp_yeah_register(void)
{
	BUG_ON(sizeof(struct yeah) > ICSK_CA_PRIV_SIZE);
	tcp_register_congestion_control(&tcp_yeah);
	return 0;
}

static void __exit tcp_yeah_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_yeah);
}

module_init(tcp_yeah_register);
module_exit(tcp_yeah_unregister);

MODULE_AUTHOR("Angelo P. Castellani");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("YeAH TCP");