v4.17
/*
 *
 *   YeAH TCP
 *
 * For further details look at:
 *   https://web.archive.org/web/20080316215752/http://wil.cs.caltech.edu/pfldnet2007/paper/YeAH_TCP.pdf
 *
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>

#include <net/tcp.h>

#include "tcp_vegas.h"

#define TCP_YEAH_ALPHA       80 /* number of packets queued at the bottleneck */
#define TCP_YEAH_GAMMA        1 /* fraction of queue to be removed per rtt */
#define TCP_YEAH_DELTA        3 /* log minimum fraction of cwnd to be removed on loss */
#define TCP_YEAH_EPSILON      1 /* log maximum fraction to be removed on early decongestion */
#define TCP_YEAH_PHY          8 /* maximum delta from base */
#define TCP_YEAH_RHO         16 /* minimum number of consecutive rtt to consider competition on loss */
#define TCP_YEAH_ZETA        50 /* minimum number of state switches to reset reno_count */

#define TCP_SCALABLE_AI_CNT	 100U

/* YeAH variables */
struct yeah {
	struct vegas vegas;	/* must be first */

	/* YeAH */
	u32 lastQ;
	u32 doing_reno_now;

	u32 reno_count;
	u32 fast_count;

	u32 pkts_acked;
};

static void tcp_yeah_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct yeah *yeah = inet_csk_ca(sk);

	tcp_vegas_init(sk);

	yeah->doing_reno_now = 0;
	yeah->lastQ = 0;

	yeah->reno_count = 2;

	/* Ensure the MD arithmetic works.  This is somewhat pedantic,
	 * since I don't think we will see a cwnd this large. :) */
	tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
}

static void tcp_yeah_pkts_acked(struct sock *sk,
				const struct ack_sample *sample)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct yeah *yeah = inet_csk_ca(sk);

	if (icsk->icsk_ca_state == TCP_CA_Open)
		yeah->pkts_acked = sample->pkts_acked;

	tcp_vegas_pkts_acked(sk, sample);
}

static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct yeah *yeah = inet_csk_ca(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp))
		tcp_slow_start(tp, acked);

	else if (!yeah->doing_reno_now) {
		/* Scalable */

		tp->snd_cwnd_cnt += yeah->pkts_acked;
		if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)) {
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
			tp->snd_cwnd_cnt = 0;
		}

		yeah->pkts_acked = 1;

	} else {
		/* Reno */
		tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
	}

	/* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
	 *
	 * These are so named because they represent the approximate values
	 * of snd_una and snd_nxt at the beginning of the current RTT. More
	 * precisely, they represent the amount of data sent during the RTT.
	 * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
	 * we will calculate that (v_beg_snd_nxt - v_vegas.beg_snd_una) outstanding
	 * bytes of data have been ACKed during the course of the RTT, giving
	 * an "actual" rate of:
	 *
	 *     (v_beg_snd_nxt - v_vegas.beg_snd_una) / (rtt duration)
	 *
	 * Unfortunately, v_vegas.beg_snd_una is not exactly equal to snd_una,
	 * because delayed ACKs can cover more than one segment, so they
	 * don't line up yeahly with the boundaries of RTTs.
	 *
	 * Another unfortunate fact of life is that delayed ACKs delay the
	 * advance of the left edge of our send window, so that the number
	 * of bytes we send in an RTT is often less than our cwnd will allow.
	 * So we keep track of our cwnd separately, in v_beg_snd_cwnd.
	 */

	if (after(ack, yeah->vegas.beg_snd_nxt)) {
		/* We do the Vegas calculations only if we got enough RTT
		 * samples that we can be reasonably sure that we got
		 * at least one RTT sample that wasn't from a delayed ACK.
		 * If we only had 2 samples total,
		 * then that means we're getting only 1 ACK per RTT, which
		 * means they're almost certainly delayed ACKs.
		 * If we have 3 samples, we should be OK.
		 */

		if (yeah->vegas.cntRTT > 2) {
			u32 rtt, queue;
			u64 bw;

			/* We have enough RTT samples, so, using the Vegas
			 * algorithm, we determine if we should increase or
			 * decrease cwnd, and by how much.
			 */

			/* Pluck out the RTT we are using for the Vegas
			 * calculations. This is the min RTT seen during the
			 * last RTT. Taking the min filters out the effects
			 * of delayed ACKs, at the cost of noticing congestion
			 * a bit later.
			 */
			rtt = yeah->vegas.minRTT;

			/* Compute excess number of packets above bandwidth.
			 * Avoid doing full 64 bit divide.
			 */
			bw = tp->snd_cwnd;
			bw *= rtt - yeah->vegas.baseRTT;
			do_div(bw, rtt);
			queue = bw;
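
			/* Illustrative numbers (not from this file): with
			 * snd_cwnd = 100 packets, minRTT rtt = 110 ms and
			 * baseRTT = 100 ms, queue = 100 * 10 / 110 ~= 9
			 * packets estimated in the bottleneck buffer.  Both
			 * tests below then fail (9 <= TCP_YEAH_ALPHA and
			 * 10 ms <= baseRTT / TCP_YEAH_PHY = 12.5 ms), so
			 * the flow stays in the aggressive "fast" mode.
			 */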

			if (queue > TCP_YEAH_ALPHA ||
			    rtt - yeah->vegas.baseRTT > (yeah->vegas.baseRTT / TCP_YEAH_PHY)) {
				if (queue > TCP_YEAH_ALPHA &&
				    tp->snd_cwnd > yeah->reno_count) {
					u32 reduction = min(queue / TCP_YEAH_GAMMA,
							    tp->snd_cwnd >> TCP_YEAH_EPSILON);

					tp->snd_cwnd -= reduction;

					tp->snd_cwnd = max(tp->snd_cwnd,
							   yeah->reno_count);

					tp->snd_ssthresh = tp->snd_cwnd;
				}

				if (yeah->reno_count <= 2)
					yeah->reno_count = max(tp->snd_cwnd>>1, 2U);
				else
					yeah->reno_count++;

				yeah->doing_reno_now = min(yeah->doing_reno_now + 1,
							   0xffffffU);
			} else {
				yeah->fast_count++;

				if (yeah->fast_count > TCP_YEAH_ZETA) {
					yeah->reno_count = 2;
					yeah->fast_count = 0;
				}

				yeah->doing_reno_now = 0;
			}

			yeah->lastQ = queue;
		}

		/* Save the extent of the current window so we can use this
		 * at the end of the next RTT.
		 */
		yeah->vegas.beg_snd_una  = yeah->vegas.beg_snd_nxt;
		yeah->vegas.beg_snd_nxt  = tp->snd_nxt;
		yeah->vegas.beg_snd_cwnd = tp->snd_cwnd;

		/* Wipe the slate clean for the next RTT. */
		yeah->vegas.cntRTT = 0;
		yeah->vegas.minRTT = 0x7fffffff;
	}
}

static u32 tcp_yeah_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct yeah *yeah = inet_csk_ca(sk);
	u32 reduction;

	if (yeah->doing_reno_now < TCP_YEAH_RHO) {
		reduction = yeah->lastQ;

		reduction = min(reduction, max(tp->snd_cwnd>>1, 2U));

		reduction = max(reduction, tp->snd_cwnd >> TCP_YEAH_DELTA);
	} else
		reduction = max(tp->snd_cwnd>>1, 2U);

	yeah->fast_count = 0;
	yeah->reno_count = max(yeah->reno_count>>1, 2U);

	return max_t(int, tp->snd_cwnd - reduction, 2);
}
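
/* Worked example for the reduction above (illustrative numbers, not from
 * the paper): with snd_cwnd = 100, lastQ = 9 and doing_reno_now below
 * TCP_YEAH_RHO, the reduction is clamped into
 * [snd_cwnd >> TCP_YEAH_DELTA, snd_cwnd >> 1] = [12, 50], so
 * reduction = 12 and ssthresh = 100 - 12 = 88.  Once the flow has spent
 * TCP_YEAH_RHO or more consecutive RTTs in "Reno" mode it assumes it is
 * competing with loss-based traffic and falls back to the classic
 * halving, ssthresh = 50.
 */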

static struct tcp_congestion_ops tcp_yeah __read_mostly = {
	.init		= tcp_yeah_init,
	.ssthresh	= tcp_yeah_ssthresh,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.cong_avoid	= tcp_yeah_cong_avoid,
	.set_state	= tcp_vegas_state,
	.cwnd_event	= tcp_vegas_cwnd_event,
	.get_info	= tcp_vegas_get_info,
	.pkts_acked	= tcp_yeah_pkts_acked,

	.owner		= THIS_MODULE,
	.name		= "yeah",
};

static int __init tcp_yeah_register(void)
{
	BUG_ON(sizeof(struct yeah) > ICSK_CA_PRIV_SIZE);
	tcp_register_congestion_control(&tcp_yeah);
	return 0;
}

static void __exit tcp_yeah_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_yeah);
}

module_init(tcp_yeah_register);
module_exit(tcp_yeah_unregister);

MODULE_AUTHOR("Angelo P. Castellani");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("YeAH TCP");
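
Once registered, the .name string "yeah" is what selects the algorithm. As a
minimal user-space sketch (the use_yeah() helper is ours, purely
illustrative; the TCP_CONGESTION socket option itself is standard), a
program could opt in per socket like this, assuming the module is loaded:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

/* Ask the kernel to use YeAH on one socket; returns 0 on success.
 * Fails (e.g. ENOENT) if tcp_yeah is neither built in nor loaded.
 */
static int use_yeah(int fd)
{
	static const char name[] = "yeah";	/* must match tcp_yeah.name */

	return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
			  name, strlen(name));
}

System-wide, the default can likewise be switched by writing "yeah" to the
net.ipv4.tcp_congestion_control sysctl, provided the module is available.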
v4.6
/*
 *
 *   YeAH TCP
 *
 * For further details look at:
 *   https://web.archive.org/web/20080316215752/http://wil.cs.caltech.edu/pfldnet2007/paper/YeAH_TCP.pdf
 *
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>

#include <net/tcp.h>

#include "tcp_vegas.h"

#define TCP_YEAH_ALPHA       80 /* number of packets queued at the bottleneck */
#define TCP_YEAH_GAMMA        1 /* fraction of queue to be removed per rtt */
#define TCP_YEAH_DELTA        3 /* log minimum fraction of cwnd to be removed on loss */
#define TCP_YEAH_EPSILON      1 /* log maximum fraction to be removed on early decongestion */
#define TCP_YEAH_PHY          8 /* maximum delta from base */
#define TCP_YEAH_RHO         16 /* minimum number of consecutive rtt to consider competition on loss */
#define TCP_YEAH_ZETA        50 /* minimum number of state switches to reset reno_count */

#define TCP_SCALABLE_AI_CNT	 100U

/* YeAH variables */
struct yeah {
	struct vegas vegas;	/* must be first */

	/* YeAH */
	u32 lastQ;
	u32 doing_reno_now;

	u32 reno_count;
	u32 fast_count;

	u32 pkts_acked;
};

static void tcp_yeah_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct yeah *yeah = inet_csk_ca(sk);

	tcp_vegas_init(sk);

	yeah->doing_reno_now = 0;
	yeah->lastQ = 0;

	yeah->reno_count = 2;

	/* Ensure the MD arithmetic works.  This is somewhat pedantic,
	 * since I don't think we will see a cwnd this large. :) */
	tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
}

static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct yeah *yeah = inet_csk_ca(sk);

	if (icsk->icsk_ca_state == TCP_CA_Open)
		yeah->pkts_acked = pkts_acked;

	tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us);
}

static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct yeah *yeah = inet_csk_ca(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp, acked);

	else if (!yeah->doing_reno_now) {
		/* Scalable */

		tp->snd_cwnd_cnt += yeah->pkts_acked;
		if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)) {
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
			tp->snd_cwnd_cnt = 0;
		}

		yeah->pkts_acked = 1;

	} else {
		/* Reno */
		tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
	}

	/* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
	 *
	 * These are so named because they represent the approximate values
	 * of snd_una and snd_nxt at the beginning of the current RTT. More
	 * precisely, they represent the amount of data sent during the RTT.
	 * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
	 * we will calculate that (v_beg_snd_nxt - v_vegas.beg_snd_una) outstanding
	 * bytes of data have been ACKed during the course of the RTT, giving
	 * an "actual" rate of:
	 *
	 *     (v_beg_snd_nxt - v_vegas.beg_snd_una) / (rtt duration)
	 *
	 * Unfortunately, v_vegas.beg_snd_una is not exactly equal to snd_una,
	 * because delayed ACKs can cover more than one segment, so they
	 * don't line up yeahly with the boundaries of RTTs.
	 *
	 * Another unfortunate fact of life is that delayed ACKs delay the
	 * advance of the left edge of our send window, so that the number
	 * of bytes we send in an RTT is often less than our cwnd will allow.
	 * So we keep track of our cwnd separately, in v_beg_snd_cwnd.
	 */

	if (after(ack, yeah->vegas.beg_snd_nxt)) {
		/* We do the Vegas calculations only if we got enough RTT
		 * samples that we can be reasonably sure that we got
		 * at least one RTT sample that wasn't from a delayed ACK.
		 * If we only had 2 samples total,
		 * then that means we're getting only 1 ACK per RTT, which
		 * means they're almost certainly delayed ACKs.
		 * If we have 3 samples, we should be OK.
		 */

		if (yeah->vegas.cntRTT > 2) {
			u32 rtt, queue;
			u64 bw;

			/* We have enough RTT samples, so, using the Vegas
			 * algorithm, we determine if we should increase or
			 * decrease cwnd, and by how much.
			 */

			/* Pluck out the RTT we are using for the Vegas
			 * calculations. This is the min RTT seen during the
			 * last RTT. Taking the min filters out the effects
			 * of delayed ACKs, at the cost of noticing congestion
			 * a bit later.
			 */
			rtt = yeah->vegas.minRTT;

			/* Compute excess number of packets above bandwidth.
			 * Avoid doing full 64 bit divide.
			 */
			bw = tp->snd_cwnd;
			bw *= rtt - yeah->vegas.baseRTT;
			do_div(bw, rtt);
			queue = bw;

			if (queue > TCP_YEAH_ALPHA ||
			    rtt - yeah->vegas.baseRTT > (yeah->vegas.baseRTT / TCP_YEAH_PHY)) {
				if (queue > TCP_YEAH_ALPHA &&
				    tp->snd_cwnd > yeah->reno_count) {
					u32 reduction = min(queue / TCP_YEAH_GAMMA,
							    tp->snd_cwnd >> TCP_YEAH_EPSILON);

					tp->snd_cwnd -= reduction;

					tp->snd_cwnd = max(tp->snd_cwnd,
							   yeah->reno_count);

					tp->snd_ssthresh = tp->snd_cwnd;
				}

				if (yeah->reno_count <= 2)
					yeah->reno_count = max(tp->snd_cwnd>>1, 2U);
				else
					yeah->reno_count++;

				yeah->doing_reno_now = min(yeah->doing_reno_now + 1,
							   0xffffffU);
			} else {
				yeah->fast_count++;

				if (yeah->fast_count > TCP_YEAH_ZETA) {
					yeah->reno_count = 2;
					yeah->fast_count = 0;
				}

				yeah->doing_reno_now = 0;
			}

			yeah->lastQ = queue;
		}

		/* Save the extent of the current window so we can use this
		 * at the end of the next RTT.
		 */
		yeah->vegas.beg_snd_una  = yeah->vegas.beg_snd_nxt;
		yeah->vegas.beg_snd_nxt  = tp->snd_nxt;
		yeah->vegas.beg_snd_cwnd = tp->snd_cwnd;

		/* Wipe the slate clean for the next RTT. */
		yeah->vegas.cntRTT = 0;
		yeah->vegas.minRTT = 0x7fffffff;
	}
}

static u32 tcp_yeah_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct yeah *yeah = inet_csk_ca(sk);
	u32 reduction;

	if (yeah->doing_reno_now < TCP_YEAH_RHO) {
		reduction = yeah->lastQ;

		reduction = min(reduction, max(tp->snd_cwnd>>1, 2U));

		reduction = max(reduction, tp->snd_cwnd >> TCP_YEAH_DELTA);
	} else
		reduction = max(tp->snd_cwnd>>1, 2U);

	yeah->fast_count = 0;
	yeah->reno_count = max(yeah->reno_count>>1, 2U);

	return max_t(int, tp->snd_cwnd - reduction, 2);
}

static struct tcp_congestion_ops tcp_yeah __read_mostly = {
	.init		= tcp_yeah_init,
	.ssthresh	= tcp_yeah_ssthresh,
	.cong_avoid	= tcp_yeah_cong_avoid,
	.set_state	= tcp_vegas_state,
	.cwnd_event	= tcp_vegas_cwnd_event,
	.get_info	= tcp_vegas_get_info,
	.pkts_acked	= tcp_yeah_pkts_acked,

	.owner		= THIS_MODULE,
	.name		= "yeah",
};

static int __init tcp_yeah_register(void)
{
	BUG_ON(sizeof(struct yeah) > ICSK_CA_PRIV_SIZE);
	tcp_register_congestion_control(&tcp_yeah);
	return 0;
}

static void __exit tcp_yeah_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_yeah);
}

module_init(tcp_yeah_register);
module_exit(tcp_yeah_unregister);

MODULE_AUTHOR("Angelo P. Castellani");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("YeAH TCP");
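
The two versions shown above differ only in interface plumbing, not in the
algorithm: between v4.6 and v4.17 the pkts_acked hook moved from separate
(u32 pkts_acked, s32 rtt_us) arguments to a single const struct ack_sample *,
the open-coded tp->snd_cwnd <= tp->snd_ssthresh test became the
tcp_in_slow_start() helper, and the congestion-ops table gained an
.undo_cwnd callback (tcp_reno_undo_cwnd).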