net/ipv4/tcp_bic.c (Linux v6.13.7)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Binary Increase Congestion control for TCP
 * Home page:
 *      http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC
 * This is from the implementation of BICTCP in
 * Lisong Xu, Khaled Harfoush, and Injong Rhee,
 *  "Binary Increase Congestion Control for Fast, Long Distance
 *  Networks", IEEE INFOCOM 2004.
 * Available from:
 *  http://netsrv.csc.ncsu.edu/export/bitcp.pdf
 *
 * Unless BIC is enabled and the congestion window is large,
 * this behaves the same as the original Reno.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <net/tcp.h>

#define BICTCP_BETA_SCALE    1024       /* Scale factor for beta: beta is
                                         * expressed in units of 1/1024, so on
                                         * loss cwnd is reduced to
                                         * snd_cwnd * beta / BICTCP_BETA_SCALE
                                         */
#define BICTCP_B                4       /* In binary search, each RTT covers
                                         * 1/BICTCP_B of the remaining distance
                                         * to the last maximum window
                                         */

static int fast_convergence = 1;
static int max_increment = 16;
static int low_window = 14;
static int beta = 819;          /* = 819/1024 (BICTCP_BETA_SCALE) */
static int initial_ssthresh;
static int smooth_part = 20;

module_param(fast_convergence, int, 0644);
MODULE_PARM_DESC(fast_convergence, "turn on/off fast convergence");
module_param(max_increment, int, 0644);
MODULE_PARM_DESC(max_increment, "Limit on increment allowed during binary search");
module_param(low_window, int, 0644);
MODULE_PARM_DESC(low_window, "lower bound on congestion window (for TCP friendliness)");
module_param(beta, int, 0644);
MODULE_PARM_DESC(beta, "beta for multiplicative decrease");
module_param(initial_ssthresh, int, 0644);
MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold");
module_param(smooth_part, int, 0644);
MODULE_PARM_DESC(smooth_part, "log(B/(B*Smin))/log(B/(B-1))+B, # of RTT from Wmax-B to Wmax");
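/*
 * Note: since these parameters are registered with mode 0644, they can
 * typically be inspected and tuned at runtime under
 * /sys/module/tcp_bic/parameters/ once the module is loaded.  BIC is
 * selected system-wide via the net.ipv4.tcp_congestion_control sysctl,
 * or per socket with the TCP_CONGESTION socket option.
 */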

/* BIC TCP Parameters */
struct bictcp {
        u32     cnt;            /* increase cwnd by 1 after cnt ACKs */
        u32     last_max_cwnd;  /* last maximum snd_cwnd */
        u32     last_cwnd;      /* the last snd_cwnd */
        u32     last_time;      /* time when updated last_cwnd */
        u32     epoch_start;    /* beginning of an epoch */
#define ACK_RATIO_SHIFT 4
        u32     delayed_ack;    /* estimate the ratio of Packets/ACKs << 4 */
};

static inline void bictcp_reset(struct bictcp *ca)
{
        ca->cnt = 0;
        ca->last_max_cwnd = 0;
        ca->last_cwnd = 0;
        ca->last_time = 0;
        ca->epoch_start = 0;
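        /* start from an assumed 2 packets per ACK (typical delayed ACKs),
         * scaled by 2^ACK_RATIO_SHIFT
         */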
        ca->delayed_ack = 2 << ACK_RATIO_SHIFT;
}

static void bictcp_init(struct sock *sk)
{
        struct bictcp *ca = inet_csk_ca(sk);

        bictcp_reset(ca);

        if (initial_ssthresh)
                tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}

/*
 * Compute ca->cnt, the number of ACKs after which the congestion window
 * is increased by one segment.
 */
static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
{
        if (ca->last_cwnd == cwnd &&
            (s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
                return;

        ca->last_cwnd = cwnd;
        ca->last_time = tcp_jiffies32;

        if (ca->epoch_start == 0) /* record the beginning of an epoch */
                ca->epoch_start = tcp_jiffies32;

        /* start off normal (Reno-like: about one segment per RTT) */
        if (cwnd <= low_window) {
                ca->cnt = cwnd;
                return;
        }

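        /*
         * Below the last maximum, each RTT covers roughly 1/BICTCP_B of the
         * remaining distance (dist), capped at max_increment segments per RTT
         * and smoothed once cwnd gets close to last_max_cwnd.  For example,
         * with last_max_cwnd = 2000 and cwnd = 1000, dist = 250 > 16, so
         * cnt = 1000 / 16 = 62 and cwnd grows by about 16 segments per RTT.
         */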
        /* binary increase */
        if (cwnd < ca->last_max_cwnd) {
                __u32   dist = (ca->last_max_cwnd - cwnd)
                        / BICTCP_B;

                if (dist > max_increment)
                        /* linear increase */
                        ca->cnt = cwnd / max_increment;
                else if (dist <= 1U)
                        /* binary search increase */
                        ca->cnt = (cwnd * smooth_part) / BICTCP_B;
                else
                        /* binary search increase */
                        ca->cnt = cwnd / dist;
        } else {
                /* slow start and linear increase */
                if (cwnd < ca->last_max_cwnd + BICTCP_B)
                        /* slow start */
                        ca->cnt = (cwnd * smooth_part) / BICTCP_B;
                else if (cwnd < ca->last_max_cwnd + max_increment*(BICTCP_B-1))
                        /* slow start */
                        ca->cnt = (cwnd * (BICTCP_B-1))
                                / (cwnd - ca->last_max_cwnd);
                else
                        /* linear increase */
                        ca->cnt = cwnd / max_increment;
        }

        /* if in slow start or link utilization is very low */
        if (ca->last_max_cwnd == 0) {
                if (ca->cnt > 20) /* increase cwnd 5% per RTT */
                        ca->cnt = 20;
        }

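        /*
         * Scale cnt by the measured packets-per-ACK ratio so that growth per
         * RTT does not depend on delayed ACKs: with the typical two segments
         * per ACK (delayed_ack == 32), cnt is halved because only half as
         * many ACKs arrive.
         */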
        ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack;
        if (ca->cnt == 0)                       /* cannot be zero */
                ca->cnt = 1;
}

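/*
 * On each ACK: do Reno slow start below ssthresh; otherwise recompute the
 * growth rate and let tcp_cong_avoid_ai() add one segment to cwnd for every
 * ca->cnt ACKed segments.
 */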
static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);

        if (!tcp_is_cwnd_limited(sk))
                return;

        if (tcp_in_slow_start(tp)) {
                acked = tcp_slow_start(tp, acked);
                if (!acked)
                        return;
        }
        bictcp_update(ca, tcp_snd_cwnd(tp));
        tcp_cong_avoid_ai(tp, ca->cnt, acked);
}

/*
 *      behave like Reno until low_window is reached,
 *      then back off less aggressively on loss
 */
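/*
 * ssthresh is set to cwnd * beta / 1024 (about 0.8 * cwnd with the default
 * beta = 819), a gentler backoff than Reno's one half.  With
 * fast_convergence, the recorded maximum is additionally lowered to
 * cwnd * (1024 + beta) / 2048 (about 0.9 * cwnd) so that a flow whose share
 * has shrunk releases bandwidth to competitors more quickly.
 */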
static u32 bictcp_recalc_ssthresh(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);

        ca->epoch_start = 0;    /* end of epoch */

        /* Wmax and fast convergence */
        if (tcp_snd_cwnd(tp) < ca->last_max_cwnd && fast_convergence)
                ca->last_max_cwnd = (tcp_snd_cwnd(tp) * (BICTCP_BETA_SCALE + beta))
                        / (2 * BICTCP_BETA_SCALE);
        else
                ca->last_max_cwnd = tcp_snd_cwnd(tp);

        if (tcp_snd_cwnd(tp) <= low_window)
                return max(tcp_snd_cwnd(tp) >> 1U, 2U);
        else
                return max((tcp_snd_cwnd(tp) * beta) / BICTCP_BETA_SCALE, 2U);
}

static void bictcp_state(struct sock *sk, u8 new_state)
{
        if (new_state == TCP_CA_Loss)
                bictcp_reset(inet_csk_ca(sk));
}

/* Track delayed acknowledgment ratio using sliding window
 * ratio = (15*ratio + sample) / 16
 */
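/* delayed_ack is the average scaled by 2^ACK_RATIO_SHIFT, i.e. roughly
 * 16 * (packets acked per ACK); it is only updated in the Open state so
 * that loss recovery does not skew the estimate.
 */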
static void bictcp_acked(struct sock *sk, const struct ack_sample *sample)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ca_state == TCP_CA_Open) {
                struct bictcp *ca = inet_csk_ca(sk);

                ca->delayed_ack += sample->pkts_acked -
                        (ca->delayed_ack >> ACK_RATIO_SHIFT);
        }
}

static struct tcp_congestion_ops bictcp __read_mostly = {
        .init           = bictcp_init,
        .ssthresh       = bictcp_recalc_ssthresh,
        .cong_avoid     = bictcp_cong_avoid,
        .set_state      = bictcp_state,
        .undo_cwnd      = tcp_reno_undo_cwnd,
        .pkts_acked     = bictcp_acked,
        .owner          = THIS_MODULE,
        .name           = "bic",
};

static int __init bictcp_register(void)
{
        BUILD_BUG_ON(sizeof(struct bictcp) > ICSK_CA_PRIV_SIZE);
        return tcp_register_congestion_control(&bictcp);
}

static void __exit bictcp_unregister(void)
{
        tcp_unregister_congestion_control(&bictcp);
}

module_init(bictcp_register);
module_exit(bictcp_unregister);

MODULE_AUTHOR("Stephen Hemminger");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("BIC TCP");
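For reference, here is a minimal user-space sketch of selecting BIC per socket with the TCP_CONGESTION socket option. It is not part of the kernel file above; it assumes the tcp_bic module is available (or can be auto-loaded) and that <netinet/tcp.h> defines TCP_CONGESTION. Unprivileged callers may also get EPERM unless "bic" is listed in net.ipv4.tcp_allowed_congestion_control.

/* select_bic.c - pick the "bic" congestion control for one TCP socket */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        char name[16] = "bic";          /* TCP_CA_NAME_MAX is 16 */
        socklen_t len;

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        /* request BIC for this socket */
        if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, strlen(name)) < 0) {
                perror("setsockopt(TCP_CONGESTION)");
                close(fd);
                return 1;
        }

        /* read back the congestion control actually in effect */
        len = sizeof(name);
        if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len) == 0)
                printf("congestion control: %.*s\n", (int)len, name);

        close(fd);
        return 0;
}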
net/ipv4/tcp_bic.c (Linux v4.10.11)
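The same file as it appeared in Linux v4.10.11 follows. The main differences from the v6.13.7 copy above: timestamps use the old tcp_time_stamp instead of tcp_jiffies32, cwnd is accessed directly as tp->snd_cwnd rather than through tcp_snd_cwnd(), the module still keeps its own loss_cwnd and bictcp_undo_cwnd() instead of the generic tcp_reno_undo_cwnd(), and bictcp_cong_avoid() does not yet carry leftover acked segments from slow start into the additive-increase step.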
 
/*
 * Binary Increase Congestion control for TCP
 * Home page:
 *      http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC
 * This is from the implementation of BICTCP in
 * Lisong Xu, Khaled Harfoush, and Injong Rhee,
 *  "Binary Increase Congestion Control for Fast, Long Distance
 *  Networks", IEEE INFOCOM 2004.
 * Available from:
 *  http://netsrv.csc.ncsu.edu/export/bitcp.pdf
 *
 * Unless BIC is enabled and the congestion window is large,
 * this behaves the same as the original Reno.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <net/tcp.h>

#define BICTCP_BETA_SCALE    1024       /* Scale factor for beta: beta is
                                         * expressed in units of 1/1024, so on
                                         * loss cwnd is reduced to
                                         * snd_cwnd * beta / BICTCP_BETA_SCALE
                                         */
#define BICTCP_B                4       /* In binary search, each RTT covers
                                         * 1/BICTCP_B of the remaining distance
                                         * to the last maximum window
                                         */

static int fast_convergence = 1;
static int max_increment = 16;
static int low_window = 14;
static int beta = 819;          /* = 819/1024 (BICTCP_BETA_SCALE) */
static int initial_ssthresh;
static int smooth_part = 20;

module_param(fast_convergence, int, 0644);
MODULE_PARM_DESC(fast_convergence, "turn on/off fast convergence");
module_param(max_increment, int, 0644);
MODULE_PARM_DESC(max_increment, "Limit on increment allowed during binary search");
module_param(low_window, int, 0644);
MODULE_PARM_DESC(low_window, "lower bound on congestion window (for TCP friendliness)");
module_param(beta, int, 0644);
MODULE_PARM_DESC(beta, "beta for multiplicative decrease");
module_param(initial_ssthresh, int, 0644);
MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold");
module_param(smooth_part, int, 0644);
MODULE_PARM_DESC(smooth_part, "log(B/(B*Smin))/log(B/(B-1))+B, # of RTT from Wmax-B to Wmax");

/* BIC TCP Parameters */
struct bictcp {
        u32     cnt;            /* increase cwnd by 1 after cnt ACKs */
        u32     last_max_cwnd;  /* last maximum snd_cwnd */
        u32     loss_cwnd;      /* congestion window at last loss */
        u32     last_cwnd;      /* the last snd_cwnd */
        u32     last_time;      /* time when updated last_cwnd */
        u32     epoch_start;    /* beginning of an epoch */
#define ACK_RATIO_SHIFT 4
        u32     delayed_ack;    /* estimate the ratio of Packets/ACKs << 4 */
};

static inline void bictcp_reset(struct bictcp *ca)
{
        ca->cnt = 0;
        ca->last_max_cwnd = 0;
        ca->last_cwnd = 0;
        ca->last_time = 0;
        ca->epoch_start = 0;
        ca->delayed_ack = 2 << ACK_RATIO_SHIFT;
}

static void bictcp_init(struct sock *sk)
{
        struct bictcp *ca = inet_csk_ca(sk);

        bictcp_reset(ca);
        ca->loss_cwnd = 0;

        if (initial_ssthresh)
                tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}

/*
 * Compute ca->cnt, the number of ACKs after which the congestion window
 * is increased by one segment.
 */
static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
{
        if (ca->last_cwnd == cwnd &&
            (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32)
                return;

        ca->last_cwnd = cwnd;
        ca->last_time = tcp_time_stamp;

        if (ca->epoch_start == 0) /* record the beginning of an epoch */
                ca->epoch_start = tcp_time_stamp;

        /* start off normal (Reno-like: about one segment per RTT) */
        if (cwnd <= low_window) {
                ca->cnt = cwnd;
                return;
        }

        /* binary increase */
        if (cwnd < ca->last_max_cwnd) {
                __u32   dist = (ca->last_max_cwnd - cwnd)
                        / BICTCP_B;

                if (dist > max_increment)
                        /* linear increase */
                        ca->cnt = cwnd / max_increment;
                else if (dist <= 1U)
                        /* binary search increase */
                        ca->cnt = (cwnd * smooth_part) / BICTCP_B;
                else
                        /* binary search increase */
                        ca->cnt = cwnd / dist;
        } else {
                /* slow start and linear increase */
                if (cwnd < ca->last_max_cwnd + BICTCP_B)
                        /* slow start */
                        ca->cnt = (cwnd * smooth_part) / BICTCP_B;
                else if (cwnd < ca->last_max_cwnd + max_increment*(BICTCP_B-1))
                        /* slow start */
                        ca->cnt = (cwnd * (BICTCP_B-1))
                                / (cwnd - ca->last_max_cwnd);
                else
                        /* linear increase */
                        ca->cnt = cwnd / max_increment;
        }

        /* if in slow start or link utilization is very low */
        if (ca->last_max_cwnd == 0) {
                if (ca->cnt > 20) /* increase cwnd 5% per RTT */
                        ca->cnt = 20;
        }

        ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack;
        if (ca->cnt == 0)                       /* cannot be zero */
                ca->cnt = 1;
}

static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);

        if (!tcp_is_cwnd_limited(sk))
                return;

        if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
        else {
                bictcp_update(ca, tp->snd_cwnd);
                tcp_cong_avoid_ai(tp, ca->cnt, 1);
        }
}

/*
 *      behave like Reno until low_window is reached,
 *      then back off less aggressively on loss
 */
static u32 bictcp_recalc_ssthresh(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);

        ca->epoch_start = 0;    /* end of epoch */

        /* Wmax and fast convergence */
        if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
                ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
                        / (2 * BICTCP_BETA_SCALE);
        else
                ca->last_max_cwnd = tp->snd_cwnd;

        ca->loss_cwnd = tp->snd_cwnd;

        if (tp->snd_cwnd <= low_window)
                return max(tp->snd_cwnd >> 1U, 2U);
        else
                return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
}

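/*
 * In this version the module records the cwnd at the last loss itself and
 * uses it to undo a spurious reduction; later kernels (as in the v6.13.7
 * copy above) drop loss_cwnd and use the generic tcp_reno_undo_cwnd()
 * helper instead.
 */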
static u32 bictcp_undo_cwnd(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        const struct bictcp *ca = inet_csk_ca(sk);

        return max(tp->snd_cwnd, ca->loss_cwnd);
}

static void bictcp_state(struct sock *sk, u8 new_state)
{
        if (new_state == TCP_CA_Loss)
                bictcp_reset(inet_csk_ca(sk));
}

/* Track delayed acknowledgment ratio using sliding window
 * ratio = (15*ratio + sample) / 16
 */
static void bictcp_acked(struct sock *sk, const struct ack_sample *sample)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ca_state == TCP_CA_Open) {
                struct bictcp *ca = inet_csk_ca(sk);

                ca->delayed_ack += sample->pkts_acked -
                        (ca->delayed_ack >> ACK_RATIO_SHIFT);
        }
}

static struct tcp_congestion_ops bictcp __read_mostly = {
        .init           = bictcp_init,
        .ssthresh       = bictcp_recalc_ssthresh,
        .cong_avoid     = bictcp_cong_avoid,
        .set_state      = bictcp_state,
        .undo_cwnd      = bictcp_undo_cwnd,
        .pkts_acked     = bictcp_acked,
        .owner          = THIS_MODULE,
        .name           = "bic",
};

static int __init bictcp_register(void)
{
        BUILD_BUG_ON(sizeof(struct bictcp) > ICSK_CA_PRIV_SIZE);
        return tcp_register_congestion_control(&bictcp);
}

static void __exit bictcp_unregister(void)
{
        tcp_unregister_congestion_control(&bictcp);
}

module_init(bictcp_register);
module_exit(bictcp_unregister);

MODULE_AUTHOR("Stephen Hemminger");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("BIC TCP");