v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2
  3/* WARNING: This implementation is not necessarily the same
  4 * as tcp_cubic.c.  The purpose is mainly to test
  5 * the kernel BPF logic.
  6 *
  7 * Highlights:
  8 * 1. CONFIG_HZ .kconfig map is used.
  9 * 2. In bictcp_update(), calculation is changed to use usec
 10 *    resolution (i.e. USEC_PER_JIFFY) instead of using jiffies.
 11 *    Thus, usecs_to_jiffies() is not used in bpf_cubic.c.
 12 * 3. In bictcp_update() [under tcp_friendliness], the original
 13 *    "while (ca->ack_cnt > delta)" loop is changed to the equivalent
 14 *    "ca->ack_cnt / delta" operation.
 15 */
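/* For highlight 3 above, the rewrite is behavior-preserving: the original
 * tcp_cubic.c loop
 *
 *	while (ca->ack_cnt > delta) {
 *		ca->ack_cnt -= delta;
 *		ca->tcp_cwnd++;
 *	}
 *
 * subtracts delta from ack_cnt n = ack_cnt / delta times, so the direct
 * form used in bictcp_update() below,
 *
 *	n = ca->ack_cnt / delta;
 *	ca->ack_cnt -= n * delta;
 *	ca->tcp_cwnd += n;
 *
 * leaves the same final state while avoiding a loop whose trip count the
 * BPF verifier would have to bound.
 */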
 16
 17#include <linux/bpf.h>
 18#include "bpf_tcp_helpers.h"
 19
 20char _license[] SEC("license") = "GPL";
 21
 22#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
 23
 24#define BICTCP_BETA_SCALE    1024	/* Scale factor for beta calculation
 25					 * max_cwnd = snd_cwnd * beta
 26					 */
 27#define	BICTCP_HZ		10	/* BIC HZ 2^10 = 1024 */
 28
 29/* Two methods of hybrid slow start */
 30#define HYSTART_ACK_TRAIN	0x1
 31#define HYSTART_DELAY		0x2
 32
 33/* Number of delay samples for detecting the increase of delay */
 34#define HYSTART_MIN_SAMPLES	8
 35#define HYSTART_DELAY_MIN	(4000U)	/* 4 ms */
 36#define HYSTART_DELAY_MAX	(16000U)	/* 16 ms */
 37#define HYSTART_DELAY_THRESH(x)	clamp(x, HYSTART_DELAY_MIN, HYSTART_DELAY_MAX)
 38
 39static int fast_convergence = 1;
 40static const int beta = 717;	/* = 717/1024 (BICTCP_BETA_SCALE) */
 41static int initial_ssthresh;
 42static const int bic_scale = 41;
 43static int tcp_friendliness = 1;
 44
 45static int hystart = 1;
 46static int hystart_detect = HYSTART_ACK_TRAIN | HYSTART_DELAY;
 47static int hystart_low_window = 16;
 48static int hystart_ack_delta_us = 2000;
 49
 50static const __u32 cube_rtt_scale = (bic_scale * 10);	/* 1024*c/rtt */
 51static const __u32 beta_scale = 8*(BICTCP_BETA_SCALE+beta) / 3
 52				/ (BICTCP_BETA_SCALE - beta);
 53/* calculate the "K" for (wmax-cwnd) = c/rtt * K^3
 54 *  so K = cubic_root( (wmax-cwnd)*rtt/c )
 55 * the unit of K is bictcp_HZ=2^10, not HZ
 56 *
 57 *  c = bic_scale >> 10
 58 *  rtt = 100ms
 59 *
 60 * the following code has been designed and tested for
 61 * cwnd < 1 million packets
 62 * RTT < 100 seconds
 63 * HZ < 100,000,000 (corresponding to a 10 nanosecond jiffy)
 64 */
 65
 66/* 1/c * 2^2*bictcp_HZ * srtt, 2^40 */
 67static const __u64 cube_factor = (__u64)(1ull << (10+3*BICTCP_HZ))
 68				/ (bic_scale * 10);
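/* Worked example (approximate figures): with bic_scale = 41,
 * cube_factor = 2^40 / 410 ~= 2.68e9.  bictcp_update() below computes
 * bic_K = cubic_root(cube_factor * (last_max_cwnd - cwnd)); a 100-packet
 * drop in cwnd gives cubic_root(~2.68e11) ~= 6449 in 2^-BICTCP_HZ second
 * units, i.e. roughly 6.3 seconds back to the origin point.
 */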
 69
 70/* BIC TCP Parameters */
 71struct bictcp {
 72	__u32	cnt;		/* increase cwnd by 1 after ACKs */
 73	__u32	last_max_cwnd;	/* last maximum snd_cwnd */
 74	__u32	last_cwnd;	/* the last snd_cwnd */
 75	__u32	last_time;	/* time when updated last_cwnd */
 76	__u32	bic_origin_point;/* origin point of bic function */
 77	__u32	bic_K;		/* time to origin point
 78				   from the beginning of the current epoch */
 79	__u32	delay_min;	/* min delay (usec) */
 80	__u32	epoch_start;	/* beginning of an epoch */
 81	__u32	ack_cnt;	/* number of acks */
 82	__u32	tcp_cwnd;	/* estimated tcp cwnd */
 83	__u16	unused;
 84	__u8	sample_cnt;	/* number of samples to decide curr_rtt */
 85	__u8	found;		/* the exit point is found? */
 86	__u32	round_start;	/* beginning of each round */
 87	__u32	end_seq;	/* end_seq of the round */
 88	__u32	last_ack;	/* last time when the ACK spacing is close */
 89	__u32	curr_rtt;	/* the minimum rtt of current round */
 90};
 91
 92static inline void bictcp_reset(struct bictcp *ca)
 93{
 94	ca->cnt = 0;
 95	ca->last_max_cwnd = 0;
 96	ca->last_cwnd = 0;
 97	ca->last_time = 0;
 98	ca->bic_origin_point = 0;
 99	ca->bic_K = 0;
100	ca->delay_min = 0;
101	ca->epoch_start = 0;
102	ca->ack_cnt = 0;
103	ca->tcp_cwnd = 0;
104	ca->found = 0;
105}
106
107extern unsigned long CONFIG_HZ __kconfig;
108#define HZ CONFIG_HZ
109#define USEC_PER_MSEC	1000UL
110#define USEC_PER_SEC	1000000UL
111#define USEC_PER_JIFFY	(USEC_PER_SEC / HZ)
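/* e.g. CONFIG_HZ = 1000 gives USEC_PER_JIFFY = 1000000 / 1000 = 1000,
 * i.e. a 1 ms jiffy; CONFIG_HZ = 250 gives a 4000 us jiffy.
 */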
112
113static __always_inline __u64 div64_u64(__u64 dividend, __u64 divisor)
114{
115	return dividend / divisor;
116}
117
118#define div64_ul div64_u64
119
120#define BITS_PER_U64 (sizeof(__u64) * 8)
121static __always_inline int fls64(__u64 x)
122{
123	int num = BITS_PER_U64 - 1;
124
125	if (x == 0)
126		return 0;
127
128	if (!(x & (~0ull << (BITS_PER_U64-32)))) {
129		num -= 32;
130		x <<= 32;
131	}
132	if (!(x & (~0ull << (BITS_PER_U64-16)))) {
133		num -= 16;
134		x <<= 16;
135	}
136	if (!(x & (~0ull << (BITS_PER_U64-8)))) {
137		num -= 8;
138		x <<= 8;
139	}
140	if (!(x & (~0ull << (BITS_PER_U64-4)))) {
141		num -= 4;
142		x <<= 4;
143	}
144	if (!(x & (~0ull << (BITS_PER_U64-2)))) {
145		num -= 2;
146		x <<= 2;
147	}
148	if (!(x & (~0ull << (BITS_PER_U64-1))))
149		num -= 1;
150
151	return num + 1;
152}
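/* fls64() returns the 1-based position of the most significant set bit:
 * fls64(0) = 0, fls64(1) = 1, fls64(0x8000000000000000ull) = 64.  The six
 * mask-and-shift tests above binary-search for that bit in a fixed number
 * of steps, which keeps the function verifier-friendly (no data-dependent
 * loop).
 */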
153
154static __always_inline __u32 bictcp_clock_us(const struct sock *sk)
155{
156	return tcp_sk(sk)->tcp_mstamp;
157}
158
159static __always_inline void bictcp_hystart_reset(struct sock *sk)
160{
161	struct tcp_sock *tp = tcp_sk(sk);
162	struct bictcp *ca = inet_csk_ca(sk);
163
164	ca->round_start = ca->last_ack = bictcp_clock_us(sk);
165	ca->end_seq = tp->snd_nxt;
166	ca->curr_rtt = ~0U;
167	ca->sample_cnt = 0;
168}
169
170/* The "struct_ops/" prefix is not a requirement.
171 * The program will be recognized as BPF_PROG_TYPE_STRUCT_OPS
172 * as long as it is used as one of the func ptrs
173 * under SEC(".struct_ops").
174 */
175SEC("struct_ops/bictcp_init")
176void BPF_PROG(bictcp_init, struct sock *sk)
177{
178	struct bictcp *ca = inet_csk_ca(sk);
179
180	bictcp_reset(ca);
181
182	if (hystart)
183		bictcp_hystart_reset(sk);
184
185	if (!hystart && initial_ssthresh)
186		tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
187}
188
189/* A SEC name without a prefix also works.
190 * The remaining tcp-cubic functions use an easier way (BPF_STRUCT_OPS).
191 */
192SEC("no-sec-prefix-bictcp_cwnd_event")
193void BPF_PROG(bictcp_cwnd_event, struct sock *sk, enum tcp_ca_event event)
194{
195	if (event == CA_EVENT_TX_START) {
196		struct bictcp *ca = inet_csk_ca(sk);
197		__u32 now = tcp_jiffies32;
198		__s32 delta;
199
200		delta = now - tcp_sk(sk)->lsndtime;
201
202		/* We were application limited (idle) for a while.
203		 * Shift epoch_start to keep cwnd growth to cubic curve.
204		 */
205		if (ca->epoch_start && delta > 0) {
206			ca->epoch_start += delta;
207			if (after(ca->epoch_start, now))
208				ca->epoch_start = now;
209		}
210		return;
211	}
212}
213
214/*
215 * cbrt(x) MSB values for x MSB values in [0..63].
216 * Precomputed then refined by hand - Willy Tarreau
217 *
218 * For x in [0..63],
219 *   v = cbrt(x << 18) - 1
220 *   cbrt(x) = (v[x] + 10) >> 6
221 */
222static const __u8 v[] = {
223	/* 0x00 */    0,   54,   54,   54,  118,  118,  118,  118,
224	/* 0x08 */  123,  129,  134,  138,  143,  147,  151,  156,
225	/* 0x10 */  157,  161,  164,  168,  170,  173,  176,  179,
226	/* 0x18 */  181,  185,  187,  190,  192,  194,  197,  199,
227	/* 0x20 */  200,  202,  204,  206,  209,  211,  213,  215,
228	/* 0x28 */  217,  219,  221,  222,  224,  225,  227,  229,
229	/* 0x30 */  231,  232,  234,  236,  237,  239,  240,  242,
230	/* 0x38 */  244,  245,  246,  248,  250,  251,  252,  254,
231};
232
233/* calculate the cubic root of x using a table lookup followed by one
234 * Newton-Raphson iteration.
235 * Avg err ~= 0.195%
236 */
237static __always_inline __u32 cubic_root(__u64 a)
238{
239	__u32 x, b, shift;
240
241	if (a < 64) {
242		/* a in [0..63] */
243		return ((__u32)v[(__u32)a] + 35) >> 6;
244	}
245
246	b = fls64(a);
247	b = ((b * 84) >> 8) - 1;
248	shift = (a >> (b * 3));
249
250	/* needed for the BPF verifier's bounds check on v */
251	if (shift >= 64)
252		return 0;
253
254	x = ((__u32)(((__u32)v[shift] + 10) << b)) >> 6;
255
256	/*
257	 * Newton-Raphson iteration
258	 *                         2
259	 * x    = ( 2 * x  +  a / x  ) / 3
260	 *  k+1          k         k
261	 */
262	x = (2 * x + (__u32)div64_u64(a, (__u64)x * (__u64)(x - 1)));
263	x = ((x * 341) >> 10);
264	return x;
265}
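/* Worked examples from the table: for a < 64 the result is direct, e.g.
 * cubic_root(8) = (v[8] + 35) >> 6 = (123 + 35) >> 6 = 2 and
 * cubic_root(27) = (v[27] + 35) >> 6 = (190 + 35) >> 6 = 3.  For larger
 * inputs the table seeds an estimate that the single Newton-Raphson step
 * refines to the quoted ~0.195% average error.
 */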
266
267/*
268 * Compute congestion window to use.
269 */
270static __always_inline void bictcp_update(struct bictcp *ca, __u32 cwnd,
271					  __u32 acked)
272{
273	__u32 delta, bic_target, max_cnt;
274	__u64 offs, t;
275
276	ca->ack_cnt += acked;	/* count the number of ACKed packets */
277
278	if (ca->last_cwnd == cwnd &&
279	    (__s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
280		return;
281
282	/* The CUBIC function can update ca->cnt at most once per jiffy.
283	 * On all cwnd reduction events, ca->epoch_start is set to 0,
284	 * which will force a recalculation of ca->cnt.
285	 */
286	if (ca->epoch_start && tcp_jiffies32 == ca->last_time)
287		goto tcp_friendliness;
288
289	ca->last_cwnd = cwnd;
290	ca->last_time = tcp_jiffies32;
291
292	if (ca->epoch_start == 0) {
293		ca->epoch_start = tcp_jiffies32;	/* record beginning */
294		ca->ack_cnt = acked;			/* start counting */
295	ca->tcp_cwnd = cwnd;			/* sync with cubic */
296
297		if (ca->last_max_cwnd <= cwnd) {
298			ca->bic_K = 0;
299			ca->bic_origin_point = cwnd;
300		} else {
301			/* Compute new K based on
302			 * (wmax-cwnd) * (srtt>>3 / HZ) / c * 2^(3*bictcp_HZ)
303			 */
304			ca->bic_K = cubic_root(cube_factor
305					       * (ca->last_max_cwnd - cwnd));
306			ca->bic_origin_point = ca->last_max_cwnd;
307		}
308	}
309
310	/* cubic function - calc*/
311	/* calculate c * time^3 / rtt,
312	 *  while considering overflow in calculation of time^3
313	 * (so time^3 is done by using 64 bit)
314	 * and without the support of division of 64bit numbers
315	 * (so all divisions are done by using 32 bit)
316 *  also NOTE the unit of those variables
317	 *	  time  = (t - K) / 2^bictcp_HZ
318	 *	  c = bic_scale >> 10
319	 * rtt  = (srtt >> 3) / HZ
320	 * !!! The following code does not have overflow problems,
321	 * if the cwnd < 1 million packets !!!
322	 */
323
324	t = (__s32)(tcp_jiffies32 - ca->epoch_start) * USEC_PER_JIFFY;
325	t += ca->delay_min;
326	/* change the unit from usec to bictcp_HZ */
327	t <<= BICTCP_HZ;
328	t /= USEC_PER_SEC;
329
330	if (t < ca->bic_K)		/* t - K */
331		offs = ca->bic_K - t;
332	else
333		offs = t - ca->bic_K;
334
335	/* c/rtt * (t-K)^3 */
336	delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ);
337	if (t < ca->bic_K)                            /* below origin*/
338		bic_target = ca->bic_origin_point - delta;
339	else                                          /* above origin*/
340		bic_target = ca->bic_origin_point + delta;
341
342	/* cubic function - calc bictcp_cnt*/
343	if (bic_target > cwnd) {
344		ca->cnt = cwnd / (bic_target - cwnd);
345	} else {
346		ca->cnt = 100 * cwnd;              /* very small increment*/
347	}
348
349	/*
350	 * The initial growth of cubic function may be too conservative
351	 * when the available bandwidth is still unknown.
352	 */
353	if (ca->last_max_cwnd == 0 && ca->cnt > 20)
354		ca->cnt = 20;	/* increase cwnd 5% per RTT */
355
356tcp_friendliness:
357	/* TCP Friendly */
358	if (tcp_friendliness) {
359		__u32 scale = beta_scale;
360		__u32 n;
361
362		/* update tcp cwnd */
363		delta = (cwnd * scale) >> 3;
364		if (ca->ack_cnt > delta && delta) {
365			n = ca->ack_cnt / delta;
366			ca->ack_cnt -= n * delta;
367			ca->tcp_cwnd += n;
368		}
369
370		if (ca->tcp_cwnd > cwnd) {	/* if bic is slower than tcp */
371			delta = ca->tcp_cwnd - cwnd;
372			max_cnt = cwnd / delta;
373			if (ca->cnt > max_cnt)
374				ca->cnt = max_cnt;
375		}
376	}
377
378	/* The maximum rate of cwnd increase CUBIC allows is 1 packet per
379	 * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT.
380	 */
381	ca->cnt = max(ca->cnt, 2U);
382}
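/* ca->cnt is consumed by tcp_cong_avoid_ai(): cwnd grows by one segment
 * per ca->cnt ACKed segments.  Example with made-up numbers: cwnd = 100
 * and bic_target = 110 give ca->cnt = 100 / 10 = 10, so the cubic target
 * is reached after roughly one RTT worth of ACKs; the max(ca->cnt, 2U)
 * clamp above is what limits growth to 1.5x per RTT.
 */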
383
384/* Or simply use the BPF_STRUCT_OPS macro to avoid the SEC boilerplate. */
385void BPF_STRUCT_OPS(bictcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
386{
387	struct tcp_sock *tp = tcp_sk(sk);
388	struct bictcp *ca = inet_csk_ca(sk);
389
390	if (!tcp_is_cwnd_limited(sk))
391		return;
392
393	if (tcp_in_slow_start(tp)) {
394		if (hystart && after(ack, ca->end_seq))
395			bictcp_hystart_reset(sk);
396		acked = tcp_slow_start(tp, acked);
397		if (!acked)
398			return;
399	}
400	bictcp_update(ca, tp->snd_cwnd, acked);
401	tcp_cong_avoid_ai(tp, ca->cnt, acked);
402}
403
404__u32 BPF_STRUCT_OPS(bictcp_recalc_ssthresh, struct sock *sk)
405{
406	const struct tcp_sock *tp = tcp_sk(sk);
407	struct bictcp *ca = inet_csk_ca(sk);
408
409	ca->epoch_start = 0;	/* end of epoch */
410
411	/* Wmax and fast convergence */
412	if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
413		ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
414			/ (2 * BICTCP_BETA_SCALE);
415	else
416		ca->last_max_cwnd = tp->snd_cwnd;
417
418	return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
419}
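/* With beta = 717, the multiplicative decrease is 717/1024 ~= 0.7: a loss
 * at snd_cwnd = 100 returns ssthresh = 70.  Under fast convergence the
 * remembered last_max_cwnd is additionally scaled by
 * (1024 + 717) / 2048 ~= 0.85, ceding bandwidth to newer flows sooner.
 */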
420
421void BPF_STRUCT_OPS(bictcp_state, struct sock *sk, __u8 new_state)
422{
423	if (new_state == TCP_CA_Loss) {
424		bictcp_reset(inet_csk_ca(sk));
425		bictcp_hystart_reset(sk);
426	}
427}
428
429#define GSO_MAX_SIZE		65536
430
431/* Account for TSO/GRO delays.
432 * Otherwise short RTT flows could get too small ssthresh, since during
433 * slow start we begin with small TSO packets and ca->delay_min would
434 * not account for long aggregation delay when TSO packets get bigger.
435 * Ideally even with a very small RTT we would like to have at least one
436 * TSO packet being sent and received by GRO, and another one in qdisc layer.
437 * We apply another 100% factor because @rate is doubled at this point.
438 * We cap the cushion to 1ms.
439 */
440static __always_inline __u32 hystart_ack_delay(struct sock *sk)
441{
442	unsigned long rate;
443
444	rate = sk->sk_pacing_rate;
445	if (!rate)
446		return 0;
447	return min((__u64)USEC_PER_MSEC,
448		   div64_ul((__u64)GSO_MAX_SIZE * 4 * USEC_PER_SEC, rate));
449}
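/* Example: at sk_pacing_rate = 125000000 bytes/s (~1 Gbit/s) the raw
 * cushion is 65536 * 4 * 1000000 / 125000000 ~= 2097 us, so the min()
 * caps the returned value at USEC_PER_MSEC = 1000 us.
 */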
450
451static __always_inline void hystart_update(struct sock *sk, __u32 delay)
452{
453	struct tcp_sock *tp = tcp_sk(sk);
454	struct bictcp *ca = inet_csk_ca(sk);
455	__u32 threshold;
456
457	if (hystart_detect & HYSTART_ACK_TRAIN) {
458		__u32 now = bictcp_clock_us(sk);
459
460		/* first detection parameter - ack-train detection */
461		if ((__s32)(now - ca->last_ack) <= hystart_ack_delta_us) {
462			ca->last_ack = now;
463
464			threshold = ca->delay_min + hystart_ack_delay(sk);
465
466			/* Hystart ack train triggers if we get ack past
467			 * ca->delay_min/2.
468			 * Pacing might have delayed packets up to RTT/2
469			 * during slow start.
470			 */
471			if (sk->sk_pacing_status == SK_PACING_NONE)
472				threshold >>= 1;
473
474			if ((__s32)(now - ca->round_start) > threshold) {
475				ca->found = 1;
476				tp->snd_ssthresh = tp->snd_cwnd;
477			}
478		}
479	}
480
481	if (hystart_detect & HYSTART_DELAY) {
482		/* obtain the minimum delay of at least HYSTART_MIN_SAMPLES packets */
483		if (ca->curr_rtt > delay)
484			ca->curr_rtt = delay;
485		if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
486			ca->sample_cnt++;
487		} else {
488			if (ca->curr_rtt > ca->delay_min +
489			    HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
490				ca->found = 1;
491				tp->snd_ssthresh = tp->snd_cwnd;
492			}
493		}
494	}
495}
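/* Example for the HYSTART_DELAY branch: with delay_min = 10000 us,
 * delay_min >> 3 = 1250 us is clamped by HYSTART_DELAY_THRESH() to the
 * 4000 us floor, so slow start exits once the round's minimum RTT
 * exceeds 14000 us after at least HYSTART_MIN_SAMPLES (8) samples.
 */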
496
497void BPF_STRUCT_OPS(bictcp_acked, struct sock *sk,
498		    const struct ack_sample *sample)
499{
500	const struct tcp_sock *tp = tcp_sk(sk);
501	struct bictcp *ca = inet_csk_ca(sk);
502	__u32 delay;
503
504	/* Some calls are for duplicates without timestamps */
505	if (sample->rtt_us < 0)
506		return;
507
508	/* Discard delay samples right after fast recovery */
509	if (ca->epoch_start && (__s32)(tcp_jiffies32 - ca->epoch_start) < HZ)
510		return;
511
512	delay = sample->rtt_us;
513	if (delay == 0)
514		delay = 1;
515
516	/* first time call or link delay decreases */
517	if (ca->delay_min == 0 || ca->delay_min > delay)
518		ca->delay_min = delay;
519
520	/* hystart triggers when cwnd is larger than some threshold */
521	if (!ca->found && tcp_in_slow_start(tp) && hystart &&
522	    tp->snd_cwnd >= hystart_low_window)
523		hystart_update(sk, delay);
524}
525
526__u32 BPF_STRUCT_OPS(tcp_reno_undo_cwnd, struct sock *sk)
527{
528	const struct tcp_sock *tp = tcp_sk(sk);
529
530	return max(tp->snd_cwnd, tp->prior_cwnd);
531}
532
533SEC(".struct_ops")
534struct tcp_congestion_ops cubic = {
535	.init		= (void *)bictcp_init,
536	.ssthresh	= (void *)bictcp_recalc_ssthresh,
537	.cong_avoid	= (void *)bictcp_cong_avoid,
538	.set_state	= (void *)bictcp_state,
539	.undo_cwnd	= (void *)tcp_reno_undo_cwnd,
540	.cwnd_event	= (void *)bictcp_cwnd_event,
541	.pkts_acked     = (void *)bictcp_acked,
542	.name		= "bpf_cubic",
543};
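For context, a minimal userspace loader sketch. This is hypothetical: it assumes a bpftool-generated skeleton header named bpf_cubic.skel.h and the helper name load_bpf_cubic(), neither of which is part of this file.

#include <bpf/libbpf.h>
#include "bpf_cubic.skel.h"	/* from: bpftool gen skeleton bpf_cubic.o */

/* Hypothetical helper: registers "bpf_cubic" as an available congestion
 * control; sockets can then opt in with
 * setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "bpf_cubic", 9).
 */
static int load_bpf_cubic(void)
{
	struct bpf_cubic *skel;
	struct bpf_link *link;

	skel = bpf_cubic__open_and_load();
	if (!skel)
		return -1;

	/* Attaching the struct_ops map registers the tcp_congestion_ops;
	 * the attachment persists while the link stays open.
	 */
	link = bpf_map__attach_struct_ops(skel->maps.cubic);
	if (!link) {
		bpf_cubic__destroy(skel);
		return -1;
	}
	return 0;
}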
v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2
  3/* WARNING: This implementation is not necessarily the same
  4 * as tcp_cubic.c.  The purpose is mainly to test
  5 * the kernel BPF logic.
  6 *
  7 * Highlights:
  8 * 1. CONFIG_HZ .kconfig map is used.
  9 * 2. In bictcp_update(), calculation is changed to use usec
 10 *    resolution (i.e. USEC_PER_JIFFY) instead of using jiffies.
 11 *    Thus, usecs_to_jiffies() is not used in bpf_cubic.c.
 12 * 3. In bictcp_update() [under tcp_friendliness], the original
 13 *    "while (ca->ack_cnt > delta)" loop is changed to the equivalent
 14 *    "ca->ack_cnt / delta" operation.
 15 */
 16
 17#include <linux/bpf.h>
 18#include <linux/stddef.h>
 19#include <linux/tcp.h>
 20#include "bpf_tcp_helpers.h"
 21
 22char _license[] SEC("license") = "GPL";
 23
 24#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
 25
 26#define BICTCP_BETA_SCALE    1024	/* Scale factor for beta calculation
 27					 * max_cwnd = snd_cwnd * beta
 28					 */
 29#define	BICTCP_HZ		10	/* BIC HZ 2^10 = 1024 */
 30
 31/* Two methods of hybrid slow start */
 32#define HYSTART_ACK_TRAIN	0x1
 33#define HYSTART_DELAY		0x2
 34
 35/* Number of delay samples for detecting the increase of delay */
 36#define HYSTART_MIN_SAMPLES	8
 37#define HYSTART_DELAY_MIN	(4000U)	/* 4 ms */
 38#define HYSTART_DELAY_MAX	(16000U)	/* 16 ms */
 39#define HYSTART_DELAY_THRESH(x)	clamp(x, HYSTART_DELAY_MIN, HYSTART_DELAY_MAX)
 40
 41static int fast_convergence = 1;
 42static const int beta = 717;	/* = 717/1024 (BICTCP_BETA_SCALE) */
 43static int initial_ssthresh;
 44static const int bic_scale = 41;
 45static int tcp_friendliness = 1;
 46
 47static int hystart = 1;
 48static int hystart_detect = HYSTART_ACK_TRAIN | HYSTART_DELAY;
 49static int hystart_low_window = 16;
 50static int hystart_ack_delta_us = 2000;
 51
 52static const __u32 cube_rtt_scale = (bic_scale * 10);	/* 1024*c/rtt */
 53static const __u32 beta_scale = 8*(BICTCP_BETA_SCALE+beta) / 3
 54				/ (BICTCP_BETA_SCALE - beta);
 55/* calculate the "K" for (wmax-cwnd) = c/rtt * K^3
 56 *  so K = cubic_root( (wmax-cwnd)*rtt/c )
 57 * the unit of K is bictcp_HZ=2^10, not HZ
 58 *
 59 *  c = bic_scale >> 10
 60 *  rtt = 100ms
 61 *
 62 * the following code has been designed and tested for
 63 * cwnd < 1 million packets
 64 * RTT < 100 seconds
 65 * HZ < 100,000,000 (corresponding to a 10 nanosecond jiffy)
 66 */
 67
 68/* 1/c * 2^2*bictcp_HZ * srtt, 2^40 */
 69static const __u64 cube_factor = (__u64)(1ull << (10+3*BICTCP_HZ))
 70				/ (bic_scale * 10);
 71
 72/* BIC TCP Parameters */
 73struct bictcp {
 74	__u32	cnt;		/* increase cwnd by 1 after ACKs */
 75	__u32	last_max_cwnd;	/* last maximum snd_cwnd */
 76	__u32	last_cwnd;	/* the last snd_cwnd */
 77	__u32	last_time;	/* time when updated last_cwnd */
 78	__u32	bic_origin_point;/* origin point of bic function */
 79	__u32	bic_K;		/* time to origin point
 80				   from the beginning of the current epoch */
 81	__u32	delay_min;	/* min delay (usec) */
 82	__u32	epoch_start;	/* beginning of an epoch */
 83	__u32	ack_cnt;	/* number of acks */
 84	__u32	tcp_cwnd;	/* estimated tcp cwnd */
 85	__u16	unused;
 86	__u8	sample_cnt;	/* number of samples to decide curr_rtt */
 87	__u8	found;		/* the exit point is found? */
 88	__u32	round_start;	/* beginning of each round */
 89	__u32	end_seq;	/* end_seq of the round */
 90	__u32	last_ack;	/* last time when the ACK spacing is close */
 91	__u32	curr_rtt;	/* the minimum rtt of current round */
 92};
 93
 94static inline void bictcp_reset(struct bictcp *ca)
 95{
 96	ca->cnt = 0;
 97	ca->last_max_cwnd = 0;
 98	ca->last_cwnd = 0;
 99	ca->last_time = 0;
100	ca->bic_origin_point = 0;
101	ca->bic_K = 0;
102	ca->delay_min = 0;
103	ca->epoch_start = 0;
104	ca->ack_cnt = 0;
105	ca->tcp_cwnd = 0;
106	ca->found = 0;
107}
108
109extern unsigned long CONFIG_HZ __kconfig;
110#define HZ CONFIG_HZ
111#define USEC_PER_MSEC	1000UL
112#define USEC_PER_SEC	1000000UL
113#define USEC_PER_JIFFY	(USEC_PER_SEC / HZ)
114
115static __always_inline __u64 div64_u64(__u64 dividend, __u64 divisor)
116{
117	return dividend / divisor;
118}
119
120#define div64_ul div64_u64
121
122#define BITS_PER_U64 (sizeof(__u64) * 8)
123static __always_inline int fls64(__u64 x)
124{
125	int num = BITS_PER_U64 - 1;
126
127	if (x == 0)
128		return 0;
129
130	if (!(x & (~0ull << (BITS_PER_U64-32)))) {
131		num -= 32;
132		x <<= 32;
133	}
134	if (!(x & (~0ull << (BITS_PER_U64-16)))) {
135		num -= 16;
136		x <<= 16;
137	}
138	if (!(x & (~0ull << (BITS_PER_U64-8)))) {
139		num -= 8;
140		x <<= 8;
141	}
142	if (!(x & (~0ull << (BITS_PER_U64-4)))) {
143		num -= 4;
144		x <<= 4;
145	}
146	if (!(x & (~0ull << (BITS_PER_U64-2)))) {
147		num -= 2;
148		x <<= 2;
149	}
150	if (!(x & (~0ull << (BITS_PER_U64-1))))
151		num -= 1;
152
153	return num + 1;
154}
155
156static __always_inline __u32 bictcp_clock_us(const struct sock *sk)
157{
158	return tcp_sk(sk)->tcp_mstamp;
159}
160
161static __always_inline void bictcp_hystart_reset(struct sock *sk)
162{
163	struct tcp_sock *tp = tcp_sk(sk);
164	struct bictcp *ca = inet_csk_ca(sk);
165
166	ca->round_start = ca->last_ack = bictcp_clock_us(sk);
167	ca->end_seq = tp->snd_nxt;
168	ca->curr_rtt = ~0U;
169	ca->sample_cnt = 0;
170}
171
172/* "struct_ops/" prefix is a requirement */
173SEC("struct_ops/bpf_cubic_init")
174void BPF_PROG(bpf_cubic_init, struct sock *sk)
175{
176	struct bictcp *ca = inet_csk_ca(sk);
177
178	bictcp_reset(ca);
179
180	if (hystart)
181		bictcp_hystart_reset(sk);
182
183	if (!hystart && initial_ssthresh)
184		tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
185}
186
187/* "struct_ops" prefix is a requirement */
188SEC("struct_ops/bpf_cubic_cwnd_event")
189void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
190{
191	if (event == CA_EVENT_TX_START) {
192		struct bictcp *ca = inet_csk_ca(sk);
193		__u32 now = tcp_jiffies32;
194		__s32 delta;
195
196		delta = now - tcp_sk(sk)->lsndtime;
197
198		/* We were application limited (idle) for a while.
199		 * Shift epoch_start to keep cwnd growth to cubic curve.
200		 */
201		if (ca->epoch_start && delta > 0) {
202			ca->epoch_start += delta;
203			if (after(ca->epoch_start, now))
204				ca->epoch_start = now;
205		}
206		return;
207	}
208}
209
210/*
211 * cbrt(x) MSB values for x MSB values in [0..63].
212 * Precomputed then refined by hand - Willy Tarreau
213 *
214 * For x in [0..63],
215 *   v = cbrt(x << 18) - 1
216 *   cbrt(x) = (v[x] + 10) >> 6
217 */
218static const __u8 v[] = {
219	/* 0x00 */    0,   54,   54,   54,  118,  118,  118,  118,
220	/* 0x08 */  123,  129,  134,  138,  143,  147,  151,  156,
221	/* 0x10 */  157,  161,  164,  168,  170,  173,  176,  179,
222	/* 0x18 */  181,  185,  187,  190,  192,  194,  197,  199,
223	/* 0x20 */  200,  202,  204,  206,  209,  211,  213,  215,
224	/* 0x28 */  217,  219,  221,  222,  224,  225,  227,  229,
225	/* 0x30 */  231,  232,  234,  236,  237,  239,  240,  242,
226	/* 0x38 */  244,  245,  246,  248,  250,  251,  252,  254,
227};
228
229/* calculate the cubic root of x using a table lookup followed by one
230 * Newton-Raphson iteration.
231 * Avg err ~= 0.195%
232 */
233static __always_inline __u32 cubic_root(__u64 a)
234{
235	__u32 x, b, shift;
236
237	if (a < 64) {
238		/* a in [0..63] */
239		return ((__u32)v[(__u32)a] + 35) >> 6;
240	}
241
242	b = fls64(a);
243	b = ((b * 84) >> 8) - 1;
244	shift = (a >> (b * 3));
245
246	/* needed for the BPF verifier's bounds check on v */
247	if (shift >= 64)
248		return 0;
249
250	x = ((__u32)(((__u32)v[shift] + 10) << b)) >> 6;
251
252	/*
253	 * Newton-Raphson iteration
254	 *                         2
255	 * x    = ( 2 * x  +  a / x  ) / 3
256	 *  k+1          k         k
257	 */
258	x = (2 * x + (__u32)div64_u64(a, (__u64)x * (__u64)(x - 1)));
259	x = ((x * 341) >> 10);
260	return x;
261}
262
263/*
264 * Compute congestion window to use.
265 */
266static __always_inline void bictcp_update(struct bictcp *ca, __u32 cwnd,
267					  __u32 acked)
268{
269	__u32 delta, bic_target, max_cnt;
270	__u64 offs, t;
271
272	ca->ack_cnt += acked;	/* count the number of ACKed packets */
273
274	if (ca->last_cwnd == cwnd &&
275	    (__s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
276		return;
277
278	/* The CUBIC function can update ca->cnt at most once per jiffy.
279	 * On all cwnd reduction events, ca->epoch_start is set to 0,
280	 * which will force a recalculation of ca->cnt.
281	 */
282	if (ca->epoch_start && tcp_jiffies32 == ca->last_time)
283		goto tcp_friendliness;
284
285	ca->last_cwnd = cwnd;
286	ca->last_time = tcp_jiffies32;
287
288	if (ca->epoch_start == 0) {
289		ca->epoch_start = tcp_jiffies32;	/* record beginning */
290		ca->ack_cnt = acked;			/* start counting */
291	ca->tcp_cwnd = cwnd;			/* sync with cubic */
292
293		if (ca->last_max_cwnd <= cwnd) {
294			ca->bic_K = 0;
295			ca->bic_origin_point = cwnd;
296		} else {
297			/* Compute new K based on
298			 * (wmax-cwnd) * (srtt>>3 / HZ) / c * 2^(3*bictcp_HZ)
299			 */
300			ca->bic_K = cubic_root(cube_factor
301					       * (ca->last_max_cwnd - cwnd));
302			ca->bic_origin_point = ca->last_max_cwnd;
303		}
304	}
305
306	/* cubic function - calc*/
307	/* calculate c * time^3 / rtt,
308	 *  while considering overflow in calculation of time^3
309	 * (so time^3 is done by using 64 bit)
310	 * and without the support of division of 64bit numbers
311	 * (so all divisions are done by using 32 bit)
312 *  also NOTE the unit of those variables
313	 *	  time  = (t - K) / 2^bictcp_HZ
314	 *	  c = bic_scale >> 10
315	 * rtt  = (srtt >> 3) / HZ
316	 * !!! The following code does not have overflow problems,
317	 * if the cwnd < 1 million packets !!!
318	 */
319
320	t = (__s32)(tcp_jiffies32 - ca->epoch_start) * USEC_PER_JIFFY;
321	t += ca->delay_min;
322	/* change the unit from usec to bictcp_HZ */
323	t <<= BICTCP_HZ;
324	t /= USEC_PER_SEC;
325
326	if (t < ca->bic_K)		/* t - K */
327		offs = ca->bic_K - t;
328	else
329		offs = t - ca->bic_K;
330
331	/* c/rtt * (t-K)^3 */
332	delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ);
333	if (t < ca->bic_K)                            /* below origin*/
334		bic_target = ca->bic_origin_point - delta;
335	else                                          /* above origin*/
336		bic_target = ca->bic_origin_point + delta;
337
338	/* cubic function - calc bictcp_cnt*/
339	if (bic_target > cwnd) {
340		ca->cnt = cwnd / (bic_target - cwnd);
341	} else {
342		ca->cnt = 100 * cwnd;              /* very small increment*/
343	}
344
345	/*
346	 * The initial growth of cubic function may be too conservative
347	 * when the available bandwidth is still unknown.
348	 */
349	if (ca->last_max_cwnd == 0 && ca->cnt > 20)
350		ca->cnt = 20;	/* increase cwnd 5% per RTT */
351
352tcp_friendliness:
353	/* TCP Friendly */
354	if (tcp_friendliness) {
355		__u32 scale = beta_scale;
356		__u32 n;
357
358		/* update tcp cwnd */
359		delta = (cwnd * scale) >> 3;
360		if (ca->ack_cnt > delta && delta) {
361			n = ca->ack_cnt / delta;
362			ca->ack_cnt -= n * delta;
363			ca->tcp_cwnd += n;
364		}
365
366		if (ca->tcp_cwnd > cwnd) {	/* if bic is slower than tcp */
367			delta = ca->tcp_cwnd - cwnd;
368			max_cnt = cwnd / delta;
369			if (ca->cnt > max_cnt)
370				ca->cnt = max_cnt;
371		}
372	}
373
374	/* The maximum rate of cwnd increase CUBIC allows is 1 packet per
375	 * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT.
376	 */
377	ca->cnt = max(ca->cnt, 2U);
378}
379
380/* Or simply use the BPF_STRUCT_OPS macro to avoid the SEC boilerplate. */
381void BPF_STRUCT_OPS(bpf_cubic_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
382{
383	struct tcp_sock *tp = tcp_sk(sk);
384	struct bictcp *ca = inet_csk_ca(sk);
385
386	if (!tcp_is_cwnd_limited(sk))
387		return;
388
389	if (tcp_in_slow_start(tp)) {
390		if (hystart && after(ack, ca->end_seq))
391			bictcp_hystart_reset(sk);
392		acked = tcp_slow_start(tp, acked);
393		if (!acked)
394			return;
395	}
396	bictcp_update(ca, tp->snd_cwnd, acked);
397	tcp_cong_avoid_ai(tp, ca->cnt, acked);
398}
399
400__u32 BPF_STRUCT_OPS(bpf_cubic_recalc_ssthresh, struct sock *sk)
401{
402	const struct tcp_sock *tp = tcp_sk(sk);
403	struct bictcp *ca = inet_csk_ca(sk);
404
405	ca->epoch_start = 0;	/* end of epoch */
406
407	/* Wmax and fast convergence */
408	if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
409		ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
410			/ (2 * BICTCP_BETA_SCALE);
411	else
412		ca->last_max_cwnd = tp->snd_cwnd;
413
414	return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
415}
416
417void BPF_STRUCT_OPS(bpf_cubic_state, struct sock *sk, __u8 new_state)
418{
419	if (new_state == TCP_CA_Loss) {
420		bictcp_reset(inet_csk_ca(sk));
421		bictcp_hystart_reset(sk);
422	}
423}
424
425#define GSO_MAX_SIZE		65536
426
427/* Account for TSO/GRO delays.
428 * Otherwise short RTT flows could get too small ssthresh, since during
429 * slow start we begin with small TSO packets and ca->delay_min would
430 * not account for long aggregation delay when TSO packets get bigger.
431 * Ideally even with a very small RTT we would like to have at least one
432 * TSO packet being sent and received by GRO, and another one in qdisc layer.
433 * We apply another 100% factor because @rate is doubled at this point.
434 * We cap the cushion to 1ms.
435 */
436static __always_inline __u32 hystart_ack_delay(struct sock *sk)
437{
438	unsigned long rate;
439
440	rate = sk->sk_pacing_rate;
441	if (!rate)
442		return 0;
443	return min((__u64)USEC_PER_MSEC,
444		   div64_ul((__u64)GSO_MAX_SIZE * 4 * USEC_PER_SEC, rate));
445}
446
447static __always_inline void hystart_update(struct sock *sk, __u32 delay)
448{
449	struct tcp_sock *tp = tcp_sk(sk);
450	struct bictcp *ca = inet_csk_ca(sk);
451	__u32 threshold;
452
453	if (hystart_detect & HYSTART_ACK_TRAIN) {
454		__u32 now = bictcp_clock_us(sk);
455
456		/* first detection parameter - ack-train detection */
457		if ((__s32)(now - ca->last_ack) <= hystart_ack_delta_us) {
458			ca->last_ack = now;
459
460			threshold = ca->delay_min + hystart_ack_delay(sk);
461
462			/* Hystart ack train triggers if we get ack past
463			 * ca->delay_min/2.
464			 * Pacing might have delayed packets up to RTT/2
465			 * during slow start.
466			 */
467			if (sk->sk_pacing_status == SK_PACING_NONE)
468				threshold >>= 1;
469
470			if ((__s32)(now - ca->round_start) > threshold) {
471				ca->found = 1;
472				tp->snd_ssthresh = tp->snd_cwnd;
473			}
474		}
475	}
476
477	if (hystart_detect & HYSTART_DELAY) {
478		/* obtain the minimum delay of at least HYSTART_MIN_SAMPLES packets */
479		if (ca->curr_rtt > delay)
480			ca->curr_rtt = delay;
481		if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
482			ca->sample_cnt++;
483		} else {
484			if (ca->curr_rtt > ca->delay_min +
485			    HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
486				ca->found = 1;
487				tp->snd_ssthresh = tp->snd_cwnd;
488			}
489		}
490	}
491}
492
493void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk,
494		    const struct ack_sample *sample)
495{
496	const struct tcp_sock *tp = tcp_sk(sk);
497	struct bictcp *ca = inet_csk_ca(sk);
498	__u32 delay;
499
500	/* Some calls are for duplicates without timestamps */
501	if (sample->rtt_us < 0)
502		return;
503
504	/* Discard delay samples right after fast recovery */
505	if (ca->epoch_start && (__s32)(tcp_jiffies32 - ca->epoch_start) < HZ)
506		return;
507
508	delay = sample->rtt_us;
509	if (delay == 0)
510		delay = 1;
511
512	/* first time call or link delay decreases */
513	if (ca->delay_min == 0 || ca->delay_min > delay)
514		ca->delay_min = delay;
515
516	/* hystart triggers when cwnd is larger than some threshold */
517	if (!ca->found && tcp_in_slow_start(tp) && hystart &&
518	    tp->snd_cwnd >= hystart_low_window)
519		hystart_update(sk, delay);
520}
521
522extern __u32 tcp_reno_undo_cwnd(struct sock *sk) __ksym;
523
524__u32 BPF_STRUCT_OPS(bpf_cubic_undo_cwnd, struct sock *sk)
525{
526	return tcp_reno_undo_cwnd(sk);
527}
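/* Unlike the v5.9 variant above, which reimplemented Reno's undo locally
 * as max(tp->snd_cwnd, tp->prior_cwnd), this version calls the kernel's
 * own tcp_reno_undo_cwnd() through the __ksym declaration: the program is
 * linked against the kernel symbol (a kfunc call) instead of duplicating
 * the logic.
 */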
528
529SEC(".struct_ops")
530struct tcp_congestion_ops cubic = {
531	.init		= (void *)bpf_cubic_init,
532	.ssthresh	= (void *)bpf_cubic_recalc_ssthresh,
533	.cong_avoid	= (void *)bpf_cubic_cong_avoid,
534	.set_state	= (void *)bpf_cubic_state,
535	.undo_cwnd	= (void *)bpf_cubic_undo_cwnd,
536	.cwnd_event	= (void *)bpf_cubic_cwnd_event,
537	.pkts_acked     = (void *)bpf_cubic_acked,
538	.name		= "bpf_cubic",
539};