/* Bottleneck Bandwidth and RTT (BBR) congestion control
 *
 * BBR congestion control computes the sending rate based on the delivery
 * rate (throughput) estimated from ACKs. In a nutshell:
 *
 *   On each ACK, update our model of the network path:
 *      bottleneck_bandwidth = windowed_max(delivered / elapsed, 10 round trips)
 *      min_rtt = windowed_min(rtt, 10 seconds)
 *   pacing_rate = pacing_gain * bottleneck_bandwidth
 *   cwnd = max(cwnd_gain * bottleneck_bandwidth * min_rtt, 4)
 *
 * The core algorithm does not react directly to packet losses or delays,
 * although BBR may adjust the size of next send per ACK when loss is
 * observed, or adjust the sending rate if it estimates there is a
 * traffic policer, in order to keep the drop rate reasonable.
 *
 * Here is a state transition diagram for BBR:
 *
 *             |
 *             V
 *    +---> STARTUP  ----+
 *    |        |         |
 *    |        V         |
 *    |      DRAIN   ----+
 *    |        |         |
 *    |        V         |
 *    +---> PROBE_BW ----+
 *    |      ^    |      |
 *    |      |    |      |
 *    |      +----+      |
 *    |                  |
 *    +---- PROBE_RTT <--+
 *
 * A BBR flow starts in STARTUP, and ramps up its sending rate quickly.
 * When it estimates the pipe is full, it enters DRAIN to drain the queue.
 * In steady state a BBR flow only uses PROBE_BW and PROBE_RTT.
 * A long-lived BBR flow spends the vast majority of its time remaining
 * (repeatedly) in PROBE_BW, fully probing and utilizing the pipe's bandwidth
 * in a fair manner, with a small, bounded queue. *If* a flow has been
 * continuously sending for the entire min_rtt window, and hasn't seen an RTT
 * sample that matches or decreases its min_rtt estimate for 10 seconds, then
 * it briefly enters PROBE_RTT to cut inflight to a minimum value to re-probe
 * the path's two-way propagation delay (min_rtt). When exiting PROBE_RTT, if
 * we estimated that we reached the full bw of the pipe then we enter PROBE_BW;
 * otherwise we enter STARTUP to try to fill the pipe.
 *
 * BBR is described in detail in:
 *   "BBR: Congestion-Based Congestion Control",
 *   Neal Cardwell, Yuchung Cheng, C. Stephen Gunn, Soheil Hassas Yeganeh,
 *   Van Jacobson. ACM Queue, Vol. 14 No. 5, September-October 2016.
 *
 * There is a public e-mail list for discussing BBR development and testing:
 *   https://groups.google.com/forum/#!forum/bbr-dev
 *
 * NOTE: BBR might be used with the fq qdisc ("man tc-fq") with pacing enabled,
 * otherwise the TCP stack falls back to internal pacing using one high-
 * resolution timer per TCP socket, which may use more resources.
 */
#include <linux/module.h>
#include <net/tcp.h>
#include <linux/inet_diag.h>
#include <linux/inet.h>
#include <linux/random.h>
#include <linux/win_minmax.h>

/* Scale factor for rate in pkt/uSec unit to avoid truncation in bandwidth
 * estimation. The rate unit ~= (1500 bytes / 1 usec / 2^24) ~= 715 bps.
 * This handles bandwidths from 0.06pps (715bps) to 256Mpps (3Tbps) in a u32.
 * Since the minimum window is >=4 packets, the lower bound isn't
 * an issue. The upper bound isn't an issue with existing technologies.
 */
#define BW_SCALE 24
#define BW_UNIT (1 << BW_SCALE)
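
/* Worked example (illustrative, not part of the original comment): 10
 * Gbit/sec with 1500-byte packets is ~0.833 pkts/usec, stored here as
 * 0.833 * 2^24 ~= 13,981,000 -- comfortably within a u32. The smallest
 * nonzero value, 1/2^24 pkts/usec, is the ~715 bps unit noted above.
 */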

#define BBR_SCALE 8	/* scaling factor for fractions in BBR (e.g. gains) */
#define BBR_UNIT (1 << BBR_SCALE)
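
/* Illustrative note (added): gains are fixed-point fractions scaled by
 * BBR_UNIT, so a 1.25x gain is encoded as BBR_UNIT * 5 / 4 == 320, and
 * applying it is an integer multiply plus a shift, e.g.
 * (1000 * 320) >> BBR_SCALE == 1250.
 */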

/* BBR has the following modes for deciding how fast to send: */
enum bbr_mode {
	BBR_STARTUP,	/* ramp up sending rate rapidly to fill pipe */
	BBR_DRAIN,	/* drain any queue created during startup */
	BBR_PROBE_BW,	/* discover, share bw: pace around estimated bw */
	BBR_PROBE_RTT,	/* cut inflight to min to probe min_rtt */
};

/* BBR congestion control block */
struct bbr {
	u32	min_rtt_us;	        /* min RTT in min_rtt_win_sec window */
	u32	min_rtt_stamp;	        /* timestamp of min_rtt_us */
	u32	probe_rtt_done_stamp;   /* end time for BBR_PROBE_RTT mode */
	struct minmax bw;	/* Max recent delivery rate in pkts/uS << 24 */
	u32	rtt_cnt;	    /* count of packet-timed rounds elapsed */
	u32     next_rtt_delivered; /* scb->tx.delivered at end of round */
	u64	cycle_mstamp;	     /* time of this cycle phase start */
	u32     mode:3,		     /* current bbr_mode in state machine */
		prev_ca_state:3,     /* CA state on previous ACK */
		packet_conservation:1,  /* use packet conservation? */
		restore_cwnd:1,	     /* decided to revert cwnd to old value */
		round_start:1,	     /* start of packet-timed tx->ack round? */
		idle_restart:1,	     /* restarting after idle? */
		probe_rtt_round_done:1,  /* a BBR_PROBE_RTT round at 4 pkts? */
		unused:12,
		lt_is_sampling:1,    /* taking long-term ("LT") samples now? */
		lt_rtt_cnt:7,	     /* round trips in long-term interval */
		lt_use_bw:1;	     /* use lt_bw as our bw estimate? */
	u32	lt_bw;		     /* LT est delivery rate in pkts/uS << 24 */
	u32	lt_last_delivered;   /* LT intvl start: tp->delivered */
	u32	lt_last_stamp;	     /* LT intvl start: tp->delivered_mstamp */
	u32	lt_last_lost;	     /* LT intvl start: tp->lost */
	u32	pacing_gain:10,	/* current gain for setting pacing rate */
		cwnd_gain:10,	/* current gain for setting cwnd */
		full_bw_reached:1,   /* reached full bw in Startup? */
		full_bw_cnt:2,	/* number of rounds without large bw gains */
		cycle_idx:3,	/* current index in pacing_gain cycle array */
		has_seen_rtt:1, /* have we seen an RTT sample yet? */
		unused_b:5;
	u32	prior_cwnd;	/* prior cwnd upon entering loss recovery */
	u32	full_bw;	/* recent bw, to estimate if pipe is full */
};

#define CYCLE_LEN	8	/* number of phases in a pacing gain cycle */

/* Window length of bw filter (in rounds): */
static const int bbr_bw_rtts = CYCLE_LEN + 2;
/* Window length of min_rtt filter (in sec): */
static const u32 bbr_min_rtt_win_sec = 10;
/* Minimum time (in ms) spent at bbr_cwnd_min_target in BBR_PROBE_RTT mode: */
static const u32 bbr_probe_rtt_mode_ms = 200;
/* Skip TSO below the following bandwidth (bits/sec): */
static const int bbr_min_tso_rate = 1200000;

/* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
 * that will allow a smoothly increasing pacing rate that will double each RTT
 * and send the same number of packets per RTT that an un-paced, slow-starting
 * Reno or CUBIC flow would:
 */
static const int bbr_high_gain  = BBR_UNIT * 2885 / 1000 + 1;
/* The pacing gain of 1/high_gain in BBR_DRAIN is calculated to typically drain
 * the queue created in BBR_STARTUP in a single round:
 */
static const int bbr_drain_gain = BBR_UNIT * 1000 / 2885;
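/* For reference (illustrative arithmetic, added): with BBR_UNIT == 256,
 * bbr_high_gain evaluates to 739 (~2.886x == 2/ln(2)) and bbr_drain_gain to
 * 88 (~0.344x == ln(2)/2), so one DRAIN round at ~0.344x roughly undoes the
 * queue built by one STARTUP round at ~2.886x (2.886 * 0.344 ~= 0.99).
 */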
/* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs: */
static const int bbr_cwnd_gain  = BBR_UNIT * 2;
/* The pacing_gain values for the PROBE_BW gain cycle, to discover/share bw: */
static const int bbr_pacing_gain[] = {
	BBR_UNIT * 5 / 4,	/* probe for more available bw */
	BBR_UNIT * 3 / 4,	/* drain queue and/or yield bw to other flows */
	BBR_UNIT, BBR_UNIT, BBR_UNIT,	/* cruise at 1.0*bw to utilize pipe, */
	BBR_UNIT, BBR_UNIT, BBR_UNIT	/* without creating excess queue... */
};
/* Randomize the starting gain cycling phase over N phases: */
static const u32 bbr_cycle_rand = 7;

/* Try to keep at least this many packets in flight, if things go smoothly. For
 * smooth functioning, a sliding window protocol ACKing every other packet
 * needs at least 4 packets in flight:
 */
static const u32 bbr_cwnd_min_target = 4;

/* To estimate if BBR_STARTUP mode (i.e. high_gain) has filled pipe... */
/* If bw has increased significantly (1.25x), there may be more bw available: */
static const u32 bbr_full_bw_thresh = BBR_UNIT * 5 / 4;
/* But after 3 rounds w/o significant bw growth, estimate pipe is full: */
static const u32 bbr_full_bw_cnt = 3;

/* "long-term" ("LT") bandwidth estimator parameters... */
/* The minimum number of rounds in an LT bw sampling interval: */
static const u32 bbr_lt_intvl_min_rtts = 4;
/* If lost/delivered ratio > 20%, interval is "lossy" and we may be policed: */
static const u32 bbr_lt_loss_thresh = 50;
/* If 2 intervals have a bw ratio <= 1/8, their bw is "consistent": */
static const u32 bbr_lt_bw_ratio = BBR_UNIT / 8;
/* If 2 intervals have a bw diff <= 4 Kbit/sec their bw is "consistent": */
static const u32 bbr_lt_bw_diff = 4000 / 8;
/* If we estimate we're policed, use lt_bw for this many round trips: */
static const u32 bbr_lt_bw_max_rtts = 48;

/* Do we estimate that STARTUP filled the pipe? */
static bool bbr_full_bw_reached(const struct sock *sk)
{
	const struct bbr *bbr = inet_csk_ca(sk);

	return bbr->full_bw_reached;
}

/* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
static u32 bbr_max_bw(const struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	return minmax_get(&bbr->bw);
}

/* Return the estimated bandwidth of the path, in pkts/uS << BW_SCALE. */
static u32 bbr_bw(const struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);
}

/* Return rate in bytes per second, optionally with a gain.
 * The order here is chosen carefully to avoid overflow of u64. This should
 * work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
 */
static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
{
	rate *= tcp_mss_to_mtu(sk, tcp_sk(sk)->mss_cache);
	rate *= gain;
	rate >>= BBR_SCALE;
	rate *= USEC_PER_SEC;
	return rate >> BW_SCALE;
}

/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
static u32 bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
{
	u64 rate = bw;

	rate = bbr_rate_bytes_per_sec(sk, rate, gain);
	rate = min_t(u64, rate, sk->sk_max_pacing_rate);
	return rate;
}
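
/* Worked example (illustrative, assuming a ~1500-byte MTU): a bw of ~0.833
 * pkts/usec (10 Gbit/sec) is stored as ~13,981,000 (0.833 << 24). With
 * gain == BBR_UNIT the gain multiply and >> BBR_SCALE cancel, leaving
 * 13,981,000 * 1500 * USEC_PER_SEC >> 24 ~= 1.25e9 bytes/sec (~10 Gbit/sec),
 * with every intermediate product fitting comfortably in a u64.
 */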

/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u64 bw;
	u32 rtt_us;

	if (tp->srtt_us) {		/* any RTT sample yet? */
		rtt_us = max(tp->srtt_us >> 3, 1U);
		bbr->has_seen_rtt = 1;
	} else {			 /* no RTT sample yet */
		rtt_us = USEC_PER_MSEC;	 /* use nominal default RTT */
	}
	bw = (u64)tp->snd_cwnd * BW_UNIT;
	do_div(bw, rtt_us);
	sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
}
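
/* Illustrative numbers (added, assuming a ~1500-byte MTU): with
 * snd_cwnd == 10 and the nominal 1-ms default RTT, bw is 10 pkts/msec
 * (10,000 pkts/sec), ~120 Mbit/sec; scaling by high_gain (~2.886) gives an
 * initial pacing rate of ~346 Mbit/sec until real RTT samples arrive.
 */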

/* Pace using current bw estimate and a gain factor. In order to help drive the
 * network toward lower queues while maintaining high utilization and low
 * latency, the average pacing rate aims to be slightly (~1%) lower than the
 * estimated bandwidth. This is an important aspect of the design. In this
 * implementation this slightly lower pacing rate is achieved implicitly by not
 * including link-layer headers in the packet size used for the pacing rate.
 */
static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u32 rate = bbr_bw_to_pacing_rate(sk, bw, gain);

	if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
		bbr_init_pacing_rate_from_rtt(sk);
	if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
		sk->sk_pacing_rate = rate;
}

/* override sysctl_tcp_min_tso_segs */
static u32 bbr_min_tso_segs(struct sock *sk)
{
	return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
}
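
/* Note (added for clarity): bbr_min_tso_rate is in bits/sec while
 * sk_pacing_rate is in bytes/sec, so the >> 3 above converts units; below
 * 1.2 Mbit/sec (150,000 bytes/sec) we send one segment at a time rather
 * than batching segments via TSO at such low rates.
 */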

static u32 bbr_tso_segs_goal(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 segs, bytes;

	/* Sort of tcp_tso_autosize() but ignoring
	 * driver provided sk_gso_max_size.
	 */
	bytes = min_t(u32, sk->sk_pacing_rate >> sk->sk_pacing_shift,
		      GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
	segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));

	return min(segs, 0x7FU);
}

/* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
static void bbr_save_cwnd(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT)
		bbr->prior_cwnd = tp->snd_cwnd;  /* this cwnd is good enough */
	else  /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
		bbr->prior_cwnd = max(bbr->prior_cwnd, tp->snd_cwnd);
}

static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	if (event == CA_EVENT_TX_START && tp->app_limited) {
		bbr->idle_restart = 1;
		/* Avoid pointless buffer overflows: pace at est. bw if we don't
		 * need more speed (we're restarting from idle and app-limited).
		 */
		if (bbr->mode == BBR_PROBE_BW)
			bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
	}
}

/* Find target cwnd. Right-size the cwnd based on min RTT and the
 * estimated bottleneck bandwidth:
 *
 * cwnd = bw * min_rtt * gain = BDP * gain
 *
 * The key factor, gain, controls the amount of queue. While a small gain
 * builds a smaller queue, it becomes more vulnerable to noise in RTT
 * measurements (e.g., delayed ACKs or other ACK compression effects). This
 * noise may cause BBR to under-estimate the rate.
 *
 * To achieve full performance in high-speed paths, we budget enough cwnd to
 * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
 *   - one skb in sending host Qdisc,
 *   - one skb in sending host TSO/GSO engine
 *   - one skb being received by receiver host LRO/GRO/delayed-ACK engine
 * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
 * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
 * which allows 2 outstanding 2-packet sequences, to try to keep pipe
 * full even with ACK-every-other-packet delayed ACKs.
 */
static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u32 cwnd;
	u64 w;

	/* If we've never had a valid RTT sample, cap cwnd at the initial
	 * default. This should only happen when the connection is not using TCP
	 * timestamps and has retransmitted all of the SYN/SYNACK/data packets
	 * ACKed so far. In this case, an RTO can cut cwnd to 1, in which
	 * case we need to slow-start up toward something safe: TCP_INIT_CWND.
	 */
	if (unlikely(bbr->min_rtt_us == ~0U))	 /* no valid RTT samples yet? */
		return TCP_INIT_CWND;  /* be safe: cap at default initial cwnd*/

	w = (u64)bw * bbr->min_rtt_us;

	/* Apply a gain to the given value, then remove the BW_SCALE shift. */
	cwnd = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;

	/* Allow enough full-sized skbs in flight to utilize end systems. */
	cwnd += 3 * bbr_tso_segs_goal(sk);

	/* Reduce delayed ACKs by rounding up cwnd to the next even number. */
	cwnd = (cwnd + 1) & ~1U;

	return cwnd;
}
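
/* Worked example (illustrative, added): bw ~= 0.833 pkts/usec (10 Gbit/sec
 * at 1500 bytes) and min_rtt == 10,000 usec give a BDP of ~8,333 packets;
 * with gain == bbr_cwnd_gain (2x) the target is ~16,667 packets, plus the
 * 3 * tso_segs_goal budget, rounded up to the next even number.
 */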

/* An optimization in BBR to reduce losses: On the first round of recovery, we
 * follow the packet conservation principle: send P packets per P packets acked.
 * After that, we slow-start and send at most 2*P packets per P packets acked.
 * After recovery finishes, or upon undo, we restore the cwnd we had when
 * recovery started (capped by the target cwnd based on estimated BDP).
 *
 * TODO(ycheng/ncardwell): implement a rate-based approach.
 */
static bool bbr_set_cwnd_to_recover_or_restore(
	struct sock *sk, const struct rate_sample *rs, u32 acked, u32 *new_cwnd)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u8 prev_state = bbr->prev_ca_state, state = inet_csk(sk)->icsk_ca_state;
	u32 cwnd = tp->snd_cwnd;

	/* An ACK for P pkts should release at most 2*P packets. We do this
	 * in two steps. First, here we deduct the number of lost packets.
	 * Then, in bbr_set_cwnd() we slow start up toward the target cwnd.
	 */
	if (rs->losses > 0)
		cwnd = max_t(s32, cwnd - rs->losses, 1);

	if (state == TCP_CA_Recovery && prev_state != TCP_CA_Recovery) {
		/* Starting 1st round of Recovery, so do packet conservation. */
		bbr->packet_conservation = 1;
		bbr->next_rtt_delivered = tp->delivered;  /* start round now */
		/* Cut unused cwnd from app behavior, TSQ, or TSO deferral: */
		cwnd = tcp_packets_in_flight(tp) + acked;
	} else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) {
		/* Exiting loss recovery; restore cwnd saved before recovery. */
		bbr->restore_cwnd = 1;
		bbr->packet_conservation = 0;
	}
	bbr->prev_ca_state = state;

	if (bbr->restore_cwnd) {
		/* Restore cwnd after exiting loss recovery or PROBE_RTT. */
		cwnd = max(cwnd, bbr->prior_cwnd);
		bbr->restore_cwnd = 0;
	}

	if (bbr->packet_conservation) {
		*new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
		return true;	/* yes, using packet conservation */
	}
	*new_cwnd = cwnd;
	return false;
}

/* Slow-start up toward target cwnd (if bw estimate is growing, or packet loss
 * has drawn us down below target), or snap down to target if we're above it.
 */
static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
			 u32 acked, u32 bw, int gain)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u32 cwnd = 0, target_cwnd = 0;

	if (!acked)
		return;

	if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
		goto done;

	/* If we're below target cwnd, slow start cwnd toward target cwnd. */
	target_cwnd = bbr_target_cwnd(sk, bw, gain);
	if (bbr_full_bw_reached(sk))  /* only cut cwnd if we filled the pipe */
		cwnd = min(cwnd + acked, target_cwnd);
	else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
		cwnd = cwnd + acked;
	cwnd = max(cwnd, bbr_cwnd_min_target);

done:
	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);	/* apply global cap */
	if (bbr->mode == BBR_PROBE_RTT)  /* drain queue, refresh min_rtt */
		tp->snd_cwnd = min(tp->snd_cwnd, bbr_cwnd_min_target);
}

/* End cycle phase if it's time and/or we hit the phase's in-flight target. */
static bool bbr_is_next_cycle_phase(struct sock *sk,
				    const struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	bool is_full_length =
		tcp_stamp_us_delta(tp->delivered_mstamp, bbr->cycle_mstamp) >
		bbr->min_rtt_us;
	u32 inflight, bw;

	/* The pacing_gain of 1.0 paces at the estimated bw to try to fully
	 * use the pipe without increasing the queue.
	 */
	if (bbr->pacing_gain == BBR_UNIT)
		return is_full_length;		/* just use wall clock time */

	inflight = rs->prior_in_flight;  /* what was in-flight before ACK? */
	bw = bbr_max_bw(sk);

	/* A pacing_gain > 1.0 probes for bw by trying to raise inflight to at
	 * least pacing_gain*BDP; this may take more than min_rtt if min_rtt is
	 * small (e.g. on a LAN). We do not persist if packets are lost, since
	 * a path with small buffers may not hold that much.
	 */
	if (bbr->pacing_gain > BBR_UNIT)
		return is_full_length &&
			(rs->losses ||  /* perhaps pacing_gain*BDP won't fit */
			 inflight >= bbr_target_cwnd(sk, bw, bbr->pacing_gain));

	/* A pacing_gain < 1.0 tries to drain extra queue we added if bw
	 * probing didn't find more bw. If inflight falls to match BDP then we
	 * estimate queue is drained; persisting would underutilize the pipe.
	 */
	return is_full_length ||
		inflight <= bbr_target_cwnd(sk, bw, BBR_UNIT);
}

static void bbr_advance_cycle_phase(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->cycle_idx = (bbr->cycle_idx + 1) & (CYCLE_LEN - 1);
	bbr->cycle_mstamp = tp->delivered_mstamp;
	bbr->pacing_gain = bbr->lt_use_bw ? BBR_UNIT :
					    bbr_pacing_gain[bbr->cycle_idx];
}

/* Gain cycling: cycle pacing gain to converge to fair share of available bw. */
static void bbr_update_cycle_phase(struct sock *sk,
				   const struct rate_sample *rs)
{
	struct bbr *bbr = inet_csk_ca(sk);

	if (bbr->mode == BBR_PROBE_BW && bbr_is_next_cycle_phase(sk, rs))
		bbr_advance_cycle_phase(sk);
}
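
/* Note (illustrative, added): the bbr_pacing_gain cycle is balanced so its
 * average gain is 1.0: (5/4 + 3/4 + 6 * 1) / 8 == 1. Over a full cycle BBR
 * therefore paces at the estimated bw on average, while still probing above
 * it for one phase and draining below it in the next.
 */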

static void bbr_reset_startup_mode(struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->mode = BBR_STARTUP;
	bbr->pacing_gain = bbr_high_gain;
	bbr->cwnd_gain	 = bbr_high_gain;
}

static void bbr_reset_probe_bw_mode(struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->mode = BBR_PROBE_BW;
	bbr->pacing_gain = BBR_UNIT;
	bbr->cwnd_gain = bbr_cwnd_gain;
	bbr->cycle_idx = CYCLE_LEN - 1 - prandom_u32_max(bbr_cycle_rand);
	bbr_advance_cycle_phase(sk);	/* flip to next phase of gain cycle */
}

static void bbr_reset_mode(struct sock *sk)
{
	if (!bbr_full_bw_reached(sk))
		bbr_reset_startup_mode(sk);
	else
		bbr_reset_probe_bw_mode(sk);
}

/* Start a new long-term sampling interval. */
static void bbr_reset_lt_bw_sampling_interval(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->lt_last_stamp = div_u64(tp->delivered_mstamp, USEC_PER_MSEC);
	bbr->lt_last_delivered = tp->delivered;
	bbr->lt_last_lost = tp->lost;
	bbr->lt_rtt_cnt = 0;
}

/* Completely reset long-term bandwidth sampling. */
static void bbr_reset_lt_bw_sampling(struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->lt_bw = 0;
	bbr->lt_use_bw = 0;
	bbr->lt_is_sampling = false;
	bbr_reset_lt_bw_sampling_interval(sk);
}

/* Long-term bw sampling interval is done. Estimate whether we're policed. */
static void bbr_lt_bw_interval_done(struct sock *sk, u32 bw)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u32 diff;

	if (bbr->lt_bw) {  /* do we have bw from a previous interval? */
		/* Is new bw close to the lt_bw from the previous interval? */
		diff = abs(bw - bbr->lt_bw);
		if ((diff * BBR_UNIT <= bbr_lt_bw_ratio * bbr->lt_bw) ||
		    (bbr_rate_bytes_per_sec(sk, diff, BBR_UNIT) <=
		     bbr_lt_bw_diff)) {
			/* All criteria are met; estimate we're policed. */
			bbr->lt_bw = (bw + bbr->lt_bw) >> 1;  /* avg 2 intvls */
			bbr->lt_use_bw = 1;
			bbr->pacing_gain = BBR_UNIT;  /* try to avoid drops */
			bbr->lt_rtt_cnt = 0;
			return;
		}
	}
	bbr->lt_bw = bw;
	bbr_reset_lt_bw_sampling_interval(sk);
}

/* Token-bucket traffic policers are common (see "An Internet-Wide Analysis of
 * Traffic Policing", SIGCOMM 2016). BBR detects token-bucket policers and
 * explicitly models their policed rate, to reduce unnecessary losses. We
 * estimate that we're policed if we see 2 consecutive sampling intervals with
 * consistent throughput and high packet loss. If we think we're being policed,
 * set lt_bw to the "long-term" average delivery rate from those 2 intervals.
 */
static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u32 lost, delivered;
	u64 bw;
	u32 t;

	if (bbr->lt_use_bw) {	/* already using long-term rate, lt_bw? */
		if (bbr->mode == BBR_PROBE_BW && bbr->round_start &&
		    ++bbr->lt_rtt_cnt >= bbr_lt_bw_max_rtts) {
			bbr_reset_lt_bw_sampling(sk);    /* stop using lt_bw */
			bbr_reset_probe_bw_mode(sk);  /* restart gain cycling */
		}
		return;
	}

	/* Wait for the first loss before sampling, to let the policer exhaust
	 * its tokens and estimate the steady-state rate allowed by the policer.
	 * Starting samples earlier includes bursts that over-estimate the bw.
	 */
	if (!bbr->lt_is_sampling) {
		if (!rs->losses)
			return;
		bbr_reset_lt_bw_sampling_interval(sk);
		bbr->lt_is_sampling = true;
	}

	/* To avoid underestimates, reset sampling if we run out of data. */
	if (rs->is_app_limited) {
		bbr_reset_lt_bw_sampling(sk);
		return;
	}

	if (bbr->round_start)
		bbr->lt_rtt_cnt++;	/* count round trips in this interval */
	if (bbr->lt_rtt_cnt < bbr_lt_intvl_min_rtts)
		return;		/* sampling interval needs to be longer */
	if (bbr->lt_rtt_cnt > 4 * bbr_lt_intvl_min_rtts) {
		bbr_reset_lt_bw_sampling(sk);  /* interval is too long */
		return;
	}

	/* End sampling interval when a packet is lost, so we estimate the
	 * policer tokens were exhausted. Stopping the sampling before the
	 * tokens are exhausted under-estimates the policed rate.
	 */
	if (!rs->losses)
		return;

	/* Calculate packets lost and delivered in sampling interval. */
	lost = tp->lost - bbr->lt_last_lost;
	delivered = tp->delivered - bbr->lt_last_delivered;
	/* Is loss rate (lost/delivered) >= lt_loss_thresh? If not, wait. */
	if (!delivered || (lost << BBR_SCALE) < bbr_lt_loss_thresh * delivered)
		return;

	/* Find average delivery rate in this sampling interval. */
	t = div_u64(tp->delivered_mstamp, USEC_PER_MSEC) - bbr->lt_last_stamp;
	if ((s32)t < 1)
		return;		/* interval is less than one ms, so wait */
	/* Check if we can multiply without overflow: */
	if (t >= ~0U / USEC_PER_MSEC) {
		bbr_reset_lt_bw_sampling(sk);  /* interval too long; reset */
		return;
	}
	t *= USEC_PER_MSEC;
	bw = (u64)delivered * BW_UNIT;
	do_div(bw, t);
	bbr_lt_bw_interval_done(sk, bw);
}
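
/* Illustrative check of the loss threshold above (added): with
 * delivered == 100 and lost == 20, (20 << BBR_SCALE) == 5120 >=
 * bbr_lt_loss_thresh * 100 == 5000, so a ~20% loss rate is enough to end a
 * sampling interval, matching the bbr_lt_loss_thresh comment (50/256 ~= 19.5%).
 */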

/* Estimate the bandwidth based on how fast packets are delivered */
static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u64 bw;

	bbr->round_start = 0;
	if (rs->delivered < 0 || rs->interval_us <= 0)
		return; /* Not a valid observation */

	/* See if we've reached the next RTT */
	if (!before(rs->prior_delivered, bbr->next_rtt_delivered)) {
		bbr->next_rtt_delivered = tp->delivered;
		bbr->rtt_cnt++;
		bbr->round_start = 1;
		bbr->packet_conservation = 0;
	}

	bbr_lt_bw_sampling(sk, rs);

	/* Divide delivered by the interval to find a (lower bound) bottleneck
	 * bandwidth sample. Delivered is in packets and interval_us in uS and
	 * ratio will be <<1 for most connections. So delivered is first scaled.
	 */
	bw = (u64)rs->delivered * BW_UNIT;
	do_div(bw, rs->interval_us);

	/* If this sample is application-limited, it is likely to have a very
	 * low delivered count that represents application behavior rather than
	 * the available network rate. Such a sample could drag down estimated
	 * bw, causing needless slow-down. Thus, to continue to send at the
	 * last measured network rate, we filter out app-limited samples unless
	 * they describe the path bw at least as well as our bw model.
	 *
	 * So the goal during app-limited phase is to proceed with the best
	 * network rate no matter how long. We automatically leave this
	 * phase when app writes faster than the network can deliver :)
	 */
	if (!rs->is_app_limited || bw >= bbr_max_bw(sk)) {
		/* Incorporate new sample into our max bw filter. */
		minmax_running_max(&bbr->bw, bbr_bw_rtts, bbr->rtt_cnt, bw);
	}
}
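
/* Worked example (illustrative, added): rs->delivered == 10 packets over
 * rs->interval_us == 1000 usec gives bw == (10 << 24) / 1000 ~= 167,772,
 * i.e. 0.01 pkts/usec (~120 Mbit/sec at 1500 bytes). The windowed max filter
 * then keeps the largest such sample over the last bbr_bw_rtts (10) rounds.
 */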

/* Estimate when the pipe is full, using the change in delivery rate: BBR
 * estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
 * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
 * rounds. Why 3 rounds: 1: rwin autotuning grows the rwin, 2: we fill the
 * higher rwin, 3: we get higher delivery rate samples. Or transient
 * cross-traffic or radio noise can go away. CUBIC Hystart shares a similar
 * design goal, but uses delay and inter-ACK spacing instead of bandwidth.
 */
static void bbr_check_full_bw_reached(struct sock *sk,
				      const struct rate_sample *rs)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u32 bw_thresh;

	if (bbr_full_bw_reached(sk) || !bbr->round_start || rs->is_app_limited)
		return;

	bw_thresh = (u64)bbr->full_bw * bbr_full_bw_thresh >> BBR_SCALE;
	if (bbr_max_bw(sk) >= bw_thresh) {
		bbr->full_bw = bbr_max_bw(sk);
		bbr->full_bw_cnt = 0;
		return;
	}
	++bbr->full_bw_cnt;
	bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt;
}
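
/* Illustrative (added): if bbr->full_bw == 1000 then bw_thresh == 1250, so
 * the filtered bw must keep growing by >= 25% per round to remain in
 * STARTUP; after 3 consecutive rounds below that threshold we set
 * full_bw_reached.
 */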

/* If pipe is probably full, drain the queue and then enter steady-state. */
static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
{
	struct bbr *bbr = inet_csk_ca(sk);

	if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
		bbr->mode = BBR_DRAIN;	/* drain queue we created */
		bbr->pacing_gain = bbr_drain_gain;	/* pace slow to drain */
		bbr->cwnd_gain = bbr_high_gain;	/* maintain cwnd */
		tcp_sk(sk)->snd_ssthresh =
				bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT);
	}	/* fall through to check if in-flight is already small: */
	if (bbr->mode == BBR_DRAIN &&
	    tcp_packets_in_flight(tcp_sk(sk)) <=
	    bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT))
		bbr_reset_probe_bw_mode(sk);  /* we estimate queue is drained */
}

/* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
 * periodically drain the bottleneck queue, to converge to measure the true
 * min_rtt (unloaded propagation delay). This allows the flows to keep queues
 * small (reducing queuing delay and packet loss) and achieve fairness among
 * BBR flows.
 *
 * The min_rtt filter window is 10 seconds. When the min_rtt estimate expires,
 * we enter PROBE_RTT mode and cap the cwnd at bbr_cwnd_min_target=4 packets.
 * After at least bbr_probe_rtt_mode_ms=200ms and at least one packet-timed
 * round trip elapsed with that flight size <= 4, we leave PROBE_RTT mode and
 * re-enter the previous mode. BBR uses 200ms to approximately bound the
 * performance penalty of PROBE_RTT's cwnd capping to roughly 2% (200ms/10s).
 *
 * Note that flows need only pay 2% if they are busy sending over the last 10
 * seconds. Interactive applications (e.g., Web, RPCs, video chunks) often have
 * natural silences or low-rate periods within 10 seconds where the rate is low
 * enough for long enough to drain its queue in the bottleneck. We pick up
 * these min RTT measurements opportunistically with our min_rtt filter. :-)
 */
static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	bool filter_expired;

	/* Track min RTT seen in the min_rtt_win_sec filter window: */
	filter_expired = after(tcp_jiffies32,
			       bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
	if (rs->rtt_us >= 0 &&
	    (rs->rtt_us <= bbr->min_rtt_us ||
	     (filter_expired && !rs->is_ack_delayed))) {
		bbr->min_rtt_us = rs->rtt_us;
		bbr->min_rtt_stamp = tcp_jiffies32;
	}

	if (bbr_probe_rtt_mode_ms > 0 && filter_expired &&
	    !bbr->idle_restart && bbr->mode != BBR_PROBE_RTT) {
		bbr->mode = BBR_PROBE_RTT;  /* dip, drain queue */
		bbr->pacing_gain = BBR_UNIT;
		bbr->cwnd_gain = BBR_UNIT;
		bbr_save_cwnd(sk);  /* note cwnd so we can restore it */
		bbr->probe_rtt_done_stamp = 0;
	}

	if (bbr->mode == BBR_PROBE_RTT) {
		/* Ignore low rate samples during this mode. */
		tp->app_limited =
			(tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
		/* Maintain min packets in flight for max(200 ms, 1 round). */
		if (!bbr->probe_rtt_done_stamp &&
		    tcp_packets_in_flight(tp) <= bbr_cwnd_min_target) {
			bbr->probe_rtt_done_stamp = tcp_jiffies32 +
				msecs_to_jiffies(bbr_probe_rtt_mode_ms);
			bbr->probe_rtt_round_done = 0;
			bbr->next_rtt_delivered = tp->delivered;
		} else if (bbr->probe_rtt_done_stamp) {
			if (bbr->round_start)
				bbr->probe_rtt_round_done = 1;
			if (bbr->probe_rtt_round_done &&
			    after(tcp_jiffies32, bbr->probe_rtt_done_stamp)) {
				bbr->min_rtt_stamp = tcp_jiffies32;
				bbr->restore_cwnd = 1;  /* snap to prior_cwnd */
				bbr_reset_mode(sk);
			}
		}
	}
	/* Restart after idle ends only once we process a new S/ACK for data */
	if (rs->delivered > 0)
		bbr->idle_restart = 0;
}

static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
{
	bbr_update_bw(sk, rs);
	bbr_update_cycle_phase(sk, rs);
	bbr_check_full_bw_reached(sk, rs);
	bbr_check_drain(sk, rs);
	bbr_update_min_rtt(sk, rs);
}

static void bbr_main(struct sock *sk, const struct rate_sample *rs)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u32 bw;

	bbr_update_model(sk, rs);

	bw = bbr_bw(sk);
	bbr_set_pacing_rate(sk, bw, bbr->pacing_gain);
	bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain);
}

static void bbr_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->prior_cwnd = 0;
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	bbr->rtt_cnt = 0;
	bbr->next_rtt_delivered = 0;
	bbr->prev_ca_state = TCP_CA_Open;
	bbr->packet_conservation = 0;

	bbr->probe_rtt_done_stamp = 0;
	bbr->probe_rtt_round_done = 0;
	bbr->min_rtt_us = tcp_min_rtt(tp);
	bbr->min_rtt_stamp = tcp_jiffies32;

	minmax_reset(&bbr->bw, bbr->rtt_cnt, 0);  /* init max bw to 0 */

	bbr->has_seen_rtt = 0;
	bbr_init_pacing_rate_from_rtt(sk);

	bbr->restore_cwnd = 0;
	bbr->round_start = 0;
	bbr->idle_restart = 0;
	bbr->full_bw_reached = 0;
	bbr->full_bw = 0;
	bbr->full_bw_cnt = 0;
	bbr->cycle_mstamp = 0;
	bbr->cycle_idx = 0;
	bbr_reset_lt_bw_sampling(sk);
	bbr_reset_startup_mode(sk);

	cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
}

static u32 bbr_sndbuf_expand(struct sock *sk)
{
	/* Provision 3 * cwnd since BBR may slow-start even during recovery. */
	return 3;
}

/* In theory BBR does not need to undo the cwnd since it does not
 * always reduce cwnd on losses (see bbr_main()). Keep it for now.
 */
static u32 bbr_undo_cwnd(struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->full_bw = 0;   /* spurious slow-down; reset full pipe detection */
	bbr->full_bw_cnt = 0;
	bbr_reset_lt_bw_sampling(sk);
	return tcp_sk(sk)->snd_cwnd;
}

/* Entering loss recovery, so save cwnd for when we exit or undo recovery. */
static u32 bbr_ssthresh(struct sock *sk)
{
	bbr_save_cwnd(sk);
	return tcp_sk(sk)->snd_ssthresh;
}

static size_t bbr_get_info(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info)
{
	if (ext & (1 << (INET_DIAG_BBRINFO - 1)) ||
	    ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct bbr *bbr = inet_csk_ca(sk);
		u64 bw = bbr_bw(sk);

		bw = bw * tp->mss_cache * USEC_PER_SEC >> BW_SCALE;
		memset(&info->bbr, 0, sizeof(info->bbr));
		info->bbr.bbr_bw_lo		= (u32)bw;
		info->bbr.bbr_bw_hi		= (u32)(bw >> 32);
		info->bbr.bbr_min_rtt		= bbr->min_rtt_us;
		info->bbr.bbr_pacing_gain	= bbr->pacing_gain;
		info->bbr.bbr_cwnd_gain		= bbr->cwnd_gain;
		*attr = INET_DIAG_BBRINFO;
		return sizeof(info->bbr);
	}
	return 0;
}

static void bbr_set_state(struct sock *sk, u8 new_state)
{
	struct bbr *bbr = inet_csk_ca(sk);

	if (new_state == TCP_CA_Loss) {
		struct rate_sample rs = { .losses = 1 };

		bbr->prev_ca_state = TCP_CA_Loss;
		bbr->full_bw = 0;
		bbr->round_start = 1;	/* treat RTO like end of a round */
		bbr_lt_bw_sampling(sk, &rs);
	}
}

static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "bbr",
	.owner		= THIS_MODULE,
	.init		= bbr_init,
	.cong_control	= bbr_main,
	.sndbuf_expand	= bbr_sndbuf_expand,
	.undo_cwnd	= bbr_undo_cwnd,
	.cwnd_event	= bbr_cwnd_event,
	.ssthresh	= bbr_ssthresh,
	.min_tso_segs	= bbr_min_tso_segs,
	.get_info	= bbr_get_info,
	.set_state	= bbr_set_state,
};

static int __init bbr_register(void)
{
	BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_bbr_cong_ops);
}

static void __exit bbr_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_bbr_cong_ops);
}

module_init(bbr_register);
module_exit(bbr_unregister);

MODULE_AUTHOR("Van Jacobson <vanj@google.com>");
MODULE_AUTHOR("Neal Cardwell <ncardwell@google.com>");
MODULE_AUTHOR("Yuchung Cheng <ycheng@google.com>");
MODULE_AUTHOR("Soheil Hassas Yeganeh <soheil@google.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("TCP BBR (Bottleneck Bandwidth and RTT)");
   1/* Bottleneck Bandwidth and RTT (BBR) congestion control
   2 *
   3 * BBR congestion control computes the sending rate based on the delivery
   4 * rate (throughput) estimated from ACKs. In a nutshell:
   5 *
   6 *   On each ACK, update our model of the network path:
   7 *      bottleneck_bandwidth = windowed_max(delivered / elapsed, 10 round trips)
   8 *      min_rtt = windowed_min(rtt, 10 seconds)
   9 *   pacing_rate = pacing_gain * bottleneck_bandwidth
  10 *   cwnd = max(cwnd_gain * bottleneck_bandwidth * min_rtt, 4)
  11 *
  12 * The core algorithm does not react directly to packet losses or delays,
  13 * although BBR may adjust the size of next send per ACK when loss is
  14 * observed, or adjust the sending rate if it estimates there is a
  15 * traffic policer, in order to keep the drop rate reasonable.
  16 *
  17 * Here is a state transition diagram for BBR:
  18 *
  19 *             |
  20 *             V
  21 *    +---> STARTUP  ----+
  22 *    |        |         |
  23 *    |        V         |
  24 *    |      DRAIN   ----+
  25 *    |        |         |
  26 *    |        V         |
  27 *    +---> PROBE_BW ----+
  28 *    |      ^    |      |
  29 *    |      |    |      |
  30 *    |      +----+      |
  31 *    |                  |
  32 *    +---- PROBE_RTT <--+
  33 *
  34 * A BBR flow starts in STARTUP, and ramps up its sending rate quickly.
  35 * When it estimates the pipe is full, it enters DRAIN to drain the queue.
  36 * In steady state a BBR flow only uses PROBE_BW and PROBE_RTT.
  37 * A long-lived BBR flow spends the vast majority of its time remaining
  38 * (repeatedly) in PROBE_BW, fully probing and utilizing the pipe's bandwidth
  39 * in a fair manner, with a small, bounded queue. *If* a flow has been
  40 * continuously sending for the entire min_rtt window, and hasn't seen an RTT
  41 * sample that matches or decreases its min_rtt estimate for 10 seconds, then
  42 * it briefly enters PROBE_RTT to cut inflight to a minimum value to re-probe
  43 * the path's two-way propagation delay (min_rtt). When exiting PROBE_RTT, if
  44 * we estimated that we reached the full bw of the pipe then we enter PROBE_BW;
  45 * otherwise we enter STARTUP to try to fill the pipe.
  46 *
  47 * BBR is described in detail in:
  48 *   "BBR: Congestion-Based Congestion Control",
  49 *   Neal Cardwell, Yuchung Cheng, C. Stephen Gunn, Soheil Hassas Yeganeh,
  50 *   Van Jacobson. ACM Queue, Vol. 14 No. 5, September-October 2016.
  51 *
  52 * There is a public e-mail list for discussing BBR development and testing:
  53 *   https://groups.google.com/forum/#!forum/bbr-dev
  54 *
  55 * NOTE: BBR might be used with the fq qdisc ("man tc-fq") with pacing enabled,
  56 * otherwise TCP stack falls back to an internal pacing using one high
  57 * resolution timer per TCP socket and may use more resources.
  58 */
  59#include <linux/module.h>
  60#include <net/tcp.h>
  61#include <linux/inet_diag.h>
  62#include <linux/inet.h>
  63#include <linux/random.h>
  64#include <linux/win_minmax.h>
  65
  66/* Scale factor for rate in pkt/uSec unit to avoid truncation in bandwidth
  67 * estimation. The rate unit ~= (1500 bytes / 1 usec / 2^24) ~= 715 bps.
  68 * This handles bandwidths from 0.06pps (715bps) to 256Mpps (3Tbps) in a u32.
  69 * Since the minimum window is >=4 packets, the lower bound isn't
  70 * an issue. The upper bound isn't an issue with existing technologies.
  71 */
  72#define BW_SCALE 24
  73#define BW_UNIT (1 << BW_SCALE)
  74
  75#define BBR_SCALE 8	/* scaling factor for fractions in BBR (e.g. gains) */
  76#define BBR_UNIT (1 << BBR_SCALE)
  77
  78/* BBR has the following modes for deciding how fast to send: */
  79enum bbr_mode {
  80	BBR_STARTUP,	/* ramp up sending rate rapidly to fill pipe */
  81	BBR_DRAIN,	/* drain any queue created during startup */
  82	BBR_PROBE_BW,	/* discover, share bw: pace around estimated bw */
  83	BBR_PROBE_RTT,	/* cut inflight to min to probe min_rtt */
  84};
  85
  86/* BBR congestion control block */
  87struct bbr {
  88	u32	min_rtt_us;	        /* min RTT in min_rtt_win_sec window */
  89	u32	min_rtt_stamp;	        /* timestamp of min_rtt_us */
  90	u32	probe_rtt_done_stamp;   /* end time for BBR_PROBE_RTT mode */
  91	struct minmax bw;	/* Max recent delivery rate in pkts/uS << 24 */
  92	u32	rtt_cnt;	    /* count of packet-timed rounds elapsed */
  93	u32     next_rtt_delivered; /* scb->tx.delivered at end of round */
  94	u64	cycle_mstamp;	     /* time of this cycle phase start */
  95	u32     mode:3,		     /* current bbr_mode in state machine */
  96		prev_ca_state:3,     /* CA state on previous ACK */
  97		packet_conservation:1,  /* use packet conservation? */
 
  98		round_start:1,	     /* start of packet-timed tx->ack round? */
  99		idle_restart:1,	     /* restarting after idle? */
 100		probe_rtt_round_done:1,  /* a BBR_PROBE_RTT round at 4 pkts? */
 101		unused:13,
 102		lt_is_sampling:1,    /* taking long-term ("LT") samples now? */
 103		lt_rtt_cnt:7,	     /* round trips in long-term interval */
 104		lt_use_bw:1;	     /* use lt_bw as our bw estimate? */
 105	u32	lt_bw;		     /* LT est delivery rate in pkts/uS << 24 */
 106	u32	lt_last_delivered;   /* LT intvl start: tp->delivered */
 107	u32	lt_last_stamp;	     /* LT intvl start: tp->delivered_mstamp */
 108	u32	lt_last_lost;	     /* LT intvl start: tp->lost */
 109	u32	pacing_gain:10,	/* current gain for setting pacing rate */
 110		cwnd_gain:10,	/* current gain for setting cwnd */
 111		full_bw_reached:1,   /* reached full bw in Startup? */
 112		full_bw_cnt:2,	/* number of rounds without large bw gains */
 113		cycle_idx:3,	/* current index in pacing_gain cycle array */
 114		has_seen_rtt:1, /* have we seen an RTT sample yet? */
 115		unused_b:5;
 116	u32	prior_cwnd;	/* prior cwnd upon entering loss recovery */
 117	u32	full_bw;	/* recent bw, to estimate if pipe is full */
 118
 119	/* For tracking ACK aggregation: */
 120	u64	ack_epoch_mstamp;	/* start of ACK sampling epoch */
 121	u16	extra_acked[2];		/* max excess data ACKed in epoch */
 122	u32	ack_epoch_acked:20,	/* packets (S)ACKed in sampling epoch */
 123		extra_acked_win_rtts:5,	/* age of extra_acked, in round trips */
 124		extra_acked_win_idx:1,	/* current index in extra_acked array */
 125		unused_c:6;
 126};
 127
 128#define CYCLE_LEN	8	/* number of phases in a pacing gain cycle */
 129
 130/* Window length of bw filter (in rounds): */
 131static const int bbr_bw_rtts = CYCLE_LEN + 2;
 132/* Window length of min_rtt filter (in sec): */
 133static const u32 bbr_min_rtt_win_sec = 10;
 134/* Minimum time (in ms) spent at bbr_cwnd_min_target in BBR_PROBE_RTT mode: */
 135static const u32 bbr_probe_rtt_mode_ms = 200;
 136/* Skip TSO below the following bandwidth (bits/sec): */
 137static const int bbr_min_tso_rate = 1200000;
 138
 139/* Pace at ~1% below estimated bw, on average, to reduce queue at bottleneck.
 140 * In order to help drive the network toward lower queues and low latency while
 141 * maintaining high utilization, the average pacing rate aims to be slightly
 142 * lower than the estimated bandwidth. This is an important aspect of the
 143 * design.
 144 */
 145static const int bbr_pacing_margin_percent = 1;
 146
 147/* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
 148 * that will allow a smoothly increasing pacing rate that will double each RTT
 149 * and send the same number of packets per RTT that an un-paced, slow-starting
 150 * Reno or CUBIC flow would:
 151 */
 152static const int bbr_high_gain  = BBR_UNIT * 2885 / 1000 + 1;
 153/* The pacing gain of 1/high_gain in BBR_DRAIN is calculated to typically drain
 154 * the queue created in BBR_STARTUP in a single round:
 155 */
 156static const int bbr_drain_gain = BBR_UNIT * 1000 / 2885;
 157/* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs: */
 158static const int bbr_cwnd_gain  = BBR_UNIT * 2;
 159/* The pacing_gain values for the PROBE_BW gain cycle, to discover/share bw: */
 160static const int bbr_pacing_gain[] = {
 161	BBR_UNIT * 5 / 4,	/* probe for more available bw */
 162	BBR_UNIT * 3 / 4,	/* drain queue and/or yield bw to other flows */
 163	BBR_UNIT, BBR_UNIT, BBR_UNIT,	/* cruise at 1.0*bw to utilize pipe, */
 164	BBR_UNIT, BBR_UNIT, BBR_UNIT	/* without creating excess queue... */
 165};
 166/* Randomize the starting gain cycling phase over N phases: */
 167static const u32 bbr_cycle_rand = 7;
 168
 169/* Try to keep at least this many packets in flight, if things go smoothly. For
 170 * smooth functioning, a sliding window protocol ACKing every other packet
 171 * needs at least 4 packets in flight:
 172 */
 173static const u32 bbr_cwnd_min_target = 4;
 174
 175/* To estimate if BBR_STARTUP mode (i.e. high_gain) has filled pipe... */
 176/* If bw has increased significantly (1.25x), there may be more bw available: */
 177static const u32 bbr_full_bw_thresh = BBR_UNIT * 5 / 4;
 178/* But after 3 rounds w/o significant bw growth, estimate pipe is full: */
 179static const u32 bbr_full_bw_cnt = 3;
 180
 181/* "long-term" ("LT") bandwidth estimator parameters... */
 182/* The minimum number of rounds in an LT bw sampling interval: */
 183static const u32 bbr_lt_intvl_min_rtts = 4;
 184/* If lost/delivered ratio > 20%, interval is "lossy" and we may be policed: */
 185static const u32 bbr_lt_loss_thresh = 50;
 186/* If 2 intervals have a bw ratio <= 1/8, their bw is "consistent": */
 187static const u32 bbr_lt_bw_ratio = BBR_UNIT / 8;
 188/* If 2 intervals have a bw diff <= 4 Kbit/sec their bw is "consistent": */
 189static const u32 bbr_lt_bw_diff = 4000 / 8;
 190/* If we estimate we're policed, use lt_bw for this many round trips: */
 191static const u32 bbr_lt_bw_max_rtts = 48;
 192
 193/* Gain factor for adding extra_acked to target cwnd: */
 194static const int bbr_extra_acked_gain = BBR_UNIT;
 195/* Window length of extra_acked window. */
 196static const u32 bbr_extra_acked_win_rtts = 5;
 197/* Max allowed val for ack_epoch_acked, after which sampling epoch is reset */
 198static const u32 bbr_ack_epoch_acked_reset_thresh = 1U << 20;
 199/* Time period for clamping cwnd increment due to ack aggregation */
 200static const u32 bbr_extra_acked_max_us = 100 * 1000;
 201
 202static void bbr_check_probe_rtt_done(struct sock *sk);
 203
 204/* Do we estimate that STARTUP filled the pipe? */
 205static bool bbr_full_bw_reached(const struct sock *sk)
 206{
 207	const struct bbr *bbr = inet_csk_ca(sk);
 208
 209	return bbr->full_bw_reached;
 210}
 211
 212/* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
 213static u32 bbr_max_bw(const struct sock *sk)
 214{
 215	struct bbr *bbr = inet_csk_ca(sk);
 216
 217	return minmax_get(&bbr->bw);
 218}
 219
 220/* Return the estimated bandwidth of the path, in pkts/uS << BW_SCALE. */
 221static u32 bbr_bw(const struct sock *sk)
 222{
 223	struct bbr *bbr = inet_csk_ca(sk);
 224
 225	return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);
 226}
 227
 228/* Return maximum extra acked in past k-2k round trips,
 229 * where k = bbr_extra_acked_win_rtts.
 230 */
 231static u16 bbr_extra_acked(const struct sock *sk)
 232{
 233	struct bbr *bbr = inet_csk_ca(sk);
 234
 235	return max(bbr->extra_acked[0], bbr->extra_acked[1]);
 236}
 237
 238/* Return rate in bytes per second, optionally with a gain.
 239 * The order here is chosen carefully to avoid overflow of u64. This should
 240 * work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
 241 */
 242static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
 243{
 244	unsigned int mss = tcp_sk(sk)->mss_cache;
 245
 246	rate *= mss;
 247	rate *= gain;
 248	rate >>= BBR_SCALE;
 249	rate *= USEC_PER_SEC / 100 * (100 - bbr_pacing_margin_percent);
 250	return rate >> BW_SCALE;
 251}
 252
 253/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
 254static unsigned long bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
 255{
 256	u64 rate = bw;
 257
 258	rate = bbr_rate_bytes_per_sec(sk, rate, gain);
 259	rate = min_t(u64, rate, sk->sk_max_pacing_rate);
 260	return rate;
 261}
 262
 263/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
 264static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
 265{
 266	struct tcp_sock *tp = tcp_sk(sk);
 267	struct bbr *bbr = inet_csk_ca(sk);
 268	u64 bw;
 269	u32 rtt_us;
 270
 271	if (tp->srtt_us) {		/* any RTT sample yet? */
 272		rtt_us = max(tp->srtt_us >> 3, 1U);
 273		bbr->has_seen_rtt = 1;
 274	} else {			 /* no RTT sample yet */
 275		rtt_us = USEC_PER_MSEC;	 /* use nominal default RTT */
 276	}
 277	bw = (u64)tp->snd_cwnd * BW_UNIT;
 278	do_div(bw, rtt_us);
 279	sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
 280}
 281
 282/* Pace using current bw estimate and a gain factor. */
 
 
 
 
 
 
 283static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
 284{
 285	struct tcp_sock *tp = tcp_sk(sk);
 286	struct bbr *bbr = inet_csk_ca(sk);
 287	unsigned long rate = bbr_bw_to_pacing_rate(sk, bw, gain);
 288
 289	if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
 290		bbr_init_pacing_rate_from_rtt(sk);
 291	if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
 292		sk->sk_pacing_rate = rate;
 293}
 294
 295/* override sysctl_tcp_min_tso_segs */
 296static u32 bbr_min_tso_segs(struct sock *sk)
 297{
 298	return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
 299}
 300
 301static u32 bbr_tso_segs_goal(struct sock *sk)
 302{
 303	struct tcp_sock *tp = tcp_sk(sk);
 304	u32 segs, bytes;
 305
 306	/* Sort of tcp_tso_autosize() but ignoring
 307	 * driver provided sk_gso_max_size.
 308	 */
 309	bytes = min_t(unsigned long,
 310		      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
 311		      GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
 312	segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
 313
 314	return min(segs, 0x7FU);
 315}
 316
 317/* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
 318static void bbr_save_cwnd(struct sock *sk)
 319{
 320	struct tcp_sock *tp = tcp_sk(sk);
 321	struct bbr *bbr = inet_csk_ca(sk);
 322
 323	if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT)
 324		bbr->prior_cwnd = tp->snd_cwnd;  /* this cwnd is good enough */
 325	else  /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
 326		bbr->prior_cwnd = max(bbr->prior_cwnd, tp->snd_cwnd);
 327}
 328
 329static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
 330{
 331	struct tcp_sock *tp = tcp_sk(sk);
 332	struct bbr *bbr = inet_csk_ca(sk);
 333
 334	if (event == CA_EVENT_TX_START && tp->app_limited) {
 335		bbr->idle_restart = 1;
 336		bbr->ack_epoch_mstamp = tp->tcp_mstamp;
 337		bbr->ack_epoch_acked = 0;
 338		/* Avoid pointless buffer overflows: pace at est. bw if we don't
 339		 * need more speed (we're restarting from idle and app-limited).
 340		 */
 341		if (bbr->mode == BBR_PROBE_BW)
 342			bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
 343		else if (bbr->mode == BBR_PROBE_RTT)
 344			bbr_check_probe_rtt_done(sk);
 345	}
 346}
 347
 348/* Calculate bdp based on min RTT and the estimated bottleneck bandwidth:
 
 349 *
 350 * bdp = ceil(bw * min_rtt * gain)
 351 *
 352 * The key factor, gain, controls the amount of queue. While a small gain
 353 * builds a smaller queue, it becomes more vulnerable to noise in RTT
 354 * measurements (e.g., delayed ACKs or other ACK compression effects). This
 355 * noise may cause BBR to under-estimate the rate.
 
 
 
 
 
 
 
 
 
 
 356 */
 357static u32 bbr_bdp(struct sock *sk, u32 bw, int gain)
 358{
 359	struct bbr *bbr = inet_csk_ca(sk);
 360	u32 bdp;
 361	u64 w;
 362
 363	/* If we've never had a valid RTT sample, cap cwnd at the initial
 364	 * default. This should only happen when the connection is not using TCP
 365	 * timestamps and has retransmitted all of the SYN/SYNACK/data packets
 366	 * ACKed so far. In this case, an RTO can cut cwnd to 1, in which
 367	 * case we need to slow-start up toward something safe: TCP_INIT_CWND.
 368	 */
 369	if (unlikely(bbr->min_rtt_us == ~0U))	 /* no valid RTT samples yet? */
 370		return TCP_INIT_CWND;  /* be safe: cap at default initial cwnd*/
 371
 372	w = (u64)bw * bbr->min_rtt_us;
 373
 374	/* Apply a gain to the given value, remove the BW_SCALE shift, and
 375	 * round the value up to avoid a negative feedback loop.
 376	 */
 377	bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
 378
 379	return bdp;
 380}
 381
 382/* To achieve full performance in high-speed paths, we budget enough cwnd to
 383 * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
 384 *   - one skb in sending host Qdisc,
 385 *   - one skb in sending host TSO/GSO engine
 386 *   - one skb being received by receiver host LRO/GRO/delayed-ACK engine
 387 * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
 388 * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
 389 * which allows 2 outstanding 2-packet sequences, to try to keep pipe
 390 * full even with ACK-every-other-packet delayed ACKs.
 391 */
 392static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd)
 393{
 394	struct bbr *bbr = inet_csk_ca(sk);
 395
 396	/* Allow enough full-sized skbs in flight to utilize end systems. */
 397	cwnd += 3 * bbr_tso_segs_goal(sk);
 398
 399	/* Reduce delayed ACKs by rounding up cwnd to the next even number. */
 400	cwnd = (cwnd + 1) & ~1U;
 401
 402	/* Ensure gain cycling gets inflight above BDP even for small BDPs. */
 403	if (bbr->mode == BBR_PROBE_BW && bbr->cycle_idx == 0)
 404		cwnd += 2;
 405
 406	return cwnd;
 407}
 408
 409/* Find inflight based on min RTT and the estimated bottleneck bandwidth. */
 410static u32 bbr_inflight(struct sock *sk, u32 bw, int gain)
 411{
 412	u32 inflight;
 413
 414	inflight = bbr_bdp(sk, bw, gain);
 415	inflight = bbr_quantization_budget(sk, inflight);
 416
 417	return inflight;
 418}
 419
 420/* With pacing at lower layers, there's often less data "in the network" than
 421 * "in flight". With TSQ and departure time pacing at lower layers (e.g. fq),
 422 * we often have several skbs queued in the pacing layer with a pre-scheduled
 423 * earliest departure time (EDT). BBR adapts its pacing rate based on the
 424 * inflight level that it estimates has already been "baked in" by previous
 425 * departure time decisions. We calculate a rough estimate of the number of our
 426 * packets that might be in the network at the earliest departure time for the
 427 * next skb scheduled:
 428 *   in_network_at_edt = inflight_at_edt - (EDT - now) * bw
 429 * If we're increasing inflight, then we want to know if the transmit of the
 430 * EDT skb will push inflight above the target, so inflight_at_edt includes
 431 * bbr_tso_segs_goal() from the skb departing at EDT. If decreasing inflight,
 432 * then estimate if inflight will sink too low just before the EDT transmit.
 433 */
 434static u32 bbr_packets_in_net_at_edt(struct sock *sk, u32 inflight_now)
 435{
 436	struct tcp_sock *tp = tcp_sk(sk);
 437	struct bbr *bbr = inet_csk_ca(sk);
 438	u64 now_ns, edt_ns, interval_us;
 439	u32 interval_delivered, inflight_at_edt;
 440
 441	now_ns = tp->tcp_clock_cache;
 442	edt_ns = max(tp->tcp_wstamp_ns, now_ns);
 443	interval_us = div_u64(edt_ns - now_ns, NSEC_PER_USEC);
 444	interval_delivered = (u64)bbr_bw(sk) * interval_us >> BW_SCALE;
 445	inflight_at_edt = inflight_now;
 446	if (bbr->pacing_gain > BBR_UNIT)              /* increasing inflight */
 447		inflight_at_edt += bbr_tso_segs_goal(sk);  /* include EDT skb */
 448	if (interval_delivered >= inflight_at_edt)
 449		return 0;
 450	return inflight_at_edt - interval_delivered;
 451}
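
/* Illustrative sketch with assumed values: at 10000 pkt/sec
 * (bw ~= 0.01 pkt/uS scaled by BW_UNIT) and the next EDT 1000 uS away,
 * ~10 packets will leave the network before that transmit, so 20 packets
 * in flight now count as only ~10 "in the network" at the EDT.
 */
static __always_unused u32 bbr_in_net_at_edt_example(void)
{
	u32 bw = div_u64((u64)10000 * BW_UNIT, USEC_PER_SEC); /* ~167772 */
	u64 interval_us = 1000;			/* EDT is 1 ms from now */
	u32 interval_delivered = (u64)bw * interval_us >> BW_SCALE;

	return 20 - interval_delivered;		/* ~10 packets at EDT */
}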
 452
 453/* Find the cwnd increment based on estimate of ack aggregation */
 454static u32 bbr_ack_aggregation_cwnd(struct sock *sk)
 455{
 456	u32 max_aggr_cwnd, aggr_cwnd = 0;
 457
 458	if (bbr_extra_acked_gain && bbr_full_bw_reached(sk)) {
 459		max_aggr_cwnd = ((u64)bbr_bw(sk) * bbr_extra_acked_max_us)
 460				/ BW_UNIT;
 461		aggr_cwnd = (bbr_extra_acked_gain * bbr_extra_acked(sk))
 462			     >> BBR_SCALE;
 463		aggr_cwnd = min(aggr_cwnd, max_aggr_cwnd);
 464	}
 465
 466	return aggr_cwnd;
 467}
 468
 469/* An optimization in BBR to reduce losses: On the first round of recovery, we
 470 * follow the packet conservation principle: send P packets per P packets acked.
 471 * After that, we slow-start and send at most 2*P packets per P packets acked.
 472 * After recovery finishes, or upon undo, we restore the cwnd we had when
 473 * recovery started (capped by the target cwnd based on estimated BDP).
 474 *
 475 * TODO(ycheng/ncardwell): implement a rate-based approach.
 476 */
 477static bool bbr_set_cwnd_to_recover_or_restore(
 478	struct sock *sk, const struct rate_sample *rs, u32 acked, u32 *new_cwnd)
 479{
 480	struct tcp_sock *tp = tcp_sk(sk);
 481	struct bbr *bbr = inet_csk_ca(sk);
 482	u8 prev_state = bbr->prev_ca_state, state = inet_csk(sk)->icsk_ca_state;
 483	u32 cwnd = tp->snd_cwnd;
 484
 485	/* An ACK for P pkts should release at most 2*P packets. We do this
 486	 * in two steps. First, here we deduct the number of lost packets.
 487	 * Then, in bbr_set_cwnd() we slow start up toward the target cwnd.
 488	 */
 489	if (rs->losses > 0)
 490		cwnd = max_t(s32, cwnd - rs->losses, 1);
 491
 492	if (state == TCP_CA_Recovery && prev_state != TCP_CA_Recovery) {
 493		/* Starting 1st round of Recovery, so do packet conservation. */
 494		bbr->packet_conservation = 1;
 495		bbr->next_rtt_delivered = tp->delivered;  /* start round now */
 496		/* Cut unused cwnd from app behavior, TSQ, or TSO deferral: */
 497		cwnd = tcp_packets_in_flight(tp) + acked;
 498	} else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) {
 499		/* Exiting loss recovery; restore cwnd saved before recovery. */
 500		cwnd = max(cwnd, bbr->prior_cwnd);
 501		bbr->packet_conservation = 0;
 502	}
 503	bbr->prev_ca_state = state;
 504	bbr->prev_ca_state = state;
 505	if (bbr->packet_conservation) {
 506		*new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
 507		return true;	/* yes, using packet conservation */
 508	}
 509	*new_cwnd = cwnd;
 510	return false;
 511}
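
/* Illustrative sketch of the conservation logic above, with assumed
 * numbers: an ACK (s)acks 3 packets while 2 are newly marked lost and 5
 * remain in flight. cwnd first drops from 10 to max(10 - 2, 1) = 8, and
 * packet conservation then keeps cwnd >= 5 + 3, releasing at most as
 * much data as was just delivered.
 */
static __always_unused u32 bbr_conservation_example(void)
{
	u32 cwnd = 10, losses = 2, acked = 3, in_flight = 5;

	cwnd = max_t(s32, cwnd - losses, 1);	/* deduct losses: 10 -> 8 */
	return max(cwnd, in_flight + acked);	/* conserve packets: 8 */
}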
 512
 513/* Slow-start up toward target cwnd (if bw estimate is growing, or packet loss
 514 * has drawn us down below target), or snap down to target if we're above it.
 515 */
 516static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
 517			 u32 acked, u32 bw, int gain)
 518{
 519	struct tcp_sock *tp = tcp_sk(sk);
 520	struct bbr *bbr = inet_csk_ca(sk);
 521	u32 cwnd = tp->snd_cwnd, target_cwnd = 0;
 522
 523	if (!acked)
 524		goto done;  /* no packet fully ACKed; just apply caps */
 525
 526	if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
 527		goto done;
 528
 529	target_cwnd = bbr_bdp(sk, bw, gain);
 530
 531	/* Increment the cwnd to account for excess ACKed data that seems
 532	 * due to aggregation (of data and/or ACKs) visible in the ACK stream.
 533	 */
 534	target_cwnd += bbr_ack_aggregation_cwnd(sk);
 535	target_cwnd = bbr_quantization_budget(sk, target_cwnd);
 536
 537	/* If we're below target cwnd, slow start cwnd toward target cwnd. */
 538	if (bbr_full_bw_reached(sk))  /* only cut cwnd if we filled the pipe */
 539		cwnd = min(cwnd + acked, target_cwnd);
 540	else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
 541		cwnd = cwnd + acked;
 542	cwnd = max(cwnd, bbr_cwnd_min_target);
 543
 544done:
 545	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);	/* apply global cap */
 546	if (bbr->mode == BBR_PROBE_RTT)  /* drain queue, refresh min_rtt */
 547		tp->snd_cwnd = min(tp->snd_cwnd, bbr_cwnd_min_target);
 548}
 549
 550/* End cycle phase if it's time and/or we hit the phase's in-flight target. */
 551static bool bbr_is_next_cycle_phase(struct sock *sk,
 552				    const struct rate_sample *rs)
 553{
 554	struct tcp_sock *tp = tcp_sk(sk);
 555	struct bbr *bbr = inet_csk_ca(sk);
 556	bool is_full_length =
 557		tcp_stamp_us_delta(tp->delivered_mstamp, bbr->cycle_mstamp) >
 558		bbr->min_rtt_us;
 559	u32 inflight, bw;
 560
 561	/* The pacing_gain of 1.0 paces at the estimated bw to try to fully
 562	 * use the pipe without increasing the queue.
 563	 */
 564	if (bbr->pacing_gain == BBR_UNIT)
 565		return is_full_length;		/* just use wall clock time */
 566
 567	inflight = bbr_packets_in_net_at_edt(sk, rs->prior_in_flight);
 568	bw = bbr_max_bw(sk);
 569
 570	/* A pacing_gain > 1.0 probes for bw by trying to raise inflight to at
 571	 * least pacing_gain*BDP; this may take more than min_rtt if min_rtt is
 572	 * small (e.g. on a LAN). We do not persist if packets are lost, since
 573	 * a path with small buffers may not hold that much.
 574	 */
 575	if (bbr->pacing_gain > BBR_UNIT)
 576		return is_full_length &&
 577			(rs->losses ||  /* perhaps pacing_gain*BDP won't fit */
 578			 inflight >= bbr_inflight(sk, bw, bbr->pacing_gain));
 579
 580	/* A pacing_gain < 1.0 tries to drain extra queue we added if bw
 581	 * probing didn't find more bw. If inflight falls to match BDP then we
 582	 * estimate queue is drained; persisting would underutilize the pipe.
 583	 */
 584	return is_full_length ||
 585		inflight <= bbr_inflight(sk, bw, BBR_UNIT);
 586}
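
/* Illustrative sketch with assumed values, using the 5/4 and 3/4 gains
 * that PROBE_BW's gain cycle (defined earlier in this file) is assumed to
 * use: with an estimated BDP of 100 packets, the probing phase does not
 * end until inflight reaches 5/4 * 100 = 125 packets (or a loss occurs),
 * while the draining phase may end as soon as inflight falls to 100.
 */
static __always_unused void bbr_gain_cycle_targets_example(void)
{
	u32 bdp = 100;						/* packets */
	u32 probe = (u64)bdp * (BBR_UNIT * 5 / 4) >> BBR_SCALE;	/* 125 */
	u32 drain = (u64)bdp * BBR_UNIT >> BBR_SCALE;		/* 100 */

	(void)probe;
	(void)drain;
}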
 587
 588static void bbr_advance_cycle_phase(struct sock *sk)
 589{
 590	struct tcp_sock *tp = tcp_sk(sk);
 591	struct bbr *bbr = inet_csk_ca(sk);
 592
 593	bbr->cycle_idx = (bbr->cycle_idx + 1) & (CYCLE_LEN - 1);
 594	bbr->cycle_mstamp = tp->delivered_mstamp;
 595}
 596
 597/* Gain cycling: cycle pacing gain to converge to fair share of available bw. */
 598static void bbr_update_cycle_phase(struct sock *sk,
 599				   const struct rate_sample *rs)
 600{
 601	struct bbr *bbr = inet_csk_ca(sk);
 602
 603	if (bbr->mode == BBR_PROBE_BW && bbr_is_next_cycle_phase(sk, rs))
 604		bbr_advance_cycle_phase(sk);
 605}
 606
 607static void bbr_reset_startup_mode(struct sock *sk)
 608{
 609	struct bbr *bbr = inet_csk_ca(sk);
 610
 611	bbr->mode = BBR_STARTUP;
 612}
 613
 614static void bbr_reset_probe_bw_mode(struct sock *sk)
 615{
 616	struct bbr *bbr = inet_csk_ca(sk);
 617
 618	bbr->mode = BBR_PROBE_BW;
 619	bbr->cycle_idx = CYCLE_LEN - 1 - prandom_u32_max(bbr_cycle_rand);
 620	bbr_advance_cycle_phase(sk);	/* flip to next phase of gain cycle */
 621}
 622
 623static void bbr_reset_mode(struct sock *sk)
 624{
 625	if (!bbr_full_bw_reached(sk))
 626		bbr_reset_startup_mode(sk);
 627	else
 628		bbr_reset_probe_bw_mode(sk);
 629}
 630
 631/* Start a new long-term sampling interval. */
 632static void bbr_reset_lt_bw_sampling_interval(struct sock *sk)
 633{
 634	struct tcp_sock *tp = tcp_sk(sk);
 635	struct bbr *bbr = inet_csk_ca(sk);
 636
 637	bbr->lt_last_stamp = div_u64(tp->delivered_mstamp, USEC_PER_MSEC);
 638	bbr->lt_last_delivered = tp->delivered;
 639	bbr->lt_last_lost = tp->lost;
 640	bbr->lt_rtt_cnt = 0;
 641}
 642
 643/* Completely reset long-term bandwidth sampling. */
 644static void bbr_reset_lt_bw_sampling(struct sock *sk)
 645{
 646	struct bbr *bbr = inet_csk_ca(sk);
 647
 648	bbr->lt_bw = 0;
 649	bbr->lt_use_bw = 0;
 650	bbr->lt_is_sampling = false;
 651	bbr_reset_lt_bw_sampling_interval(sk);
 652}
 653
 654/* Long-term bw sampling interval is done. Estimate whether we're policed. */
 655static void bbr_lt_bw_interval_done(struct sock *sk, u32 bw)
 656{
 657	struct bbr *bbr = inet_csk_ca(sk);
 658	u32 diff;
 659
 660	if (bbr->lt_bw) {  /* do we have bw from a previous interval? */
 661		/* Is new bw close to the lt_bw from the previous interval? */
 662		diff = abs(bw - bbr->lt_bw);
 663		if ((diff * BBR_UNIT <= bbr_lt_bw_ratio * bbr->lt_bw) ||
 664		    (bbr_rate_bytes_per_sec(sk, diff, BBR_UNIT) <=
 665		     bbr_lt_bw_diff)) {
 666			/* All criteria are met; estimate we're policed. */
 667			bbr->lt_bw = (bw + bbr->lt_bw) >> 1;  /* avg 2 intvls */
 668			bbr->lt_use_bw = 1;
 669			bbr->pacing_gain = BBR_UNIT;  /* try to avoid drops */
 670			bbr->lt_rtt_cnt = 0;
 671			return;
 672		}
 673	}
 674	bbr->lt_bw = bw;
 675	bbr_reset_lt_bw_sampling_interval(sk);
 676}
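
/* Illustrative sketch of the "close enough" test above, assuming a
 * bbr_lt_bw_ratio threshold of BBR_UNIT/8 (12.5%): interval rates of 100
 * and 108 (in bw units) differ by 8, and 8 * BBR_UNIT <= (BBR_UNIT/8) *
 * 100 holds, so the two intervals look policed and lt_bw becomes their
 * average, 104.
 */
static __always_unused bool bbr_lt_bw_close_example(void)
{
	u32 lt_bw = 100, bw = 108;	/* assumed rates of two intervals */
	u32 diff = bw - lt_bw;		/* 8, i.e. 8% of lt_bw */

	/* 8 * 256 = 2048 <= (256 / 8) * 100 = 3200, so "consistent": */
	return diff * BBR_UNIT <= (BBR_UNIT / 8) * lt_bw;
}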
 677
 678/* Token-bucket traffic policers are common (see "An Internet-Wide Analysis of
 679 * Traffic Policing", SIGCOMM 2016). BBR detects token-bucket policers and
 680 * explicitly models their policed rate, to reduce unnecessary losses. We
 681 * estimate that we're policed if we see 2 consecutive sampling intervals with
 682 * consistent throughput and high packet loss. If we think we're being policed,
 683 * set lt_bw to the "long-term" average delivery rate from those 2 intervals.
 684 */
 685static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
 686{
 687	struct tcp_sock *tp = tcp_sk(sk);
 688	struct bbr *bbr = inet_csk_ca(sk);
 689	u32 lost, delivered;
 690	u64 bw;
 691	u32 t;
 692
 693	if (bbr->lt_use_bw) {	/* already using long-term rate, lt_bw? */
 694		if (bbr->mode == BBR_PROBE_BW && bbr->round_start &&
 695		    ++bbr->lt_rtt_cnt >= bbr_lt_bw_max_rtts) {
 696			bbr_reset_lt_bw_sampling(sk);    /* stop using lt_bw */
 697			bbr_reset_probe_bw_mode(sk);  /* restart gain cycling */
 698		}
 699		return;
 700	}
 701
 702	/* Wait for the first loss before sampling, to let the policer exhaust
 703	 * its tokens and estimate the steady-state rate allowed by the policer.
 704	 * Starting samples earlier includes bursts that over-estimate the bw.
 705	 */
 706	if (!bbr->lt_is_sampling) {
 707		if (!rs->losses)
 708			return;
 709		bbr_reset_lt_bw_sampling_interval(sk);
 710		bbr->lt_is_sampling = true;
 711	}
 712
 713	/* To avoid underestimates, reset sampling if we run out of data. */
 714	if (rs->is_app_limited) {
 715		bbr_reset_lt_bw_sampling(sk);
 716		return;
 717	}
 718
 719	if (bbr->round_start)
 720		bbr->lt_rtt_cnt++;	/* count round trips in this interval */
 721	if (bbr->lt_rtt_cnt < bbr_lt_intvl_min_rtts)
 722		return;		/* sampling interval needs to be longer */
 723	if (bbr->lt_rtt_cnt > 4 * bbr_lt_intvl_min_rtts) {
 724		bbr_reset_lt_bw_sampling(sk);  /* interval is too long */
 725		return;
 726	}
 727
 728	/* End sampling interval when a packet is lost, so we estimate the
 729	 * policer tokens were exhausted. Stopping the sampling before the
 730	 * tokens are exhausted under-estimates the policed rate.
 731	 */
 732	if (!rs->losses)
 733		return;
 734
 735	/* Calculate packets lost and delivered in sampling interval. */
 736	lost = tp->lost - bbr->lt_last_lost;
 737	delivered = tp->delivered - bbr->lt_last_delivered;
 738	/* Is loss rate (lost/delivered) >= lt_loss_thresh? If not, wait. */
 739	if (!delivered || (lost << BBR_SCALE) < bbr_lt_loss_thresh * delivered)
 740		return;
 741
 742	/* Find average delivery rate in this sampling interval. */
 743	t = div_u64(tp->delivered_mstamp, USEC_PER_MSEC) - bbr->lt_last_stamp;
 744	if ((s32)t < 1)
 745		return;		/* interval is less than one ms, so wait */
 746	/* Check if we can multiply without overflowing */
 747	if (t >= ~0U / USEC_PER_MSEC) {
 748		bbr_reset_lt_bw_sampling(sk);  /* interval too long; reset */
 749		return;
 750	}
 751	t *= USEC_PER_MSEC;
 752	bw = (u64)delivered * BW_UNIT;
 753	do_div(bw, t);
 754	bbr_lt_bw_interval_done(sk, bw);
 755}
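
/* Illustrative sketch of the loss-rate gate in bbr_lt_bw_sampling(),
 * assuming a bbr_lt_loss_thresh of 50 (50/256 ~= 20%): with 100 packets
 * delivered and 25 lost in the interval, 25 << BBR_SCALE = 6400 is >=
 * 50 * 100 = 5000, so the interval is lossy enough to be measured.
 */
static __always_unused bool bbr_lt_loss_gate_example(void)
{
	u32 lost = 25, delivered = 100;	/* assumed interval totals */

	return delivered && (lost << BBR_SCALE) >= 50 * delivered;
}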
 756
 757/* Estimate the bandwidth based on how fast packets are delivered */
 758static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
 759{
 760	struct tcp_sock *tp = tcp_sk(sk);
 761	struct bbr *bbr = inet_csk_ca(sk);
 762	u64 bw;
 763
 764	bbr->round_start = 0;
 765	if (rs->delivered < 0 || rs->interval_us <= 0)
 766		return; /* Not a valid observation */
 767
 768	/* See if we've reached the next RTT */
 769	if (!before(rs->prior_delivered, bbr->next_rtt_delivered)) {
 770		bbr->next_rtt_delivered = tp->delivered;
 771		bbr->rtt_cnt++;
 772		bbr->round_start = 1;
 773		bbr->packet_conservation = 0;
 774	}
 775
 776	bbr_lt_bw_sampling(sk, rs);
 777
 778	/* Divide delivered by the interval to find a (lower bound) bottleneck
 779	 * bandwidth sample. Delivered is in packets and interval_us in uS, so
 780	 * the ratio will be <<1 for most connections; thus delivered is first
 781	 * scaled up by BW_UNIT. */
 782	bw = div64_long((u64)rs->delivered * BW_UNIT, rs->interval_us);
 783
 784	/* If this sample is application-limited, it is likely to have a very
 785	 * low delivered count that represents application behavior rather than
 786	 * the available network rate. Such a sample could drag down estimated
 787	 * bw, causing needless slow-down. Thus, to continue to send at the
 788	 * last measured network rate, we filter out app-limited samples unless
 789	 * they describe the path bw at least as well as our bw model.
 790	 *
 791	 * So the goal during the app-limited phase is to proceed at the best
 792	 * measured network rate for however long that phase lasts. We leave this
 793	 * phase automatically when the app writes faster than the network can deliver :)
 794	 */
 795	if (!rs->is_app_limited || bw >= bbr_max_bw(sk)) {
 796		/* Incorporate new sample into our max bw filter. */
 797		minmax_running_max(&bbr->bw, bbr_bw_rtts, bbr->rtt_cnt, bw);
 798	}
 799}
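
/* Illustrative sketch of the scaled bandwidth sample above, with assumed
 * values: 10 packets delivered over a 1000 uS interval is 0.01 pkt/uS,
 * which would truncate to 0 without scaling; multiplied by BW_UNIT it
 * becomes ~167772, i.e. ~10000 pkt/sec (~120 Mbit/sec at 1500 bytes).
 */
static __always_unused u64 bbr_bw_sample_example(void)
{
	u32 delivered = 10;		/* packets newly (s)acked */
	long interval_us = 1000;	/* sample interval, microseconds */

	return div64_long((u64)delivered * BW_UNIT, interval_us);
}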
 800
 801/* Estimates the windowed max degree of ack aggregation.
 802 * This is used to provision extra in-flight data to keep sending during
 803 * inter-ACK silences.
 804 *
 805 * Degree of ack aggregation is estimated as extra data acked beyond expected.
 806 *
 807 * max_extra_acked = "maximum recent excess data ACKed beyond max_bw * interval"
 808 * cwnd += max_extra_acked
 809 *
 810 * Max extra_acked is clamped by cwnd and bw * bbr_extra_acked_max_us (100 ms).
 811 * Max filter is an approximate sliding window of 5-10 (packet timed) round
 812 * trips.
 813 */
 814static void bbr_update_ack_aggregation(struct sock *sk,
 815				       const struct rate_sample *rs)
 816{
 817	u32 epoch_us, expected_acked, extra_acked;
 818	struct bbr *bbr = inet_csk_ca(sk);
 819	struct tcp_sock *tp = tcp_sk(sk);
 820
 821	if (!bbr_extra_acked_gain || rs->acked_sacked <= 0 ||
 822	    rs->delivered < 0 || rs->interval_us <= 0)
 823		return;
 824
 825	if (bbr->round_start) {
 826		bbr->extra_acked_win_rtts = min(0x1F,
 827						bbr->extra_acked_win_rtts + 1);
 828		if (bbr->extra_acked_win_rtts >= bbr_extra_acked_win_rtts) {
 829			bbr->extra_acked_win_rtts = 0;
 830			bbr->extra_acked_win_idx = bbr->extra_acked_win_idx ?
 831						   0 : 1;
 832			bbr->extra_acked[bbr->extra_acked_win_idx] = 0;
 833		}
 834	}
 835
 836	/* Compute how many packets we expected to be delivered over epoch. */
 837	epoch_us = tcp_stamp_us_delta(tp->delivered_mstamp,
 838				      bbr->ack_epoch_mstamp);
 839	expected_acked = ((u64)bbr_bw(sk) * epoch_us) / BW_UNIT;
 840
 841	/* Reset the aggregation epoch if the ACK rate is below the expected
 842	 * rate, or if a significantly large number of ACKs has arrived since
 843	 * the epoch began (the epoch is potentially quite old).
 844	 */
 845	if (bbr->ack_epoch_acked <= expected_acked ||
 846	    (bbr->ack_epoch_acked + rs->acked_sacked >=
 847	     bbr_ack_epoch_acked_reset_thresh)) {
 848		bbr->ack_epoch_acked = 0;
 849		bbr->ack_epoch_mstamp = tp->delivered_mstamp;
 850		expected_acked = 0;
 851	}
 852
 853	/* Compute excess data delivered, beyond what was expected. */
 854	bbr->ack_epoch_acked = min_t(u32, 0xFFFFF,
 855				     bbr->ack_epoch_acked + rs->acked_sacked);
 856	extra_acked = bbr->ack_epoch_acked - expected_acked;
 857	extra_acked = min(extra_acked, tp->snd_cwnd);
 858	if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx])
 859		bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked;
 860}
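
/* Illustrative sketch of the excess-ACK computation above, with assumed
 * values: at bw ~= 10000 pkt/sec (167772 scaled), a 2000 uS epoch should
 * have delivered ~20 packets; if 30 were actually ACKed in the epoch,
 * the extra ~10 are attributed to aggregation (capped by cwnd).
 */
static __always_unused u32 bbr_extra_acked_example(void)
{
	u32 bw = div_u64((u64)10000 * BW_UNIT, USEC_PER_SEC);
	u32 epoch_us = 2000, epoch_acked = 30, cwnd = 100;
	u32 expected_acked = ((u64)bw * epoch_us) / BW_UNIT;	/* ~20 */

	return min(epoch_acked - expected_acked, cwnd);		/* ~10 */
}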
 861
 862/* Estimate when the pipe is full, using the change in delivery rate: BBR
 863 * estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
 864 * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
 865 * rounds. Why 3 rounds: 1: rwin autotuning grows the rwin, 2: we fill the
 866 * higher rwin, 3: we get higher delivery rate samples. Or transient
 867 * cross-traffic or radio noise can go away. CUBIC Hystart shares a similar
 868 * design goal, but uses delay and inter-ACK spacing instead of bandwidth.
 869 */
 870static void bbr_check_full_bw_reached(struct sock *sk,
 871				      const struct rate_sample *rs)
 872{
 873	struct bbr *bbr = inet_csk_ca(sk);
 874	u32 bw_thresh;
 875
 876	if (bbr_full_bw_reached(sk) || !bbr->round_start || rs->is_app_limited)
 877		return;
 878
 879	bw_thresh = (u64)bbr->full_bw * bbr_full_bw_thresh >> BBR_SCALE;
 880	if (bbr_max_bw(sk) >= bw_thresh) {
 881		bbr->full_bw = bbr_max_bw(sk);
 882		bbr->full_bw_cnt = 0;
 883		return;
 884	}
 885	++bbr->full_bw_cnt;
 886	bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt;
 887}
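
/* Illustrative sketch of the growth test above, assuming the default
 * bbr_full_bw_thresh of BBR_UNIT * 5 / 4 (25% growth): with a baseline
 * full_bw of 1000, the threshold is 1250; a new max bw of 1200 fails it
 * and counts as one of the bbr_full_bw_cnt "no growth" rounds.
 */
static __always_unused bool bbr_full_bw_round_example(void)
{
	u32 full_bw = 1000;	/* assumed baseline from a prior round */
	u32 max_bw = 1200;	/* assumed latest windowed-max bw */
	u32 bw_thresh = (u64)full_bw * (BBR_UNIT * 5 / 4) >> BBR_SCALE;

	return max_bw < bw_thresh;	/* 1200 < 1250: no growth round */
}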
 888
 889/* If pipe is probably full, drain the queue and then enter steady-state. */
 890static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
 891{
 892	struct bbr *bbr = inet_csk_ca(sk);
 893
 894	if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
 895		bbr->mode = BBR_DRAIN;	/* drain queue we created */
 896		tcp_sk(sk)->snd_ssthresh =
 897				bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
 898	}	/* fall through to check if in-flight is already small: */
 899	if (bbr->mode == BBR_DRAIN &&
 900	    bbr_packets_in_net_at_edt(sk, tcp_packets_in_flight(tcp_sk(sk))) <=
 901	    bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT))
 902		bbr_reset_probe_bw_mode(sk);  /* we estimate queue is drained */
 903}
 904
 905static void bbr_check_probe_rtt_done(struct sock *sk)
 906{
 907	struct tcp_sock *tp = tcp_sk(sk);
 908	struct bbr *bbr = inet_csk_ca(sk);
 909
 910	if (!(bbr->probe_rtt_done_stamp &&
 911	      after(tcp_jiffies32, bbr->probe_rtt_done_stamp)))
 912		return;
 913
 914	bbr->min_rtt_stamp = tcp_jiffies32;  /* wait a while until PROBE_RTT */
 915	tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
 916	bbr_reset_mode(sk);
 917}
 918
 919/* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
 920 * periodically drain the bottleneck queue, to converge to measure the true
 921 * min_rtt (unloaded propagation delay). This allows the flows to keep queues
 922 * small (reducing queuing delay and packet loss) and achieve fairness among
 923 * BBR flows.
 924 *
 925 * The min_rtt filter window is 10 seconds. When the min_rtt estimate expires,
 926 * we enter PROBE_RTT mode and cap the cwnd at bbr_cwnd_min_target=4 packets.
 927 * After at least bbr_probe_rtt_mode_ms=200ms and at least one packet-timed
 928 * round trip elapsed with that flight size <= 4, we leave PROBE_RTT mode and
 929 * re-enter the previous mode. BBR uses 200ms to approximately bound the
 930 * performance penalty of PROBE_RTT's cwnd capping to roughly 2% (200ms/10s).
 931 *
 932 * Note that flows need only pay the 2% penalty if they have been busy
 933 * sending over the entire last 10 seconds. Interactive applications (e.g.,
 934 * Web, RPCs, video chunks) often have natural silences or low-rate periods
 935 * within 10 seconds that are long enough for the flow's queue to drain at
 936 * the bottleneck; our min_rtt filter picks up these samples opportunistically. :-)
 937 */
 938static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
 939{
 940	struct tcp_sock *tp = tcp_sk(sk);
 941	struct bbr *bbr = inet_csk_ca(sk);
 942	bool filter_expired;
 943
 944	/* Track min RTT seen in the min_rtt_win_sec filter window: */
 945	filter_expired = after(tcp_jiffies32,
 946			       bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
 947	if (rs->rtt_us >= 0 &&
 948	    (rs->rtt_us <= bbr->min_rtt_us ||
 949	     (filter_expired && !rs->is_ack_delayed))) {
 950		bbr->min_rtt_us = rs->rtt_us;
 951		bbr->min_rtt_stamp = tcp_jiffies32;
 952	}
 953
 954	if (bbr_probe_rtt_mode_ms > 0 && filter_expired &&
 955	    !bbr->idle_restart && bbr->mode != BBR_PROBE_RTT) {
 956		bbr->mode = BBR_PROBE_RTT;  /* dip, drain queue */
 957		bbr_save_cwnd(sk);  /* note cwnd so we can restore it */
 958		bbr->probe_rtt_done_stamp = 0;
 959	}
 960
 961	if (bbr->mode == BBR_PROBE_RTT) {
 962		/* Ignore low rate samples during this mode. */
 963		tp->app_limited =
 964			(tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
 965		/* Maintain min packets in flight for max(200 ms, 1 round). */
 966		if (!bbr->probe_rtt_done_stamp &&
 967		    tcp_packets_in_flight(tp) <= bbr_cwnd_min_target) {
 968			bbr->probe_rtt_done_stamp = tcp_jiffies32 +
 969				msecs_to_jiffies(bbr_probe_rtt_mode_ms);
 970			bbr->probe_rtt_round_done = 0;
 971			bbr->next_rtt_delivered = tp->delivered;
 972		} else if (bbr->probe_rtt_done_stamp) {
 973			if (bbr->round_start)
 974				bbr->probe_rtt_round_done = 1;
 975			if (bbr->probe_rtt_round_done)
 976				bbr_check_probe_rtt_done(sk);
 977		}
 978	}
 979	/* Restart after idle ends only once we process a new S/ACK for data */
 980	if (rs->delivered > 0)
 981		bbr->idle_restart = 0;
 982}
 983
 984static void bbr_update_gains(struct sock *sk)
 985{
 986	struct bbr *bbr = inet_csk_ca(sk);
 987
 988	switch (bbr->mode) {
 989	case BBR_STARTUP:
 990		bbr->pacing_gain = bbr_high_gain;
 991		bbr->cwnd_gain	 = bbr_high_gain;
 992		break;
 993	case BBR_DRAIN:
 994		bbr->pacing_gain = bbr_drain_gain;	/* slow, to drain */
 995		bbr->cwnd_gain	 = bbr_high_gain;	/* keep cwnd */
 996		break;
 997	case BBR_PROBE_BW:
 998		bbr->pacing_gain = (bbr->lt_use_bw ?
 999				    BBR_UNIT :
1000				    bbr_pacing_gain[bbr->cycle_idx]);
1001		bbr->cwnd_gain	 = bbr_cwnd_gain;
1002		break;
1003	case BBR_PROBE_RTT:
1004		bbr->pacing_gain = BBR_UNIT;
1005		bbr->cwnd_gain	 = BBR_UNIT;
1006		break;
1007	default:
1008		WARN_ONCE(1, "BBR bad mode: %u\n", bbr->mode);
1009		break;
1010	}
1011}
1012
1013static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
1014{
1015	bbr_update_bw(sk, rs);
1016	bbr_update_ack_aggregation(sk, rs);
1017	bbr_update_cycle_phase(sk, rs);
1018	bbr_check_full_bw_reached(sk, rs);
1019	bbr_check_drain(sk, rs);
1020	bbr_update_min_rtt(sk, rs);
1021	bbr_update_gains(sk);
1022}
1023
1024static void bbr_main(struct sock *sk, const struct rate_sample *rs)
1025{
1026	struct bbr *bbr = inet_csk_ca(sk);
1027	u32 bw;
1028
1029	bbr_update_model(sk, rs);
1030
1031	bw = bbr_bw(sk);
1032	bbr_set_pacing_rate(sk, bw, bbr->pacing_gain);
1033	bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain);
1034}
1035
1036static void bbr_init(struct sock *sk)
1037{
1038	struct tcp_sock *tp = tcp_sk(sk);
1039	struct bbr *bbr = inet_csk_ca(sk);
1040
1041	bbr->prior_cwnd = 0;
1042	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1043	bbr->rtt_cnt = 0;
1044	bbr->next_rtt_delivered = 0;
1045	bbr->prev_ca_state = TCP_CA_Open;
1046	bbr->packet_conservation = 0;
1047
1048	bbr->probe_rtt_done_stamp = 0;
1049	bbr->probe_rtt_round_done = 0;
1050	bbr->min_rtt_us = tcp_min_rtt(tp);
1051	bbr->min_rtt_stamp = tcp_jiffies32;
1052
1053	minmax_reset(&bbr->bw, bbr->rtt_cnt, 0);  /* init max bw to 0 */
1054
1055	bbr->has_seen_rtt = 0;
1056	bbr_init_pacing_rate_from_rtt(sk);
1057
1058	bbr->round_start = 0;
1059	bbr->idle_restart = 0;
1060	bbr->full_bw_reached = 0;
1061	bbr->full_bw = 0;
1062	bbr->full_bw_cnt = 0;
1063	bbr->cycle_mstamp = 0;
1064	bbr->cycle_idx = 0;
1065	bbr_reset_lt_bw_sampling(sk);
1066	bbr_reset_startup_mode(sk);
1067
1068	bbr->ack_epoch_mstamp = tp->tcp_mstamp;
1069	bbr->ack_epoch_acked = 0;
1070	bbr->extra_acked_win_rtts = 0;
1071	bbr->extra_acked_win_idx = 0;
1072	bbr->extra_acked[0] = 0;
1073	bbr->extra_acked[1] = 0;
1074
1075	cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
1076}
1077
1078static u32 bbr_sndbuf_expand(struct sock *sk)
1079{
1080	/* Provision 3 * cwnd since BBR may slow-start even during recovery. */
1081	return 3;
1082}
1083
1084/* In theory BBR does not need to undo the cwnd since it does not
1085 * always reduce cwnd on losses (see bbr_main()). Keep it for now.
1086 */
1087static u32 bbr_undo_cwnd(struct sock *sk)
1088{
1089	struct bbr *bbr = inet_csk_ca(sk);
1090
1091	bbr->full_bw = 0;   /* spurious slow-down; reset full pipe detection */
1092	bbr->full_bw_cnt = 0;
1093	bbr_reset_lt_bw_sampling(sk);
1094	return tcp_sk(sk)->snd_cwnd;
1095}
1096
1097/* Entering loss recovery, so save cwnd for when we exit or undo recovery. */
1098static u32 bbr_ssthresh(struct sock *sk)
1099{
1100	bbr_save_cwnd(sk);
1101	return tcp_sk(sk)->snd_ssthresh;
1102}
1103
1104static size_t bbr_get_info(struct sock *sk, u32 ext, int *attr,
1105			   union tcp_cc_info *info)
1106{
1107	if (ext & (1 << (INET_DIAG_BBRINFO - 1)) ||
1108	    ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
1109		struct tcp_sock *tp = tcp_sk(sk);
1110		struct bbr *bbr = inet_csk_ca(sk);
1111		u64 bw = bbr_bw(sk);
1112
1113		bw = bw * tp->mss_cache * USEC_PER_SEC >> BW_SCALE;
1114		memset(&info->bbr, 0, sizeof(info->bbr));
1115		info->bbr.bbr_bw_lo		= (u32)bw;
1116		info->bbr.bbr_bw_hi		= (u32)(bw >> 32);
1117		info->bbr.bbr_min_rtt		= bbr->min_rtt_us;
1118		info->bbr.bbr_pacing_gain	= bbr->pacing_gain;
1119		info->bbr.bbr_cwnd_gain		= bbr->cwnd_gain;
1120		*attr = INET_DIAG_BBRINFO;
1121		return sizeof(info->bbr);
1122	}
1123	return 0;
1124}
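
/* Userspace sketch (assumptions: a connected TCP socket fd using BBR, and
 * the tcp_bbr_info layout from linux/inet_diag.h). The TCP_CC_INFO socket
 * option returns the structure filled in above; this is how tools like
 * "ss -ti" display BBR state. bbr_bw_hi/bbr_bw_lo combine into a 64-bit
 * rate in bytes per second:
 *
 *	union tcp_cc_info info;
 *	socklen_t len = sizeof(info);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_CC_INFO, &info, &len) == 0) {
 *		__u64 bw = ((__u64)info.bbr.bbr_bw_hi << 32) |
 *			   info.bbr.bbr_bw_lo;
 *		printf("bw: %llu bytes/sec min_rtt: %u us\n",
 *		       (unsigned long long)bw, info.bbr.bbr_min_rtt);
 *	}
 */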
1125
1126static void bbr_set_state(struct sock *sk, u8 new_state)
1127{
1128	struct bbr *bbr = inet_csk_ca(sk);
1129
1130	if (new_state == TCP_CA_Loss) {
1131		struct rate_sample rs = { .losses = 1 };
1132
1133		bbr->prev_ca_state = TCP_CA_Loss;
1134		bbr->full_bw = 0;
1135		bbr->round_start = 1;	/* treat RTO like end of a round */
1136		bbr_lt_bw_sampling(sk, &rs);
1137	}
1138}
1139
1140static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
1141	.flags		= TCP_CONG_NON_RESTRICTED,
1142	.name		= "bbr",
1143	.owner		= THIS_MODULE,
1144	.init		= bbr_init,
1145	.cong_control	= bbr_main,
1146	.sndbuf_expand	= bbr_sndbuf_expand,
1147	.undo_cwnd	= bbr_undo_cwnd,
1148	.cwnd_event	= bbr_cwnd_event,
1149	.ssthresh	= bbr_ssthresh,
1150	.min_tso_segs	= bbr_min_tso_segs,
1151	.get_info	= bbr_get_info,
1152	.set_state	= bbr_set_state,
1153};
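
/* Userspace sketch of selecting this algorithm per-socket via the .name
 * above (assumes BBR is built in or the tcp_bbr module is loaded):
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "bbr", strlen("bbr"));
 *
 * The system-wide default is chosen with the
 * net.ipv4.tcp_congestion_control sysctl instead.
 */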
1154
1155static int __init bbr_register(void)
1156{
1157	BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE);
1158	return tcp_register_congestion_control(&tcp_bbr_cong_ops);
1159}
1160
1161static void __exit bbr_unregister(void)
1162{
1163	tcp_unregister_congestion_control(&tcp_bbr_cong_ops);
1164}
1165
1166module_init(bbr_register);
1167module_exit(bbr_unregister);
1168
1169MODULE_AUTHOR("Van Jacobson <vanj@google.com>");
1170MODULE_AUTHOR("Neal Cardwell <ncardwell@google.com>");
1171MODULE_AUTHOR("Yuchung Cheng <ycheng@google.com>");
1172MODULE_AUTHOR("Soheil Hassas Yeganeh <soheil@google.com>");
1173MODULE_LICENSE("Dual BSD/GPL");
1174MODULE_DESCRIPTION("TCP BBR (Bottleneck Bandwidth and RTT)");