1 /* Bottleneck Bandwidth and RTT (BBR) congestion control
2 *
3 * BBR congestion control computes the sending rate based on the delivery
4 * rate (throughput) estimated from ACKs. In a nutshell:
5 *
6 * On each ACK, update our model of the network path:
7 * bottleneck_bandwidth = windowed_max(delivered / elapsed, 10 round trips)
8 * min_rtt = windowed_min(rtt, 10 seconds)
9 * pacing_rate = pacing_gain * bottleneck_bandwidth
10 * cwnd = max(cwnd_gain * bottleneck_bandwidth * min_rtt, 4)
11 *
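 * For illustration only (example numbers, not from any measurement): if ACKs
 * show 100 packets delivered over the last 10 ms, the bw sample is 10 pkt/ms;
 * with min_rtt = 40 ms and the steady-state gains (pacing_gain = 1.0,
 * cwnd_gain = 2.0), BBR would pace at ~10 pkt/ms and allow a cwnd of
 * roughly 2 * 10 pkt/ms * 40 ms = 800 packets.
 *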
12 * The core algorithm does not react directly to packet losses or delays,
13 * although BBR may adjust the size of the next send per ACK when loss is
14 * observed, or adjust the sending rate if it estimates there is a
15 * traffic policer, in order to keep the drop rate reasonable.
16 *
17 * Here is a state transition diagram for BBR:
18 *
19 * |
20 * V
21 * +---> STARTUP ----+
22 * | | |
23 * | V |
24 * | DRAIN ----+
25 * | | |
26 * | V |
27 * +---> PROBE_BW ----+
28 * | ^ | |
29 * | | | |
30 * | +----+ |
31 * | |
32 * +---- PROBE_RTT <--+
33 *
34 * A BBR flow starts in STARTUP, and ramps up its sending rate quickly.
35 * When it estimates the pipe is full, it enters DRAIN to drain the queue.
36 * In steady state a BBR flow only uses PROBE_BW and PROBE_RTT.
37 * A long-lived BBR flow spends the vast majority of its time remaining
38 * (repeatedly) in PROBE_BW, fully probing and utilizing the pipe's bandwidth
39 * in a fair manner, with a small, bounded queue. *If* a flow has been
40 * continuously sending for the entire min_rtt window, and hasn't seen an RTT
41 * sample that matches or decreases its min_rtt estimate for 10 seconds, then
42 * it briefly enters PROBE_RTT to cut inflight to a minimum value to re-probe
43 * the path's two-way propagation delay (min_rtt). When exiting PROBE_RTT, if
44 * we estimated that we reached the full bw of the pipe then we enter PROBE_BW;
45 * otherwise we enter STARTUP to try to fill the pipe.
46 *
47 * BBR is described in detail in:
48 * "BBR: Congestion-Based Congestion Control",
49 * Neal Cardwell, Yuchung Cheng, C. Stephen Gunn, Soheil Hassas Yeganeh,
50 * Van Jacobson. ACM Queue, Vol. 14 No. 5, September-October 2016.
51 *
52 * There is a public e-mail list for discussing BBR development and testing:
53 * https://groups.google.com/forum/#!forum/bbr-dev
54 *
55 * NOTE: BBR may be used with the fq qdisc ("man tc-fq") for packet pacing;
56 * otherwise the TCP stack falls back to internal pacing, using one
57 * high-resolution timer per TCP socket, which may use more resources.
58 */
59 #include <linux/btf.h>
60 #include <linux/btf_ids.h>
61 #include <linux/module.h>
62 #include <net/tcp.h>
63 #include <linux/inet_diag.h>
64 #include <linux/inet.h>
65 #include <linux/random.h>
66 #include <linux/win_minmax.h>
67
68 /* Scale factor for rate in pkt/uSec unit to avoid truncation in bandwidth
69 * estimation. The rate unit ~= (1500 bytes / 1 usec / 2^24) ~= 715 bps.
70 * This handles bandwidths from 0.06pps (715bps) to 256Mpps (3Tbps) in a u32.
71 * Since the minimum window is >=4 packets, the lower bound isn't
72 * an issue. The upper bound isn't an issue with existing technologies.
73 */
74 #define BW_SCALE 24
75 #define BW_UNIT (1 << BW_SCALE)
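
/* Worked example of the bw unit (illustrative numbers only): a rate of 1 in
 * this unit is 1 pkt per 2^24 usec ~= 0.0596 pkt/sec; with a 1500-byte MSS
 * that is ~1500 * 8 / 16.78 ~= 715 bit/sec. A 1 Gbit/sec path with 1500-byte
 * packets (~83333 pkt/sec ~= 0.0833 pkt/usec) would be encoded as roughly
 * 0.0833 * 2^24 ~= 1398101 in this unit, comfortably within a u32.
 */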
76
77 #define BBR_SCALE 8 /* scaling factor for fractions in BBR (e.g. gains) */
78 #define BBR_UNIT (1 << BBR_SCALE)
79
80 /* BBR has the following modes for deciding how fast to send: */
81 enum bbr_mode {
82 BBR_STARTUP, /* ramp up sending rate rapidly to fill pipe */
83 BBR_DRAIN, /* drain any queue created during startup */
84 BBR_PROBE_BW, /* discover, share bw: pace around estimated bw */
85 BBR_PROBE_RTT, /* cut inflight to min to probe min_rtt */
86 };
87
88 /* BBR congestion control block */
89 struct bbr {
90 u32 min_rtt_us; /* min RTT in min_rtt_win_sec window */
91 u32 min_rtt_stamp; /* timestamp of min_rtt_us */
92 u32 probe_rtt_done_stamp; /* end time for BBR_PROBE_RTT mode */
93 struct minmax bw; /* Max recent delivery rate in pkts/uS << 24 */
94 u32 rtt_cnt; /* count of packet-timed rounds elapsed */
95 u32 next_rtt_delivered; /* scb->tx.delivered at end of round */
96 u64 cycle_mstamp; /* time of this cycle phase start */
97 u32 mode:3, /* current bbr_mode in state machine */
98 prev_ca_state:3, /* CA state on previous ACK */
99 packet_conservation:1, /* use packet conservation? */
100 round_start:1, /* start of packet-timed tx->ack round? */
101 idle_restart:1, /* restarting after idle? */
102 probe_rtt_round_done:1, /* a BBR_PROBE_RTT round at 4 pkts? */
103 unused:13,
104 lt_is_sampling:1, /* taking long-term ("LT") samples now? */
105 lt_rtt_cnt:7, /* round trips in long-term interval */
106 lt_use_bw:1; /* use lt_bw as our bw estimate? */
107 u32 lt_bw; /* LT est delivery rate in pkts/uS << 24 */
108 u32 lt_last_delivered; /* LT intvl start: tp->delivered */
109 u32 lt_last_stamp; /* LT intvl start: tp->delivered_mstamp */
110 u32 lt_last_lost; /* LT intvl start: tp->lost */
111 u32 pacing_gain:10, /* current gain for setting pacing rate */
112 cwnd_gain:10, /* current gain for setting cwnd */
113 full_bw_reached:1, /* reached full bw in Startup? */
114 full_bw_cnt:2, /* number of rounds without large bw gains */
115 cycle_idx:3, /* current index in pacing_gain cycle array */
116 has_seen_rtt:1, /* have we seen an RTT sample yet? */
117 unused_b:5;
118 u32 prior_cwnd; /* prior cwnd upon entering loss recovery */
119 u32 full_bw; /* recent bw, to estimate if pipe is full */
120
121 /* For tracking ACK aggregation: */
122 u64 ack_epoch_mstamp; /* start of ACK sampling epoch */
123 u16 extra_acked[2]; /* max excess data ACKed in epoch */
124 u32 ack_epoch_acked:20, /* packets (S)ACKed in sampling epoch */
125 extra_acked_win_rtts:5, /* age of extra_acked, in round trips */
126 extra_acked_win_idx:1, /* current index in extra_acked array */
127 unused_c:6;
128 };
129
130 #define CYCLE_LEN 8 /* number of phases in a pacing gain cycle */
131
132 /* Window length of bw filter (in rounds): */
133 static const int bbr_bw_rtts = CYCLE_LEN + 2;
134 /* Window length of min_rtt filter (in sec): */
135 static const u32 bbr_min_rtt_win_sec = 10;
136 /* Minimum time (in ms) spent at bbr_cwnd_min_target in BBR_PROBE_RTT mode: */
137 static const u32 bbr_probe_rtt_mode_ms = 200;
138 /* Skip TSO below the following bandwidth (bits/sec): */
139 static const int bbr_min_tso_rate = 1200000;
140
141 /* Pace at ~1% below estimated bw, on average, to reduce queue at bottleneck.
142 * In order to help drive the network toward lower queues and low latency while
143 * maintaining high utilization, the average pacing rate aims to be slightly
144 * lower than the estimated bandwidth. This is an important aspect of the
145 * design.
146 */
147 static const int bbr_pacing_margin_percent = 1;
148
149 /* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
150 * that will allow a smoothly increasing pacing rate that will double each RTT
151 * and send the same number of packets per RTT that an un-paced, slow-starting
152 * Reno or CUBIC flow would:
153 */
154 static const int bbr_high_gain = BBR_UNIT * 2885 / 1000 + 1;
155 /* The pacing gain of 1/high_gain in BBR_DRAIN is calculated to typically drain
156 * the queue created in BBR_STARTUP in a single round:
157 */
158 static const int bbr_drain_gain = BBR_UNIT * 1000 / 2885;
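
/* Derivation of the two constants above (for reference): 2/ln(2) ~= 2.885, so
 * bbr_high_gain = 256 * 2885 / 1000 + 1 = 739 ~= 2.89 * BBR_UNIT, and
 * bbr_drain_gain = 256 * 1000 / 2885 = 88 ~= 0.35 * BBR_UNIT, i.e. roughly
 * 1/high_gain, so one round at drain_gain undoes one round at high_gain.
 */
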
159 /* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs: */
160 static const int bbr_cwnd_gain = BBR_UNIT * 2;
161 /* The pacing_gain values for the PROBE_BW gain cycle, to discover/share bw: */
162 static const int bbr_pacing_gain[] = {
163 BBR_UNIT * 5 / 4, /* probe for more available bw */
164 BBR_UNIT * 3 / 4, /* drain queue and/or yield bw to other flows */
165 BBR_UNIT, BBR_UNIT, BBR_UNIT, /* cruise at 1.0*bw to utilize pipe, */
166 BBR_UNIT, BBR_UNIT, BBR_UNIT /* without creating excess queue... */
167 };
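/* Note: over one full cycle the average pacing gain is
 * (5/4 + 3/4 + 6 * 1) / 8 = 1.0, so PROBE_BW probes above the estimated bw
 * for one phase, compensates for one phase, and on average paces at the
 * estimated bw.
 */
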
168 /* Randomize the starting gain cycling phase over N phases: */
169 static const u32 bbr_cycle_rand = 7;
170
171 /* Try to keep at least this many packets in flight, if things go smoothly. For
172 * smooth functioning, a sliding window protocol ACKing every other packet
173 * needs at least 4 packets in flight:
174 */
175 static const u32 bbr_cwnd_min_target = 4;
176
177 /* To estimate if BBR_STARTUP mode (i.e. high_gain) has filled pipe... */
178 /* If bw has increased significantly (1.25x), there may be more bw available: */
179 static const u32 bbr_full_bw_thresh = BBR_UNIT * 5 / 4;
180 /* But after 3 rounds w/o significant bw growth, estimate pipe is full: */
181 static const u32 bbr_full_bw_cnt = 3;
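
/* Example (illustrative numbers only): if full_bw was 1000 (in bw units) at
 * the start of a round, the threshold for "still growing" is
 * 1000 * 5 / 4 = 1250. If three consecutive non-app-limited rounds end with
 * the windowed max bw below 1250, full_bw_reached is set and STARTUP ends.
 */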
182
183 /* "long-term" ("LT") bandwidth estimator parameters... */
184 /* The minimum number of rounds in an LT bw sampling interval: */
185 static const u32 bbr_lt_intvl_min_rtts = 4;
186 /* If lost/delivered ratio > 20%, interval is "lossy" and we may be policed: */
187 static const u32 bbr_lt_loss_thresh = 50;
188 /* If 2 intervals have a bw ratio <= 1/8, their bw is "consistent": */
189 static const u32 bbr_lt_bw_ratio = BBR_UNIT / 8;
190 /* If 2 intervals have a bw diff <= 4 Kbit/sec their bw is "consistent": */
191 static const u32 bbr_lt_bw_diff = 4000 / 8;
192 /* If we estimate we're policed, use lt_bw for this many round trips: */
193 static const u32 bbr_lt_bw_max_rtts = 48;
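
/* How the raw values above map to the described thresholds: lt_loss_thresh is
 * compared as (lost << BBR_SCALE) vs thresh * delivered, so 50/256 ~= 19.5%
 * ~= 20%; lt_bw_diff is in bytes/sec, so 4000 bit/sec / 8 = 500 bytes/sec.
 */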
194
195 /* Gain factor for adding extra_acked to target cwnd: */
196 static const int bbr_extra_acked_gain = BBR_UNIT;
197 /* Window length of extra_acked window. */
198 static const u32 bbr_extra_acked_win_rtts = 5;
199 /* Max allowed val for ack_epoch_acked, after which sampling epoch is reset */
200 static const u32 bbr_ack_epoch_acked_reset_thresh = 1U << 20;
201 /* Time period for clamping cwnd increment due to ack aggregation */
202 static const u32 bbr_extra_acked_max_us = 100 * 1000;
203
204 static void bbr_check_probe_rtt_done(struct sock *sk);
205
206 /* Do we estimate that STARTUP filled the pipe? */
207 static bool bbr_full_bw_reached(const struct sock *sk)
208 {
209 const struct bbr *bbr = inet_csk_ca(sk);
210
211 return bbr->full_bw_reached;
212 }
213
214 /* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
215 static u32 bbr_max_bw(const struct sock *sk)
216 {
217 struct bbr *bbr = inet_csk_ca(sk);
218
219 return minmax_get(&bbr->bw);
220 }
221
222 /* Return the estimated bandwidth of the path, in pkts/uS << BW_SCALE. */
223 static u32 bbr_bw(const struct sock *sk)
224 {
225 struct bbr *bbr = inet_csk_ca(sk);
226
227 return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);
228 }
229
230 /* Return maximum extra acked in past k-2k round trips,
231 * where k = bbr_extra_acked_win_rtts.
232 */
233 static u16 bbr_extra_acked(const struct sock *sk)
234 {
235 struct bbr *bbr = inet_csk_ca(sk);
236
237 return max(bbr->extra_acked[0], bbr->extra_acked[1]);
238 }
239
240 /* Return rate in bytes per second, optionally with a gain.
241 * The order here is chosen carefully to avoid overflow of u64. This should
242 * work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
243 */
244 static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
245 {
246 unsigned int mss = tcp_sk(sk)->mss_cache;
247
248 rate *= mss;
249 rate *= gain;
250 rate >>= BBR_SCALE;
251 rate *= USEC_PER_SEC / 100 * (100 - bbr_pacing_margin_percent);
252 return rate >> BW_SCALE;
253 }
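
/* Worked example (illustrative numbers only): for a ~1 Gbit/sec path with a
 * 1500-byte MSS, bw ~= 0.0833 pkt/usec << 24 ~= 1398101. With gain = BBR_UNIT,
 * multiplying by the MSS, applying the gain, applying the 99% margin, and
 * removing the BW_SCALE shift yields ~124 MByte/sec, i.e. ~1 Gbit/sec less
 * the 1% pacing margin.
 */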
254
255 /* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
256 static unsigned long bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
257 {
258 u64 rate = bw;
259
260 rate = bbr_rate_bytes_per_sec(sk, rate, gain);
261 rate = min_t(u64, rate, sk->sk_max_pacing_rate);
262 return rate;
263 }
264
265 /* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
266 static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
267 {
268 struct tcp_sock *tp = tcp_sk(sk);
269 struct bbr *bbr = inet_csk_ca(sk);
270 u64 bw;
271 u32 rtt_us;
272
273 if (tp->srtt_us) { /* any RTT sample yet? */
274 rtt_us = max(tp->srtt_us >> 3, 1U);
275 bbr->has_seen_rtt = 1;
276 } else { /* no RTT sample yet */
277 rtt_us = USEC_PER_MSEC; /* use nominal default RTT */
278 }
279 bw = (u64)tcp_snd_cwnd(tp) * BW_UNIT;
280 do_div(bw, rtt_us);
281 sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
282 }
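
/* Worked example (illustrative numbers only): with no RTT sample yet, the
 * nominal 1 ms RTT and a typical initial cwnd of 10 give bw = 10 pkt/ms; with
 * high_gain ~= 2.89 and a 1500-byte MSS, the initial pacing rate is roughly
 * 10000 pkt/sec * 1500 B * 2.89 * 0.99 ~= 43 MByte/sec until real RTT
 * samples update the model.
 */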
283
284 /* Pace using current bw estimate and a gain factor. */
285 static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
286 {
287 struct tcp_sock *tp = tcp_sk(sk);
288 struct bbr *bbr = inet_csk_ca(sk);
289 unsigned long rate = bbr_bw_to_pacing_rate(sk, bw, gain);
290
291 if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
292 bbr_init_pacing_rate_from_rtt(sk);
293 if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
294 sk->sk_pacing_rate = rate;
295 }
296
297 /* override sysctl_tcp_min_tso_segs */
298 __bpf_kfunc static u32 bbr_min_tso_segs(struct sock *sk)
299 {
300 return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
301 }
302
303 static u32 bbr_tso_segs_goal(struct sock *sk)
304 {
305 struct tcp_sock *tp = tcp_sk(sk);
306 u32 segs, bytes;
307
308 /* Sort of tcp_tso_autosize() but ignoring
309 * driver provided sk_gso_max_size.
310 */
311 bytes = min_t(unsigned long,
312 sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
313 GSO_LEGACY_MAX_SIZE - 1 - MAX_TCP_HEADER);
314 segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
315
316 return min(segs, 0x7FU);
317 }
318
319 /* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
320 static void bbr_save_cwnd(struct sock *sk)
321 {
322 struct tcp_sock *tp = tcp_sk(sk);
323 struct bbr *bbr = inet_csk_ca(sk);
324
325 if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT)
326 bbr->prior_cwnd = tcp_snd_cwnd(tp); /* this cwnd is good enough */
327 else /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
328 bbr->prior_cwnd = max(bbr->prior_cwnd, tcp_snd_cwnd(tp));
329 }
330
331 __bpf_kfunc static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
332 {
333 struct tcp_sock *tp = tcp_sk(sk);
334 struct bbr *bbr = inet_csk_ca(sk);
335
336 if (event == CA_EVENT_TX_START && tp->app_limited) {
337 bbr->idle_restart = 1;
338 bbr->ack_epoch_mstamp = tp->tcp_mstamp;
339 bbr->ack_epoch_acked = 0;
340 /* Avoid pointless buffer overflows: pace at est. bw if we don't
341 * need more speed (we're restarting from idle and app-limited).
342 */
343 if (bbr->mode == BBR_PROBE_BW)
344 bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
345 else if (bbr->mode == BBR_PROBE_RTT)
346 bbr_check_probe_rtt_done(sk);
347 }
348 }
349
350 /* Calculate bdp based on min RTT and the estimated bottleneck bandwidth:
351 *
352 * bdp = ceil(bw * min_rtt * gain)
353 *
354 * The key factor, gain, controls the amount of queue. While a small gain
355 * builds a smaller queue, it becomes more vulnerable to noise in RTT
356 * measurements (e.g., delayed ACKs or other ACK compression effects). This
357 * noise may cause BBR to under-estimate the rate.
358 */
359 static u32 bbr_bdp(struct sock *sk, u32 bw, int gain)
360 {
361 struct bbr *bbr = inet_csk_ca(sk);
362 u32 bdp;
363 u64 w;
364
365 /* If we've never had a valid RTT sample, cap cwnd at the initial
366 * default. This should only happen when the connection is not using TCP
367 * timestamps and has retransmitted all of the SYN/SYNACK/data packets
368 * ACKed so far. In this case, an RTO can cut cwnd to 1, in which
369 * case we need to slow-start up toward something safe: TCP_INIT_CWND.
370 */
371 if (unlikely(bbr->min_rtt_us == ~0U)) /* no valid RTT samples yet? */
372 return TCP_INIT_CWND; /* be safe: cap at default initial cwnd*/
373
374 w = (u64)bw * bbr->min_rtt_us;
375
376 /* Apply a gain to the given value, remove the BW_SCALE shift, and
377 * round the value up to avoid a negative feedback loop.
378 */
379 bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
380
381 return bdp;
382 }
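
/* Worked example (illustrative numbers only): on a 100 Mbit/sec path with a
 * 1500-byte MSS, bw ~= 8333 pkt/sec ~= 0.00833 pkt/usec << 24 ~= 139810.
 * With min_rtt = 40000 usec, w ~= 5.59e9; applying cwnd_gain = 2 * BBR_UNIT
 * and removing the scaling gives bdp ~= 667 packets, i.e. twice the ~333
 * packet bandwidth-delay product.
 */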
383
384 /* To achieve full performance in high-speed paths, we budget enough cwnd to
385 * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
386 * - one skb in sending host Qdisc,
387 * - one skb in sending host TSO/GSO engine
388 * - one skb being received by receiver host LRO/GRO/delayed-ACK engine
389 * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
390 * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
391 * which allows 2 outstanding 2-packet sequences, to try to keep pipe
392 * full even with ACK-every-other-packet delayed ACKs.
393 */
394 static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd)
395 {
396 struct bbr *bbr = inet_csk_ca(sk);
397
398 /* Allow enough full-sized skbs in flight to utilize end systems. */
399 cwnd += 3 * bbr_tso_segs_goal(sk);
400
401 /* Reduce delayed ACKs by rounding up cwnd to the next even number. */
402 cwnd = (cwnd + 1) & ~1U;
403
404 /* Ensure gain cycling gets inflight above BDP even for small BDPs. */
405 if (bbr->mode == BBR_PROBE_BW && bbr->cycle_idx == 0)
406 cwnd += 2;
407
408 return cwnd;
409 }
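
/* Example (illustrative numbers only): with a target of 333 packets and a TSO
 * segment goal of, say, 2, the budget becomes 333 + 3 * 2 = 339, rounded up
 * to the even value 340; in the PROBE_BW probing phase (cycle_idx == 0) it
 * would get 2 more, for 342.
 */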
410
411 /* Find inflight based on min RTT and the estimated bottleneck bandwidth. */
412 static u32 bbr_inflight(struct sock *sk, u32 bw, int gain)
413 {
414 u32 inflight;
415
416 inflight = bbr_bdp(sk, bw, gain);
417 inflight = bbr_quantization_budget(sk, inflight);
418
419 return inflight;
420 }
421
422 /* With pacing at lower layers, there's often less data "in the network" than
423 * "in flight". With TSQ and departure time pacing at lower layers (e.g. fq),
424 * we often have several skbs queued in the pacing layer with a pre-scheduled
425 * earliest departure time (EDT). BBR adapts its pacing rate based on the
426 * inflight level that it estimates has already been "baked in" by previous
427 * departure time decisions. We calculate a rough estimate of the number of our
428 * packets that might be in the network at the earliest departure time for the
429 * next skb scheduled:
430 * in_network_at_edt = inflight_at_edt - (EDT - now) * bw
431 * If we're increasing inflight, then we want to know if the transmit of the
432 * EDT skb will push inflight above the target, so inflight_at_edt includes
433 * bbr_tso_segs_goal() from the skb departing at EDT. If decreasing inflight,
434 * then estimate if inflight will sink too low just before the EDT transmit.
435 */
436 static u32 bbr_packets_in_net_at_edt(struct sock *sk, u32 inflight_now)
437 {
438 struct tcp_sock *tp = tcp_sk(sk);
439 struct bbr *bbr = inet_csk_ca(sk);
440 u64 now_ns, edt_ns, interval_us;
441 u32 interval_delivered, inflight_at_edt;
442
443 now_ns = tp->tcp_clock_cache;
444 edt_ns = max(tp->tcp_wstamp_ns, now_ns);
445 interval_us = div_u64(edt_ns - now_ns, NSEC_PER_USEC);
446 interval_delivered = (u64)bbr_bw(sk) * interval_us >> BW_SCALE;
447 inflight_at_edt = inflight_now;
448 if (bbr->pacing_gain > BBR_UNIT) /* increasing inflight */
449 inflight_at_edt += bbr_tso_segs_goal(sk); /* include EDT skb */
450 if (interval_delivered >= inflight_at_edt)
451 return 0;
452 return inflight_at_edt - interval_delivered;
453 }
454
455 /* Find the cwnd increment based on estimate of ack aggregation */
456 static u32 bbr_ack_aggregation_cwnd(struct sock *sk)
457 {
458 u32 max_aggr_cwnd, aggr_cwnd = 0;
459
460 if (bbr_extra_acked_gain && bbr_full_bw_reached(sk)) {
461 max_aggr_cwnd = ((u64)bbr_bw(sk) * bbr_extra_acked_max_us)
462 / BW_UNIT;
463 aggr_cwnd = (bbr_extra_acked_gain * bbr_extra_acked(sk))
464 >> BBR_SCALE;
465 aggr_cwnd = min(aggr_cwnd, max_aggr_cwnd);
466 }
467
468 return aggr_cwnd;
469 }
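
/* Example (illustrative numbers only): at bw ~= 8333 pkt/sec the cap is
 * 8333 * 0.1 sec ~= 833 packets; if the windowed max extra_acked is 50
 * packets then, with bbr_extra_acked_gain = BBR_UNIT, the cwnd provision for
 * aggregation is min(50, 833) = 50 packets.
 */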
470
471 /* An optimization in BBR to reduce losses: On the first round of recovery, we
472 * follow the packet conservation principle: send P packets per P packets acked.
473 * After that, we slow-start and send at most 2*P packets per P packets acked.
474 * After recovery finishes, or upon undo, we restore the cwnd we had when
475 * recovery started (capped by the target cwnd based on estimated BDP).
476 *
477 * TODO(ycheng/ncardwell): implement a rate-based approach.
478 */
479 static bool bbr_set_cwnd_to_recover_or_restore(
480 struct sock *sk, const struct rate_sample *rs, u32 acked, u32 *new_cwnd)
481 {
482 struct tcp_sock *tp = tcp_sk(sk);
483 struct bbr *bbr = inet_csk_ca(sk);
484 u8 prev_state = bbr->prev_ca_state, state = inet_csk(sk)->icsk_ca_state;
485 u32 cwnd = tcp_snd_cwnd(tp);
486
487 /* An ACK for P pkts should release at most 2*P packets. We do this
488 * in two steps. First, here we deduct the number of lost packets.
489 * Then, in bbr_set_cwnd() we slow start up toward the target cwnd.
490 */
491 if (rs->losses > 0)
492 cwnd = max_t(s32, cwnd - rs->losses, 1);
493
494 if (state == TCP_CA_Recovery && prev_state != TCP_CA_Recovery) {
495 /* Starting 1st round of Recovery, so do packet conservation. */
496 bbr->packet_conservation = 1;
497 bbr->next_rtt_delivered = tp->delivered; /* start round now */
498 /* Cut unused cwnd from app behavior, TSQ, or TSO deferral: */
499 cwnd = tcp_packets_in_flight(tp) + acked;
500 } else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) {
501 /* Exiting loss recovery; restore cwnd saved before recovery. */
502 cwnd = max(cwnd, bbr->prior_cwnd);
503 bbr->packet_conservation = 0;
504 }
505 bbr->prev_ca_state = state;
506
507 if (bbr->packet_conservation) {
508 *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
509 return true; /* yes, using packet conservation */
510 }
511 *new_cwnd = cwnd;
512 return false;
513 }
514
515 /* Slow-start up toward target cwnd (if bw estimate is growing, or packet loss
516 * has drawn us down below target), or snap down to target if we're above it.
517 */
518 static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
519 u32 acked, u32 bw, int gain)
520 {
521 struct tcp_sock *tp = tcp_sk(sk);
522 struct bbr *bbr = inet_csk_ca(sk);
523 u32 cwnd = tcp_snd_cwnd(tp), target_cwnd = 0;
524
525 if (!acked)
526 goto done; /* no packet fully ACKed; just apply caps */
527
528 if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
529 goto done;
530
531 target_cwnd = bbr_bdp(sk, bw, gain);
532
533 /* Increment the cwnd to account for excess ACKed data that seems
534 * due to aggregation (of data and/or ACKs) visible in the ACK stream.
535 */
536 target_cwnd += bbr_ack_aggregation_cwnd(sk);
537 target_cwnd = bbr_quantization_budget(sk, target_cwnd);
538
539 /* If we're below target cwnd, slow start cwnd toward target cwnd. */
540 if (bbr_full_bw_reached(sk)) /* only cut cwnd if we filled the pipe */
541 cwnd = min(cwnd + acked, target_cwnd);
542 else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
543 cwnd = cwnd + acked;
544 cwnd = max(cwnd, bbr_cwnd_min_target);
545
546 done:
547 tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp)); /* apply global cap */
548 if (bbr->mode == BBR_PROBE_RTT) /* drain queue, refresh min_rtt */
549 tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), bbr_cwnd_min_target));
550 }
551
552 /* End cycle phase if it's time and/or we hit the phase's in-flight target. */
553 static bool bbr_is_next_cycle_phase(struct sock *sk,
554 const struct rate_sample *rs)
555 {
556 struct tcp_sock *tp = tcp_sk(sk);
557 struct bbr *bbr = inet_csk_ca(sk);
558 bool is_full_length =
559 tcp_stamp_us_delta(tp->delivered_mstamp, bbr->cycle_mstamp) >
560 bbr->min_rtt_us;
561 u32 inflight, bw;
562
563 /* The pacing_gain of 1.0 paces at the estimated bw to try to fully
564 * use the pipe without increasing the queue.
565 */
566 if (bbr->pacing_gain == BBR_UNIT)
567 return is_full_length; /* just use wall clock time */
568
569 inflight = bbr_packets_in_net_at_edt(sk, rs->prior_in_flight);
570 bw = bbr_max_bw(sk);
571
572 /* A pacing_gain > 1.0 probes for bw by trying to raise inflight to at
573 * least pacing_gain*BDP; this may take more than min_rtt if min_rtt is
574 * small (e.g. on a LAN). We do not persist if packets are lost, since
575 * a path with small buffers may not hold that much.
576 */
577 if (bbr->pacing_gain > BBR_UNIT)
578 return is_full_length &&
579 (rs->losses || /* perhaps pacing_gain*BDP won't fit */
580 inflight >= bbr_inflight(sk, bw, bbr->pacing_gain));
581
582 /* A pacing_gain < 1.0 tries to drain extra queue we added if bw
583 * probing didn't find more bw. If inflight falls to match BDP then we
584 * estimate queue is drained; persisting would underutilize the pipe.
585 */
586 return is_full_length ||
587 inflight <= bbr_inflight(sk, bw, BBR_UNIT);
588 }
589
590 static void bbr_advance_cycle_phase(struct sock *sk)
591 {
592 struct tcp_sock *tp = tcp_sk(sk);
593 struct bbr *bbr = inet_csk_ca(sk);
594
595 bbr->cycle_idx = (bbr->cycle_idx + 1) & (CYCLE_LEN - 1);
596 bbr->cycle_mstamp = tp->delivered_mstamp;
597 }
598
599 /* Gain cycling: cycle pacing gain to converge to fair share of available bw. */
600 static void bbr_update_cycle_phase(struct sock *sk,
601 const struct rate_sample *rs)
602 {
603 struct bbr *bbr = inet_csk_ca(sk);
604
605 if (bbr->mode == BBR_PROBE_BW && bbr_is_next_cycle_phase(sk, rs))
606 bbr_advance_cycle_phase(sk);
607 }
608
609 static void bbr_reset_startup_mode(struct sock *sk)
610 {
611 struct bbr *bbr = inet_csk_ca(sk);
612
613 bbr->mode = BBR_STARTUP;
614 }
615
616 static void bbr_reset_probe_bw_mode(struct sock *sk)
617 {
618 struct bbr *bbr = inet_csk_ca(sk);
619
620 bbr->mode = BBR_PROBE_BW;
621 bbr->cycle_idx = CYCLE_LEN - 1 - get_random_u32_below(bbr_cycle_rand);
622 bbr_advance_cycle_phase(sk); /* flip to next phase of gain cycle */
623 }
624
625 static void bbr_reset_mode(struct sock *sk)
626 {
627 if (!bbr_full_bw_reached(sk))
628 bbr_reset_startup_mode(sk);
629 else
630 bbr_reset_probe_bw_mode(sk);
631 }
632
633 /* Start a new long-term sampling interval. */
634 static void bbr_reset_lt_bw_sampling_interval(struct sock *sk)
635 {
636 struct tcp_sock *tp = tcp_sk(sk);
637 struct bbr *bbr = inet_csk_ca(sk);
638
639 bbr->lt_last_stamp = div_u64(tp->delivered_mstamp, USEC_PER_MSEC);
640 bbr->lt_last_delivered = tp->delivered;
641 bbr->lt_last_lost = tp->lost;
642 bbr->lt_rtt_cnt = 0;
643 }
644
645 /* Completely reset long-term bandwidth sampling. */
646 static void bbr_reset_lt_bw_sampling(struct sock *sk)
647 {
648 struct bbr *bbr = inet_csk_ca(sk);
649
650 bbr->lt_bw = 0;
651 bbr->lt_use_bw = 0;
652 bbr->lt_is_sampling = false;
653 bbr_reset_lt_bw_sampling_interval(sk);
654 }
655
656 /* Long-term bw sampling interval is done. Estimate whether we're policed. */
657 static void bbr_lt_bw_interval_done(struct sock *sk, u32 bw)
658 {
659 struct bbr *bbr = inet_csk_ca(sk);
660 u32 diff;
661
662 if (bbr->lt_bw) { /* do we have bw from a previous interval? */
663 /* Is new bw close to the lt_bw from the previous interval? */
664 diff = abs(bw - bbr->lt_bw);
665 if ((diff * BBR_UNIT <= bbr_lt_bw_ratio * bbr->lt_bw) ||
666 (bbr_rate_bytes_per_sec(sk, diff, BBR_UNIT) <=
667 bbr_lt_bw_diff)) {
668 /* All criteria are met; estimate we're policed. */
669 bbr->lt_bw = (bw + bbr->lt_bw) >> 1; /* avg 2 intvls */
670 bbr->lt_use_bw = 1;
671 bbr->pacing_gain = BBR_UNIT; /* try to avoid drops */
672 bbr->lt_rtt_cnt = 0;
673 return;
674 }
675 }
676 bbr->lt_bw = bw;
677 bbr_reset_lt_bw_sampling_interval(sk);
678 }
679
680 /* Token-bucket traffic policers are common (see "An Internet-Wide Analysis of
681 * Traffic Policing", SIGCOMM 2016). BBR detects token-bucket policers and
682 * explicitly models their policed rate, to reduce unnecessary losses. We
683 * estimate that we're policed if we see 2 consecutive sampling intervals with
684 * consistent throughput and high packet loss. If we think we're being policed,
685 * set lt_bw to the "long-term" average delivery rate from those 2 intervals.
686 */
687 static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
688 {
689 struct tcp_sock *tp = tcp_sk(sk);
690 struct bbr *bbr = inet_csk_ca(sk);
691 u32 lost, delivered;
692 u64 bw;
693 u32 t;
694
695 if (bbr->lt_use_bw) { /* already using long-term rate, lt_bw? */
696 if (bbr->mode == BBR_PROBE_BW && bbr->round_start &&
697 ++bbr->lt_rtt_cnt >= bbr_lt_bw_max_rtts) {
698 bbr_reset_lt_bw_sampling(sk); /* stop using lt_bw */
699 bbr_reset_probe_bw_mode(sk); /* restart gain cycling */
700 }
701 return;
702 }
703
704 /* Wait for the first loss before sampling, to let the policer exhaust
705 * its tokens and estimate the steady-state rate allowed by the policer.
706 * Starting samples earlier includes bursts that over-estimate the bw.
707 */
708 if (!bbr->lt_is_sampling) {
709 if (!rs->losses)
710 return;
711 bbr_reset_lt_bw_sampling_interval(sk);
712 bbr->lt_is_sampling = true;
713 }
714
715 /* To avoid underestimates, reset sampling if we run out of data. */
716 if (rs->is_app_limited) {
717 bbr_reset_lt_bw_sampling(sk);
718 return;
719 }
720
721 if (bbr->round_start)
722 bbr->lt_rtt_cnt++; /* count round trips in this interval */
723 if (bbr->lt_rtt_cnt < bbr_lt_intvl_min_rtts)
724 return; /* sampling interval needs to be longer */
725 if (bbr->lt_rtt_cnt > 4 * bbr_lt_intvl_min_rtts) {
726 bbr_reset_lt_bw_sampling(sk); /* interval is too long */
727 return;
728 }
729
730 /* End sampling interval when a packet is lost, so we estimate the
731 * policer tokens were exhausted. Stopping the sampling before the
732 * tokens are exhausted under-estimates the policed rate.
733 */
734 if (!rs->losses)
735 return;
736
737 /* Calculate packets lost and delivered in sampling interval. */
738 lost = tp->lost - bbr->lt_last_lost;
739 delivered = tp->delivered - bbr->lt_last_delivered;
740 /* Is loss rate (lost/delivered) >= lt_loss_thresh? If not, wait. */
741 if (!delivered || (lost << BBR_SCALE) < bbr_lt_loss_thresh * delivered)
742 return;
743
744 /* Find average delivery rate in this sampling interval. */
745 t = div_u64(tp->delivered_mstamp, USEC_PER_MSEC) - bbr->lt_last_stamp;
746 if ((s32)t < 1)
747 return; /* interval is less than one ms, so wait */
748 /* Check if can multiply without overflow */
749 if (t >= ~0U / USEC_PER_MSEC) {
750 bbr_reset_lt_bw_sampling(sk); /* interval too long; reset */
751 return;
752 }
753 t *= USEC_PER_MSEC;
754 bw = (u64)delivered * BW_UNIT;
755 do_div(bw, t);
756 bbr_lt_bw_interval_done(sk, bw);
757 }
758
759 /* Estimate the bandwidth based on how fast packets are delivered */
760 static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
761 {
762 struct tcp_sock *tp = tcp_sk(sk);
763 struct bbr *bbr = inet_csk_ca(sk);
764 u64 bw;
765
766 bbr->round_start = 0;
767 if (rs->delivered < 0 || rs->interval_us <= 0)
768 return; /* Not a valid observation */
769
770 /* See if we've reached the next RTT */
771 if (!before(rs->prior_delivered, bbr->next_rtt_delivered)) {
772 bbr->next_rtt_delivered = tp->delivered;
773 bbr->rtt_cnt++;
774 bbr->round_start = 1;
775 bbr->packet_conservation = 0;
776 }
777
778 bbr_lt_bw_sampling(sk, rs);
779
780 /* Divide delivered by the interval to find a (lower bound) bottleneck
781 * bandwidth sample. Delivered is in packets and interval_us in uS and
782 * ratio will be <<1 for most connections. So delivered is first scaled.
783 */
784 bw = div64_long((u64)rs->delivered * BW_UNIT, rs->interval_us);
785
786 /* If this sample is application-limited, it is likely to have a very
787 * low delivered count that represents application behavior rather than
788 * the available network rate. Such a sample could drag down estimated
789 * bw, causing needless slow-down. Thus, to continue to send at the
790 * last measured network rate, we filter out app-limited samples unless
791 * they describe the path bw at least as well as our bw model.
792 *
793 * So the goal during app-limited phase is to proceed with the best
794 * network rate no matter how long. We automatically leave this
795 * phase when app writes faster than the network can deliver :)
796 */
797 if (!rs->is_app_limited || bw >= bbr_max_bw(sk)) {
798 /* Incorporate new sample into our max bw filter. */
799 minmax_running_max(&bbr->bw, bbr_bw_rtts, bbr->rtt_cnt, bw);
800 }
801 }
802
803 /* Estimates the windowed max degree of ack aggregation.
804 * This is used to provision extra in-flight data to keep sending during
805 * inter-ACK silences.
806 *
807 * Degree of ack aggregation is estimated as extra data acked beyond expected.
808 *
809 * max_extra_acked = "maximum recent excess data ACKed beyond max_bw * interval"
810 * cwnd += max_extra_acked
811 *
812 * Max extra_acked is clamped by cwnd and bw * bbr_extra_acked_max_us (100 ms).
813 * Max filter is an approximate sliding window of 5-10 (packet timed) round
814 * trips.
815 */
816 static void bbr_update_ack_aggregation(struct sock *sk,
817 const struct rate_sample *rs)
818 {
819 u32 epoch_us, expected_acked, extra_acked;
820 struct bbr *bbr = inet_csk_ca(sk);
821 struct tcp_sock *tp = tcp_sk(sk);
822
823 if (!bbr_extra_acked_gain || rs->acked_sacked <= 0 ||
824 rs->delivered < 0 || rs->interval_us <= 0)
825 return;
826
827 if (bbr->round_start) {
828 bbr->extra_acked_win_rtts = min(0x1F,
829 bbr->extra_acked_win_rtts + 1);
830 if (bbr->extra_acked_win_rtts >= bbr_extra_acked_win_rtts) {
831 bbr->extra_acked_win_rtts = 0;
832 bbr->extra_acked_win_idx = bbr->extra_acked_win_idx ?
833 0 : 1;
834 bbr->extra_acked[bbr->extra_acked_win_idx] = 0;
835 }
836 }
837
838 /* Compute how many packets we expected to be delivered over epoch. */
839 epoch_us = tcp_stamp_us_delta(tp->delivered_mstamp,
840 bbr->ack_epoch_mstamp);
841 expected_acked = ((u64)bbr_bw(sk) * epoch_us) / BW_UNIT;
842
843 /* Reset the aggregation epoch if the ACK rate is below the expected rate,
844 * or if a significantly large number of ACKs has been received since the
845 * epoch started (the epoch is potentially quite old).
846 */
847 if (bbr->ack_epoch_acked <= expected_acked ||
848 (bbr->ack_epoch_acked + rs->acked_sacked >=
849 bbr_ack_epoch_acked_reset_thresh)) {
850 bbr->ack_epoch_acked = 0;
851 bbr->ack_epoch_mstamp = tp->delivered_mstamp;
852 expected_acked = 0;
853 }
854
855 /* Compute excess data delivered, beyond what was expected. */
856 bbr->ack_epoch_acked = min_t(u32, 0xFFFFF,
857 bbr->ack_epoch_acked + rs->acked_sacked);
858 extra_acked = bbr->ack_epoch_acked - expected_acked;
859 extra_acked = min(extra_acked, tcp_snd_cwnd(tp));
860 if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx])
861 bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked;
862 }
863
864 /* Estimate when the pipe is full, using the change in delivery rate: BBR
865 * estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
866 * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
867 * rounds. Why 3 rounds: 1: rwin autotuning grows the rwin, 2: we fill the
868 * higher rwin, 3: we get higher delivery rate samples. Or transient
869 * cross-traffic or radio noise can go away. CUBIC Hystart shares a similar
870 * design goal, but uses delay and inter-ACK spacing instead of bandwidth.
871 */
872 static void bbr_check_full_bw_reached(struct sock *sk,
873 const struct rate_sample *rs)
874 {
875 struct bbr *bbr = inet_csk_ca(sk);
876 u32 bw_thresh;
877
878 if (bbr_full_bw_reached(sk) || !bbr->round_start || rs->is_app_limited)
879 return;
880
881 bw_thresh = (u64)bbr->full_bw * bbr_full_bw_thresh >> BBR_SCALE;
882 if (bbr_max_bw(sk) >= bw_thresh) {
883 bbr->full_bw = bbr_max_bw(sk);
884 bbr->full_bw_cnt = 0;
885 return;
886 }
887 ++bbr->full_bw_cnt;
888 bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt;
889 }
890
891 /* If pipe is probably full, drain the queue and then enter steady-state. */
892 static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
893 {
894 struct bbr *bbr = inet_csk_ca(sk);
895
896 if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
897 bbr->mode = BBR_DRAIN; /* drain queue we created */
898 tcp_sk(sk)->snd_ssthresh =
899 bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
900 } /* fall through to check if in-flight is already small: */
901 if (bbr->mode == BBR_DRAIN &&
902 bbr_packets_in_net_at_edt(sk, tcp_packets_in_flight(tcp_sk(sk))) <=
903 bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT))
904 bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */
905 }
906
907 static void bbr_check_probe_rtt_done(struct sock *sk)
908 {
909 struct tcp_sock *tp = tcp_sk(sk);
910 struct bbr *bbr = inet_csk_ca(sk);
911
912 if (!(bbr->probe_rtt_done_stamp &&
913 after(tcp_jiffies32, bbr->probe_rtt_done_stamp)))
914 return;
915
916 bbr->min_rtt_stamp = tcp_jiffies32; /* wait a while until PROBE_RTT */
917 tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp), bbr->prior_cwnd));
918 bbr_reset_mode(sk);
919 }
920
921 /* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
922 * periodically drain the bottleneck queue, to converge to measure the true
923 * min_rtt (unloaded propagation delay). This allows the flows to keep queues
924 * small (reducing queuing delay and packet loss) and achieve fairness among
925 * BBR flows.
926 *
927 * The min_rtt filter window is 10 seconds. When the min_rtt estimate expires,
928 * we enter PROBE_RTT mode and cap the cwnd at bbr_cwnd_min_target=4 packets.
929 * After at least bbr_probe_rtt_mode_ms=200ms and at least one packet-timed
930 * round trip elapsed with that flight size <= 4, we leave PROBE_RTT mode and
931 * re-enter the previous mode. BBR uses 200ms to approximately bound the
932 * performance penalty of PROBE_RTT's cwnd capping to roughly 2% (200ms/10s).
933 *
934 * Note that flows need only pay 2% if they are busy sending over the last 10
935 * seconds. Interactive applications (e.g., Web, RPCs, video chunks) often have
936 * natural silences or low-rate periods within 10 seconds where the rate is low
937 * enough for long enough to drain its queue in the bottleneck. We pick up
938 * these min RTT measurements opportunistically with our min_rtt filter. :-)
939 */
940 static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
941 {
942 struct tcp_sock *tp = tcp_sk(sk);
943 struct bbr *bbr = inet_csk_ca(sk);
944 bool filter_expired;
945
946 /* Track min RTT seen in the min_rtt_win_sec filter window: */
947 filter_expired = after(tcp_jiffies32,
948 bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
949 if (rs->rtt_us >= 0 &&
950 (rs->rtt_us < bbr->min_rtt_us ||
951 (filter_expired && !rs->is_ack_delayed))) {
952 bbr->min_rtt_us = rs->rtt_us;
953 bbr->min_rtt_stamp = tcp_jiffies32;
954 }
955
956 if (bbr_probe_rtt_mode_ms > 0 && filter_expired &&
957 !bbr->idle_restart && bbr->mode != BBR_PROBE_RTT) {
958 bbr->mode = BBR_PROBE_RTT; /* dip, drain queue */
959 bbr_save_cwnd(sk); /* note cwnd so we can restore it */
960 bbr->probe_rtt_done_stamp = 0;
961 }
962
963 if (bbr->mode == BBR_PROBE_RTT) {
964 /* Ignore low rate samples during this mode. */
965 tp->app_limited =
966 (tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
967 /* Maintain min packets in flight for max(200 ms, 1 round). */
968 if (!bbr->probe_rtt_done_stamp &&
969 tcp_packets_in_flight(tp) <= bbr_cwnd_min_target) {
970 bbr->probe_rtt_done_stamp = tcp_jiffies32 +
971 msecs_to_jiffies(bbr_probe_rtt_mode_ms);
972 bbr->probe_rtt_round_done = 0;
973 bbr->next_rtt_delivered = tp->delivered;
974 } else if (bbr->probe_rtt_done_stamp) {
975 if (bbr->round_start)
976 bbr->probe_rtt_round_done = 1;
977 if (bbr->probe_rtt_round_done)
978 bbr_check_probe_rtt_done(sk);
979 }
980 }
981 /* Restart after idle ends only once we process a new S/ACK for data */
982 if (rs->delivered > 0)
983 bbr->idle_restart = 0;
984 }
985
986 static void bbr_update_gains(struct sock *sk)
987 {
988 struct bbr *bbr = inet_csk_ca(sk);
989
990 switch (bbr->mode) {
991 case BBR_STARTUP:
992 bbr->pacing_gain = bbr_high_gain;
993 bbr->cwnd_gain = bbr_high_gain;
994 break;
995 case BBR_DRAIN:
996 bbr->pacing_gain = bbr_drain_gain; /* slow, to drain */
997 bbr->cwnd_gain = bbr_high_gain; /* keep cwnd */
998 break;
999 case BBR_PROBE_BW:
1000 bbr->pacing_gain = (bbr->lt_use_bw ?
1001 BBR_UNIT :
1002 bbr_pacing_gain[bbr->cycle_idx]);
1003 bbr->cwnd_gain = bbr_cwnd_gain;
1004 break;
1005 case BBR_PROBE_RTT:
1006 bbr->pacing_gain = BBR_UNIT;
1007 bbr->cwnd_gain = BBR_UNIT;
1008 break;
1009 default:
1010 WARN_ONCE(1, "BBR bad mode: %u\n", bbr->mode);
1011 break;
1012 }
1013 }
1014
1015 static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
1016 {
1017 bbr_update_bw(sk, rs);
1018 bbr_update_ack_aggregation(sk, rs);
1019 bbr_update_cycle_phase(sk, rs);
1020 bbr_check_full_bw_reached(sk, rs);
1021 bbr_check_drain(sk, rs);
1022 bbr_update_min_rtt(sk, rs);
1023 bbr_update_gains(sk);
1024 }
1025
1026 __bpf_kfunc static void bbr_main(struct sock *sk, const struct rate_sample *rs)
1027 {
1028 struct bbr *bbr = inet_csk_ca(sk);
1029 u32 bw;
1030
1031 bbr_update_model(sk, rs);
1032
1033 bw = bbr_bw(sk);
1034 bbr_set_pacing_rate(sk, bw, bbr->pacing_gain);
1035 bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain);
1036 }
1037
1038 __bpf_kfunc static void bbr_init(struct sock *sk)
1039 {
1040 struct tcp_sock *tp = tcp_sk(sk);
1041 struct bbr *bbr = inet_csk_ca(sk);
1042
1043 bbr->prior_cwnd = 0;
1044 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1045 bbr->rtt_cnt = 0;
1046 bbr->next_rtt_delivered = tp->delivered;
1047 bbr->prev_ca_state = TCP_CA_Open;
1048 bbr->packet_conservation = 0;
1049
1050 bbr->probe_rtt_done_stamp = 0;
1051 bbr->probe_rtt_round_done = 0;
1052 bbr->min_rtt_us = tcp_min_rtt(tp);
1053 bbr->min_rtt_stamp = tcp_jiffies32;
1054
1055 minmax_reset(&bbr->bw, bbr->rtt_cnt, 0); /* init max bw to 0 */
1056
1057 bbr->has_seen_rtt = 0;
1058 bbr_init_pacing_rate_from_rtt(sk);
1059
1060 bbr->round_start = 0;
1061 bbr->idle_restart = 0;
1062 bbr->full_bw_reached = 0;
1063 bbr->full_bw = 0;
1064 bbr->full_bw_cnt = 0;
1065 bbr->cycle_mstamp = 0;
1066 bbr->cycle_idx = 0;
1067 bbr_reset_lt_bw_sampling(sk);
1068 bbr_reset_startup_mode(sk);
1069
1070 bbr->ack_epoch_mstamp = tp->tcp_mstamp;
1071 bbr->ack_epoch_acked = 0;
1072 bbr->extra_acked_win_rtts = 0;
1073 bbr->extra_acked_win_idx = 0;
1074 bbr->extra_acked[0] = 0;
1075 bbr->extra_acked[1] = 0;
1076
1077 cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
1078 }
1079
1080 __bpf_kfunc static u32 bbr_sndbuf_expand(struct sock *sk)
1081 {
1082 /* Provision 3 * cwnd since BBR may slow-start even during recovery. */
1083 return 3;
1084 }
1085
1086 /* In theory BBR does not need to undo the cwnd since it does not
1087 * always reduce cwnd on losses (see bbr_main()). Keep it for now.
1088 */
1089 __bpf_kfunc static u32 bbr_undo_cwnd(struct sock *sk)
1090 {
1091 struct bbr *bbr = inet_csk_ca(sk);
1092
1093 bbr->full_bw = 0; /* spurious slow-down; reset full pipe detection */
1094 bbr->full_bw_cnt = 0;
1095 bbr_reset_lt_bw_sampling(sk);
1096 return tcp_snd_cwnd(tcp_sk(sk));
1097 }
1098
1099 /* Entering loss recovery, so save cwnd for when we exit or undo recovery. */
1100 __bpf_kfunc static u32 bbr_ssthresh(struct sock *sk)
1101 {
1102 bbr_save_cwnd(sk);
1103 return tcp_sk(sk)->snd_ssthresh;
1104 }
1105
1106 static size_t bbr_get_info(struct sock *sk, u32 ext, int *attr,
1107 union tcp_cc_info *info)
1108 {
1109 if (ext & (1 << (INET_DIAG_BBRINFO - 1)) ||
1110 ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
1111 struct tcp_sock *tp = tcp_sk(sk);
1112 struct bbr *bbr = inet_csk_ca(sk);
1113 u64 bw = bbr_bw(sk);
1114
1115 bw = bw * tp->mss_cache * USEC_PER_SEC >> BW_SCALE;
1116 memset(&info->bbr, 0, sizeof(info->bbr));
1117 info->bbr.bbr_bw_lo = (u32)bw;
1118 info->bbr.bbr_bw_hi = (u32)(bw >> 32);
1119 info->bbr.bbr_min_rtt = bbr->min_rtt_us;
1120 info->bbr.bbr_pacing_gain = bbr->pacing_gain;
1121 info->bbr.bbr_cwnd_gain = bbr->cwnd_gain;
1122 *attr = INET_DIAG_BBRINFO;
1123 return sizeof(info->bbr);
1124 }
1125 return 0;
1126 }
1127
1128 __bpf_kfunc static void bbr_set_state(struct sock *sk, u8 new_state)
1129 {
1130 struct bbr *bbr = inet_csk_ca(sk);
1131
1132 if (new_state == TCP_CA_Loss) {
1133 struct rate_sample rs = { .losses = 1 };
1134
1135 bbr->prev_ca_state = TCP_CA_Loss;
1136 bbr->full_bw = 0;
1137 bbr->round_start = 1; /* treat RTO like end of a round */
1138 bbr_lt_bw_sampling(sk, &rs);
1139 }
1140 }
1141
1142 static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
1143 .flags = TCP_CONG_NON_RESTRICTED,
1144 .name = "bbr",
1145 .owner = THIS_MODULE,
1146 .init = bbr_init,
1147 .cong_control = bbr_main,
1148 .sndbuf_expand = bbr_sndbuf_expand,
1149 .undo_cwnd = bbr_undo_cwnd,
1150 .cwnd_event = bbr_cwnd_event,
1151 .ssthresh = bbr_ssthresh,
1152 .min_tso_segs = bbr_min_tso_segs,
1153 .get_info = bbr_get_info,
1154 .set_state = bbr_set_state,
1155 };
1156
1157 BTF_SET8_START(tcp_bbr_check_kfunc_ids)
1158 #ifdef CONFIG_X86
1159 #ifdef CONFIG_DYNAMIC_FTRACE
1160 BTF_ID_FLAGS(func, bbr_init)
1161 BTF_ID_FLAGS(func, bbr_main)
1162 BTF_ID_FLAGS(func, bbr_sndbuf_expand)
1163 BTF_ID_FLAGS(func, bbr_undo_cwnd)
1164 BTF_ID_FLAGS(func, bbr_cwnd_event)
1165 BTF_ID_FLAGS(func, bbr_ssthresh)
1166 BTF_ID_FLAGS(func, bbr_min_tso_segs)
1167 BTF_ID_FLAGS(func, bbr_set_state)
1168 #endif
1169 #endif
1170 BTF_SET8_END(tcp_bbr_check_kfunc_ids)
1171
1172 static const struct btf_kfunc_id_set tcp_bbr_kfunc_set = {
1173 .owner = THIS_MODULE,
1174 .set = &tcp_bbr_check_kfunc_ids,
1175 };
1176
1177 static int __init bbr_register(void)
1178 {
1179 int ret;
1180
1181 BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE);
1182
1183 ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &tcp_bbr_kfunc_set);
1184 if (ret < 0)
1185 return ret;
1186 return tcp_register_congestion_control(&tcp_bbr_cong_ops);
1187 }
1188
1189 static void __exit bbr_unregister(void)
1190 {
1191 tcp_unregister_congestion_control(&tcp_bbr_cong_ops);
1192 }
1193
1194 module_init(bbr_register);
1195 module_exit(bbr_unregister);
1196
1197 MODULE_AUTHOR("Van Jacobson <vanj@google.com>");
1198 MODULE_AUTHOR("Neal Cardwell <ncardwell@google.com>");
1199 MODULE_AUTHOR("Yuchung Cheng <ycheng@google.com>");
1200 MODULE_AUTHOR("Soheil Hassas Yeganeh <soheil@google.com>");
1201 MODULE_LICENSE("Dual BSD/GPL");
1202 MODULE_DESCRIPTION("TCP BBR (Bottleneck Bandwidth and RTT)");
1203