/* Bottleneck Bandwidth and RTT (BBR) congestion control
 *
 * BBR congestion control computes the sending rate based on the delivery
 * rate (throughput) estimated from ACKs. In a nutshell:
 *
 * On each ACK, update our model of the network path:
 *	bottleneck_bandwidth = windowed_max(delivered / elapsed, 10 round trips)
 *	min_rtt = windowed_min(rtt, 10 seconds)
 *	pacing_rate = pacing_gain * bottleneck_bandwidth
 *	cwnd = max(cwnd_gain * bottleneck_bandwidth * min_rtt, 4)
 *
 * The core algorithm does not react directly to packet losses or delays,
 * although BBR may adjust the size of next send per ACK when loss is
 * observed, or adjust the sending rate if it estimates there is a
 * traffic policer, in order to keep the drop rate reasonable.
 *
 * Here is a state transition diagram for BBR:
 *
 *             |
 *             V
 *    +---> STARTUP  ----+
 *    |        |         |
 *    |        V         |
 *    |      DRAIN   ----+
 *    |        |         |
 *    |        V         |
 *    +---> PROBE_BW ----+
 *    |      ^    |      |
 *    |      |    |      |
 *    |      +----+      |
 *    |                  |
 *    +---- PROBE_RTT <--+
 *
 * A BBR flow starts in STARTUP, and ramps up its sending rate quickly.
 * When it estimates the pipe is full, it enters DRAIN to drain the queue.
 * In steady state a BBR flow only uses PROBE_BW and PROBE_RTT.
 * A long-lived BBR flow spends the vast majority of its time remaining
 * (repeatedly) in PROBE_BW, fully probing and utilizing the pipe's bandwidth
 * in a fair manner, with a small, bounded queue. *If* a flow has been
 * continuously sending for the entire min_rtt window, and hasn't seen an RTT
 * sample that matches or decreases its min_rtt estimate for 10 seconds, then
 * it briefly enters PROBE_RTT to cut inflight to a minimum value to re-probe
 * the path's two-way propagation delay (min_rtt). When exiting PROBE_RTT, if
 * we estimated that we reached the full bw of the pipe then we enter PROBE_BW;
 * otherwise we enter STARTUP to try to fill the pipe.
 *
 * BBR is described in detail in:
 *   "BBR: Congestion-Based Congestion Control",
 *   Neal Cardwell, Yuchung Cheng, C. Stephen Gunn, Soheil Hassas Yeganeh,
 *   Van Jacobson. ACM Queue, Vol. 14 No. 5, September-October 2016.
 *
 * There is a public e-mail list for discussing BBR development and testing:
 *   https://groups.google.com/forum/#!forum/bbr-dev
 *
 * NOTE: BBR might be used with the fq qdisc ("man tc-fq") with pacing enabled,
 * otherwise TCP stack falls back to an internal pacing using one high
 * resolution timer per TCP socket and may use more resources.
 */
#include <linux/module.h>
#include <net/tcp.h>
#include <linux/inet_diag.h>
#include <linux/inet.h>
#include <linux/random.h>
#include <linux/win_minmax.h>

/* Scale factor for rate in pkt/uSec unit to avoid truncation in bandwidth
 * estimation. The rate unit ~= (1500 bytes / 1 usec / 2^24) ~= 715 bps.
 * This handles bandwidths from 0.06pps (715bps) to 256Mpps (3Tbps) in a u32.
 * Since the minimum window is >=4 packets, the lower bound isn't
 * an issue. The upper bound isn't an issue with existing technologies.
 */
#define BW_SCALE 24
#define BW_UNIT (1 << BW_SCALE)
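
/* Illustrative example (not part of the original source): with BW_SCALE = 24,
 * a delivery rate of 1 packet per 8 usec (about 1.5 Gbit/sec with 1500-byte
 * packets) is stored as BW_UNIT / 8 = 2^24 / 8 = 2097152. The smallest
 * nonzero value, 1, corresponds to 2^-24 pkt/usec ~= 0.06 pkt/sec ~= 715 bps,
 * matching the range described in the comment above.
 */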

#define BBR_SCALE 8	/* scaling factor for fractions in BBR (e.g. gains) */
#define BBR_UNIT (1 << BBR_SCALE)

/* BBR has the following modes for deciding how fast to send: */
enum bbr_mode {
	BBR_STARTUP,	/* ramp up sending rate rapidly to fill pipe */
	BBR_DRAIN,	/* drain any queue created during startup */
	BBR_PROBE_BW,	/* discover, share bw: pace around estimated bw */
	BBR_PROBE_RTT,	/* cut inflight to min to probe min_rtt */
};

/* BBR congestion control block */
struct bbr {
	u32	min_rtt_us;	/* min RTT in min_rtt_win_sec window */
	u32	min_rtt_stamp;	/* timestamp of min_rtt_us */
	u32	probe_rtt_done_stamp;	/* end time for BBR_PROBE_RTT mode */
	struct minmax bw;	/* Max recent delivery rate in pkts/uS << 24 */
	u32	rtt_cnt;	/* count of packet-timed rounds elapsed */
	u32	next_rtt_delivered;	/* scb->tx.delivered at end of round */
	u64	cycle_mstamp;	/* time of this cycle phase start */
	u32	mode:3,		/* current bbr_mode in state machine */
		prev_ca_state:3,	/* CA state on previous ACK */
		packet_conservation:1,	/* use packet conservation? */
		round_start:1,	/* start of packet-timed tx->ack round? */
		idle_restart:1,	/* restarting after idle? */
		probe_rtt_round_done:1,	/* a BBR_PROBE_RTT round at 4 pkts? */
		unused:13,
		lt_is_sampling:1,	/* taking long-term ("LT") samples now? */
		lt_rtt_cnt:7,	/* round trips in long-term interval */
		lt_use_bw:1;	/* use lt_bw as our bw estimate? */
	u32	lt_bw;		/* LT est delivery rate in pkts/uS << 24 */
	u32	lt_last_delivered;	/* LT intvl start: tp->delivered */
	u32	lt_last_stamp;	/* LT intvl start: tp->delivered_mstamp */
	u32	lt_last_lost;	/* LT intvl start: tp->lost */
	u32	pacing_gain:10,	/* current gain for setting pacing rate */
		cwnd_gain:10,	/* current gain for setting cwnd */
		full_bw_reached:1,	/* reached full bw in Startup? */
		full_bw_cnt:2,	/* number of rounds without large bw gains */
		cycle_idx:3,	/* current index in pacing_gain cycle array */
		has_seen_rtt:1,	/* have we seen an RTT sample yet? */
		unused_b:5;
	u32	prior_cwnd;	/* prior cwnd upon entering loss recovery */
	u32	full_bw;	/* recent bw, to estimate if pipe is full */

	/* For tracking ACK aggregation: */
	u64	ack_epoch_mstamp;	/* start of ACK sampling epoch */
	u16	extra_acked[2];		/* max excess data ACKed in epoch */
	u32	ack_epoch_acked:20,	/* packets (S)ACKed in sampling epoch */
		extra_acked_win_rtts:5,	/* age of extra_acked, in round trips */
		extra_acked_win_idx:1,	/* current index in extra_acked array */
		unused_c:6;
};

#define CYCLE_LEN	8	/* number of phases in a pacing gain cycle */

/* Window length of bw filter (in rounds): */
static const int bbr_bw_rtts = CYCLE_LEN + 2;
/* Window length of min_rtt filter (in sec): */
static const u32 bbr_min_rtt_win_sec = 10;
/* Minimum time (in ms) spent at bbr_cwnd_min_target in BBR_PROBE_RTT mode: */
static const u32 bbr_probe_rtt_mode_ms = 200;
/* Skip TSO below the following bandwidth (bits/sec): */
static const int bbr_min_tso_rate = 1200000;

/* Pace at ~1% below estimated bw, on average, to reduce queue at bottleneck.
 * In order to help drive the network toward lower queues and low latency while
 * maintaining high utilization, the average pacing rate aims to be slightly
 * lower than the estimated bandwidth. This is an important aspect of the
 * design.
 */
static const int bbr_pacing_margin_percent = 1;

/* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
 * that will allow a smoothly increasing pacing rate that will double each RTT
 * and send the same number of packets per RTT that an un-paced, slow-starting
 * Reno or CUBIC flow would:
 */
static const int bbr_high_gain  = BBR_UNIT * 2885 / 1000 + 1;
/* The pacing gain of 1/high_gain in BBR_DRAIN is calculated to typically drain
 * the queue created in BBR_STARTUP in a single round:
 */
static const int bbr_drain_gain = BBR_UNIT * 1000 / 2885;
/* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs: */
static const int bbr_cwnd_gain  = BBR_UNIT * 2;
/* The pacing_gain values for the PROBE_BW gain cycle, to discover/share bw: */
static const int bbr_pacing_gain[] = {
	BBR_UNIT * 5 / 4,	/* probe for more available bw */
	BBR_UNIT * 3 / 4,	/* drain queue and/or yield bw to other flows */
	BBR_UNIT, BBR_UNIT, BBR_UNIT,	/* cruise at 1.0*bw to utilize pipe, */
	BBR_UNIT, BBR_UNIT, BBR_UNIT	/* without creating excess queue... */
};
/* Randomize the starting gain cycling phase over N phases: */
static const u32 bbr_cycle_rand = 7;

/* Try to keep at least this many packets in flight, if things go smoothly. For
 * smooth functioning, a sliding window protocol ACKing every other packet
 * needs at least 4 packets in flight:
 */
static const u32 bbr_cwnd_min_target = 4;

/* To estimate if BBR_STARTUP mode (i.e. high_gain) has filled pipe... */
/* If bw has increased significantly (1.25x), there may be more bw available: */
static const u32 bbr_full_bw_thresh = BBR_UNIT * 5 / 4;
/* But after 3 rounds w/o significant bw growth, estimate pipe is full: */
static const u32 bbr_full_bw_cnt = 3;

/* "long-term" ("LT") bandwidth estimator parameters... */
/* The minimum number of rounds in an LT bw sampling interval: */
static const u32 bbr_lt_intvl_min_rtts = 4;
/* If lost/delivered ratio > 20%, interval is "lossy" and we may be policed: */
static const u32 bbr_lt_loss_thresh = 50;
/* If 2 intervals have a bw ratio <= 1/8, their bw is "consistent": */
static const u32 bbr_lt_bw_ratio = BBR_UNIT / 8;
/* If 2 intervals have a bw diff <= 4 Kbit/sec their bw is "consistent": */
static const u32 bbr_lt_bw_diff = 4000 / 8;
/* If we estimate we're policed, use lt_bw for this many round trips: */
static const u32 bbr_lt_bw_max_rtts = 48;

/* Gain factor for adding extra_acked to target cwnd: */
static const int bbr_extra_acked_gain = BBR_UNIT;
/* Window length of extra_acked window. */
static const u32 bbr_extra_acked_win_rtts = 5;
/* Max allowed val for ack_epoch_acked, after which sampling epoch is reset */
static const u32 bbr_ack_epoch_acked_reset_thresh = 1U << 20;
/* Time period for clamping cwnd increment due to ack aggregation */
static const u32 bbr_extra_acked_max_us = 100 * 1000;
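
/* Illustrative example (not part of the original source): gains are Q8 fixed
 * point, so a gain is applied as (val * gain) >> BBR_SCALE. With BBR_UNIT =
 * 256:
 *   bbr_high_gain  = 256 * 2885 / 1000 + 1 = 739 -> 739/256 ~= 2.89 ~= 2/ln(2)
 *   bbr_drain_gain = 256 * 1000 / 2885     = 88  ->  88/256 ~= 0.34 ~= ln(2)/2
 *   bbr_cwnd_gain  = 256 * 2               = 512 -> 512/256  = 2.0
 * e.g. applying bbr_drain_gain to a rate of 1000 pkt/sec:
 *   (1000 * 88) >> 8 = 343 pkt/sec.
 */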

static void bbr_check_probe_rtt_done(struct sock *sk);

/* Do we estimate that STARTUP filled the pipe? */
static bool bbr_full_bw_reached(const struct sock *sk)
{
	const struct bbr *bbr = inet_csk_ca(sk);

	return bbr->full_bw_reached;
}

/* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
static u32 bbr_max_bw(const struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	return minmax_get(&bbr->bw);
}

/* Return the estimated bandwidth of the path, in pkts/uS << BW_SCALE. */
static u32 bbr_bw(const struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);
}

/* Return maximum extra acked in past k-2k round trips,
 * where k = bbr_extra_acked_win_rtts.
 */
static u16 bbr_extra_acked(const struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	return max(bbr->extra_acked[0], bbr->extra_acked[1]);
}

/* Return rate in bytes per second, optionally with a gain.
 * The order here is chosen carefully to avoid overflow of u64. This should
 * work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
 */
static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
{
	unsigned int mss = tcp_sk(sk)->mss_cache;

	rate *= mss;
	rate *= gain;
	rate >>= BBR_SCALE;
	rate *= USEC_PER_SEC / 100 * (100 - bbr_pacing_margin_percent);
	return rate >> BW_SCALE;
}

/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
static unsigned long bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
{
	u64 rate = bw;

	rate = bbr_rate_bytes_per_sec(sk, rate, gain);
	rate = min_t(u64, rate, sk->sk_max_pacing_rate);
	return rate;
}
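
/* Illustrative example (not part of the original source): assume mss = 1500
 * and bw = 1398101 (~0.0833 pkt/usec << BW_SCALE, roughly 1 Gbit/sec of
 * 1500-byte packets), with gain = BBR_UNIT (1.0):
 *   rate = 1398101 * 1500             = 2097151500
 *   rate = rate * 256 >> 8            = 2097151500
 *   rate = rate * (1000000/100 * 99)  = 2076179985000000
 *   rate >> 24                        ~= 123749970 bytes/sec ~= 0.99 Gbit/sec
 * i.e. pacing lands ~1% below the estimated bw, per bbr_pacing_margin_percent.
 */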

/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u64 bw;
	u32 rtt_us;

	if (tp->srtt_us) {		/* any RTT sample yet? */
		rtt_us = max(tp->srtt_us >> 3, 1U);
		bbr->has_seen_rtt = 1;
	} else {			/* no RTT sample yet */
		rtt_us = USEC_PER_MSEC;	/* use nominal default RTT */
	}
	bw = (u64)tp->snd_cwnd * BW_UNIT;
	do_div(bw, rtt_us);
	sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
}

/* Pace using current bw estimate and a gain factor. */
static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	unsigned long rate = bbr_bw_to_pacing_rate(sk, bw, gain);

	if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
		bbr_init_pacing_rate_from_rtt(sk);
	if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
		sk->sk_pacing_rate = rate;
}

/* override sysctl_tcp_min_tso_segs */
static u32 bbr_min_tso_segs(struct sock *sk)
{
	return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
}

static u32 bbr_tso_segs_goal(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 segs, bytes;

	/* Sort of tcp_tso_autosize() but ignoring
	 * driver provided sk_gso_max_size.
	 */
	bytes = min_t(unsigned long,
		      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
		      GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
	segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));

	return min(segs, 0x7FU);
}

/* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
static void bbr_save_cwnd(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT)
		bbr->prior_cwnd = tp->snd_cwnd;	/* this cwnd is good enough */
	else	/* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
		bbr->prior_cwnd = max(bbr->prior_cwnd, tp->snd_cwnd);
}

static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	if (event == CA_EVENT_TX_START && tp->app_limited) {
		bbr->idle_restart = 1;
		bbr->ack_epoch_mstamp = tp->tcp_mstamp;
		bbr->ack_epoch_acked = 0;
		/* Avoid pointless buffer overflows: pace at est. bw if we don't
		 * need more speed (we're restarting from idle and app-limited).
		 */
		if (bbr->mode == BBR_PROBE_BW)
			bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
		else if (bbr->mode == BBR_PROBE_RTT)
			bbr_check_probe_rtt_done(sk);
	}
}

/* Calculate bdp based on min RTT and the estimated bottleneck bandwidth:
 *
 * bdp = ceil(bw * min_rtt * gain)
 *
 * The key factor, gain, controls the amount of queue. While a small gain
 * builds a smaller queue, it becomes more vulnerable to noise in RTT
 * measurements (e.g., delayed ACKs or other ACK compression effects). This
 * noise may cause BBR to under-estimate the rate.
 */
static u32 bbr_bdp(struct sock *sk, u32 bw, int gain)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u32 bdp;
	u64 w;

	/* If we've never had a valid RTT sample, cap cwnd at the initial
	 * default. This should only happen when the connection is not using TCP
	 * timestamps and has retransmitted all of the SYN/SYNACK/data packets
	 * ACKed so far. In this case, an RTO can cut cwnd to 1, in which
	 * case we need to slow-start up toward something safe: TCP_INIT_CWND.
	 */
	if (unlikely(bbr->min_rtt_us == ~0U))	/* no valid RTT samples yet? */
		return TCP_INIT_CWND;	/* be safe: cap at default initial cwnd*/

	w = (u64)bw * bbr->min_rtt_us;

	/* Apply a gain to the given value, remove the BW_SCALE shift, and
	 * round the value up to avoid a negative feedback loop.
	 */
	bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;

	return bdp;
}
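
/* Illustrative example (not part of the original source): with bw = 1398101
 * (~0.0833 pkt/usec << BW_SCALE, ~1 Gbit/sec of 1500-byte packets),
 * min_rtt_us = 50000 (50 ms) and gain = 2 * BBR_UNIT:
 *   w   = 1398101 * 50000             = 69905050000
 *   bdp = ceil((w * 512 >> 8) / 2^24) = ceil(139810100000 / 16777216) = 8334
 * i.e. roughly 2 * (0.0833 pkt/usec * 50000 usec) = 8333 packets, rounded up.
 */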

/* To achieve full performance in high-speed paths, we budget enough cwnd to
 * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
 *   - one skb in sending host Qdisc,
 *   - one skb in sending host TSO/GSO engine
 *   - one skb being received by receiver host LRO/GRO/delayed-ACK engine
 * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
 * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
 * which allows 2 outstanding 2-packet sequences, to try to keep pipe
 * full even with ACK-every-other-packet delayed ACKs.
 */
static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd)
{
	struct bbr *bbr = inet_csk_ca(sk);

	/* Allow enough full-sized skbs in flight to utilize end systems. */
	cwnd += 3 * bbr_tso_segs_goal(sk);

	/* Reduce delayed ACKs by rounding up cwnd to the next even number. */
	cwnd = (cwnd + 1) & ~1U;

	/* Ensure gain cycling gets inflight above BDP even for small BDPs. */
	if (bbr->mode == BBR_PROBE_BW && bbr->cycle_idx == 0)
		cwnd += 2;

	return cwnd;
}

/* Find inflight based on min RTT and the estimated bottleneck bandwidth. */
static u32 bbr_inflight(struct sock *sk, u32 bw, int gain)
{
	u32 inflight;

	inflight = bbr_bdp(sk, bw, gain);
	inflight = bbr_quantization_budget(sk, inflight);

	return inflight;
}

/* With pacing at lower layers, there's often less data "in the network" than
 * "in flight". With TSQ and departure time pacing at lower layers (e.g. fq),
 * we often have several skbs queued in the pacing layer with a pre-scheduled
 * earliest departure time (EDT). BBR adapts its pacing rate based on the
 * inflight level that it estimates has already been "baked in" by previous
 * departure time decisions. We calculate a rough estimate of the number of our
 * packets that might be in the network at the earliest departure time for the
 * next skb scheduled:
 *   in_network_at_edt = inflight_at_edt - (EDT - now) * bw
 * If we're increasing inflight, then we want to know if the transmit of the
 * EDT skb will push inflight above the target, so inflight_at_edt includes
 * bbr_tso_segs_goal() from the skb departing at EDT. If decreasing inflight,
 * then estimate if inflight will sink too low just before the EDT transmit.
 */
static u32 bbr_packets_in_net_at_edt(struct sock *sk, u32 inflight_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u64 now_ns, edt_ns, interval_us;
	u32 interval_delivered, inflight_at_edt;

	now_ns = tp->tcp_clock_cache;
	edt_ns = max(tp->tcp_wstamp_ns, now_ns);
	interval_us = div_u64(edt_ns - now_ns, NSEC_PER_USEC);
	interval_delivered = (u64)bbr_bw(sk) * interval_us >> BW_SCALE;
	inflight_at_edt = inflight_now;
	if (bbr->pacing_gain > BBR_UNIT)	/* increasing inflight */
		inflight_at_edt += bbr_tso_segs_goal(sk);	/* include EDT skb */
	if (interval_delivered >= inflight_at_edt)
		return 0;
	return inflight_at_edt - interval_delivered;
}
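
/* Illustrative example (not part of the original source): suppose
 * inflight_now = 100 packets, the next skb's EDT is 600 usec in the future,
 * and bbr_bw() is ~0.0833 pkt/usec (raw value 1398101). Then:
 *   interval_delivered = 1398101 * 600 >> 24 ~= 49 packets
 *   in_network_at_edt  ~= 100 - 49 = 51 packets (plus the EDT skb's segments
 * when pacing_gain > 1.0), i.e. roughly half of the "inflight" packets are
 * expected to have left the network by the time the next skb departs.
 */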

/* Find the cwnd increment based on estimate of ack aggregation */
static u32 bbr_ack_aggregation_cwnd(struct sock *sk)
{
	u32 max_aggr_cwnd, aggr_cwnd = 0;

	if (bbr_extra_acked_gain && bbr_full_bw_reached(sk)) {
		max_aggr_cwnd = ((u64)bbr_bw(sk) * bbr_extra_acked_max_us)
				/ BW_UNIT;
		aggr_cwnd = (bbr_extra_acked_gain * bbr_extra_acked(sk))
			     >> BBR_SCALE;
		aggr_cwnd = min(aggr_cwnd, max_aggr_cwnd);
	}

	return aggr_cwnd;
}

/* An optimization in BBR to reduce losses: On the first round of recovery, we
 * follow the packet conservation principle: send P packets per P packets acked.
 * After that, we slow-start and send at most 2*P packets per P packets acked.
 * After recovery finishes, or upon undo, we restore the cwnd we had when
 * recovery started (capped by the target cwnd based on estimated BDP).
 *
 * TODO(ycheng/ncardwell): implement a rate-based approach.
 */
static bool bbr_set_cwnd_to_recover_or_restore(
	struct sock *sk, const struct rate_sample *rs, u32 acked, u32 *new_cwnd)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u8 prev_state = bbr->prev_ca_state, state = inet_csk(sk)->icsk_ca_state;
	u32 cwnd = tp->snd_cwnd;

	/* An ACK for P pkts should release at most 2*P packets. We do this
	 * in two steps. First, here we deduct the number of lost packets.
	 * Then, in bbr_set_cwnd() we slow start up toward the target cwnd.
	 */
	if (rs->losses > 0)
		cwnd = max_t(s32, cwnd - rs->losses, 1);

	if (state == TCP_CA_Recovery && prev_state != TCP_CA_Recovery) {
		/* Starting 1st round of Recovery, so do packet conservation. */
		bbr->packet_conservation = 1;
		bbr->next_rtt_delivered = tp->delivered;	/* start round now */
		/* Cut unused cwnd from app behavior, TSQ, or TSO deferral: */
		cwnd = tcp_packets_in_flight(tp) + acked;
	} else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) {
		/* Exiting loss recovery; restore cwnd saved before recovery. */
		cwnd = max(cwnd, bbr->prior_cwnd);
		bbr->packet_conservation = 0;
	}
	bbr->prev_ca_state = state;

	if (bbr->packet_conservation) {
		*new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
		return true;	/* yes, using packet conservation */
	}
	*new_cwnd = cwnd;
	return false;
}

/* Slow-start up toward target cwnd (if bw estimate is growing, or packet loss
 * has drawn us down below target), or snap down to target if we're above it.
 */
static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
			 u32 acked, u32 bw, int gain)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u32 cwnd = tp->snd_cwnd, target_cwnd = 0;

	if (!acked)
		goto done;	/* no packet fully ACKed; just apply caps */

	if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
		goto done;

	target_cwnd = bbr_bdp(sk, bw, gain);

	/* Increment the cwnd to account for excess ACKed data that seems
	 * due to aggregation (of data and/or ACKs) visible in the ACK stream.
	 */
	target_cwnd += bbr_ack_aggregation_cwnd(sk);
	target_cwnd = bbr_quantization_budget(sk, target_cwnd);

	/* If we're below target cwnd, slow start cwnd toward target cwnd. */
	if (bbr_full_bw_reached(sk))	/* only cut cwnd if we filled the pipe */
		cwnd = min(cwnd + acked, target_cwnd);
	else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
		cwnd = cwnd + acked;
	cwnd = max(cwnd, bbr_cwnd_min_target);

done:
	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);	/* apply global cap */
	if (bbr->mode == BBR_PROBE_RTT)	/* drain queue, refresh min_rtt */
		tp->snd_cwnd = min(tp->snd_cwnd, bbr_cwnd_min_target);
}
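
/* Illustrative note (not part of the original source): the PROBE_BW gain
 * cycle defined by bbr_pacing_gain[] is 8 phases long:
 *   1.25, 0.75, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0   (average = 1.0)
 * A phase normally lasts about one min_rtt: the 1.25 phase may run longer
 * until inflight reaches its target (or a loss occurs), and the 0.75 phase
 * may end early once inflight has drained to the estimated BDP, per
 * bbr_is_next_cycle_phase() below. Over a full cycle the flow paces at the
 * estimated bw on average, while periodically probing for more bw and then
 * draining any queue it built.
 */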

/* End cycle phase if it's time and/or we hit the phase's in-flight target. */
static bool bbr_is_next_cycle_phase(struct sock *sk,
				    const struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	bool is_full_length =
		tcp_stamp_us_delta(tp->delivered_mstamp, bbr->cycle_mstamp) >
		bbr->min_rtt_us;
	u32 inflight, bw;

	/* The pacing_gain of 1.0 paces at the estimated bw to try to fully
	 * use the pipe without increasing the queue.
	 */
	if (bbr->pacing_gain == BBR_UNIT)
		return is_full_length;		/* just use wall clock time */

	inflight = bbr_packets_in_net_at_edt(sk, rs->prior_in_flight);
	bw = bbr_max_bw(sk);

	/* A pacing_gain > 1.0 probes for bw by trying to raise inflight to at
	 * least pacing_gain*BDP; this may take more than min_rtt if min_rtt is
	 * small (e.g. on a LAN). We do not persist if packets are lost, since
	 * a path with small buffers may not hold that much.
	 */
	if (bbr->pacing_gain > BBR_UNIT)
		return is_full_length &&
			(rs->losses ||	/* perhaps pacing_gain*BDP won't fit */
			 inflight >= bbr_inflight(sk, bw, bbr->pacing_gain));

	/* A pacing_gain < 1.0 tries to drain extra queue we added if bw
	 * probing didn't find more bw. If inflight falls to match BDP then we
	 * estimate queue is drained; persisting would underutilize the pipe.
	 */
	return is_full_length ||
		inflight <= bbr_inflight(sk, bw, BBR_UNIT);
}

static void bbr_advance_cycle_phase(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->cycle_idx = (bbr->cycle_idx + 1) & (CYCLE_LEN - 1);
	bbr->cycle_mstamp = tp->delivered_mstamp;
}

/* Gain cycling: cycle pacing gain to converge to fair share of available bw. */
static void bbr_update_cycle_phase(struct sock *sk,
				   const struct rate_sample *rs)
{
	struct bbr *bbr = inet_csk_ca(sk);

	if (bbr->mode == BBR_PROBE_BW && bbr_is_next_cycle_phase(sk, rs))
		bbr_advance_cycle_phase(sk);
}

static void bbr_reset_startup_mode(struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->mode = BBR_STARTUP;
}

static void bbr_reset_probe_bw_mode(struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->mode = BBR_PROBE_BW;
	bbr->cycle_idx = CYCLE_LEN - 1 - prandom_u32_max(bbr_cycle_rand);
	bbr_advance_cycle_phase(sk);	/* flip to next phase of gain cycle */
}

static void bbr_reset_mode(struct sock *sk)
{
	if (!bbr_full_bw_reached(sk))
		bbr_reset_startup_mode(sk);
	else
		bbr_reset_probe_bw_mode(sk);
}

/* Start a new long-term sampling interval. */
static void bbr_reset_lt_bw_sampling_interval(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->lt_last_stamp = div_u64(tp->delivered_mstamp, USEC_PER_MSEC);
	bbr->lt_last_delivered = tp->delivered;
	bbr->lt_last_lost = tp->lost;
	bbr->lt_rtt_cnt = 0;
}

/* Completely reset long-term bandwidth sampling. */
static void bbr_reset_lt_bw_sampling(struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->lt_bw = 0;
	bbr->lt_use_bw = 0;
	bbr->lt_is_sampling = false;
	bbr_reset_lt_bw_sampling_interval(sk);
}
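
/* Illustrative example (not part of the original source): two consecutive
 * lossy sampling intervals are judged "consistent" (and thus evidence of a
 * token-bucket policer) if either
 *   |bw2 - bw1| * BBR_UNIT <= bbr_lt_bw_ratio * bw1   (within 1/8 of bw1), or
 *   the difference converts to <= bbr_lt_bw_diff (~4 Kbit/sec).
 * An interval only counts as lossy if lost/delivered >= 50/256 ~= 20%.
 * E.g. bw1 = 800 and bw2 = 860 (raw units): |60| * 256 = 15360 <=
 * 32 * 800 = 25600, so the intervals are consistent and lt_bw becomes their
 * average, 830.
 */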

/* Long-term bw sampling interval is done. Estimate whether we're policed. */
static void bbr_lt_bw_interval_done(struct sock *sk, u32 bw)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u32 diff;

	if (bbr->lt_bw) {	/* do we have bw from a previous interval? */
		/* Is new bw close to the lt_bw from the previous interval? */
		diff = abs(bw - bbr->lt_bw);
		if ((diff * BBR_UNIT <= bbr_lt_bw_ratio * bbr->lt_bw) ||
		    (bbr_rate_bytes_per_sec(sk, diff, BBR_UNIT) <=
		     bbr_lt_bw_diff)) {
			/* All criteria are met; estimate we're policed. */
			bbr->lt_bw = (bw + bbr->lt_bw) >> 1;	/* avg 2 intvls */
			bbr->lt_use_bw = 1;
			bbr->pacing_gain = BBR_UNIT;	/* try to avoid drops */
			bbr->lt_rtt_cnt = 0;
			return;
		}
	}
	bbr->lt_bw = bw;
	bbr_reset_lt_bw_sampling_interval(sk);
}

/* Token-bucket traffic policers are common (see "An Internet-Wide Analysis of
 * Traffic Policing", SIGCOMM 2016). BBR detects token-bucket policers and
 * explicitly models their policed rate, to reduce unnecessary losses. We
 * estimate that we're policed if we see 2 consecutive sampling intervals with
 * consistent throughput and high packet loss. If we think we're being policed,
 * set lt_bw to the "long-term" average delivery rate from those 2 intervals.
 */
static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u32 lost, delivered;
	u64 bw;
	u32 t;

	if (bbr->lt_use_bw) {	/* already using long-term rate, lt_bw? */
		if (bbr->mode == BBR_PROBE_BW && bbr->round_start &&
		    ++bbr->lt_rtt_cnt >= bbr_lt_bw_max_rtts) {
			bbr_reset_lt_bw_sampling(sk);	/* stop using lt_bw */
			bbr_reset_probe_bw_mode(sk);	/* restart gain cycling */
		}
		return;
	}

	/* Wait for the first loss before sampling, to let the policer exhaust
	 * its tokens and estimate the steady-state rate allowed by the policer.
	 * Starting samples earlier includes bursts that over-estimate the bw.
	 */
	if (!bbr->lt_is_sampling) {
		if (!rs->losses)
			return;
		bbr_reset_lt_bw_sampling_interval(sk);
		bbr->lt_is_sampling = true;
	}

	/* To avoid underestimates, reset sampling if we run out of data. */
	if (rs->is_app_limited) {
		bbr_reset_lt_bw_sampling(sk);
		return;
	}

	if (bbr->round_start)
		bbr->lt_rtt_cnt++;	/* count round trips in this interval */
	if (bbr->lt_rtt_cnt < bbr_lt_intvl_min_rtts)
		return;		/* sampling interval needs to be longer */
	if (bbr->lt_rtt_cnt > 4 * bbr_lt_intvl_min_rtts) {
		bbr_reset_lt_bw_sampling(sk);	/* interval is too long */
		return;
	}

	/* End sampling interval when a packet is lost, so we estimate the
	 * policer tokens were exhausted. Stopping the sampling before the
	 * tokens are exhausted under-estimates the policed rate.
	 */
	if (!rs->losses)
		return;

	/* Calculate packets lost and delivered in sampling interval. */
	lost = tp->lost - bbr->lt_last_lost;
	delivered = tp->delivered - bbr->lt_last_delivered;
	/* Is loss rate (lost/delivered) >= lt_loss_thresh? If not, wait. */
	if (!delivered || (lost << BBR_SCALE) < bbr_lt_loss_thresh * delivered)
		return;

	/* Find average delivery rate in this sampling interval. */
	t = div_u64(tp->delivered_mstamp, USEC_PER_MSEC) - bbr->lt_last_stamp;
	if ((s32)t < 1)
		return;		/* interval is less than one ms, so wait */
	/* Check if can multiply without overflow */
	if (t >= ~0U / USEC_PER_MSEC) {
		bbr_reset_lt_bw_sampling(sk);	/* interval too long; reset */
		return;
	}
	t *= USEC_PER_MSEC;
	bw = (u64)delivered * BW_UNIT;
	do_div(bw, t);
	bbr_lt_bw_interval_done(sk, bw);
}
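
/* Illustrative example (not part of the original source): a rate sample that
 * reports delivered = 100 packets over interval_us = 1200 usec yields
 *   bw = 100 * BW_UNIT / 1200 = 1677721600 / 1200 ~= 1398101
 * i.e. ~0.0833 pkt/usec, or about 1 Gbit/sec with 1500-byte packets. This is
 * the scaled form fed into the windowed max filter in bbr_update_bw() below.
 */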

/* Estimate the bandwidth based on how fast packets are delivered */
static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u64 bw;

	bbr->round_start = 0;
	if (rs->delivered < 0 || rs->interval_us <= 0)
		return;	/* Not a valid observation */

	/* See if we've reached the next RTT */
	if (!before(rs->prior_delivered, bbr->next_rtt_delivered)) {
		bbr->next_rtt_delivered = tp->delivered;
		bbr->rtt_cnt++;
		bbr->round_start = 1;
		bbr->packet_conservation = 0;
	}

	bbr_lt_bw_sampling(sk, rs);

	/* Divide delivered by the interval to find a (lower bound) bottleneck
	 * bandwidth sample. Delivered is in packets and interval_us in uS and
	 * ratio will be <<1 for most connections. So delivered is first scaled.
	 */
	bw = (u64)rs->delivered * BW_UNIT;
	do_div(bw, rs->interval_us);

	/* If this sample is application-limited, it is likely to have a very
	 * low delivered count that represents application behavior rather than
	 * the available network rate. Such a sample could drag down estimated
	 * bw, causing needless slow-down. Thus, to continue to send at the
	 * last measured network rate, we filter out app-limited samples unless
	 * they describe the path bw at least as well as our bw model.
	 *
	 * So the goal during app-limited phase is to proceed with the best
	 * network rate no matter how long. We automatically leave this
	 * phase when app writes faster than the network can deliver :)
	 */
	if (!rs->is_app_limited || bw >= bbr_max_bw(sk)) {
		/* Incorporate new sample into our max bw filter. */
		minmax_running_max(&bbr->bw, bbr_bw_rtts, bbr->rtt_cnt, bw);
	}
}
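
/* Illustrative example (not part of the original source): with bbr_bw() ~=
 * 0.0833 pkt/usec (raw 1398101) and an ACK sampling epoch of 600 usec,
 *   expected_acked = 1398101 * 600 / BW_UNIT ~= 49 packets.
 * If 80 packets were actually (S)ACKed in that epoch, extra_acked = 31 is
 * recorded (capped by cwnd), and up to bw * bbr_extra_acked_max_us (100 ms)
 * worth of packets may be added to the cwnd target by
 * bbr_ack_aggregation_cwnd().
 */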

/* Estimates the windowed max degree of ack aggregation.
 * This is used to provision extra in-flight data to keep sending during
 * inter-ACK silences.
 *
 * Degree of ack aggregation is estimated as extra data acked beyond expected.
 *
 * max_extra_acked = "maximum recent excess data ACKed beyond max_bw * interval"
 * cwnd += max_extra_acked
 *
 * Max extra_acked is clamped by cwnd and bw * bbr_extra_acked_max_us (100 ms).
 * Max filter is an approximate sliding window of 5-10 (packet timed) round
 * trips.
 */
static void bbr_update_ack_aggregation(struct sock *sk,
				       const struct rate_sample *rs)
{
	u32 epoch_us, expected_acked, extra_acked;
	struct bbr *bbr = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	if (!bbr_extra_acked_gain || rs->acked_sacked <= 0 ||
	    rs->delivered < 0 || rs->interval_us <= 0)
		return;

	if (bbr->round_start) {
		bbr->extra_acked_win_rtts = min(0x1F,
						bbr->extra_acked_win_rtts + 1);
		if (bbr->extra_acked_win_rtts >= bbr_extra_acked_win_rtts) {
			bbr->extra_acked_win_rtts = 0;
			bbr->extra_acked_win_idx = bbr->extra_acked_win_idx ?
						   0 : 1;
			bbr->extra_acked[bbr->extra_acked_win_idx] = 0;
		}
	}

	/* Compute how many packets we expected to be delivered over epoch. */
	epoch_us = tcp_stamp_us_delta(tp->delivered_mstamp,
				      bbr->ack_epoch_mstamp);
	expected_acked = ((u64)bbr_bw(sk) * epoch_us) / BW_UNIT;

	/* Reset the aggregation epoch if ACK rate is below expected rate or
	 * significantly large no. of ack received since epoch (potentially
	 * quite old epoch).
	 */
	if (bbr->ack_epoch_acked <= expected_acked ||
	    (bbr->ack_epoch_acked + rs->acked_sacked >=
	     bbr_ack_epoch_acked_reset_thresh)) {
		bbr->ack_epoch_acked = 0;
		bbr->ack_epoch_mstamp = tp->delivered_mstamp;
		expected_acked = 0;
	}

	/* Compute excess data delivered, beyond what was expected. */
	bbr->ack_epoch_acked = min_t(u32, 0xFFFFF,
				     bbr->ack_epoch_acked + rs->acked_sacked);
	extra_acked = bbr->ack_epoch_acked - expected_acked;
	extra_acked = min(extra_acked, tp->snd_cwnd);
	if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx])
		bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked;
}

/* Estimate when the pipe is full, using the change in delivery rate: BBR
 * estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
 * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
 * rounds. Why 3 rounds: 1: rwin autotuning grows the rwin, 2: we fill the
 * higher rwin, 3: we get higher delivery rate samples. Or transient
 * cross-traffic or radio noise can go away. CUBIC Hystart shares a similar
 * design goal, but uses delay and inter-ACK spacing instead of bandwidth.
 */
static void bbr_check_full_bw_reached(struct sock *sk,
				      const struct rate_sample *rs)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u32 bw_thresh;

	if (bbr_full_bw_reached(sk) || !bbr->round_start || rs->is_app_limited)
		return;

	bw_thresh = (u64)bbr->full_bw * bbr_full_bw_thresh >> BBR_SCALE;
	if (bbr_max_bw(sk) >= bw_thresh) {
		bbr->full_bw = bbr_max_bw(sk);
		bbr->full_bw_cnt = 0;
		return;
	}
	++bbr->full_bw_cnt;
	bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt;
}
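
/* Illustrative example (not part of the original source): suppose full_bw was
 * last recorded as 1000 (raw units). The growth threshold is
 *   bw_thresh = 1000 * 320 >> 8 = 1250,
 * i.e. 25% above the previous value. If the windowed max bw stays below 1250
 * for 3 consecutive non-app-limited round starts, full_bw_reached is set and
 * BBR leaves STARTUP (via bbr_check_drain() below).
 */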

/* If pipe is probably full, drain the queue and then enter steady-state. */
static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
{
	struct bbr *bbr = inet_csk_ca(sk);

	if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
		bbr->mode = BBR_DRAIN;	/* drain queue we created */
		tcp_sk(sk)->snd_ssthresh =
				bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
	}	/* fall through to check if in-flight is already small: */
	if (bbr->mode == BBR_DRAIN &&
	    bbr_packets_in_net_at_edt(sk, tcp_packets_in_flight(tcp_sk(sk))) <=
	    bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT))
		bbr_reset_probe_bw_mode(sk);	/* we estimate queue is drained */
}

static void bbr_check_probe_rtt_done(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	if (!(bbr->probe_rtt_done_stamp &&
	      after(tcp_jiffies32, bbr->probe_rtt_done_stamp)))
		return;

	bbr->min_rtt_stamp = tcp_jiffies32;	/* wait a while until PROBE_RTT */
	tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
	bbr_reset_mode(sk);
}

/* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
 * periodically drain the bottleneck queue, to converge to measure the true
 * min_rtt (unloaded propagation delay). This allows the flows to keep queues
 * small (reducing queuing delay and packet loss) and achieve fairness among
 * BBR flows.
 *
 * The min_rtt filter window is 10 seconds. When the min_rtt estimate expires,
 * we enter PROBE_RTT mode and cap the cwnd at bbr_cwnd_min_target=4 packets.
 * After at least bbr_probe_rtt_mode_ms=200ms and at least one packet-timed
 * round trip elapsed with that flight size <= 4, we leave PROBE_RTT mode and
 * re-enter the previous mode. BBR uses 200ms to approximately bound the
 * performance penalty of PROBE_RTT's cwnd capping to roughly 2% (200ms/10s).
 *
 * Note that flows need only pay 2% if they are busy sending over the last 10
 * seconds. Interactive applications (e.g., Web, RPCs, video chunks) often have
 * natural silences or low-rate periods within 10 seconds where the rate is low
 * enough for long enough to drain its queue in the bottleneck. We pick up
 * these min RTT measurements opportunistically with our min_rtt filter. :-)
 */
static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	bool filter_expired;

	/* Track min RTT seen in the min_rtt_win_sec filter window: */
	filter_expired = after(tcp_jiffies32,
			       bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
	if (rs->rtt_us >= 0 &&
	    (rs->rtt_us <= bbr->min_rtt_us ||
	     (filter_expired && !rs->is_ack_delayed))) {
		bbr->min_rtt_us = rs->rtt_us;
		bbr->min_rtt_stamp = tcp_jiffies32;
	}

	if (bbr_probe_rtt_mode_ms > 0 && filter_expired &&
	    !bbr->idle_restart && bbr->mode != BBR_PROBE_RTT) {
		bbr->mode = BBR_PROBE_RTT;	/* dip, drain queue */
		bbr_save_cwnd(sk);	/* note cwnd so we can restore it */
		bbr->probe_rtt_done_stamp = 0;
	}

	if (bbr->mode == BBR_PROBE_RTT) {
		/* Ignore low rate samples during this mode. */
		tp->app_limited =
			(tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
		/* Maintain min packets in flight for max(200 ms, 1 round). */
		if (!bbr->probe_rtt_done_stamp &&
		    tcp_packets_in_flight(tp) <= bbr_cwnd_min_target) {
			bbr->probe_rtt_done_stamp = tcp_jiffies32 +
				msecs_to_jiffies(bbr_probe_rtt_mode_ms);
			bbr->probe_rtt_round_done = 0;
			bbr->next_rtt_delivered = tp->delivered;
		} else if (bbr->probe_rtt_done_stamp) {
			if (bbr->round_start)
				bbr->probe_rtt_round_done = 1;
			if (bbr->probe_rtt_round_done)
				bbr_check_probe_rtt_done(sk);
		}
	}
	/* Restart after idle ends only once we process a new S/ACK for data */
	if (rs->delivered > 0)
		bbr->idle_restart = 0;
}
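
/* Summary (illustrative, derived from the constants and the switch below):
 * the gains applied in each mode are approximately:
 *
 *   mode        pacing_gain                 cwnd_gain
 *   STARTUP     2.89 (2/ln 2)               2.89
 *   DRAIN       0.35 (ln 2 / 2)             2.89
 *   PROBE_BW    cycle: 1.25 / 0.75 / 1.0    2.0
 *               (or 1.0 if lt_use_bw)
 *   PROBE_RTT   1.0                         1.0
 */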

static void bbr_update_gains(struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	switch (bbr->mode) {
	case BBR_STARTUP:
		bbr->pacing_gain = bbr_high_gain;
		bbr->cwnd_gain = bbr_high_gain;
		break;
	case BBR_DRAIN:
		bbr->pacing_gain = bbr_drain_gain;	/* slow, to drain */
		bbr->cwnd_gain = bbr_high_gain;	/* keep cwnd */
		break;
	case BBR_PROBE_BW:
		bbr->pacing_gain = (bbr->lt_use_bw ?
				    BBR_UNIT :
				    bbr_pacing_gain[bbr->cycle_idx]);
		bbr->cwnd_gain = bbr_cwnd_gain;
		break;
	case BBR_PROBE_RTT:
		bbr->pacing_gain = BBR_UNIT;
		bbr->cwnd_gain = BBR_UNIT;
		break;
	default:
		WARN_ONCE(1, "BBR bad mode: %u\n", bbr->mode);
		break;
	}
}

static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
{
	bbr_update_bw(sk, rs);
	bbr_update_ack_aggregation(sk, rs);
	bbr_update_cycle_phase(sk, rs);
	bbr_check_full_bw_reached(sk, rs);
	bbr_check_drain(sk, rs);
	bbr_update_min_rtt(sk, rs);
	bbr_update_gains(sk);
}

static void bbr_main(struct sock *sk, const struct rate_sample *rs)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u32 bw;

	bbr_update_model(sk, rs);

	bw = bbr_bw(sk);
	bbr_set_pacing_rate(sk, bw, bbr->pacing_gain);
	bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain);
}

static void bbr_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->prior_cwnd = 0;
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	bbr->rtt_cnt = 0;
	bbr->next_rtt_delivered = 0;
	bbr->prev_ca_state = TCP_CA_Open;
	bbr->packet_conservation = 0;

	bbr->probe_rtt_done_stamp = 0;
	bbr->probe_rtt_round_done = 0;
	bbr->min_rtt_us = tcp_min_rtt(tp);
	bbr->min_rtt_stamp = tcp_jiffies32;

	minmax_reset(&bbr->bw, bbr->rtt_cnt, 0);	/* init max bw to 0 */

	bbr->has_seen_rtt = 0;
	bbr_init_pacing_rate_from_rtt(sk);

	bbr->round_start = 0;
	bbr->idle_restart = 0;
	bbr->full_bw_reached = 0;
	bbr->full_bw = 0;
	bbr->full_bw_cnt = 0;
	bbr->cycle_mstamp = 0;
	bbr->cycle_idx = 0;
	bbr_reset_lt_bw_sampling(sk);
	bbr_reset_startup_mode(sk);

	bbr->ack_epoch_mstamp = tp->tcp_mstamp;
	bbr->ack_epoch_acked = 0;
	bbr->extra_acked_win_rtts = 0;
	bbr->extra_acked_win_idx = 0;
	bbr->extra_acked[0] = 0;
	bbr->extra_acked[1] = 0;

	cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
}

static u32 bbr_sndbuf_expand(struct sock *sk)
{
	/* Provision 3 * cwnd since BBR may slow-start even during recovery. */
	return 3;
}

/* In theory BBR does not need to undo the cwnd since it does not
 * always reduce cwnd on losses (see bbr_main()). Keep it for now.
 */
static u32 bbr_undo_cwnd(struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->full_bw = 0;	/* spurious slow-down; reset full pipe detection */
	bbr->full_bw_cnt = 0;
	bbr_reset_lt_bw_sampling(sk);
	return tcp_sk(sk)->snd_cwnd;
}

/* Entering loss recovery, so save cwnd for when we exit or undo recovery. */
static u32 bbr_ssthresh(struct sock *sk)
{
	bbr_save_cwnd(sk);
	return tcp_sk(sk)->snd_ssthresh;
}

static size_t bbr_get_info(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info)
{
	if (ext & (1 << (INET_DIAG_BBRINFO - 1)) ||
	    ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct bbr *bbr = inet_csk_ca(sk);
		u64 bw = bbr_bw(sk);

		bw = bw * tp->mss_cache * USEC_PER_SEC >> BW_SCALE;
		memset(&info->bbr, 0, sizeof(info->bbr));
		info->bbr.bbr_bw_lo		= (u32)bw;
		info->bbr.bbr_bw_hi		= (u32)(bw >> 32);
		info->bbr.bbr_min_rtt		= bbr->min_rtt_us;
		info->bbr.bbr_pacing_gain	= bbr->pacing_gain;
		info->bbr.bbr_cwnd_gain		= bbr->cwnd_gain;
		*attr = INET_DIAG_BBRINFO;
		return sizeof(info->bbr);
	}
	return 0;
}

static void bbr_set_state(struct sock *sk, u8 new_state)
{
	struct bbr *bbr = inet_csk_ca(sk);

	if (new_state == TCP_CA_Loss) {
		struct rate_sample rs = { .losses = 1 };

		bbr->prev_ca_state = TCP_CA_Loss;
		bbr->full_bw = 0;
		bbr->round_start = 1;	/* treat RTO like end of a round */
		bbr_lt_bw_sampling(sk, &rs);
	}
}

static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "bbr",
	.owner		= THIS_MODULE,
	.init		= bbr_init,
	.cong_control	= bbr_main,
	.sndbuf_expand	= bbr_sndbuf_expand,
	.undo_cwnd	= bbr_undo_cwnd,
	.cwnd_event	= bbr_cwnd_event,
	.ssthresh	= bbr_ssthresh,
	.min_tso_segs	= bbr_min_tso_segs,
	.get_info	= bbr_get_info,
	.set_state	= bbr_set_state,
};

static int __init bbr_register(void)
{
	BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_bbr_cong_ops);
}

static void __exit bbr_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_bbr_cong_ops);
}

module_init(bbr_register);
module_exit(bbr_unregister);

MODULE_AUTHOR("Van Jacobson <vanj@google.com>");
MODULE_AUTHOR("Neal Cardwell <ncardwell@google.com>");
MODULE_AUTHOR("Yuchung Cheng <ycheng@google.com>");
MODULE_AUTHOR("Soheil Hassas Yeganeh <soheil@google.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("TCP BBR (Bottleneck Bandwidth and RTT)");