/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>


/*	Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B if, for any
	time interval t_i...t_f, the number of transmitted bits
	does not exceed B + R*(t_f - t_i).

	Packetized version of this definition:
	the sequence of packets of sizes s_i served at moments t_i
	obeys TBF if, for any i <= k:

	s_i + ... + s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially and let N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in the queue has length S, it may be
	transmitted only at a time t_* when S/R <= N(t_*),
	and in that case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.



	In practice, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size, the other one,
	with rate P (peak rate) and depth M (equal to the link MTU),
	limits bursts on a smaller time scale.

	It is easy to see that P > R and B > M. If P is infinite, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max((L-B)/R, (L-M)/P)

	where L is the maximal backlog (the configured limit); a worked
	example is given at the end of this comment.


	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.


	This means that, with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.

	Note that the peak rate TBF is much tougher: with MTU 1500,
	P_crit = 150Kbytes/sec. So, if you need higher peak
	rates, use Alpha with HZ=1000 :-)

	With classful TBF, the limit is kept only for backwards compatibility.
	It is passed to the default bfifo qdisc; if the inner qdisc is
	changed, the limit is no longer effective.
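
	Worked example of the latency bound above (illustrative numbers,
	not from the original text): with limit L = 100 Kbytes, bucket
	B = 10 Kbytes at R = 1 Mbyte/sec, and peak bucket M = 1.5 Kbytes
	at P = 10 Mbytes/sec,

	lat = max((100K - 10K)/1M, (100K - 1.5K)/10M) = max(90 ms, ~10 ms) = 90 ms

	i.e. the steady-state rate, not the peak rate, dominates the latency.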
 */

struct tbf_sched_data {
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		max_size;
	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	s64		mtu;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;

/* Variables */
	s64		tokens;		/* Current number of B tokens */
	s64		ptokens;	/* Current number of P tokens */
	s64		t_c;		/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};


/* Time to Length: convert a time in ns to a length in bytes,
 * to determine how many bytes can be sent in the given time.
 */
static u64 psched_ns_t2l(const struct psched_ratecfg *r,
			 u64 time_in_ns)
{
	/* The formula is:
	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
	 */
	u64 len = time_in_ns * r->rate_bytes_ps;

	do_div(len, NSEC_PER_SEC);

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
		do_div(len, 53);
		len = len * 48;
	}

	if (len > r->overhead)
		len -= r->overhead;
	else
		len = 0;

	return len;
}

/*
 * Return the length of the individual segments of a GSO packet,
 * including all headers (MAC, IP, TCP/UDP).
 */
static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

	return hdr_len + skb_gso_transport_seglen(skb);
}

/* The GSO packet is too big; segment it so that tbf can transmit
 * each segment in time.
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_drop(skb, sch, to_free);

	nb = 0;
	while (segs) {
		nskb = segs->next;
		segs->next = NULL;
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		len += segs->len;
		ret = qdisc_enqueue(segs, q->qdisc, to_free);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				qdisc_qstats_drop(sch);
		} else {
			nb++;
		}
		segs = nskb;
	}
	sch->q.qlen += nb;
	if (nb > 1)
		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
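
/* Enqueue: a packet longer than max_size can never accumulate enough
 * tokens to be sent in one piece, so GSO packets whose individual
 * segments fit are resegmented and anything else is dropped.
 */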
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
			return tbf_segment(skb, sch, to_free);
		return qdisc_drop(skb, sch, to_free);
	}
	ret = qdisc_enqueue(skb, q->qdisc, to_free);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			qdisc_qstats_drop(sch);
		return ret;
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static bool tbf_peak_present(const struct tbf_sched_data *q)
{
	return q->peak.rate_bytes_ps;
}

static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_get_ns();
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (tbf_peak_present(q)) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			qdisc_bstats_update(sch, skb);
			return skb;
		}
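
		/* Not enough tokens: -toks (and -ptoks) is the remaining
		 * deficit in ns, so wake up once the larger deficit has
		 * been repaid by the elapsing time.
		 */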
		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but it is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC).
		 */

		qdisc_qstats_overlimit(sch);
	}
	return NULL;
}

static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	q->t_c = ktime_get_ns();
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]   = { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]    = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]    = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_RATE64]  = { .type = NLA_U64 },
	[TCA_TBF_PRATE64] = { .type = NLA_U64 },
	[TCA_TBF_BURST]   = { .type = NLA_U32 },
	[TCA_TBF_PBURST]  = { .type = NLA_U32 },
};

static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy, NULL);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB],
					      NULL));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
					      tb[TCA_TBF_PTAB],
					      NULL));

	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

	if (tb[TCA_TBF_BURST]) {
		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
		buffer = psched_l2t_ns(&rate, max_size);
	} else {
		max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
	}

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equal to rate %llu !\n",
					    peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		if (tb[TCA_TBF_PBURST]) {
			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);

			max_size = min_t(u32, max_size, pburst);
			mtu = psched_l2t_ns(&peak, pburst);
		} else {
			max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
		}
	} else {
		memset(&peak, 0, sizeof(peak));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit,
					 extack);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
					  q->qdisc->qstats.backlog);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
		if (child != &noop_qdisc)
			qdisc_hash_add(child, true);
	}
	q->limit = qopt->limit;
	if (tb[TCA_TBF_PBURST])
		q->mtu = mtu;
	else
		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	if (tb[TCA_TBF_BURST])
		q->buffer = buffer;
	else
		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));

	sch_tree_unlock(sch);
	err = 0;
done:
	return err;
}

static int tbf_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	if (!opt)
		return -EINVAL;

	q->t_c = ktime_get_ns();

	return tbf_change(sch, opt, extack);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (tbf_peak_present(q))
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;
	if (tbf_peak_present(q) &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long tbf_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}
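
/* TBF is classful only so that an inner qdisc can be attached: it exposes
 * a single class (minor 1) that wraps q->qdisc, and the class operations
 * below simply proxy to it.
 */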
static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		= tbf_graft,
	.leaf		= tbf_leaf,
	.find		= tbf_find,
	.walk		= tbf_walk,
	.dump		= tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		= NULL,
	.cl_ops		= &tbf_class_ops,
	.id		= "tbf",
	.priv_size	= sizeof(struct tbf_sched_data),
	.enqueue	= tbf_enqueue,
	.dequeue	= tbf_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= tbf_init,
	.reset		= tbf_reset,
	.destroy	= tbf_destroy,
	.change		= tbf_change,
	.dump		= tbf_dump,
	.owner		= THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");
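
/* Illustrative user-space configuration (not part of this file): with the
 * iproute2 "tc" front end, a TBF instance handled by tbf_change() above can
 * be attached roughly as
 *
 *     tc qdisc add dev eth0 root tbf rate 1mbit burst 10kb latency 70ms
 *
 * where rate/burst map onto the TCA_TBF_PARMS rate and buffer, and latency
 * is turned by tc into the backlog limit. Device name and numbers are
 * examples only.
 */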