/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>


/*	Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.



	Actually, QoS requires two TBFs to be applied to a data stream.
	One of them controls steady state burst size, another
	one with rate P (peak rate) and depth M (equal to link MTU)
	limits bursts at a smaller time scale.

	It is easy to see that P>R, and B>M. If P is infinity, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)


	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.


	This means that, with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10 Mbit ethernet and HZ=100 the minimal allowed B is ~10 kbytes.

	Note that the peak rate TBF is much tougher: with MTU 1500
	P_crit = 150 kbytes/sec. So, if you need greater peak
	rates, use alpha with HZ=1000 :-)

	With classful TBF, limit is just kept for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed the limit is not effective anymore.
*/
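/* Worked example of the latency bound above (illustrative numbers, not
 * from the original comment): with R = 125 kbyte/s (1 Mbit/s),
 * B = 10 kbyte, P = 250 kbyte/s, M = 1500 bytes and a backlog limit
 * L = 50 kbyte,
 *
 *	lat = max((50000-10000)/125000, (50000-1500)/250000)
 *	    = max(0.32 s, 0.194 s) = 0.32 s
 *
 * i.e. the steady-state bucket, not the peak bucket, dominates the
 * worst-case delay for this configuration.
 */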
struct tbf_sched_data {
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		max_size;
	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	s64		mtu;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;

/* Variables */
	s64	tokens;			/* Current number of B tokens */
	s64	ptokens;		/* Current number of P tokens */
	s64	t_c;			/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};


/* Time to Length, convert time in ns to length in bytes
 * to determine how many bytes can be sent in given time.
 */
static u64 psched_ns_t2l(const struct psched_ratecfg *r,
			 u64 time_in_ns)
{
	/* The formula is :
	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
	 */
	u64 len = time_in_ns * r->rate_bytes_ps;

	do_div(len, NSEC_PER_SEC);

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
		do_div(len, 53);
		len = len * 48;
	}

	if (len > r->overhead)
		len -= r->overhead;
	else
		len = 0;

	return len;
}

/*
 * Return length of individual segments of a gso packet,
 * including all headers (MAC, IP, TCP/UDP)
 */
static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

	return hdr_len + skb_gso_transport_seglen(skb);
}

/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_drop(skb, sch);

	nb = 0;
	while (segs) {
		nskb = segs->next;
		segs->next = NULL;
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		len += segs->len;
		ret = qdisc_enqueue(segs, q->qdisc);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				qdisc_qstats_drop(sch);
		} else {
			nb++;
		}
		segs = nskb;
	}
	sch->q.qlen += nb;
	if (nb > 1)
		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
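/* Worked example for psched_ns_t2l() above (illustrative numbers, not
 * from the original file): with rate_bytes_ps = 125000 (1 Mbit/s) and
 * time_in_ns = 8000000 (8 ms),
 *
 *	len = 8000000 * 125000 / NSEC_PER_SEC = 1000 bytes.
 *
 * On TC_LINKLAYER_ATM each 53-byte cell carries only 48 payload bytes,
 * so the same interval yields floor(1000/53) * 48 = 864 bytes, before
 * the configured per-packet overhead is subtracted.
 */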
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
			return tbf_segment(skb, sch);
		return qdisc_drop(skb, sch);
	}
	ret = qdisc_enqueue(skb, q->qdisc);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			qdisc_qstats_drop(sch);
		return ret;
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static bool tbf_peak_present(const struct tbf_sched_data *q)
{
	return q->peak.rate_bytes_ps;
}

static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_get_ns();
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (tbf_peak_present(q)) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but this is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		qdisc_qstats_overlimit(sch);
	}
	return NULL;
}
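/* Worked example of the token arithmetic above (illustrative numbers,
 * not from the original file): at rate 125000 bytes/s with
 * q->buffer = 10000000 ns (a 10 ms bucket) and an idle gap
 * now - q->t_c of 4 ms, toks starts with 4000000 ns of fresh credit.
 * A 1000-byte head-of-line packet costs
 * psched_l2t_ns(&q->rate, 1000) = 8000000 ns, so unless the carried
 * q->tokens held at least 4000000 ns, toks goes negative and the
 * watchdog is armed for exactly the missing nanoseconds.
 */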
static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	q->t_c = ktime_get_ns();
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]		= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
	[TCA_TBF_BURST]		= { .type = NLA_U32 },
	[TCA_TBF_PBURST]	= { .type = NLA_U32 },
};

static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB]));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
					      tb[TCA_TBF_PTAB]));

	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

	if (tb[TCA_TBF_BURST]) {
		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
		buffer = psched_l2t_ns(&rate, max_size);
	} else {
		max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
	}

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equal to rate %llu !\n",
					    peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		if (tb[TCA_TBF_PBURST]) {
			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);

			max_size = min_t(u32, max_size, pburst);
			mtu = psched_l2t_ns(&peak, pburst);
		} else {
			max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
		}
	} else {
		memset(&peak, 0, sizeof(peak));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
					  q->qdisc->qstats.backlog);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	if (tb[TCA_TBF_PBURST])
		q->mtu = mtu;
	else
		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	if (tb[TCA_TBF_BURST])
		q->buffer = buffer;
	else
		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));

	sch_tree_unlock(sch);
	err = 0;
done:
	return err;
}
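/* Worked configuration example for tbf_change() above (illustrative
 * numbers, not from the original file): a caller passing
 * TCA_TBF_BURST = 10000 bytes at rate 125000 bytes/s gets
 * buffer = psched_l2t_ns(&rate, 10000) = 80000000 ns, an 80 ms bucket;
 * a legacy caller instead supplies qopt->buffer in psched ticks and
 * max_size is recovered the other way round via psched_ns_t2l().
 * Either way q->buffer (time) and q->max_size (bytes) end up
 * describing the same bucket depth.
 */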
static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	q->t_c = ktime_get_ns();
	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (tbf_peak_present(q))
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;
	if (tbf_peak_present(q) &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}
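/* Usage sketch (tc command line, not part of this file): the class ops
 * below expose the single inner qdisc as class 1:1, so the default
 * bfifo can be swapped after attaching tbf, e.g.
 *
 *	tc qdisc add dev eth0 root handle 1: tbf rate 1mbit \
 *		burst 10k latency 50ms
 *	tc qdisc add dev eth0 parent 1:1 handle 10: pfifo limit 100
 *
 * after which the original "limit" no longer applies, as noted in the
 * header comment.
 */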
static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");