/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 */
/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve. the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/div64.h>

/*
 * kernel internal service curve representation:
 *   coordinates are given by 64 bit unsigned integers.
 *   x-axis: unit is clock count.
 *   y-axis: unit is byte.
 *
 * The service curve parameters are converted to the internal
 * representation. The slope values are scaled to avoid overflow.
 * the inverse slope values as well as the y-projection of the 1st
 * segment are kept in order to avoid 64-bit divide operations
 * that are expensive on 32-bit architectures.
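 *
 * For example, seg_x2y() below evaluates y = x * sm >> SM_SHIFT by
 * splitting x at SM_SHIFT bits and combining the two partial products,
 * avoiding the overflow that computing x * sm directly could cause.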
 */

struct internal_sc
{
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve */
struct runtime_sc
{
	u64	x;	/* current starting position on x-axis */
	u64	y;	/* current starting position on y-axis */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

enum hfsc_class_flags
{
	HFSC_RSC = 0x1,
	HFSC_FSC = 0x2,
	HFSC_USC = 0x4
};

struct hfsc_class
{
	struct Qdisc_class_common cl_common;
	unsigned int	refcnt;		/* usage count */

	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	unsigned int	level;		/* class level in hierarchy */
	struct tcf_proto *filter_list;	/* filter list */
	unsigned int	filter_cnt;	/* filter count */

	struct hfsc_sched *sched;	/* scheduler data */
	struct hfsc_class *cl_parent;	/* parent class */
	struct list_head siblings;	/* sibling classes */
	struct list_head children;	/* child classes */
	struct Qdisc	*qdisc;		/* leaf qdisc */

	struct rb_node el_node;		/* qdisc's eligible tree member */
	struct rb_root vt_tree;		/* active children sorted by cl_vt */
	struct rb_node vt_node;		/* parent's vt_tree member */
	struct rb_root cf_tree;		/* active children sorted by cl_f */
	struct rb_node cf_node;		/* parent's cf_heap member */
	struct list_head dlist;		/* drop list member */

	u64	cl_total;		/* total work in bytes */
	u64	cl_cumul;		/* cumulative work in bytes done by
					   real-time criteria */

	u64	cl_d;			/* deadline */
	u64	cl_e;			/* eligible time */
	u64	cl_vt;			/* virtual time */
	u64	cl_f;			/* time when this class will fit for
					   link-sharing, max(myf, cfmin) */
	u64	cl_myf;			/* my fit-time (calculated from this
					   class's own upperlimit curve) */
	u64	cl_myfadj;		/* my fit-time adjustment (to cancel
					   history dependence) */
	u64	cl_cfmin;		/* earliest children's fit-time (used
					   with cl_myf to obtain cl_f) */
	u64	cl_cvtmin;		/* minimal virtual time among the
					   children fit for link-sharing
					   (monotonic within a period) */
	u64	cl_vtadj;		/* intra-period cumulative vt
					   adjustment */
	u64	cl_vtoff;		/* inter-period cumulative vt offset */
	u64	cl_cvtmax;		/* max child's vt in the last period */
	u64	cl_cvtoff;		/* cumulative cvtmax of all periods */
	u64	cl_pcvtoff;		/* parent's cvtoff at initialization
					   time */

	struct internal_sc cl_rsc;	/* internal real-time service curve */
	struct internal_sc cl_fsc;	/* internal fair service curve */
	struct internal_sc cl_usc;	/* internal upperlimit service curve */
	struct runtime_sc cl_deadline;	/* deadline curve */
	struct runtime_sc cl_eligible;	/* eligible curve */
	struct runtime_sc cl_virtual;	/* virtual curve */
	struct runtime_sc cl_ulimit;	/* upperlimit curve */

	unsigned long	cl_flags;	/* which curves are valid */
	unsigned long	cl_vtperiod;	/* vt period sequence number */
	unsigned long	cl_parentperiod;/* parent's vt period sequence number */
	unsigned long	cl_nactive;	/* number of active children */
};

struct hfsc_sched
{
	u16	defcls;				/* default class id */
	struct hfsc_class root;			/* root class */
	struct Qdisc_class_hash clhash;		/* class hash */
	struct rb_root eligible;		/* eligible tree */
	struct list_head droplist;		/* active leaf class list (for
						   dropping) */
	struct qdisc_watchdog watchdog;		/* watchdog timer */
};

#define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */


/*
 * eligible tree holds backlogged classes sorted by their eligible times.
 * there is one eligible tree per hfsc instance.
 */

static void
eltree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->sched->eligible.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, el_node);
		if (cl->cl_e >= cl1->cl_e)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->el_node, parent, p);
	rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
	eltree_remove(cl);
	eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
	struct hfsc_class *p, *cl = NULL;
	struct rb_node *n;

	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, el_node);
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return cl;
}

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
	struct rb_node *n;

	n = rb_first(&q->eligible);
	if (n == NULL)
		return NULL;
	return rb_entry(n, struct hfsc_class, el_node);
}

/*
 * vttree holds backlogged child classes sorted by their virtual time.
 * each intermediate class has one vttree.
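 * insertion sends equal keys to the right (cl_vt >= cl1->cl_vt), so
 * classes with identical virtual time are visited in insertion order.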
 */
static void
vttree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
		if (cl->cl_vt >= cl1->cl_vt)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->vt_node, parent, p);
	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_update(struct hfsc_class *cl)
{
	vttree_remove(cl);
	vttree_insert(cl);
}

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
	struct hfsc_class *p;
	struct rb_node *n;

	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, vt_node);
		if (p->cl_f <= cur_time)
			return p;
	}
	return NULL;
}

/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
	/* if root-class's cfmin is bigger than cur_time nothing to do */
	if (cl->cl_cfmin > cur_time)
		return NULL;

	while (cl->level > 0) {
		cl = vttree_firstfit(cl, cur_time);
		if (cl == NULL)
			return NULL;
		/*
		 * update parent's cl_cvtmin.
		 */
		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
			cl->cl_parent->cl_cvtmin = cl->cl_vt;
	}
	return cl;
}

static void
cftree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
		if (cl->cl_f >= cl1->cl_f)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->cf_node, parent, p);
	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_update(struct hfsc_class *cl)
{
	cftree_remove(cl);
	cftree_insert(cl);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bps
 *	d: us
 *  internal service curve parameters
 *	sm: (bytes/psched_us) << SM_SHIFT
 *	ism: (psched_us/byte) << ISM_SHIFT
 *	dx: psched_us
 *
 * The clock source resolution with ktime and PSCHED_SHIFT 10 is 1.024us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 * digits in decimal using the following table.
 *
 *  bits/sec      100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/1.024us 12.8e-3    128e-3     1280e-3    12800e-3   128000e-3
 *
 *  1.024us/byte  78.125     7.8125     0.78125    0.078125   0.0078125
 *
 * So, for PSCHED_SHIFT 10 we need: SM_SHIFT 20, ISM_SHIFT 18.
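 *
 * Worked example (illustrative numbers): at 100Kbps, m is 12500
 * bytes/sec, i.e. 12.8e-3 bytes per 1.024us tick; m2sm() yields
 * (12500 << 20) / 976562 ~= 13422, which preserves ~4 significant
 * decimal digits, whereas unscaled bytes-per-tick arithmetic would
 * round the slope down to 0.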
 */
#define	SM_SHIFT	(30 - PSCHED_SHIFT)
#define	ISM_SHIFT	(8 + PSCHED_SHIFT)

#define	SM_MASK		((1ULL << SM_SHIFT) - 1)
#define	ISM_MASK	((1ULL << ISM_SHIFT) - 1)

static inline u64
seg_x2y(u64 x, u64 sm)
{
	u64 y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return y;
}

static inline u64
seg_y2x(u64 y, u64 ism)
{
	u64 x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return x;
}

/* Convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
	u64 sm;

	sm = ((u64)m << SM_SHIFT);
	sm += PSCHED_TICKS_PER_SEC - 1;
	do_div(sm, PSCHED_TICKS_PER_SEC);
	return sm;
}

/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
	u64 ism;

	if (m == 0)
		ism = HT_INFINITY;
	else {
		ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
		ism += m - 1;
		do_div(ism, m);
	}
	return ism;
}

/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
	u64 dx;

	dx = ((u64)d * PSCHED_TICKS_PER_SEC);
	dx += USEC_PER_SEC - 1;
	do_div(dx, USEC_PER_SEC);
	return dx;
}

/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
	u64 m;

	m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
	return (u32)m;
}

/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
	u64 d;

	d = dx * USEC_PER_SEC;
	do_div(d, PSCHED_TICKS_PER_SEC);
	return (u32)d;
}

static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
	isc->sm1  = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx   = d2dx(sc->d);
	isc->dy   = seg_x2y(isc->dx, isc->sm1);
	isc->sm2  = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
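 * (x, y) is the curve origin: hfsc_change_rsc() below starts the
 * deadline curve at (cur_time, cl_cumul), and hfsc_change_fsc() starts
 * the virtual curve at (cl_vt, cl_total).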
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	rtsc->x	   = x;
	rtsc->y    = y;
	rtsc->sm1  = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx   = isc->dx;
	rtsc->dy   = isc->dy;
	rtsc->sm2  = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection of the runtime service curve for the
 * given y-projection value
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
	u64 x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return x;
}

static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
	u64 y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return y;
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	u64 y1, y2, dx, dy;
	u32 dsm;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;
	do_div(dx, dsm);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}

static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
	u64 cur_time = psched_get_time();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
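	 * (convex here means sm1 <= sm2; zeroing dx and dy drops the
	 * first segment, leaving only the m2 line through the current
	 * point.)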
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
	struct rb_node *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	if (n == NULL) {
		cl->cl_cfmin = 0;
		return;
	}
	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;
}

static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
	struct hfsc_class *max_cl;
	struct rb_node *n;
	u64 vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			n = rb_last(&cl->cl_parent->vt_tree);
			if (n != NULL) {
				max_cl = rb_entry(n, struct hfsc_class, vt_node);
				/*
				 * set vt to the average of the min and max
				 * classes. if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to cvtoff to make a new
				 * vt (vtoff + vt) larger than the vt in the
				 * last period for all children.
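				 * e.g. (illustrative numbers) if the last
				 * period ended with cvtmax = 100, cvtoff
				 * grows by 100; a child initialized with
				 * cl_pcvtoff = 0 then gets cl_vtoff = 100,
				 * so vtoff + vt resumes no smaller than any
				 * vt seen in the previous period.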
				 */
				vt = cl->cl_parent->cl_cvtmax;
				cl->cl_parent->cl_cvtoff += vt;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
				cl->cl_vt = 0;
			}

			cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
							cl->cl_pcvtoff;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
						      cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			vttree_insert(cl);
			cftree_insert(cl);

			if (cl->cl_flags & HFSC_USC) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = psched_get_time();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
					 cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
						      cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
	u64 f; /* , myf_bound, delta; */
	int go_passive = 0;

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
		go_passive = 1;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt tree */
			vttree_remove(cl);

			cftree_remove(cl);
			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
			    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt tree */
		vttree_update(cl);

		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
							      cl->cl_total);
#if 0
			/*
			 * This code causes classes to stay way under their
			 * limit when multiple classes are used at gigabit
			 * speed. needs investigation. -kaber
			 */
			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
#endif
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
set_active(struct hfsc_class *cl, unsigned int len)
{
	if (cl->cl_flags & HFSC_RSC)
		init_ed(cl, len);
	if (cl->cl_flags & HFSC_FSC)
		init_vf(cl, len);

	list_add_tail(&cl->dlist, &cl->sched->droplist);
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_flags & HFSC_RSC)
		eltree_remove(cl);

	list_del(&cl->dlist);

	/*
	 * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from vttree.
	 */
}

static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->ops->peek(sch);
	if (skb == NULL) {
		qdisc_warn_nonwc("qdisc_peek_len", sch);
		return 0;
	}
	len = qdisc_pkt_len(skb);

	return len;
}

static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	unsigned int level;

	do {
		level = 0;
		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level >= level)
				level = p->level + 1;
		}
		cl->level = level;
	} while ((cl = cl->cl_parent) != NULL);
}

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct hfsc_class, cl_common);
}

static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
		u64 cur_time)
{
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}
	cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
		u64 cur_time)
{
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
}

static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
	[TCA_HFSC_RSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_FSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_USC]	= { .len = sizeof(struct tc_service_curve) },
};

static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		  struct nlattr **tca, unsigned long *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HFSC_MAX + 1];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
	u64 cur_time;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HFSC_MAX, opt, hfsc_policy);
	if (err < 0)
		return err;

	if (tb[TCA_HFSC_RSC]) {
		rsc = nla_data(tb[TCA_HFSC_RSC]);
		if (rsc->m1 == 0 && rsc->m2 == 0)
			rsc = NULL;
	}

	if (tb[TCA_HFSC_FSC]) {
		fsc = nla_data(tb[TCA_HFSC_FSC]);
		if (fsc->m1 == 0 && fsc->m2 == 0)
			fsc = NULL;
	}

	if (tb[TCA_HFSC_USC]) {
		usc = nla_data(tb[TCA_HFSC_USC]);
		if (usc->m1 == 0 && usc->m2 == 0)
			usc = NULL;
	}

	if (cl != NULL) {
		if (parentid) {
			if (cl->cl_parent &&
			    cl->cl_parent->cl_common.classid != parentid)
				return -EINVAL;
			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
				return -EINVAL;
		}
		cur_time = psched_get_time();

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		if (rsc != NULL)
			hfsc_change_rsc(cl, rsc, cur_time);
		if (fsc != NULL)
			hfsc_change_fsc(cl, fsc);
		if (usc != NULL)
			hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			if (cl->cl_flags & HFSC_RSC)
				update_ed(cl, qdisc_peek_len(cl->qdisc));
			if (cl->cl_flags & HFSC_FSC)
				update_vf(cl, 0, cur_time);
		}
		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EEXIST;

	parent = &q->root;
	if (parentid) {
		parent = hfsc_find_class(parentid, sch);
		if (parent == NULL)
			return -ENOENT;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;
	if (hfsc_find_class(classid, sch))
		return -EEXIST;

	if (rsc == NULL && fsc == NULL)
		return -EINVAL;

	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
					qdisc_root_sleeping_lock(sch),
					tca[TCA_RATE]);
		if (err) {
			kfree(cl);
			return err;
		}
	}

	if (rsc != NULL)
		hfsc_change_rsc(cl, rsc, 0);
	if (fsc != NULL)
		hfsc_change_fsc(cl, fsc);
	if (usc != NULL)
		hfsc_change_usc(cl, usc, 0);

	cl->cl_common.classid = classid;
	cl->refcnt    = 1;
	cl->sched     = q;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		hfsc_purge_queue(sch, parent);
	hfsc_adjust_levels(parent);
	cl->cl_pcvtoff = parent->cl_cvtoff;
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	tcf_destroy_chain(&cl->filter_list);
	qdisc_destroy(cl->qdisc);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	if (cl != &q->root)
		kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
		return -EBUSY;

	sch_tree_lock(sch);

	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);

	hfsc_purge_queue(sch, cl);
	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}

static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *head, *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
		if (cl->level == 0)
			return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	head = &q->root;
	tcf = q->root.filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if ((cl = (struct hfsc_class *)res.class) == NULL) {
			if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
				break; /* filter selected invalid classid */
			if (cl->level >= head->level)
				break; /* filter may only point downwards */
		}

		if (cl->level == 0)
			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = cl->filter_list;
		head = cl;
	}

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
		return NULL;

	return cl;
}

static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		 struct Qdisc **old)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
		new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					&pfifo_qdisc_ops,
					cl->cl_common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	hfsc_purge_queue(sch, cl);
	*old = cl->qdisc;
	cl->qdisc = new;
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level == 0)
		return cl->qdisc;

	return NULL;
}

static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->qdisc->q.qlen == 0) {
		update_vf(cl, 0, 0);
		set_passive(cl);
	}
}

static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL) {
		if (p != NULL && p->level <= cl->level)
			return 0;
		cl->filter_cnt++;
	}

	return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	cl->filter_cnt--;
}

static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		cl = &q->root;

	return &cl->filter_list;
}

static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d  = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	NLA_PUT(skb, attr, sizeof(tsc), &tsc);

	return skb->len;

nla_put_failure:
	return -1;
}

static inline int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	return -1;
}

static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
		struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
					  TC_H_ROOT;
	tcm->tcm_handle = cl->cl_common.classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (hfsc_dump_curves(skb, cl) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		      struct gnet_dump *d)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;

	cl->qstats.qlen = cl->qdisc->q.qlen;
	xstats.level  = cl->level;
	xstats.period = cl->cl_vtperiod;
	xstats.work   = cl->cl_total;
	xstats.rtwork = cl->cl_cumul;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}


static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *n;
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i],
				     cl_common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	u64 next_time = 0;

	if ((cl = eltree_get_minel(q)) != NULL)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	WARN_ON(next_time == 0);
	qdisc_watchdog_schedule(&q->watchdog, next_time);
}

static int
hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;
	int err;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	q->defcls = qopt->defcls;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);

	q->root.cl_common.classid = sch->handle;
	q->root.refcnt = 1;
	q->root.sched  = q;
	q->root.qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					  &pfifo_qdisc_ops,
					  sch->handle);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
	qdisc_class_hash_grow(sch, &q->clhash);

	qdisc_watchdog_init(&q->watchdog, sch);

	return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	sch_tree_lock(sch);
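	/* defcls is only consulted by hfsc_classify() at enqueue time,
	 * so the switch takes effect for subsequently queued packets */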
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}

static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total        = 0;
	cl->cl_cumul        = 0;
	cl->cl_d            = 0;
	cl->cl_e            = 0;
	cl->cl_vt           = 0;
	cl->cl_vtadj        = 0;
	cl->cl_vtoff        = 0;
	cl->cl_cvtmin       = 0;
	cl->cl_cvtmax       = 0;
	cl->cl_cvtoff       = 0;
	cl->cl_pcvtoff      = 0;
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f            = 0;
	cl->cl_myf          = 0;
	cl->cl_myfadj       = 0;
	cl->cl_cfmin        = 0;
	cl->cl_nactive      = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}

static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct hlist_node *n;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
			hfsc_reset_class(cl);
	}
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	qdisc_watchdog_cancel(&q->watchdog);
	sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *n, *next;
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
					  cl_common.hnode)
			hfsc_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_hfsc_qopt qopt;

	qopt.defcls = q->defcls;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_class *cl;
	int uninitialized_var(err);

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			sch->qstats.drops++;
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1)
		set_active(cl, qdisc_pkt_len(skb));

	cl->bstats.packets++;
	cl->bstats.bytes += qdisc_pkt_len(skb);
	sch->bstats.packets++;
	sch->bstats.bytes += qdisc_pkt_len(skb);
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;

	cur_time = psched_get_time();

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			sch->qstats.overlimits++;
			hfsc_schedule_watchdog(sch);
			return NULL;
		}
	}

	skb = qdisc_dequeue_peeked(cl->qdisc);
	if (skb == NULL) {
		qdisc_warn_nonwc("HFSC", cl->qdisc);
		return NULL;
	}

	update_vf(cl, qdisc_pkt_len(skb), cur_time);
	if (realtime)
		cl->cl_cumul += qdisc_pkt_len(skb);

	if (cl->qdisc->q.qlen != 0) {
		if (cl->cl_flags & HFSC_RSC) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	sch->flags &= ~TCQ_F_THROTTLED;
	sch->q.qlen--;

	return skb;
}

static unsigned int
hfsc_drop(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->droplist, dlist) {
		if (cl->qdisc->ops->drop != NULL &&
		    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
			if (cl->qdisc->q.qlen == 0) {
				update_vf(cl, 0, 0);
				set_passive(cl);
			} else {
				list_move_tail(&cl->dlist, &q->droplist);
			}
			cl->qstats.drops++;
			sch->qstats.drops++;
			sch->q.qlen--;
			return len;
		}
	}
	return 0;
}

static const struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.get		= hfsc_get_class,
	.put		= hfsc_put_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_chain	= hfsc_tcf_chain,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= hfsc_drop,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};

static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);