/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 */
/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * When a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  The link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/div64.h>

/*
 * kernel internal service curve representation:
 *   coordinates are given by 64 bit unsigned integers.
 *   x-axis: unit is clock count.
 *   y-axis: unit is byte.
 *
 * The service curve parameters are converted to the internal
 * representation.  The slope values are scaled to avoid overflow.
 * The inverse slope values as well as the y-projection of the 1st
 * segment are kept in order to avoid 64-bit divide operations
 * that are expensive on 32-bit architectures.
 */
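/*
 * Illustrative numbers (not part of the original source): with a 1.024us
 * tick, a rate of 10 Mbit/s (1,250,000 bytes/s) is about 1.28 bytes/tick,
 * stored as sm = 1.28 << 20 ~= 1342177, and its inverse, about 0.78125
 * ticks/byte, is stored as ism = 0.78125 << 18 = 204800.  seg_x2y() and
 * seg_y2x() below can then evaluate the curves with multiplications and
 * shifts only.
 */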

struct internal_sc
{
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve */
struct runtime_sc
{
	u64	x;	/* current starting position on x-axis */
	u64	y;	/* current starting position on y-axis */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

enum hfsc_class_flags
{
	HFSC_RSC = 0x1,
	HFSC_FSC = 0x2,
	HFSC_USC = 0x4
};

struct hfsc_class
{
	struct Qdisc_class_common cl_common;
	unsigned int	refcnt;		/* usage count */

	struct gnet_stats_basic bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	unsigned int	level;		/* class level in hierarchy */
	struct tcf_proto *filter_list;	/* filter list */
	unsigned int	filter_cnt;	/* filter count */

	struct hfsc_sched *sched;	/* scheduler data */
	struct hfsc_class *cl_parent;	/* parent class */
	struct list_head siblings;	/* sibling classes */
	struct list_head children;	/* child classes */
	struct Qdisc	*qdisc;		/* leaf qdisc */

	struct rb_node el_node;		/* qdisc's eligible tree member */
	struct rb_root vt_tree;		/* active children sorted by cl_vt */
	struct rb_node vt_node;		/* parent's vt_tree member */
	struct rb_root cf_tree;		/* active children sorted by cl_f */
	struct rb_node cf_node;		/* parent's cf_heap member */
	struct list_head dlist;		/* drop list member */

	u64	cl_total;		/* total work in bytes */
	u64	cl_cumul;		/* cumulative work in bytes done by
					   real-time criteria */

	u64	cl_d;			/* deadline */
	u64	cl_e;			/* eligible time */
	u64	cl_vt;			/* virtual time */
	u64	cl_f;			/* time when this class will fit for
					   link-sharing, max(myf, cfmin) */
	u64	cl_myf;			/* my fit-time (calculated from this
					   class's own upperlimit curve) */
	u64	cl_myfadj;		/* my fit-time adjustment (to cancel
					   history dependence) */
	u64	cl_cfmin;		/* earliest children's fit-time (used
					   with cl_myf to obtain cl_f) */
	u64	cl_cvtmin;		/* minimal virtual time among the
					   children fit for link-sharing
					   (monotonic within a period) */
	u64	cl_vtadj;		/* intra-period cumulative vt
					   adjustment */
	u64	cl_vtoff;		/* inter-period cumulative vt offset */
	u64	cl_cvtmax;		/* max child's vt in the last period */
	u64	cl_cvtoff;		/* cumulative cvtmax of all periods */
	u64	cl_pcvtoff;		/* parent's cvtoff at initialization
					   time */

	struct internal_sc cl_rsc;	/* internal real-time service curve */
	struct internal_sc cl_fsc;	/* internal fair service curve */
	struct internal_sc cl_usc;	/* internal upperlimit service curve */
	struct runtime_sc cl_deadline;	/* deadline curve */
	struct runtime_sc cl_eligible;	/* eligible curve */
	struct runtime_sc cl_virtual;	/* virtual curve */
	struct runtime_sc cl_ulimit;	/* upperlimit curve */

	unsigned long	cl_flags;	/* which curves are valid */
	unsigned long	cl_vtperiod;	/* vt period sequence number */
	unsigned long	cl_parentperiod;/* parent's vt period sequence number */
	unsigned long	cl_nactive;	/* number of active children */
};

struct hfsc_sched
{
	u16	defcls;				/* default class id */
	struct hfsc_class root;			/* root class */
	struct Qdisc_class_hash clhash;		/* class hash */
	struct rb_root eligible;		/* eligible tree */
	struct list_head droplist;		/* active leaf class list (for
						   dropping) */
	struct sk_buff_head requeue;		/* requeued packet */
	struct qdisc_watchdog watchdog;		/* watchdog timer */
};

#define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */


/*
 * eligible tree holds backlogged classes sorted by their eligible times.
 * there is one eligible tree per hfsc instance.
 */

static void
eltree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->sched->eligible.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, el_node);
		if (cl->cl_e >= cl1->cl_e)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->el_node, parent, p);
	rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
	eltree_remove(cl);
	eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
	struct hfsc_class *p, *cl = NULL;
	struct rb_node *n;

	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, el_node);
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return cl;
}

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
	struct rb_node *n;

	n = rb_first(&q->eligible);
	if (n == NULL)
		return NULL;
	return rb_entry(n, struct hfsc_class, el_node);
}

/*
 * vttree holds backlogged child classes sorted by their virtual time.
 * each intermediate class has one vttree.
 */
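/*
 * Note on ordering: eltree_insert(), vttree_insert() and cftree_insert()
 * all send keys that compare equal to the right of existing nodes, so
 * among classes with identical keys the one inserted first is reached
 * first by rb_first(), i.e. ties are broken in FIFO order.
 */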
static void
vttree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
		if (cl->cl_vt >= cl1->cl_vt)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->vt_node, parent, p);
	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_update(struct hfsc_class *cl)
{
	vttree_remove(cl);
	vttree_insert(cl);
}

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
	struct hfsc_class *p;
	struct rb_node *n;

	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, vt_node);
		if (p->cl_f <= cur_time)
			return p;
	}
	return NULL;
}

/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
	/* if root-class's cfmin is bigger than cur_time nothing to do */
	if (cl->cl_cfmin > cur_time)
		return NULL;

	while (cl->level > 0) {
		cl = vttree_firstfit(cl, cur_time);
		if (cl == NULL)
			return NULL;
		/*
		 * update parent's cl_cvtmin.
		 */
		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
			cl->cl_parent->cl_cvtmin = cl->cl_vt;
	}
	return cl;
}

static void
cftree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
		if (cl->cl_f >= cl1->cl_f)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->cf_node, parent, p);
	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_update(struct hfsc_class *cl)
{
	cftree_remove(cl);
	cftree_insert(cl);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bps
 *	d: us
 *  internal service curve parameters
 *	sm: (bytes/psched_us) << SM_SHIFT
 *	ism: (psched_us/byte) << ISM_SHIFT
 *	dx: psched_us
 *
 * The clock source resolution with ktime is 1.024us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 * digits in decimal using the following table.
 *
 *  bits/sec       100Kbps    1Mbps      10Mbps     100Mbps    1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/1.024us  12.8e-3    128e-3     1280e-3    12800e-3   128000e-3
 *
 *  1.024us/byte   78.125     7.8125     0.78125    0.078125   0.0078125
 */
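/*
 * Worked example of the scaling above (illustrative only): at the low end,
 * 100 Kbit/s is 0.0128 bytes/tick, which SM_SHIFT = 20 stores as ~13422;
 * at the high end, 1 Gbit/s is 0.0078125 ticks/byte, which ISM_SHIFT = 18
 * stores as 2048.  Both keep at least 4 significant digits while leaving
 * ample headroom in 64 bits.
 */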
381 * 382 * bits/sec 100Kbps 1Mbps 10Mbps 100Mbps 1Gbps 383 * ------------+------------------------------------------------------- 384 * bytes/1.024us 12.8e-3 128e-3 1280e-3 12800e-3 128000e-3 385 * 386 * 1.024us/byte 78.125 7.8125 0.78125 0.078125 0.0078125 387 */ 388 #define SM_SHIFT 20 389 #define ISM_SHIFT 18 390 391 #define SM_MASK ((1ULL << SM_SHIFT) - 1) 392 #define ISM_MASK ((1ULL << ISM_SHIFT) - 1) 393 394 static inline u64 395 seg_x2y(u64 x, u64 sm) 396 { 397 u64 y; 398 399 /* 400 * compute 401 * y = x * sm >> SM_SHIFT 402 * but divide it for the upper and lower bits to avoid overflow 403 */ 404 y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT); 405 return y; 406 } 407 408 static inline u64 409 seg_y2x(u64 y, u64 ism) 410 { 411 u64 x; 412 413 if (y == 0) 414 x = 0; 415 else if (ism == HT_INFINITY) 416 x = HT_INFINITY; 417 else { 418 x = (y >> ISM_SHIFT) * ism 419 + (((y & ISM_MASK) * ism) >> ISM_SHIFT); 420 } 421 return x; 422 } 423 424 /* Convert m (bps) into sm (bytes/psched us) */ 425 static u64 426 m2sm(u32 m) 427 { 428 u64 sm; 429 430 sm = ((u64)m << SM_SHIFT); 431 sm += PSCHED_TICKS_PER_SEC - 1; 432 do_div(sm, PSCHED_TICKS_PER_SEC); 433 return sm; 434 } 435 436 /* convert m (bps) into ism (psched us/byte) */ 437 static u64 438 m2ism(u32 m) 439 { 440 u64 ism; 441 442 if (m == 0) 443 ism = HT_INFINITY; 444 else { 445 ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT); 446 ism += m - 1; 447 do_div(ism, m); 448 } 449 return ism; 450 } 451 452 /* convert d (us) into dx (psched us) */ 453 static u64 454 d2dx(u32 d) 455 { 456 u64 dx; 457 458 dx = ((u64)d * PSCHED_TICKS_PER_SEC); 459 dx += USEC_PER_SEC - 1; 460 do_div(dx, USEC_PER_SEC); 461 return dx; 462 } 463 464 /* convert sm (bytes/psched us) into m (bps) */ 465 static u32 466 sm2m(u64 sm) 467 { 468 u64 m; 469 470 m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT; 471 return (u32)m; 472 } 473 474 /* convert dx (psched us) into d (us) */ 475 static u32 476 dx2d(u64 dx) 477 { 478 u64 d; 479 480 d = dx * USEC_PER_SEC; 481 do_div(d, PSCHED_TICKS_PER_SEC); 482 return (u32)d; 483 } 484 485 static void 486 sc2isc(struct tc_service_curve *sc, struct internal_sc *isc) 487 { 488 isc->sm1 = m2sm(sc->m1); 489 isc->ism1 = m2ism(sc->m1); 490 isc->dx = d2dx(sc->d); 491 isc->dy = seg_x2y(isc->dx, isc->sm1); 492 isc->sm2 = m2sm(sc->m2); 493 isc->ism2 = m2ism(sc->m2); 494 } 495 496 /* 497 * initialize the runtime service curve with the given internal 498 * service curve starting at (x, y). 
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	rtsc->x    = x;
	rtsc->y    = y;
	rtsc->sm1  = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx   = isc->dx;
	rtsc->dy   = isc->dy;
	rtsc->sm2  = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection of the runtime service curve for the
 * given y-projection (amount of work) value
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
	u64 x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return x;
}

static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
	u64 y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return y;
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	u64 y1, y2, dx, dy;
	u32 dsm;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;
	do_div(dx, dsm);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
	return;
}

static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
	u64 cur_time = psched_get_time();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
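	/*
	 * (With dx = dy = 0 the copied deadline curve degenerates to a
	 * single segment of slope sm2, which is the linear curve referred
	 * to above.)
	 */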
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
	struct rb_node *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	if (n == NULL) {
		cl->cl_cfmin = 0;
		return;
	}
	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;
}

static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
	struct hfsc_class *max_cl;
	struct rb_node *n;
	u64 vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			n = rb_last(&cl->cl_parent->vt_tree);
			if (n != NULL) {
				max_cl = rb_entry(n, struct hfsc_class, vt_node);
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to cvtoff to make a new
				 * vt (vtoff + vt) larger than the vt in the
				 * last period for all children.
				 */
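				/*
				 * (Roughly: if the previous period ended
				 * with a maximum child vt of V, cvtoff is
				 * advanced by V below, so vtoff + vt for
				 * classes starting in the new period does
				 * not fall below V.)
				 */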
				vt = cl->cl_parent->cl_cvtmax;
				cl->cl_parent->cl_cvtoff += vt;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
				cl->cl_vt = 0;
			}

			cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
							cl->cl_pcvtoff;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
						      cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			vttree_insert(cl);
			cftree_insert(cl);

			if (cl->cl_flags & HFSC_USC) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = psched_get_time();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
					 cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
						      cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
	u64 f; /* , myf_bound, delta; */
	int go_passive = 0;

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
		go_passive = 1;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt tree */
			vttree_remove(cl);

			cftree_remove(cl);
			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
			    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt tree */
		vttree_update(cl);

		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
							      cl->cl_total);
#if 0
			/*
			 * This code causes classes to stay way under their
			 * limit when multiple classes are used at gigabit
			 * speed.  needs investigation.  -kaber
			 */
			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
#endif
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
set_active(struct hfsc_class *cl, unsigned int len)
{
	if (cl->cl_flags & HFSC_RSC)
		init_ed(cl, len);
	if (cl->cl_flags & HFSC_FSC)
		init_vf(cl, len);

	list_add_tail(&cl->dlist, &cl->sched->droplist);
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_flags & HFSC_RSC)
		eltree_remove(cl);

	list_del(&cl->dlist);

	/*
	 * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from vttree.
	 */
}

/*
 * hack to get length of first packet in queue.
 */
static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->dequeue(sch);
	if (skb == NULL) {
		if (net_ratelimit())
			printk("qdisc_peek_len: non work-conserving qdisc ?\n");
		return 0;
	}
	len = qdisc_pkt_len(skb);
	if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
		if (net_ratelimit())
			printk("qdisc_peek_len: failed to requeue\n");
		qdisc_tree_decrease_qlen(sch, 1);
		return 0;
	}
	return len;
}

static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	unsigned int level;

	do {
		level = 0;
		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level >= level)
				level = p->level + 1;
		}
		cl->level = level;
	} while ((cl = cl->cl_parent) != NULL);
}

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct hfsc_class, cl_common);
}

static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
		u64 cur_time)
{
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}
	cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
		u64 cur_time)
{
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
}

static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
	[TCA_HFSC_RSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_FSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_USC]	= { .len = sizeof(struct tc_service_curve) },
};
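/*
 * Typical userspace usage (illustrative only; see tc-hfsc(8) for the
 * authoritative syntax):
 *
 *	tc qdisc add dev eth0 root handle 1: hfsc default 10
 *	tc class add dev eth0 parent 1: classid 1:10 hfsc \
 *		rt m1 5mbit d 10ms m2 1mbit ls m2 2mbit ul m2 8mbit
 *
 * The rt/ls/ul curves correspond to TCA_HFSC_RSC, TCA_HFSC_FSC and
 * TCA_HFSC_USC above; a curve passed with m1 == 0 and m2 == 0 is treated
 * as absent by hfsc_change_class().
 */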
static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		  struct nlattr **tca, unsigned long *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HFSC_MAX + 1];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
	u64 cur_time;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HFSC_MAX, opt, hfsc_policy);
	if (err < 0)
		return err;

	if (tb[TCA_HFSC_RSC]) {
		rsc = nla_data(tb[TCA_HFSC_RSC]);
		if (rsc->m1 == 0 && rsc->m2 == 0)
			rsc = NULL;
	}

	if (tb[TCA_HFSC_FSC]) {
		fsc = nla_data(tb[TCA_HFSC_FSC]);
		if (fsc->m1 == 0 && fsc->m2 == 0)
			fsc = NULL;
	}

	if (tb[TCA_HFSC_USC]) {
		usc = nla_data(tb[TCA_HFSC_USC]);
		if (usc->m1 == 0 && usc->m2 == 0)
			usc = NULL;
	}

	if (cl != NULL) {
		if (parentid) {
			if (cl->cl_parent &&
			    cl->cl_parent->cl_common.classid != parentid)
				return -EINVAL;
			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
				return -EINVAL;
		}
		cur_time = psched_get_time();

		sch_tree_lock(sch);
		if (rsc != NULL)
			hfsc_change_rsc(cl, rsc, cur_time);
		if (fsc != NULL)
			hfsc_change_fsc(cl, fsc);
		if (usc != NULL)
			hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			if (cl->cl_flags & HFSC_RSC)
				update_ed(cl, qdisc_peek_len(cl->qdisc));
			if (cl->cl_flags & HFSC_FSC)
				update_vf(cl, 0, cur_time);
		}
		sch_tree_unlock(sch);

		if (tca[TCA_RATE])
			gen_replace_estimator(&cl->bstats, &cl->rate_est,
					      qdisc_root_sleeping_lock(sch),
					      tca[TCA_RATE]);
		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EEXIST;

	parent = &q->root;
	if (parentid) {
		parent = hfsc_find_class(parentid, sch);
		if (parent == NULL)
			return -ENOENT;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;
	if (hfsc_find_class(classid, sch))
		return -EEXIST;

	if (rsc == NULL && fsc == NULL)
		return -EINVAL;

	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	if (rsc != NULL)
		hfsc_change_rsc(cl, rsc, 0);
	if (fsc != NULL)
		hfsc_change_fsc(cl, fsc);
	if (usc != NULL)
		hfsc_change_usc(cl, usc, 0);

	cl->cl_common.classid = classid;
	cl->refcnt    = 1;
	cl->sched     = q;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		hfsc_purge_queue(sch, parent);
	hfsc_adjust_levels(parent);
	cl->cl_pcvtoff = parent->cl_cvtoff;
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	if (tca[TCA_RATE])
		gen_new_estimator(&cl->bstats, &cl->rate_est,
				  qdisc_root_sleeping_lock(sch), tca[TCA_RATE]);
	*arg = (unsigned long)cl;
	return 0;
}

static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	tcf_destroy_chain(&cl->filter_list);
	qdisc_destroy(cl->qdisc);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	if (cl != &q->root)
		kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
		return -EBUSY;

	sch_tree_lock(sch);

	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);

	hfsc_purge_queue(sch, cl);
	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);

	sch_tree_unlock(sch);
	return 0;
}

static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
		if (cl->level == 0)
			return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	tcf = q->root.filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if ((cl = (struct hfsc_class *)res.class) == NULL) {
			if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
				break; /* filter selected invalid classid */
		}

		if (cl->level == 0)
			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = cl->filter_list;
	}

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
		return NULL;

	return cl;
}

static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		 struct Qdisc **old)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		return -ENOENT;
	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
		new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					&pfifo_qdisc_ops,
					cl->cl_common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	hfsc_purge_queue(sch, cl);
	*old = xchg(&cl->qdisc, new);
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl != NULL && cl->level == 0)
		return cl->qdisc;

	return NULL;
}

static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->qdisc->q.qlen == 0) {
		update_vf(cl, 0, 0);
		set_passive(cl);
	}
}

static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL) {
		if (p != NULL && p->level <= cl->level)
			return 0;
		cl->filter_cnt++;
	}

	return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	cl->filter_cnt--;
}

static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		cl = &q->root;

	return &cl->filter_list;
}

static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d  = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	NLA_PUT(skb, attr, sizeof(tsc), &tsc);

	return skb->len;

 nla_put_failure:
	return -1;
}

static inline int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}
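/*
 * Note that the curves dumped to userspace are reconstructed from the
 * scaled internal representation via sm2m()/dx2d(), so the reported
 * values may differ marginally from the configured ones because of the
 * rounding done in m2sm()/d2dx().
 */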
static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
		struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
					  TC_H_ROOT;
	tcm->tcm_handle = cl->cl_common.classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (hfsc_dump_curves(skb, cl) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return skb->len;

 nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		      struct gnet_dump *d)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;

	cl->qstats.qlen = cl->qdisc->q.qlen;
	xstats.level   = cl->level;
	xstats.period  = cl->cl_vtperiod;
	xstats.work    = cl->cl_total;
	xstats.rtwork  = cl->cl_cumul;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}



static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *n;
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i],
				     cl_common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	u64 next_time = 0;

	if ((cl = eltree_get_minel(q)) != NULL)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	WARN_ON(next_time == 0);
	qdisc_watchdog_schedule(&q->watchdog, next_time);
}

static int
hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;
	int err;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	q->defcls = qopt->defcls;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	skb_queue_head_init(&q->requeue);

	q->root.cl_common.classid = sch->handle;
	q->root.refcnt = 1;
	q->root.sched  = q;
	q->root.qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					  &pfifo_qdisc_ops,
					  sch->handle);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
	qdisc_class_hash_grow(sch, &q->clhash);

	qdisc_watchdog_init(&q->watchdog, sch);

	return 0;
}
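/*
 * Only the default class number can be changed on a live qdisc, e.g.
 * (illustrative command, syntax per tc-hfsc(8)):
 *
 *	tc qdisc change dev eth0 root handle 1: hfsc default 20
 */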
static int
hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	sch_tree_lock(sch);
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}

static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total        = 0;
	cl->cl_cumul        = 0;
	cl->cl_d            = 0;
	cl->cl_e            = 0;
	cl->cl_vt           = 0;
	cl->cl_vtadj        = 0;
	cl->cl_vtoff        = 0;
	cl->cl_cvtmin       = 0;
	cl->cl_cvtmax       = 0;
	cl->cl_cvtoff       = 0;
	cl->cl_pcvtoff      = 0;
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f            = 0;
	cl->cl_myf          = 0;
	cl->cl_myfadj       = 0;
	cl->cl_cfmin        = 0;
	cl->cl_nactive      = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}

static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct hlist_node *n;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
			hfsc_reset_class(cl);
	}
	__skb_queue_purge(&q->requeue);
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	qdisc_watchdog_cancel(&q->watchdog);
	sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *n, *next;
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
					  cl_common.hnode)
			hfsc_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	__skb_queue_purge(&q->requeue);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_hfsc_qopt qopt;

	qopt.defcls = q->defcls;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
	return skb->len;

 nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_class *cl;
	int err;

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			sch->qstats.drops++;
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1)
		set_active(cl, qdisc_pkt_len(skb));

	cl->bstats.packets++;
	cl->bstats.bytes += qdisc_pkt_len(skb);
	sch->bstats.packets++;
	sch->bstats.bytes += qdisc_pkt_len(skb);
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}
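/*
 * Dequeue selects the next class in two steps: if any class is eligible
 * (its cl_e has passed), the eligible class with the smallest deadline is
 * served under the real-time criterion; otherwise the leaf with the
 * smallest virtual time in the hierarchy is served under link-sharing.
 * cl_cumul is advanced only for real-time service, so work done under
 * link-sharing is not charged against the real-time service curve.
 */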
static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;
	if ((skb = __skb_dequeue(&q->requeue)))
		goto out;

	cur_time = psched_get_time();

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			sch->qstats.overlimits++;
			hfsc_schedule_watchdog(sch);
			return NULL;
		}
	}

	skb = cl->qdisc->dequeue(cl->qdisc);
	if (skb == NULL) {
		if (net_ratelimit())
			printk("HFSC: Non-work-conserving qdisc ?\n");
		return NULL;
	}

	update_vf(cl, qdisc_pkt_len(skb), cur_time);
	if (realtime)
		cl->cl_cumul += qdisc_pkt_len(skb);

	if (cl->qdisc->q.qlen != 0) {
		if (cl->cl_flags & HFSC_RSC) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

 out:
	sch->flags &= ~TCQ_F_THROTTLED;
	sch->q.qlen--;

	return skb;
}

static int
hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	__skb_queue_head(&q->requeue, skb);
	sch->q.qlen++;
	sch->qstats.requeues++;
	return NET_XMIT_SUCCESS;
}

static unsigned int
hfsc_drop(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->droplist, dlist) {
		if (cl->qdisc->ops->drop != NULL &&
		    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
			if (cl->qdisc->q.qlen == 0) {
				update_vf(cl, 0, 0);
				set_passive(cl);
			} else {
				list_move_tail(&cl->dlist, &q->droplist);
			}
			cl->qstats.drops++;
			sch->qstats.drops++;
			sch->q.qlen--;
			return len;
		}
	}
	return 0;
}

static const struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.get		= hfsc_get_class,
	.put		= hfsc_put_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_chain	= hfsc_tcf_chain,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.requeue	= hfsc_requeue,
	.drop		= hfsc_drop,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};

static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);