/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 */
/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve. the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/system.h>
#include <asm/div64.h>

/*
 * kernel internal service curve representation:
 *   coordinates are given by 64 bit unsigned integers.
 *   x-axis: unit is clock count.
 *   y-axis: unit is byte.
 *
 * The service curve parameters are converted to the internal
 * representation. The slope values are scaled to avoid overflow.
 * the inverse slope values as well as the y-projection of the 1st
 * segment are kept in order to avoid 64-bit divide operations
 * that are expensive on 32-bit architectures.
 */

struct internal_sc
{
        u64     sm1;    /* scaled slope of the 1st segment */
        u64     ism1;   /* scaled inverse-slope of the 1st segment */
        u64     dx;     /* the x-projection of the 1st segment */
        u64     dy;     /* the y-projection of the 1st segment */
        u64     sm2;    /* scaled slope of the 2nd segment */
        u64     ism2;   /* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve */
struct runtime_sc
{
        u64     x;      /* current starting position on x-axis */
        u64     y;      /* current starting position on y-axis */
        u64     sm1;    /* scaled slope of the 1st segment */
        u64     ism1;   /* scaled inverse-slope of the 1st segment */
        u64     dx;     /* the x-projection of the 1st segment */
        u64     dy;     /* the y-projection of the 1st segment */
        u64     sm2;    /* scaled slope of the 2nd segment */
        u64     ism2;   /* scaled inverse-slope of the 2nd segment */
};

enum hfsc_class_flags
{
        HFSC_RSC = 0x1,
        HFSC_FSC = 0x2,
        HFSC_USC = 0x4
};

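/*
 * A service curve is a two-piece linear function. For illustration
 * (numbers are hypothetical, not taken from any particular setup):
 * external parameters (m1, d, m2) = (2 Mbit/s, 10 ms, 1 Mbit/s)
 * describe a concave curve that serves at 2 Mbit/s for the first
 * 10 ms of a backlog period and at 1 Mbit/s afterwards. sc2isc()
 * below converts such a triple into a struct internal_sc, where
 * (dx, dy) is the endpoint of the 1st segment: dx = d2dx(d) and
 * dy = seg_x2y(dx, sm1).
 */
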
struct hfsc_class
{
        u32             classid;        /* class id */
        unsigned int    refcnt;         /* usage count */

        struct gnet_stats_basic bstats;
        struct gnet_stats_queue qstats;
        struct gnet_stats_rate_est rate_est;
        spinlock_t      *stats_lock;
        unsigned int    level;          /* class level in hierarchy */
        struct tcf_proto *filter_list;  /* filter list */
        unsigned int    filter_cnt;     /* filter count */

        struct hfsc_sched *sched;       /* scheduler data */
        struct hfsc_class *cl_parent;   /* parent class */
        struct list_head siblings;      /* sibling classes */
        struct list_head children;      /* child classes */
        struct Qdisc    *qdisc;         /* leaf qdisc */

        struct rb_node el_node;         /* qdisc's eligible tree member */
        struct rb_root vt_tree;         /* active children sorted by cl_vt */
        struct rb_node vt_node;         /* parent's vt_tree member */
        struct rb_root cf_tree;         /* active children sorted by cl_f */
        struct rb_node cf_node;         /* parent's cf_tree member */
        struct list_head hlist;         /* hash list member */
        struct list_head dlist;         /* drop list member */

        u64     cl_total;               /* total work in bytes */
        u64     cl_cumul;               /* cumulative work in bytes done by
                                           real-time criteria */

        u64     cl_d;                   /* deadline */
        u64     cl_e;                   /* eligible time */
        u64     cl_vt;                  /* virtual time */
        u64     cl_f;                   /* time when this class will fit for
                                           link-sharing, max(myf, cfmin) */
        u64     cl_myf;                 /* my fit-time (calculated from this
                                           class's own upperlimit curve) */
        u64     cl_myfadj;              /* my fit-time adjustment (to cancel
                                           history dependence) */
        u64     cl_cfmin;               /* earliest children's fit-time (used
                                           with cl_myf to obtain cl_f) */
        u64     cl_cvtmin;              /* minimal virtual time among the
                                           children fit for link-sharing
                                           (monotonic within a period) */
        u64     cl_vtadj;               /* intra-period cumulative vt
                                           adjustment */
        u64     cl_vtoff;               /* inter-period cumulative vt offset */
        u64     cl_cvtmax;              /* max child's vt in the last period */
        u64     cl_cvtoff;              /* cumulative cvtmax of all periods */
        u64     cl_pcvtoff;             /* parent's cvtoff at initialization
                                           time */

        struct internal_sc cl_rsc;      /* internal real-time service curve */
        struct internal_sc cl_fsc;      /* internal fair service curve */
        struct internal_sc cl_usc;      /* internal upperlimit service curve */
        struct runtime_sc cl_deadline;  /* deadline curve */
        struct runtime_sc cl_eligible;  /* eligible curve */
        struct runtime_sc cl_virtual;   /* virtual curve */
        struct runtime_sc cl_ulimit;    /* upperlimit curve */

        unsigned long   cl_flags;       /* which curves are valid */
        unsigned long   cl_vtperiod;    /* vt period sequence number */
        unsigned long   cl_parentperiod;/* parent's vt period sequence number */
        unsigned long   cl_nactive;     /* number of active children */
};

#define HFSC_HSIZE      16

struct hfsc_sched
{
        u16     defcls;                         /* default class id */
        struct hfsc_class root;                 /* root class */
        struct list_head clhash[HFSC_HSIZE];    /* class hash */
        struct rb_root eligible;                /* eligible tree */
        struct list_head droplist;              /* active leaf class list (for
                                                   dropping) */
        struct sk_buff_head requeue;            /* requeued packet */
        struct timer_list wd_timer;             /* watchdog timer */
};

/*
 * macros
 */
#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
#include <linux/time.h>
#undef PSCHED_GET_TIME
#define PSCHED_GET_TIME(stamp)                                          \
do {                                                                    \
        struct timeval tv;                                              \
        do_gettimeofday(&tv);                                           \
        (stamp) = 1ULL * USEC_PER_SEC * tv.tv_sec + tv.tv_usec;         \
} while (0)
#endif

#define HT_INFINITY     0xffffffffffffffffULL   /* infinite time value */


/*
 * eligible tree holds backlogged classes being sorted by their eligible times.
 * there is one eligible tree per hfsc instance.
 */

static void
eltree_insert(struct hfsc_class *cl)
{
        struct rb_node **p = &cl->sched->eligible.rb_node;
        struct rb_node *parent = NULL;
        struct hfsc_class *cl1;

        while (*p != NULL) {
                parent = *p;
                cl1 = rb_entry(parent, struct hfsc_class, el_node);
                if (cl->cl_e >= cl1->cl_e)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->el_node, parent, p);
        rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
        rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
        eltree_remove(cl);
        eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
        struct hfsc_class *p, *cl = NULL;
        struct rb_node *n;

        for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
                p = rb_entry(n, struct hfsc_class, el_node);
                if (p->cl_e > cur_time)
                        break;
                if (cl == NULL || p->cl_d < cl->cl_d)
                        cl = p;
        }
        return cl;
}

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
        struct rb_node *n;

        n = rb_first(&q->eligible);
        if (n == NULL)
                return NULL;
        return rb_entry(n, struct hfsc_class, el_node);
}

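/*
 * Note on the real-time criterion: eltree_get_mindl() walks classes in
 * eligible-time order and stops at the first class with cl_e > cur_time,
 * so it only considers classes that are already eligible; among those it
 * picks the smallest deadline. For illustration (hypothetical numbers):
 * with cur_time = 100 and classes (e=50, d=120), (e=90, d=110) and
 * (e=150, d=105), the third class is not yet eligible, and the second
 * one wins on its deadline of 110.
 */
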
/*
 * vttree holds backlogged child classes being sorted by their virtual
 * time. each intermediate class has one vttree.
 */
static void
vttree_insert(struct hfsc_class *cl)
{
        struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
        struct rb_node *parent = NULL;
        struct hfsc_class *cl1;

        while (*p != NULL) {
                parent = *p;
                cl1 = rb_entry(parent, struct hfsc_class, vt_node);
                if (cl->cl_vt >= cl1->cl_vt)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->vt_node, parent, p);
        rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_remove(struct hfsc_class *cl)
{
        rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_update(struct hfsc_class *cl)
{
        vttree_remove(cl);
        vttree_insert(cl);
}

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
        struct hfsc_class *p;
        struct rb_node *n;

        for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
                p = rb_entry(n, struct hfsc_class, vt_node);
                if (p->cl_f <= cur_time)
                        return p;
        }
        return NULL;
}

/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
        /* if root-class's cfmin is bigger than cur_time nothing to do */
        if (cl->cl_cfmin > cur_time)
                return NULL;

        while (cl->level > 0) {
                cl = vttree_firstfit(cl, cur_time);
                if (cl == NULL)
                        return NULL;
                /*
                 * update parent's cl_cvtmin.
                 */
                if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
                        cl->cl_parent->cl_cvtmin = cl->cl_vt;
        }
        return cl;
}

static void
cftree_insert(struct hfsc_class *cl)
{
        struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
        struct rb_node *parent = NULL;
        struct hfsc_class *cl1;

        while (*p != NULL) {
                parent = *p;
                cl1 = rb_entry(parent, struct hfsc_class, cf_node);
                if (cl->cl_f >= cl1->cl_f)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->cf_node, parent, p);
        rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_remove(struct hfsc_class *cl)
{
        rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_update(struct hfsc_class *cl)
{
        cftree_remove(cl);
        cftree_insert(cl);
}

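/*
 * Summary of the three rb-trees used above: the per-qdisc eligible tree
 * orders backlogged classes by cl_e and drives the real-time criterion;
 * each inner class additionally keeps a vt_tree of its active children
 * ordered by cl_vt (link-sharing selection) and a cf_tree ordered by
 * cl_f, whose leftmost node yields cl_cfmin in update_cfmin() below.
 */
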
/*
 * service curve support functions
 *
 *  external service curve parameters
 *      m: bps
 *      d: us
 *  internal service curve parameters
 *      sm: (bytes/psched_us) << SM_SHIFT
 *      ism: (psched_us/byte) << ISM_SHIFT
 *      dx: psched_us
 *
 * Clock source resolution (CONFIG_NET_SCH_CLK_*)
 *  JIFFIES: for 48<=HZ<=1534 resolution is between 0.63us and 1.27us.
 *  CPU: resolution is between 0.5us and 1us.
 *  GETTIMEOFDAY: resolution is exactly 1us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 * digits in decimal using the following table.
 *
 * Note: We can afford the additional accuracy (altq hfsc keeps at most
 * 3 effective digits) thanks to the fact that linux clock is bounded
 * much more tightly.
 *
 *  bits/sec      100Kbps    1Mbps      10Mbps     100Mbps    1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/0.5us   6.25e-3    62.5e-3    625e-3     6250e-3    62500e-3
 *  bytes/us      12.5e-3    125e-3     1250e-3    12500e-3   125000e-3
 *  bytes/1.27us  15.875e-3  158.75e-3  1587.5e-3  15875e-3   158750e-3
 *
 *  0.5us/byte    160        16         1.6        0.16       0.016
 *  us/byte       80         8          0.8        0.08       0.008
 *  1.27us/byte   63         6.3        0.63       0.063      0.0063
 */
#define SM_SHIFT        20
#define ISM_SHIFT       18

#define SM_MASK         ((1ULL << SM_SHIFT) - 1)
#define ISM_MASK        ((1ULL << ISM_SHIFT) - 1)

static inline u64
seg_x2y(u64 x, u64 sm)
{
        u64 y;

        /*
         * compute
         *      y = x * sm >> SM_SHIFT
         * but split x into its upper and lower bits to avoid overflow
         */
        y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
        return y;
}

static inline u64
seg_y2x(u64 y, u64 ism)
{
        u64 x;

        if (y == 0)
                x = 0;
        else if (ism == HT_INFINITY)
                x = HT_INFINITY;
        else {
                x = (y >> ISM_SHIFT) * ism
                    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
        }
        return x;
}

/* convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
        u64 sm;

        sm = ((u64)m << SM_SHIFT);
        sm += PSCHED_JIFFIE2US(HZ) - 1;
        do_div(sm, PSCHED_JIFFIE2US(HZ));
        return sm;
}

/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
        u64 ism;

        if (m == 0)
                ism = HT_INFINITY;
        else {
                ism = ((u64)PSCHED_JIFFIE2US(HZ) << ISM_SHIFT);
                ism += m - 1;
                do_div(ism, m);
        }
        return ism;
}

/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
        u64 dx;

        dx = ((u64)d * PSCHED_JIFFIE2US(HZ));
        dx += USEC_PER_SEC - 1;
        do_div(dx, USEC_PER_SEC);
        return dx;
}

/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
        u64 m;

        m = (sm * PSCHED_JIFFIE2US(HZ)) >> SM_SHIFT;
        return (u32)m;
}

/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
        u64 d;

        d = dx * USEC_PER_SEC;
        do_div(d, PSCHED_JIFFIE2US(HZ));
        return (u32)d;
}

static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
        isc->sm1 = m2sm(sc->m1);
        isc->ism1 = m2ism(sc->m1);
        isc->dx = d2dx(sc->d);
        isc->dy = seg_x2y(isc->dx, isc->sm1);
        isc->sm2 = m2sm(sc->m2);
        isc->ism2 = m2ism(sc->m2);
}

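/*
 * Worked example of the scaling above (illustrative only, assuming a
 * 1us clock, i.e. PSCHED_JIFFIE2US(HZ) == USEC_PER_SEC, and taking
 * m = 125000, which is 1 Mbit/s expressed in the byte units the table
 * above uses):
 *      m2sm(125000)  = (125000 << 20) / 10^6 = 131072
 * which is exactly 0.125 bytes/us scaled by 2^20 (the "125e-3" table
 * entry), and
 *      m2ism(125000) = (10^6 << 18) / 125000 = 2097152
 * which is 8 us/byte scaled by 2^18 (the "8" table entry).
 */
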
/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
        rtsc->x = x;
        rtsc->y = y;
        rtsc->sm1 = isc->sm1;
        rtsc->ism1 = isc->ism1;
        rtsc->dx = isc->dx;
        rtsc->dy = isc->dy;
        rtsc->sm2 = isc->sm2;
        rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection of the runtime service curve for the
 * given y-projection value
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
        u64 x;

        if (y < rtsc->y)
                x = rtsc->x;
        else if (y <= rtsc->y + rtsc->dy) {
                /* x belongs to the 1st segment */
                if (rtsc->dy == 0)
                        x = rtsc->x + rtsc->dx;
                else
                        x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
        } else {
                /* x belongs to the 2nd segment */
                x = rtsc->x + rtsc->dx
                    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
        }
        return x;
}

static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
        u64 y;

        if (x <= rtsc->x)
                y = rtsc->y;
        else if (x <= rtsc->x + rtsc->dx)
                /* y belongs to the 1st segment */
                y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
        else
                /* y belongs to the 2nd segment */
                y = rtsc->y + rtsc->dy
                    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
        return y;
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
        u64 y1, y2, dx, dy;
        u32 dsm;

        if (isc->sm1 <= isc->sm2) {
                /* service curve is convex */
                y1 = rtsc_x2y(rtsc, x);
                if (y1 < y)
                        /* the current rtsc is smaller */
                        return;
                rtsc->x = x;
                rtsc->y = y;
                return;
        }

        /*
         * service curve is concave
         * compute the two y values of the current rtsc
         *      y1: at x
         *      y2: at (x + dx)
         */
        y1 = rtsc_x2y(rtsc, x);
        if (y1 <= y) {
                /* rtsc is below isc, no change to rtsc */
                return;
        }

        y2 = rtsc_x2y(rtsc, x + isc->dx);
        if (y2 >= y + isc->dy) {
                /* rtsc is above isc, replace rtsc by isc */
                rtsc->x = x;
                rtsc->y = y;
                rtsc->dx = isc->dx;
                rtsc->dy = isc->dy;
                return;
        }

        /*
         * the two curves intersect
         * compute the offsets (dx, dy) using the reverse
         * function of seg_x2y()
         *      seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
         */
        dx = (y1 - y) << SM_SHIFT;
        dsm = isc->sm1 - isc->sm2;
        do_div(dx, dsm);
        /*
         * check if (x, y1) belongs to the 1st segment of rtsc.
         * if so, add the offset.
         */
        if (rtsc->x + rtsc->dx > x)
                dx += rtsc->x + rtsc->dx - x;
        dy = seg_x2y(dx, isc->sm1);

        rtsc->x = x;
        rtsc->y = y;
        rtsc->dx = dx;
        rtsc->dy = dy;
        return;
}

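/*
 * Derivation of the intersection offset used in rtsc_min() above: the
 * x-offset dx at which the new concave curve catches up with the
 * current rtsc satisfies
 *      seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
 * i.e. (dx * sm1) >> SM_SHIFT == ((dx * sm2) >> SM_SHIFT) + (y1 - y),
 * which (up to rounding) gives
 *      dx = ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
 * exactly the computation performed with do_div() above.
 */
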
static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
        u64 cur_time;

        PSCHED_GET_TIME(cur_time);

        /* update the deadline curve */
        rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

        /*
         * update the eligible curve.
         * for concave, it is equal to the deadline curve.
         * for convex, it is a linear curve with slope m2.
         */
        cl->cl_eligible = cl->cl_deadline;
        if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
                cl->cl_eligible.dx = 0;
                cl->cl_eligible.dy = 0;
        }

        /* compute e and d */
        cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
        cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

        eltree_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
        cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
        cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

        eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
        cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

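/*
 * Note: update_d() leaves cl_e alone because the eligible time depends
 * only on cl_cumul, which advances solely when a packet is sent by the
 * real-time criterion (see hfsc_dequeue()). After a purely link-sharing
 * dequeue, only the deadline has to be recomputed for the new head
 * packet length.
 */
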
static inline void
update_cfmin(struct hfsc_class *cl)
{
        struct rb_node *n = rb_first(&cl->cf_tree);
        struct hfsc_class *p;

        if (n == NULL) {
                cl->cl_cfmin = 0;
                return;
        }
        p = rb_entry(n, struct hfsc_class, cf_node);
        cl->cl_cfmin = p->cl_f;
}

static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
        struct hfsc_class *max_cl;
        struct rb_node *n;
        u64 vt, f, cur_time;
        int go_active;

        cur_time = 0;
        go_active = 1;
        for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
                if (go_active && cl->cl_nactive++ == 0)
                        go_active = 1;
                else
                        go_active = 0;

                if (go_active) {
                        n = rb_last(&cl->cl_parent->vt_tree);
                        if (n != NULL) {
                                max_cl = rb_entry(n, struct hfsc_class, vt_node);
                                /*
                                 * set vt to the average of the min and max
                                 * classes. if the parent's period didn't
                                 * change, don't decrease vt of the class.
                                 */
                                vt = max_cl->cl_vt;
                                if (cl->cl_parent->cl_cvtmin != 0)
                                        vt = (cl->cl_parent->cl_cvtmin + vt)/2;

                                if (cl->cl_parent->cl_vtperiod !=
                                    cl->cl_parentperiod || vt > cl->cl_vt)
                                        cl->cl_vt = vt;
                        } else {
                                /*
                                 * first child for a new parent backlog period.
                                 * add parent's cvtmax to cvtoff to make a new
                                 * vt (vtoff + vt) larger than the vt in the
                                 * last period for all children.
                                 */
                                vt = cl->cl_parent->cl_cvtmax;
                                cl->cl_parent->cl_cvtoff += vt;
                                cl->cl_parent->cl_cvtmax = 0;
                                cl->cl_parent->cl_cvtmin = 0;
                                cl->cl_vt = 0;
                        }

                        cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
                                                        cl->cl_pcvtoff;

                        /* update the virtual curve */
                        vt = cl->cl_vt + cl->cl_vtoff;
                        rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
                                 cl->cl_total);
                        if (cl->cl_virtual.x == vt) {
                                cl->cl_virtual.x -= cl->cl_vtoff;
                                cl->cl_vtoff = 0;
                        }
                        cl->cl_vtadj = 0;

                        cl->cl_vtperiod++;  /* increment vt period */
                        cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
                        if (cl->cl_parent->cl_nactive == 0)
                                cl->cl_parentperiod++;
                        cl->cl_f = 0;

                        vttree_insert(cl);
                        cftree_insert(cl);

                        if (cl->cl_flags & HFSC_USC) {
                                /* class has upper limit curve */
                                if (cur_time == 0)
                                        PSCHED_GET_TIME(cur_time);

                                /* update the ulimit curve */
                                rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
                                         cl->cl_total);
                                /* compute myf */
                                cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
                                                      cl->cl_total);
                                cl->cl_myfadj = 0;
                        }
                }

                f = max(cl->cl_myf, cl->cl_cfmin);
                if (f != cl->cl_f) {
                        cl->cl_f = f;
                        cftree_update(cl);
                        update_cfmin(cl->cl_parent);
                }
        }
}

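/*
 * Example of the vt initialization above (hypothetical numbers): if the
 * parent's backlog period is already in progress with cl_cvtmin = 100
 * and the rightmost active sibling has cl_vt = 200, a newly activated
 * child starts at vt = (100 + 200) / 2 = 150, so it neither gains
 * credit for its idle time nor gets starved behind its siblings. If the
 * parent had no active child, a fresh period starts at vt = 0 and the
 * previous period's cvtmax is folded into cl_cvtoff instead.
 */
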
static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
        u64 f; /* , myf_bound, delta; */
        int go_passive = 0;

        if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
                go_passive = 1;

        for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
                cl->cl_total += len;

                if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
                        continue;

                if (go_passive && --cl->cl_nactive == 0)
                        go_passive = 1;
                else
                        go_passive = 0;

                if (go_passive) {
                        /* no more active child, going passive */

                        /* update cvtmax of the parent class */
                        if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
                                cl->cl_parent->cl_cvtmax = cl->cl_vt;

                        /* remove this class from the vt tree */
                        vttree_remove(cl);

                        cftree_remove(cl);
                        update_cfmin(cl->cl_parent);

                        continue;
                }

                /*
                 * update vt and f
                 */
                cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
                            - cl->cl_vtoff + cl->cl_vtadj;

                /*
                 * if vt of the class is smaller than cvtmin,
                 * the class was skipped in the past due to non-fit.
                 * if so, we need to adjust vtadj.
                 */
                if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
                        cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
                        cl->cl_vt = cl->cl_parent->cl_cvtmin;
                }

                /* update the vt tree */
                vttree_update(cl);

                if (cl->cl_flags & HFSC_USC) {
                        cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
                                                              cl->cl_total);
#if 0
                        /*
                         * This code causes classes to stay way under their
                         * limit when multiple classes are used at gigabit
                         * speed. needs investigation. -kaber
                         */
                        /*
                         * if myf lags behind by more than one clock tick
                         * from the current time, adjust myfadj to prevent
                         * a rate-limited class from going greedy.
                         * in a steady state under rate-limiting, myf
                         * fluctuates within one clock tick.
                         */
                        myf_bound = cur_time - PSCHED_JIFFIE2US(1);
                        if (cl->cl_myf < myf_bound) {
                                delta = cur_time - cl->cl_myf;
                                cl->cl_myfadj += delta;
                                cl->cl_myf += delta;
                        }
#endif
                }

                f = max(cl->cl_myf, cl->cl_cfmin);
                if (f != cl->cl_f) {
                        cl->cl_f = f;
                        cftree_update(cl);
                        update_cfmin(cl->cl_parent);
                }
        }
}

static void
set_active(struct hfsc_class *cl, unsigned int len)
{
        if (cl->cl_flags & HFSC_RSC)
                init_ed(cl, len);
        if (cl->cl_flags & HFSC_FSC)
                init_vf(cl, len);

        list_add_tail(&cl->dlist, &cl->sched->droplist);
}

static void
set_passive(struct hfsc_class *cl)
{
        if (cl->cl_flags & HFSC_RSC)
                eltree_remove(cl);

        list_del(&cl->dlist);

        /*
         * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
         * needs to be called explicitly to remove a class from vttree.
         */
}

/*
 * hack to get length of first packet in queue.
 */
static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
        struct sk_buff *skb;
        unsigned int len;

        skb = sch->dequeue(sch);
        if (skb == NULL) {
                if (net_ratelimit())
                        printk("qdisc_peek_len: non work-conserving qdisc ?\n");
                return 0;
        }
        len = skb->len;
        if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
                if (net_ratelimit())
                        printk("qdisc_peek_len: failed to requeue\n");
                qdisc_tree_decrease_qlen(sch, 1);
                return 0;
        }
        return len;
}

static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
        unsigned int len = cl->qdisc->q.qlen;

        qdisc_reset(cl->qdisc);
        qdisc_tree_decrease_qlen(cl->qdisc, len);
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
        struct hfsc_class *p;
        unsigned int level;

        do {
                level = 0;
                list_for_each_entry(p, &cl->children, siblings) {
                        if (p->level >= level)
                                level = p->level + 1;
                }
                cl->level = level;
        } while ((cl = cl->cl_parent) != NULL);
}

static inline unsigned int
hfsc_hash(u32 h)
{
        h ^= h >> 8;
        h ^= h >> 4;

        return h & (HFSC_HSIZE - 1);
}

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;

        list_for_each_entry(cl, &q->clhash[hfsc_hash(classid)], hlist) {
                if (cl->classid == classid)
                        return cl;
        }
        return NULL;
}

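/*
 * hfsc_hash() folds the upper bits of the classid down before masking.
 * For example (illustrative): classid 0x00010001 gives
 * 0x00010001 ^ 0x00000100 = 0x00010101, then
 * 0x00010101 ^ 0x00001010 = 0x00011111, and
 * 0x00011111 & (HFSC_HSIZE - 1) = 1, so classes whose ids differ only
 * in high bits still spread across the hash lists.
 */
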
static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
                u64 cur_time)
{
        sc2isc(rsc, &cl->cl_rsc);
        rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
        cl->cl_eligible = cl->cl_deadline;
        if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
                cl->cl_eligible.dx = 0;
                cl->cl_eligible.dy = 0;
        }
        cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
        sc2isc(fsc, &cl->cl_fsc);
        rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
        cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
                u64 cur_time)
{
        sc2isc(usc, &cl->cl_usc);
        rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
        cl->cl_flags |= HFSC_USC;
}

static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                  struct rtattr **tca, unsigned long *arg)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl = (struct hfsc_class *)*arg;
        struct hfsc_class *parent = NULL;
        struct rtattr *opt = tca[TCA_OPTIONS-1];
        struct rtattr *tb[TCA_HFSC_MAX];
        struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
        u64 cur_time;

        if (opt == NULL || rtattr_parse_nested(tb, TCA_HFSC_MAX, opt))
                return -EINVAL;

        if (tb[TCA_HFSC_RSC-1]) {
                if (RTA_PAYLOAD(tb[TCA_HFSC_RSC-1]) < sizeof(*rsc))
                        return -EINVAL;
                rsc = RTA_DATA(tb[TCA_HFSC_RSC-1]);
                if (rsc->m1 == 0 && rsc->m2 == 0)
                        rsc = NULL;
        }

        if (tb[TCA_HFSC_FSC-1]) {
                if (RTA_PAYLOAD(tb[TCA_HFSC_FSC-1]) < sizeof(*fsc))
                        return -EINVAL;
                fsc = RTA_DATA(tb[TCA_HFSC_FSC-1]);
                if (fsc->m1 == 0 && fsc->m2 == 0)
                        fsc = NULL;
        }

        if (tb[TCA_HFSC_USC-1]) {
                if (RTA_PAYLOAD(tb[TCA_HFSC_USC-1]) < sizeof(*usc))
                        return -EINVAL;
                usc = RTA_DATA(tb[TCA_HFSC_USC-1]);
                if (usc->m1 == 0 && usc->m2 == 0)
                        usc = NULL;
        }

        if (cl != NULL) {
                if (parentid) {
                        if (cl->cl_parent && cl->cl_parent->classid != parentid)
                                return -EINVAL;
                        if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
                                return -EINVAL;
                }
                PSCHED_GET_TIME(cur_time);

                sch_tree_lock(sch);
                if (rsc != NULL)
                        hfsc_change_rsc(cl, rsc, cur_time);
                if (fsc != NULL)
                        hfsc_change_fsc(cl, fsc);
                if (usc != NULL)
                        hfsc_change_usc(cl, usc, cur_time);

                if (cl->qdisc->q.qlen != 0) {
                        if (cl->cl_flags & HFSC_RSC)
                                update_ed(cl, qdisc_peek_len(cl->qdisc));
                        if (cl->cl_flags & HFSC_FSC)
                                update_vf(cl, 0, cur_time);
                }
                sch_tree_unlock(sch);

#ifdef CONFIG_NET_ESTIMATOR
                if (tca[TCA_RATE-1])
                        gen_replace_estimator(&cl->bstats, &cl->rate_est,
                                              cl->stats_lock, tca[TCA_RATE-1]);
#endif
                return 0;
        }

        if (parentid == TC_H_ROOT)
                return -EEXIST;

        parent = &q->root;
        if (parentid) {
                parent = hfsc_find_class(parentid, sch);
                if (parent == NULL)
                        return -ENOENT;
        }

        if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
                return -EINVAL;
        if (hfsc_find_class(classid, sch))
                return -EEXIST;

        if (rsc == NULL && fsc == NULL)
                return -EINVAL;

        cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
        if (cl == NULL)
                return -ENOBUFS;

        if (rsc != NULL)
                hfsc_change_rsc(cl, rsc, 0);
        if (fsc != NULL)
                hfsc_change_fsc(cl, fsc);
        if (usc != NULL)
                hfsc_change_usc(cl, usc, 0);

        cl->refcnt = 1;
        cl->classid = classid;
        cl->sched = q;
        cl->cl_parent = parent;
        cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
        if (cl->qdisc == NULL)
                cl->qdisc = &noop_qdisc;
        cl->stats_lock = &sch->dev->queue_lock;
        INIT_LIST_HEAD(&cl->children);
        cl->vt_tree = RB_ROOT;
        cl->cf_tree = RB_ROOT;

        sch_tree_lock(sch);
        list_add_tail(&cl->hlist, &q->clhash[hfsc_hash(classid)]);
        list_add_tail(&cl->siblings, &parent->children);
        if (parent->level == 0)
                hfsc_purge_queue(sch, parent);
        hfsc_adjust_levels(parent);
        cl->cl_pcvtoff = parent->cl_cvtoff;
        sch_tree_unlock(sch);

#ifdef CONFIG_NET_ESTIMATOR
        if (tca[TCA_RATE-1])
                gen_new_estimator(&cl->bstats, &cl->rate_est,
                                  cl->stats_lock, tca[TCA_RATE-1]);
#endif
        *arg = (unsigned long)cl;
        return 0;
}

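/*
 * From userspace the three curves above map onto the tc(8) keywords
 * rt/ls/ul (sc sets rt and ls at once). A minimal sketch, with made-up
 * device and rates:
 *
 *      tc qdisc add dev eth0 root handle 1: hfsc default 10
 *      tc class add dev eth0 parent 1: classid 1:10 hfsc \
 *              sc m1 2mbit d 10ms m2 1mbit ul m2 10mbit
 *
 * creates a leaf whose service curve starts at 2 Mbit/s for 10 ms and
 * continues at 1 Mbit/s, capped at 10 Mbit/s by the upperlimit curve.
 */
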
static void
hfsc_destroy_filters(struct tcf_proto **fl)
{
        struct tcf_proto *tp;

        while ((tp = *fl) != NULL) {
                *fl = tp->next;
                tcf_destroy(tp);
        }
}

static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
        struct hfsc_sched *q = qdisc_priv(sch);

        hfsc_destroy_filters(&cl->filter_list);
        qdisc_destroy(cl->qdisc);
#ifdef CONFIG_NET_ESTIMATOR
        gen_kill_estimator(&cl->bstats, &cl->rate_est);
#endif
        if (cl != &q->root)
                kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
                return -EBUSY;

        sch_tree_lock(sch);

        list_del(&cl->siblings);
        hfsc_adjust_levels(cl->cl_parent);

        hfsc_purge_queue(sch, cl);
        list_del(&cl->hlist);

        if (--cl->refcnt == 0)
                hfsc_destroy_class(sch, cl);

        sch_tree_unlock(sch);
        return 0;
}

static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        struct tcf_result res;
        struct tcf_proto *tcf;
        int result;

        if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
            (cl = hfsc_find_class(skb->priority, sch)) != NULL)
                if (cl->level == 0)
                        return cl;

        *qerr = NET_XMIT_BYPASS;
        tcf = q->root.filter_list;
        while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                        *qerr = NET_XMIT_SUCCESS;
                case TC_ACT_SHOT:
                        return NULL;
                }
#elif defined(CONFIG_NET_CLS_POLICE)
                if (result == TC_POLICE_SHOT)
                        return NULL;
#endif
                if ((cl = (struct hfsc_class *)res.class) == NULL) {
                        if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
                                break; /* filter selected invalid classid */
                }

                if (cl->level == 0)
                        return cl; /* hit leaf class */

                /* apply inner filter chain */
                tcf = cl->filter_list;
        }

        /* classification failed, try default class */
        cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
        if (cl == NULL || cl->level > 0)
                return NULL;

        return cl;
}

static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                 struct Qdisc **old)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl == NULL)
                return -ENOENT;
        if (cl->level > 0)
                return -EINVAL;
        if (new == NULL) {
                new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
                                        cl->classid);
                if (new == NULL)
                        new = &noop_qdisc;
        }

        sch_tree_lock(sch);
        hfsc_purge_queue(sch, cl);
        *old = xchg(&cl->qdisc, new);
        sch_tree_unlock(sch);
        return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl != NULL && cl->level == 0)
                return cl->qdisc;

        return NULL;
}

static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl->qdisc->q.qlen == 0) {
                update_vf(cl, 0, 0);
                set_passive(cl);
        }
}

static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
        struct hfsc_class *cl = hfsc_find_class(classid, sch);

        if (cl != NULL)
                cl->refcnt++;

        return (unsigned long)cl;
}

static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (--cl->refcnt == 0)
                hfsc_destroy_class(sch, cl);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
        struct hfsc_class *p = (struct hfsc_class *)parent;
        struct hfsc_class *cl = hfsc_find_class(classid, sch);

        if (cl != NULL) {
                if (p != NULL && p->level <= cl->level)
                        return 0;
                cl->filter_cnt++;
        }

        return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        cl->filter_cnt--;
}

static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl == NULL)
                cl = &q->root;

        return &cl->filter_list;
}

static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
        struct tc_service_curve tsc;

        tsc.m1 = sm2m(sc->sm1);
        tsc.d = dx2d(sc->dx);
        tsc.m2 = sm2m(sc->sm2);
        RTA_PUT(skb, attr, sizeof(tsc), &tsc);

        return skb->len;

 rtattr_failure:
        return -1;
}

static inline int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
        if ((cl->cl_flags & HFSC_RSC) &&
            (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
                goto rtattr_failure;

        if ((cl->cl_flags & HFSC_FSC) &&
            (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
                goto rtattr_failure;

        if ((cl->cl_flags & HFSC_USC) &&
            (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
                goto rtattr_failure;

        return skb->len;

 rtattr_failure:
        return -1;
}

static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
                struct tcmsg *tcm)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;
        unsigned char *b = skb->tail;
        struct rtattr *rta = (struct rtattr *)b;

        tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->classid : TC_H_ROOT;
        tcm->tcm_handle = cl->classid;
        if (cl->level == 0)
                tcm->tcm_info = cl->qdisc->handle;

        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
        if (hfsc_dump_curves(skb, cl) < 0)
                goto rtattr_failure;
        rta->rta_len = skb->tail - b;
        return skb->len;

 rtattr_failure:
        skb_trim(skb, b - skb->data);
        return -1;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
                      struct gnet_dump *d)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;
        struct tc_hfsc_stats xstats;

        cl->qstats.qlen = cl->qdisc->q.qlen;
        xstats.level = cl->level;
        xstats.period = cl->cl_vtperiod;
        xstats.work = cl->cl_total;
        xstats.rtwork = cl->cl_cumul;

        if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
#ifdef CONFIG_NET_ESTIMATOR
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
#endif
            gnet_stats_copy_queue(d, &cl->qstats) < 0)
                return -1;

        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}


static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < HFSC_HSIZE; i++) {
                list_for_each_entry(cl, &q->clhash[i], hlist) {
                        if (arg->count < arg->skip) {
                                arg->count++;
                                continue;
                        }
                        if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
                                arg->stop = 1;
                                return;
                        }
                        arg->count++;
                }
        }
}

static void
hfsc_watchdog(unsigned long arg)
{
        struct Qdisc *sch = (struct Qdisc *)arg;

        sch->flags &= ~TCQ_F_THROTTLED;
        netif_schedule(sch->dev);
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch, u64 cur_time)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        u64 next_time = 0;
        long delay;

        if ((cl = eltree_get_minel(q)) != NULL)
                next_time = cl->cl_e;
        if (q->root.cl_cfmin != 0) {
                if (next_time == 0 || next_time > q->root.cl_cfmin)
                        next_time = q->root.cl_cfmin;
        }
        WARN_ON(next_time == 0);
        delay = next_time - cur_time;
        delay = PSCHED_US2JIFFIE(delay);

        sch->flags |= TCQ_F_THROTTLED;
        mod_timer(&q->wd_timer, jiffies + delay);
}

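/*
 * When no class may be served right now, the qdisc throttles itself
 * until the next event that can change that: the smallest eligible
 * time in the eltree or the root's earliest fit-time (upperlimit).
 * For example (hypothetical), if that event lies 10 ms ahead and
 * HZ=1000, the watchdog fires 10 jiffies later, clears TCQ_F_THROTTLED
 * and reschedules the device.
 */
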
static int
hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct tc_hfsc_qopt *qopt;
        unsigned int i;

        if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
                return -EINVAL;
        qopt = RTA_DATA(opt);

        sch->stats_lock = &sch->dev->queue_lock;

        q->defcls = qopt->defcls;
        for (i = 0; i < HFSC_HSIZE; i++)
                INIT_LIST_HEAD(&q->clhash[i]);
        q->eligible = RB_ROOT;
        INIT_LIST_HEAD(&q->droplist);
        skb_queue_head_init(&q->requeue);

        q->root.refcnt = 1;
        q->root.classid = sch->handle;
        q->root.sched = q;
        q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
                                          sch->handle);
        if (q->root.qdisc == NULL)
                q->root.qdisc = &noop_qdisc;
        q->root.stats_lock = &sch->dev->queue_lock;
        INIT_LIST_HEAD(&q->root.children);
        q->root.vt_tree = RB_ROOT;
        q->root.cf_tree = RB_ROOT;

        list_add(&q->root.hlist, &q->clhash[hfsc_hash(q->root.classid)]);

        init_timer(&q->wd_timer);
        q->wd_timer.function = hfsc_watchdog;
        q->wd_timer.data = (unsigned long)sch;

        return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct tc_hfsc_qopt *qopt;

        if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
                return -EINVAL;
        qopt = RTA_DATA(opt);

        sch_tree_lock(sch);
        q->defcls = qopt->defcls;
        sch_tree_unlock(sch);

        return 0;
}

static void
hfsc_reset_class(struct hfsc_class *cl)
{
        cl->cl_total = 0;
        cl->cl_cumul = 0;
        cl->cl_d = 0;
        cl->cl_e = 0;
        cl->cl_vt = 0;
        cl->cl_vtadj = 0;
        cl->cl_vtoff = 0;
        cl->cl_cvtmin = 0;
        cl->cl_cvtmax = 0;
        cl->cl_cvtoff = 0;
        cl->cl_pcvtoff = 0;
        cl->cl_vtperiod = 0;
        cl->cl_parentperiod = 0;
        cl->cl_f = 0;
        cl->cl_myf = 0;
        cl->cl_myfadj = 0;
        cl->cl_cfmin = 0;
        cl->cl_nactive = 0;

        cl->vt_tree = RB_ROOT;
        cl->cf_tree = RB_ROOT;
        qdisc_reset(cl->qdisc);

        if (cl->cl_flags & HFSC_RSC)
                rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
        if (cl->cl_flags & HFSC_FSC)
                rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
        if (cl->cl_flags & HFSC_USC)
                rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}

static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        unsigned int i;

        for (i = 0; i < HFSC_HSIZE; i++) {
                list_for_each_entry(cl, &q->clhash[i], hlist)
                        hfsc_reset_class(cl);
        }
        __skb_queue_purge(&q->requeue);
        q->eligible = RB_ROOT;
        INIT_LIST_HEAD(&q->droplist);
        del_timer(&q->wd_timer);
        sch->flags &= ~TCQ_F_THROTTLED;
        sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl, *next;
        unsigned int i;

        for (i = 0; i < HFSC_HSIZE; i++) {
                list_for_each_entry_safe(cl, next, &q->clhash[i], hlist)
                        hfsc_destroy_class(sch, cl);
        }
        __skb_queue_purge(&q->requeue);
        del_timer(&q->wd_timer);
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        unsigned char *b = skb->tail;
        struct tc_hfsc_qopt qopt;

        qopt.defcls = q->defcls;
        RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
        return skb->len;

 rtattr_failure:
        skb_trim(skb, b - skb->data);
        return -1;
}

static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct hfsc_class *cl;
        unsigned int len;
        int err;

        cl = hfsc_classify(skb, sch, &err);
        if (cl == NULL) {
                if (err == NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return err;
        }

        len = skb->len;
        err = cl->qdisc->enqueue(skb, cl->qdisc);
        if (unlikely(err != NET_XMIT_SUCCESS)) {
                cl->qstats.drops++;
                sch->qstats.drops++;
                return err;
        }

        if (cl->qdisc->q.qlen == 1)
                set_active(cl, len);

        cl->bstats.packets++;
        cl->bstats.bytes += len;
        sch->bstats.packets++;
        sch->bstats.bytes += len;
        sch->q.qlen++;

        return NET_XMIT_SUCCESS;
}

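/*
 * Note on activation: a class enters the scheduler only on the 0 -> 1
 * queue-length transition above. set_active() then computes its initial
 * eligible/deadline times (init_ed) and virtual time (init_vf); after
 * that, update_ed()/update_vf() keep the state current on every dequeue
 * until the queue drains and set_passive() removes the class again.
 */
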
static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        struct sk_buff *skb;
        u64 cur_time;
        unsigned int next_len;
        int realtime = 0;

        if (sch->q.qlen == 0)
                return NULL;
        if ((skb = __skb_dequeue(&q->requeue)))
                goto out;

        PSCHED_GET_TIME(cur_time);

        /*
         * if there are eligible classes, use real-time criteria.
         * find the class with the minimum deadline among
         * the eligible classes.
         */
        if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
                realtime = 1;
        } else {
                /*
                 * use link-sharing criteria
                 * get the class with the minimum vt in the hierarchy
                 */
                cl = vttree_get_minvt(&q->root, cur_time);
                if (cl == NULL) {
                        sch->qstats.overlimits++;
                        hfsc_schedule_watchdog(sch, cur_time);
                        return NULL;
                }
        }

        skb = cl->qdisc->dequeue(cl->qdisc);
        if (skb == NULL) {
                if (net_ratelimit())
                        printk("HFSC: Non-work-conserving qdisc ?\n");
                return NULL;
        }

        update_vf(cl, skb->len, cur_time);
        if (realtime)
                cl->cl_cumul += skb->len;

        if (cl->qdisc->q.qlen != 0) {
                if (cl->cl_flags & HFSC_RSC) {
                        /* update ed */
                        next_len = qdisc_peek_len(cl->qdisc);
                        if (realtime)
                                update_ed(cl, next_len);
                        else
                                update_d(cl, next_len);
                }
        } else {
                /* the class becomes passive */
                set_passive(cl);
        }

 out:
        sch->flags &= ~TCQ_F_THROTTLED;
        sch->q.qlen--;

        return skb;
}

static int
hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);

        __skb_queue_head(&q->requeue, skb);
        sch->q.qlen++;
        sch->qstats.requeues++;
        return NET_XMIT_SUCCESS;
}

static unsigned int
hfsc_drop(struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        unsigned int len;

        list_for_each_entry(cl, &q->droplist, dlist) {
                if (cl->qdisc->ops->drop != NULL &&
                    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
                        if (cl->qdisc->q.qlen == 0) {
                                update_vf(cl, 0, 0);
                                set_passive(cl);
                        } else {
                                list_move_tail(&cl->dlist, &q->droplist);
                        }
                        cl->qstats.drops++;
                        sch->qstats.drops++;
                        sch->q.qlen--;
                        return len;
                }
        }
        return 0;
}

static struct Qdisc_class_ops hfsc_class_ops = {
        .change         = hfsc_change_class,
        .delete         = hfsc_delete_class,
        .graft          = hfsc_graft_class,
        .leaf           = hfsc_class_leaf,
        .qlen_notify    = hfsc_qlen_notify,
        .get            = hfsc_get_class,
        .put            = hfsc_put_class,
        .bind_tcf       = hfsc_bind_tcf,
        .unbind_tcf     = hfsc_unbind_tcf,
        .tcf_chain      = hfsc_tcf_chain,
        .dump           = hfsc_dump_class,
        .dump_stats     = hfsc_dump_class_stats,
        .walk           = hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops = {
        .id             = "hfsc",
        .init           = hfsc_init_qdisc,
        .change         = hfsc_change_qdisc,
        .reset          = hfsc_reset_qdisc,
        .destroy        = hfsc_destroy_qdisc,
        .dump           = hfsc_dump_qdisc,
        .enqueue        = hfsc_enqueue,
        .dequeue        = hfsc_dequeue,
        .requeue        = hfsc_requeue,
        .drop           = hfsc_drop,
        .cl_ops         = &hfsc_class_ops,
        .priv_size      = sizeof(struct hfsc_sched),
        .owner          = THIS_MODULE
};

static int __init
hfsc_init(void)
{
        return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
        unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);