// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_qfq.c         Quick Fair Queueing Plus Scheduler.
 *
 * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
 * Copyright (c) 2012 Paolo Valente.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>


/*  Quick Fair Queueing Plus
    ========================

    Sources:

    [1] Paolo Valente,
    "Reducing the Execution Time of Fair-Queueing Schedulers."
    http://algo.ing.unimo.it/people/paolo/agg-sched/agg-sched.pdf

    Sources for QFQ:

    [2] Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
    Packet Scheduling with Tight Bandwidth Distribution Guarantees."

    See also:
    http://retis.sssup.it/~fabio/linux/qfq/
 */

/*

  QFQ+ divides classes into aggregates of at most MAX_AGG_CLASSES
  classes. Each aggregate is timestamped with a virtual start time S
  and a virtual finish time F, and scheduled according to its
  timestamps. S and F are computed as a function of a system virtual
  time function V. The classes within each aggregate are instead
  scheduled with DRR.

  To speed up operations, QFQ+ also divides aggregates into a limited
  number of groups. Which group a class belongs to depends on the
  ratio between the maximum packet length for the class and the weight
  of the class. Groups have their own S and F. In the end, QFQ+
  schedules groups, then aggregates within groups, then classes within
  aggregates. See [1] and [2] for a full description.

  Virtual time computations.

  S, F and V are all computed in fixed point arithmetic with
  FRAC_BITS fractional bits.

  QFQ_MAX_INDEX is the maximum index allowed for a group. We need
	one bit per index.
  QFQ_MAX_WSHIFT is the maximum power of two supported as a weight.

  The layout of the bits is as below:

                   [ MTU_SHIFT ][      FRAC_BITS    ]
                   [ MAX_INDEX    ][ MIN_SLOT_SHIFT ]
				 ^.__grp->index = 0
				 *.__grp->slot_shift

  where MIN_SLOT_SHIFT is derived by difference from the others.

  The max group index corresponds to Lmax/w_min, where
  Lmax=1<<MTU_SHIFT, w_min = 1 .
  From this, and knowing how many groups (MAX_INDEX) we want,
  we can derive the shift corresponding to each group.

  Because we often need to compute
	F = S + len/w_i  and V = V + len/wsum
  instead of storing w_i we store the value
	inv_w = (1<<FRAC_BITS)/w_i
  so we can do F = S + len * inv_w.
  We use W_TOT in the formulas so we can easily move between
  static and adaptive weight sum.

  The per-scheduler-instance data contain all the data structures
  for the scheduler: bitmaps and bucket lists.

 */

/*
 * Maximum number of consecutive slots occupied by backlogged classes
 * inside a group.
 */
#define QFQ_MAX_SLOTS	32

/*
 * Shifts used for aggregate<->group mapping. We allow class weights that are
 * in the range [1, 2^MAX_WSHIFT], and we try to map each aggregate i to the
 * group with the smallest index that can support the L_i / r_i configured
 * for the classes in the aggregate.
 *
 * grp->index is the index of the group; and grp->slot_shift
 * is the shift for the corresponding (scaled) sigma_i.
 */
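/*
 * A worked sketch of the fixed-point arithmetic described above, with
 * purely illustrative numbers (not taken from a real configuration):
 * with FRAC_BITS = 30, a class of weight w_i = 10 stores
 *
 *	inv_w = ONE_FP / 10 = 107374182		(~0.1 in fixed point)
 *
 * and serving a 1500-byte packet advances its finish time by
 *
 *	len * inv_w = 1500 * 107374182 ~= 150 * ONE_FP,
 *
 * i.e., by 150 units of service in the same scale as V.
 */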
#define QFQ_MAX_INDEX		24
#define QFQ_MAX_WSHIFT		10

#define	QFQ_MAX_WEIGHT		(1<<QFQ_MAX_WSHIFT) /* see qfq_slot_insert */
#define QFQ_MAX_WSUM		(64*QFQ_MAX_WEIGHT)

#define FRAC_BITS		30	/* fixed point arithmetic */
#define ONE_FP			(1UL << FRAC_BITS)

#define QFQ_MTU_SHIFT		16	/* to support TSO/GSO */
#define QFQ_MIN_LMAX		512	/* see qfq_slot_insert */
#define QFQ_MAX_LMAX		(1UL << QFQ_MTU_SHIFT)

#define QFQ_MAX_AGG_CLASSES	8 /* max num classes per aggregate allowed */

/*
 * Possible group states. These values are used as indexes for the bitmaps
 * array of struct qfq_sched.
 */
enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };

struct qfq_group;

struct qfq_aggregate;

struct qfq_class {
	struct Qdisc_class_common common;

	unsigned int filter_cnt;

	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_queue qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct Qdisc *qdisc;
	struct list_head alist;		/* Link for active-classes list. */
	struct qfq_aggregate *agg;	/* Parent aggregate. */
	int deficit;			/* DRR deficit counter. */
};

struct qfq_aggregate {
	struct hlist_node next;	/* Link for the slot list. */
	u64 S, F;		/* flow timestamps (exact) */

	/* group we belong to. In principle we would need the index,
	 * which is log_2(lmax/weight), but we never reference it
	 * directly, only the group.
	 */
	struct qfq_group *grp;

	/* these are copied from the flowset. */
	u32	class_weight; /* Weight of each class in this aggregate. */
	/* Max pkt size for the classes in this aggregate, DRR quantum. */
	int	lmax;

	u32	inv_w;	    /* ONE_FP/(sum of weights of classes in aggr.). */
	u32	budgetmax;  /* Max budget for this aggregate. */
	u32	initial_budget, budget;     /* Initial and current budget. */

	int		  num_classes;	/* Number of classes in this aggr. */
	struct list_head  active;	/* DRR queue of active classes. */

	struct hlist_node nonfull_next;	/* See nonfull_aggs in qfq_sched. */
};

struct qfq_group {
	u64 S, F;			/* group timestamps (approx). */
	unsigned int slot_shift;	/* Slot shift. */
	unsigned int index;		/* Group index. */
	unsigned int front;		/* Index of the front slot. */
	unsigned long full_slots;	/* non-empty slots */

	/* Array of RR lists of active aggregates. */
	struct hlist_head slots[QFQ_MAX_SLOTS];
};

struct qfq_sched {
	struct tcf_proto __rcu *filter_list;
	struct tcf_block	*block;
	struct Qdisc_class_hash clhash;

	u64			oldV, V;	/* Precise virtual times. */
	struct qfq_aggregate	*in_serv_agg;	/* Aggregate being served. */
	u32			wsum;		/* weight sum */
	u32			iwsum;		/* inverse weight sum */

	unsigned long bitmaps[QFQ_MAX_STATE];	    /* Group bitmaps. */
	struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
	u32 min_slot_shift;	/* Index of the group-0 bit in the bitmaps. */

	u32 max_agg_classes;		/* Max number of classes per aggr. */
	struct hlist_head nonfull_aggs; /* Aggs with room for more classes. */
};
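/*
 * A small sketch of how the bitmaps above are meant to be read
 * (hypothetical state, not produced by the code): if
 * q->bitmaps[ER] == 0x5, the groups with index 0 and 2 are both
 * eligible and ready, and
 *
 *	grp = qfq_ffs(q, q->bitmaps[ER]);	// returns &q->groups[0]
 *
 * picks the one with the smallest index, i.e. the one with the
 * smallest slot size and thus the tightest service guarantee.
 */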
/*
 * Possible reasons why the timestamps of an aggregate are updated
 * enqueue: the aggregate switches from idle to active and must be
 *	    scheduled for service
 * requeue: the aggregate finishes its budget, so it stops being served and
 *	    must be rescheduled for service
 */
enum update_reason {enqueue, requeue};

static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct qfq_class, common);
}

static struct netlink_range_validation lmax_range = {
	.min = QFQ_MIN_LMAX,
	.max = QFQ_MAX_LMAX,
};

static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
	[TCA_QFQ_WEIGHT] = NLA_POLICY_RANGE(NLA_U32, 1, QFQ_MAX_WEIGHT),
	[TCA_QFQ_LMAX] = NLA_POLICY_FULL_RANGE(NLA_U32, &lmax_range),
};

/*
 * Calculate a flow index, given its weight and maximum packet length.
 * index = log_2(maxlen/weight) but we need to apply the scaling.
 * This is used only once at flow creation.
 */
static int qfq_calc_index(u32 inv_w, unsigned int maxlen, u32 min_slot_shift)
{
	u64 slot_size = (u64)maxlen * inv_w;
	unsigned long size_map;
	int index = 0;

	size_map = slot_size >> min_slot_shift;
	if (!size_map)
		goto out;

	index = __fls(size_map) + 1;	/* basically a log_2 */
	index -= !(slot_size - (1ULL << (index + min_slot_shift - 1)));

	if (index < 0)
		index = 0;
out:
	pr_debug("qfq calc_index: W = %lu, L = %u, I = %d\n",
		 (unsigned long) ONE_FP/inv_w, maxlen, index);

	return index;
}

static void qfq_deactivate_agg(struct qfq_sched *, struct qfq_aggregate *);
static void qfq_activate_agg(struct qfq_sched *, struct qfq_aggregate *,
			     enum update_reason);

static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
			 u32 lmax, u32 weight)
{
	INIT_LIST_HEAD(&agg->active);
	hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);

	agg->lmax = lmax;
	agg->class_weight = weight;
}

static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
					  u32 lmax, u32 weight)
{
	struct qfq_aggregate *agg;

	hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next)
		if (agg->lmax == lmax && agg->class_weight == weight)
			return agg;

	return NULL;
}
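/*
 * Worked example for the aggregate bookkeeping below (numbers are
 * illustrative): an aggregate of 3 classes, each with class_weight == 2
 * and lmax == 1514, gets
 *
 *	budgetmax = 3 * 1514 = 4542	(one lmax-sized budget per class)
 *	inv_w	  = ONE_FP / (2 * 3)	(aggregate weight is 6)
 *
 * so spending one full budget advances its finish time by
 * budgetmax * inv_w ~= 757 * ONE_FP, i.e. 4542/6 "bytes" of virtual time.
 */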
/* Update aggregate as a function of the new number of classes. */
static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
			   int new_num_classes)
{
	u32 new_agg_weight;

	if (new_num_classes == q->max_agg_classes)
		hlist_del_init(&agg->nonfull_next);

	if (agg->num_classes > new_num_classes &&
	    new_num_classes == q->max_agg_classes - 1) /* agg no more full */
		hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);

	/* The next assignment may let
	 * agg->initial_budget > agg->budgetmax
	 * hold, we will take it into account in charge_actual_service().
	 */
	agg->budgetmax = new_num_classes * agg->lmax;
	new_agg_weight = agg->class_weight * new_num_classes;
	agg->inv_w = ONE_FP/new_agg_weight;

	if (agg->grp == NULL) {
		int i = qfq_calc_index(agg->inv_w, agg->budgetmax,
				       q->min_slot_shift);
		agg->grp = &q->groups[i];
	}

	q->wsum +=
		(int) agg->class_weight * (new_num_classes - agg->num_classes);
	q->iwsum = ONE_FP / q->wsum;

	agg->num_classes = new_num_classes;
}

/* Add class to aggregate. */
static void qfq_add_to_agg(struct qfq_sched *q,
			   struct qfq_aggregate *agg,
			   struct qfq_class *cl)
{
	cl->agg = agg;

	qfq_update_agg(q, agg, agg->num_classes+1);
	if (cl->qdisc->q.qlen > 0) { /* adding an active class */
		list_add_tail(&cl->alist, &agg->active);
		if (list_first_entry(&agg->active, struct qfq_class, alist) ==
		    cl && q->in_serv_agg != agg) /* agg was inactive */
			qfq_activate_agg(q, agg, enqueue); /* schedule agg */
	}
}

static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *);

static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	hlist_del_init(&agg->nonfull_next);
	q->wsum -= agg->class_weight;
	if (q->wsum != 0)
		q->iwsum = ONE_FP / q->wsum;

	if (q->in_serv_agg == agg)
		q->in_serv_agg = qfq_choose_next_agg(q);
	kfree(agg);
}

/* Deschedule class from within its parent aggregate. */
static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
{
	struct qfq_aggregate *agg = cl->agg;


	list_del(&cl->alist); /* remove from RR queue of the aggregate */
	if (list_empty(&agg->active)) /* agg is now inactive */
		qfq_deactivate_agg(q, agg);
}

/* Remove class from its parent aggregate. */
static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
{
	struct qfq_aggregate *agg = cl->agg;

	cl->agg = NULL;
	if (agg->num_classes == 1) { /* agg being emptied, destroy it */
		qfq_destroy_agg(q, agg);
		return;
	}
	qfq_update_agg(q, agg, agg->num_classes-1);
}
/* Deschedule class and remove it from its parent aggregate. */
static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
{
	if (cl->qdisc->q.qlen > 0) /* class is active */
		qfq_deactivate_class(q, cl);

	qfq_rm_from_agg(q, cl);
}

/* Move class to a new aggregate, matching the new class weight and/or lmax */
static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
			   u32 lmax)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_aggregate *new_agg;

	/* 'lmax' can range from [QFQ_MIN_LMAX, pktlen + stab overhead] */
	if (lmax > QFQ_MAX_LMAX)
		return -EINVAL;

	new_agg = qfq_find_agg(q, lmax, weight);
	if (new_agg == NULL) { /* create new aggregate */
		new_agg = kzalloc(sizeof(*new_agg), GFP_ATOMIC);
		if (new_agg == NULL)
			return -ENOBUFS;
		qfq_init_agg(q, new_agg, lmax, weight);
	}
	qfq_deact_rm_from_agg(q, cl);
	qfq_add_to_agg(q, new_agg, cl);

	return 0;
}
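/*
 * The weight checks in qfq_change_class() below bound the total weight
 * by QFQ_MAX_WSUM. A worked example (hypothetical numbers): with
 * QFQ_MAX_WSUM == 64 * 1024 == 65536 and q->wsum == 65000, adding a
 * new class of weight 1000 gives delta_w == 1000 and
 * 65000 + 1000 > 65536, so the request is rejected with -EINVAL.
 */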
static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)*arg;
	bool existing = false;
	struct nlattr *tb[TCA_QFQ_MAX + 1];
	struct qfq_aggregate *new_agg = NULL;
	u32 weight, lmax, inv_w;
	int err;
	int delta_w;

	if (NL_REQ_ATTR_CHECK(extack, NULL, tca, TCA_OPTIONS)) {
		NL_SET_ERR_MSG_MOD(extack, "missing options");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS],
					  qfq_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_QFQ_WEIGHT])
		weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
	else
		weight = 1;

	if (tb[TCA_QFQ_LMAX]) {
		lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
	} else {
		/* MTU size is user controlled */
		lmax = psched_mtu(qdisc_dev(sch));
		if (lmax < QFQ_MIN_LMAX || lmax > QFQ_MAX_LMAX) {
			NL_SET_ERR_MSG_MOD(extack,
					   "MTU size out of bounds for qfq");
			return -EINVAL;
		}
	}

	inv_w = ONE_FP / weight;
	weight = ONE_FP / inv_w;

	if (cl != NULL &&
	    lmax == cl->agg->lmax &&
	    weight == cl->agg->class_weight)
		return 0; /* nothing to change */

	delta_w = weight - (cl ? cl->agg->class_weight : 0);

	if (q->wsum + delta_w > QFQ_MAX_WSUM) {
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "total weight out of range (%d + %u)\n",
				       delta_w, q->wsum);
		return -EINVAL;
	}

	if (cl != NULL) { /* modify existing class */
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    true,
						    tca[TCA_RATE]);
			if (err)
				return err;
		}
		existing = true;
		goto set_change_agg;
	}

	/* create and init new class */
	cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	gnet_stats_basic_sync_init(&cl->bstats);
	cl->common.classid = classid;
	cl->deficit = lmax;

	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				      classid, NULL);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL,
					&cl->rate_est,
					NULL,
					true,
					tca[TCA_RATE]);
		if (err)
			goto destroy_class;
	}

	if (cl->qdisc != &noop_qdisc)
		qdisc_hash_add(cl->qdisc, true);

set_change_agg:
	sch_tree_lock(sch);
	new_agg = qfq_find_agg(q, lmax, weight);
	if (new_agg == NULL) { /* create new aggregate */
		sch_tree_unlock(sch);
		new_agg = kzalloc(sizeof(*new_agg), GFP_KERNEL);
		if (new_agg == NULL) {
			err = -ENOBUFS;
			gen_kill_estimator(&cl->rate_est);
			goto destroy_class;
		}
		sch_tree_lock(sch);
		qfq_init_agg(q, new_agg, lmax, weight);
	}
	if (existing)
		qfq_deact_rm_from_agg(q, cl);
	else
		qdisc_class_hash_insert(&q->clhash, &cl->common);
	qfq_add_to_agg(q, new_agg, cl);
	sch_tree_unlock(sch);
	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

destroy_class:
	qdisc_put(cl->qdisc);
	kfree(cl);
	return err;
}

static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
{
	struct qfq_sched *q = qdisc_priv(sch);

	qfq_rm_from_agg(q, cl);
	gen_kill_estimator(&cl->rate_est);
	qdisc_put(cl->qdisc);
	kfree(cl);
}

static int qfq_delete_class(struct Qdisc *sch, unsigned long arg,
			    struct netlink_ext_ack *extack)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	qdisc_purge_queue(cl->qdisc);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	sch_tree_unlock(sch);

	qfq_destroy_class(sch, cl);
	return 0;
}

static unsigned long qfq_search_class(struct Qdisc *sch, u32 classid)
{
	return (unsigned long)qfq_find_class(sch, classid);
}

static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct qfq_sched *q = qdisc_priv(sch);

	if (cl)
		return NULL;

	return q->block;
}

static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct qfq_class *cl = qfq_find_class(sch, classid);

	if (cl != NULL)
		cl->filter_cnt++;

	return (unsigned long)cl;
}

static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	cl->filter_cnt--;
}

static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old,
			   struct netlink_ext_ack *extack)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->common.classid, NULL);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *qfq_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	return cl->qdisc;
}

static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct qfq_class *cl = (struct qfq_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_QFQ_WEIGHT, cl->agg->class_weight) ||
	    nla_put_u32(skb, TCA_QFQ_LMAX, cl->agg->lmax))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct qfq_class *cl = (struct qfq_class *)arg;
	struct tc_qfq_stats xstats;

	memset(&xstats, 0, sizeof(xstats));

	xstats.weight = cl->agg->class_weight;
	xstats.lmax = cl->agg->lmax;

	if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    qdisc_qstats_copy(d, cl->qdisc) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
				return;
		}
	}
}

static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		pr_debug("qfq_classify: found %d\n", skb->priority);
		cl = qfq_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tcf_classify(skb, NULL, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct qfq_class *)res.class;
		if (cl == NULL)
			cl = qfq_find_class(sch, res.classid);
		return cl;
	}

	return NULL;
}

/* Generic comparison function, handling wraparound. */
static inline int qfq_gt(u64 a, u64 b)
{
	return (s64)(a - b) > 0;
}
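/*
 * Illustrative check of the wraparound behaviour above (hypothetical
 * values): with a == 1 and b == ~0ULL, i.e. a has just wrapped past b,
 * a - b == 2 as u64, so (s64)(a - b) > 0 and qfq_gt(1, ~0ULL)
 * correctly reports that a is "later" than b, even though a < b as a
 * plain unsigned comparison.
 */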
/* Round a precise timestamp to its slotted value. */
static inline u64 qfq_round_down(u64 ts, unsigned int shift)
{
	return ts & ~((1ULL << shift) - 1);
}

/* return the pointer to the group with lowest index in the bitmap */
static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
					unsigned long bitmap)
{
	int index = __ffs(bitmap);
	return &q->groups[index];
}
/* Calculate a mask to mimic what would be ffs_from(). */
static inline unsigned long mask_from(unsigned long bitmap, int from)
{
	return bitmap & ~((1UL << from) - 1);
}

/*
 * The state computation relies on ER=0, IR=1, EB=2, IB=3
 * First compute eligibility comparing grp->S, q->V,
 * then check if someone is blocking us and possibly add EB
 */
static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
{
	/* if S > V we are not eligible */
	unsigned int state = qfq_gt(grp->S, q->V);
	unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
	struct qfq_group *next;

	if (mask) {
		next = qfq_ffs(q, mask);
		if (qfq_gt(grp->F, next->F))
			state |= EB;
	}

	return state;
}


/*
 * In principle
 *	q->bitmaps[dst] |= q->bitmaps[src] & mask;
 *	q->bitmaps[src] &= ~mask;
 * but we should make sure that src != dst
 */
static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
				   int src, int dst)
{
	q->bitmaps[dst] |= q->bitmaps[src] & mask;
	q->bitmaps[src] &= ~mask;
}

static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
{
	unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
	struct qfq_group *next;

	if (mask) {
		next = qfq_ffs(q, mask);
		if (!qfq_gt(next->F, old_F))
			return;
	}

	mask = (1UL << index) - 1;
	qfq_move_groups(q, mask, EB, ER);
	qfq_move_groups(q, mask, IB, IR);
}

/*
 * perhaps
 *
	old_V ^= q->V;
	old_V >>= q->min_slot_shift;
	if (old_V) {
		...
	}
 *
 */
static void qfq_make_eligible(struct qfq_sched *q)
{
	unsigned long vslot = q->V >> q->min_slot_shift;
	unsigned long old_vslot = q->oldV >> q->min_slot_shift;

	if (vslot != old_vslot) {
		unsigned long mask;
		int last_flip_pos = fls(vslot ^ old_vslot);

		if (last_flip_pos > 31) /* higher than the number of groups */
			mask = ~0UL; /* make all groups eligible */
		else
			mask = (1UL << last_flip_pos) - 1;

		qfq_move_groups(q, mask, IR, ER);
		qfq_move_groups(q, mask, IB, EB);
	}
}
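/*
 * Worked example for qfq_make_eligible() (hypothetical slot values):
 * with old_vslot == 5 (101b) and vslot == 8 (1000b),
 * vslot ^ old_vslot == 1101b and fls() returns 4, so
 * mask == (1UL << 4) - 1 == 0xf and the groups with index 0..3 are
 * moved from IR/IB to ER/EB.
 */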
/*
 * The index of the slot in which the input aggregate agg is to be
 * inserted must not be higher than QFQ_MAX_SLOTS-2. There is a '-2'
 * and not a '-1' because the start time of the group may be moved
 * backward by one slot after the aggregate has been inserted, and
 * this would cause non-empty slots to be right-shifted by one
 * position.
 *
 * QFQ+ fully satisfies this bound to the slot index if the parameters
 * of the classes are not changed dynamically, and if QFQ+ never
 * happens to postpone the service of agg unjustly, i.e., it never
 * happens that the aggregate becomes backlogged and eligible, or just
 * eligible, while an aggregate with a higher approximated finish time
 * is being served. In particular, in this case QFQ+ guarantees that
 * the timestamps of agg are low enough that the slot index is never
 * higher than 2. Unfortunately, QFQ+ cannot provide the same
 * guarantee if it happens to unjustly postpone the service of agg, or
 * if the parameters of some class are changed.
 *
 * As for the first event, i.e., an out-of-order service, the
 * upper bound to the slot index guaranteed by QFQ+ grows to
 * 2 +
 * QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
 * (current_max_weight/current_wsum) <= 2 + 8 * 128 * 1.
 *
 * The following function deals with this problem by backward-shifting
 * the timestamps of agg, if needed, so as to guarantee that the slot
 * index is never higher than QFQ_MAX_SLOTS-2. This backward-shift may
 * cause the service of other aggregates to be postponed, yet the
 * worst-case guarantees of these aggregates are not violated. In
 * fact, in case of no out-of-order service, the timestamps of agg
 * would have been even lower than they are after the backward shift,
 * because QFQ+ would have guaranteed a maximum value equal to 2 for
 * the slot index, and 2 < QFQ_MAX_SLOTS-2. Hence the aggregates whose
 * service is postponed because of the backward-shift would have
 * however waited for the service of agg before being served.
 *
 * The other event that may cause the slot index to be higher than 2
 * for agg is a recent change of the parameters of some class. If the
 * weight of a class is increased or the lmax (max_pkt_size) of the
 * class is decreased, then a new aggregate with smaller slot size
 * than the original parent aggregate of the class may happen to be
 * activated. The activation of this aggregate should be properly
 * delayed to when the service of the class has finished in the ideal
 * system tracked by QFQ+. If the activation of the aggregate is not
 * delayed to this reference time instant, then this aggregate may be
 * unjustly served before other aggregates waiting for service. This
 * may cause the above bound to the slot index to be violated for some
 * of these unlucky aggregates.
 *
 * Instead of delaying the activation of the new aggregate, which is
 * quite complex, the above-discussed capping of the slot index is
 * used to handle also the consequences of a change of the parameters
 * of a class.
 */
static void qfq_slot_insert(struct qfq_group *grp, struct qfq_aggregate *agg,
			    u64 roundedS)
{
	u64 slot = (roundedS - grp->S) >> grp->slot_shift;
	unsigned int i; /* slot index in the bucket list */

	if (unlikely(slot > QFQ_MAX_SLOTS - 2)) {
		u64 deltaS = roundedS - grp->S -
			     ((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift);
		agg->S -= deltaS;
		agg->F -= deltaS;
		slot = QFQ_MAX_SLOTS - 2;
	}

	i = (grp->front + slot) % QFQ_MAX_SLOTS;

	hlist_add_head(&agg->next, &grp->slots[i]);
	__set_bit(slot, &grp->full_slots);
}

/* Maybe introduce hlist_first_entry?? */
static struct qfq_aggregate *qfq_slot_head(struct qfq_group *grp)
{
	return hlist_entry(grp->slots[grp->front].first,
			   struct qfq_aggregate, next);
}

/*
 * remove the entry from the slot
 */
static void qfq_front_slot_remove(struct qfq_group *grp)
{
	struct qfq_aggregate *agg = qfq_slot_head(grp);

	BUG_ON(!agg);
	hlist_del(&agg->next);
	if (hlist_empty(&grp->slots[grp->front]))
		__clear_bit(0, &grp->full_slots);
}
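/*
 * Sketch of the circular bucket list used above (hypothetical values):
 * with grp->front == 30 and slot == 3, qfq_slot_insert() links the
 * aggregate into bucket (30 + 3) % QFQ_MAX_SLOTS == 1, while setting
 * bit 3 of full_slots: the bitmap is always relative to front, which
 * is why rotating the group only shifts full_slots and moves front.
 */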
/*
 * Returns the first aggregate in the first non-empty bucket of the
 * group. As a side effect, adjusts the bucket list so the first
 * non-empty bucket is at position 0 in full_slots.
 */
static struct qfq_aggregate *qfq_slot_scan(struct qfq_group *grp)
{
	unsigned int i;

	pr_debug("qfq slot_scan: grp %u full %#lx\n",
		 grp->index, grp->full_slots);

	if (grp->full_slots == 0)
		return NULL;

	i = __ffs(grp->full_slots);  /* zero based */
	if (i > 0) {
		grp->front = (grp->front + i) % QFQ_MAX_SLOTS;
		grp->full_slots >>= i;
	}

	return qfq_slot_head(grp);
}

/*
 * adjust the bucket list. When the start time of a group decreases,
 * we move the index down (modulo QFQ_MAX_SLOTS) so we don't need to
 * move the objects. The mask of occupied slots must be shifted
 * because we use ffs() to find the first non-empty slot.
 * This covers decreases in the group's start time, but what about
 * increases of the start time ?
 * Here too we should make sure that i is less than 32
 */
static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS)
{
	unsigned int i = (grp->S - roundedS) >> grp->slot_shift;

	grp->full_slots <<= i;
	grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
}

static void qfq_update_eligible(struct qfq_sched *q)
{
	struct qfq_group *grp;
	unsigned long ineligible;

	ineligible = q->bitmaps[IR] | q->bitmaps[IB];
	if (ineligible) {
		if (!q->bitmaps[ER]) {
			grp = qfq_ffs(q, ineligible);
			if (qfq_gt(grp->S, q->V))
				q->V = grp->S;
		}
		qfq_make_eligible(q);
	}
}

/* Dequeue head packet of the head class in the DRR queue of the aggregate. */
static void agg_dequeue(struct qfq_aggregate *agg,
			struct qfq_class *cl, unsigned int len)
{
	qdisc_dequeue_peeked(cl->qdisc);

	cl->deficit -= (int) len;

	if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
		list_del(&cl->alist);
	else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
		cl->deficit += agg->lmax;
		list_move_tail(&cl->alist, &agg->active);
	}
}

static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
					   struct qfq_class **cl,
					   unsigned int *len)
{
	struct sk_buff *skb;

	*cl = list_first_entry(&agg->active, struct qfq_class, alist);
	skb = (*cl)->qdisc->ops->peek((*cl)->qdisc);
	if (skb == NULL)
		WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
	else
		*len = qdisc_pkt_len(skb);

	return skb;
}

/* Update F according to the actual service received by the aggregate. */
static inline void charge_actual_service(struct qfq_aggregate *agg)
{
	/* Compute the service received by the aggregate, taking into
	 * account that, after decreasing the number of classes in
	 * agg, it may happen that
	 * agg->initial_budget - agg->budget > agg->budgetmax
	 */
	u32 service_received = min(agg->budgetmax,
				   agg->initial_budget - agg->budget);

	agg->F = agg->S + (u64)service_received * agg->inv_w;
}
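/*
 * Worked example for charge_actual_service() (illustrative numbers):
 * an aggregate with budgetmax == 3000 starts service with
 * initial_budget == 3000 and sends 1700 bytes, leaving budget == 1300.
 * Then service_received == min(3000, 3000 - 1300) == 1700 and the
 * finish time becomes F = S + 1700 * inv_w.
 */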
/* Assign a reasonable start time for a new aggregate in group i.
 * Admissible values for \hat(F) are multiples of \sigma_i
 * no greater than V+\sigma_i . Larger values mean that
 * we had a wraparound so we consider the timestamp to be stale.
 *
 * If F is not stale and F >= V then we set S = F.
 * Otherwise we should assign S = V, but this may violate
 * the ordering in EB (see [2]). So, if we have groups in ER,
 * set S to the F_j of the first group j which would be blocking us.
 * We are guaranteed not to move S backward because
 * otherwise our group i would still be blocked.
 */
static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	unsigned long mask;
	u64 limit, roundedF;
	int slot_shift = agg->grp->slot_shift;

	roundedF = qfq_round_down(agg->F, slot_shift);
	limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);

	if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) {
		/* timestamp was stale */
		mask = mask_from(q->bitmaps[ER], agg->grp->index);
		if (mask) {
			struct qfq_group *next = qfq_ffs(q, mask);
			if (qfq_gt(roundedF, next->F)) {
				if (qfq_gt(limit, next->F))
					agg->S = next->F;
				else /* preserve timestamp correctness */
					agg->S = limit;
				return;
			}
		}
		agg->S = q->V;
	} else  /* timestamp is not stale */
		agg->S = agg->F;
}

/* Update the timestamps of agg before scheduling/rescheduling it for
 * service. In particular, assign to agg->F its maximum possible
 * value, i.e., the virtual finish time with which the aggregate
 * should be labeled if it used all its budget once in service.
 */
static inline void
qfq_update_agg_ts(struct qfq_sched *q,
		    struct qfq_aggregate *agg, enum update_reason reason)
{
	if (reason != requeue)
		qfq_update_start(q, agg);
	else /* just charge agg for the service received */
		agg->S = agg->F;

	agg->F = agg->S + (u64)agg->budgetmax * agg->inv_w;
}
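/*
 * Example of the timestamp update above (illustrative numbers): on a
 * requeue, an aggregate with budgetmax == 3000 and inv_w == ONE_FP/6
 * restarts from S == old F and gets
 * F = S + 3000 * ONE_FP/6 ~= S + 500 * ONE_FP, i.e. its finish time
 * lies about 500 "bytes" of virtual time after its new start time.
 */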
static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg);

static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_aggregate *in_serv_agg = q->in_serv_agg;
	struct qfq_class *cl;
	struct sk_buff *skb = NULL;
	/* next-packet len, 0 means no more active classes in in-service agg */
	unsigned int len = 0;

	if (in_serv_agg == NULL)
		return NULL;

	if (!list_empty(&in_serv_agg->active))
		skb = qfq_peek_skb(in_serv_agg, &cl, &len);

	/*
	 * If there are no active classes in the in-service aggregate,
	 * or if the aggregate has not enough budget to serve its next
	 * class, then choose the next aggregate to serve.
	 */
	if (len == 0 || in_serv_agg->budget < len) {
		charge_actual_service(in_serv_agg);

		/* recharge the budget of the aggregate */
		in_serv_agg->initial_budget = in_serv_agg->budget =
			in_serv_agg->budgetmax;

		if (!list_empty(&in_serv_agg->active)) {
			/*
			 * Still active: reschedule for
			 * service. Possible optimization: if no other
			 * aggregate is active, then there is no point
			 * in rescheduling this aggregate, and we can
			 * just keep it as the in-service one. This
			 * should be however a corner case, and to
			 * handle it, we would need to maintain an
			 * extra num_active_aggs field.
			 */
			qfq_update_agg_ts(q, in_serv_agg, requeue);
			qfq_schedule_agg(q, in_serv_agg);
		} else if (sch->q.qlen == 0) { /* no aggregate to serve */
			q->in_serv_agg = NULL;
			return NULL;
		}

		/*
		 * If we get here, there are other aggregates queued:
		 * choose the new aggregate to serve.
		 */
		in_serv_agg = q->in_serv_agg = qfq_choose_next_agg(q);
		skb = qfq_peek_skb(in_serv_agg, &cl, &len);
	}
	if (!skb)
		return NULL;

	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;
	qdisc_bstats_update(sch, skb);

	agg_dequeue(in_serv_agg, cl, len);
	/* If lmax is lowered, through qfq_change_class, for a class
	 * owning pending packets with larger size than the new value
	 * of lmax, then the following condition may hold.
	 */
	if (unlikely(in_serv_agg->budget < len))
		in_serv_agg->budget = 0;
	else
		in_serv_agg->budget -= len;

	q->V += (u64)len * q->iwsum;
	pr_debug("qfq dequeue: len %u F %lld now %lld\n",
		 len, (unsigned long long) in_serv_agg->F,
		 (unsigned long long) q->V);

	return skb;
}

static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
{
	struct qfq_group *grp;
	struct qfq_aggregate *agg, *new_front_agg;
	u64 old_F;

	qfq_update_eligible(q);
	q->oldV = q->V;

	if (!q->bitmaps[ER])
		return NULL;

	grp = qfq_ffs(q, q->bitmaps[ER]);
	old_F = grp->F;

	agg = qfq_slot_head(grp);

	/* agg starts to be served, remove it from schedule */
	qfq_front_slot_remove(grp);

	new_front_agg = qfq_slot_scan(grp);

	if (new_front_agg == NULL) /* group is now inactive, remove from ER */
		__clear_bit(grp->index, &q->bitmaps[ER]);
	else {
		u64 roundedS = qfq_round_down(new_front_agg->S,
					      grp->slot_shift);
		unsigned int s;

		if (grp->S == roundedS)
			return agg;
		grp->S = roundedS;
		grp->F = roundedS + (2ULL << grp->slot_shift);
		__clear_bit(grp->index, &q->bitmaps[ER]);
		s = qfq_calc_state(q, grp);
		__set_bit(grp->index, &q->bitmaps[s]);
	}

	qfq_unblock_groups(q, grp->index, old_F);

	return agg;
}
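/*
 * A note on the lmax check in qfq_enqueue() below, with illustrative
 * numbers: if a class sits in an aggregate with lmax == 1514 and a
 * 3000-byte GSO packet arrives for it, qfq_change_agg() moves the
 * class to an aggregate with lmax == 3000 (creating it if needed)
 * before the packet is queued, so the budget accounting stays sound.
 */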
static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb), gso_segs;
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct qfq_aggregate *agg;
	int err = 0;
	bool first;

	cl = qfq_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}
	pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);

	if (unlikely(cl->agg->lmax < len)) {
		pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
			 cl->agg->lmax, len, cl->common.classid);
		err = qfq_change_agg(sch, cl, cl->agg->class_weight, len);
		if (err) {
			cl->qstats.drops++;
			return qdisc_drop(skb, sch, to_free);
		}
	}

	gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	first = !cl->qdisc->q.qlen;
	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		pr_debug("qfq_enqueue: enqueue failed %d\n", err);
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	_bstats_update(&cl->bstats, len, gso_segs);
	sch->qstats.backlog += len;
	++sch->q.qlen;

	agg = cl->agg;
	/* if the queue was not empty, then done here */
	if (!first) {
		if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
		    list_first_entry(&agg->active, struct qfq_class, alist)
		    == cl && cl->deficit < len)
			list_move_tail(&cl->alist, &agg->active);

		return err;
	}

	/* schedule class for service within the aggregate */
	cl->deficit = agg->lmax;
	list_add_tail(&cl->alist, &agg->active);

	if (list_first_entry(&agg->active, struct qfq_class, alist) != cl ||
	    q->in_serv_agg == agg)
		return err; /* non-empty or in service, nothing else to do */

	qfq_activate_agg(q, agg, enqueue);

	return err;
}

/*
 * Schedule aggregate according to its timestamps.
 */
static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	struct qfq_group *grp = agg->grp;
	u64 roundedS;
	int s;

	roundedS = qfq_round_down(agg->S, grp->slot_shift);

	/*
	 * Insert agg in the correct bucket.
	 * If agg->S >= grp->S we don't need to adjust the
	 * bucket list and simply go to the insertion phase.
	 * Otherwise grp->S is decreasing, we must make room
	 * in the bucket list, and also recompute the group state.
	 * Finally, if there were no flows in this group and nobody
	 * was in ER make sure to adjust V.
	 */
	if (grp->full_slots) {
		if (!qfq_gt(grp->S, agg->S))
			goto skip_update;

		/* create a slot for this agg->S */
		qfq_slot_rotate(grp, roundedS);
		/* group was surely ineligible, remove */
		__clear_bit(grp->index, &q->bitmaps[IR]);
		__clear_bit(grp->index, &q->bitmaps[IB]);
	} else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) &&
		   q->in_serv_agg == NULL)
		q->V = roundedS;

	grp->S = roundedS;
	grp->F = roundedS + (2ULL << grp->slot_shift);
	s = qfq_calc_state(q, grp);
	__set_bit(grp->index, &q->bitmaps[s]);

	pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n",
		 s, q->bitmaps[s],
		 (unsigned long long) agg->S,
		 (unsigned long long) agg->F,
		 (unsigned long long) q->V);

skip_update:
	qfq_slot_insert(grp, agg, roundedS);
}


/* Update agg ts and schedule agg for service */
static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
			     enum update_reason reason)
{
	agg->initial_budget = agg->budget = agg->budgetmax; /* recharge budg. */

	qfq_update_agg_ts(q, agg, reason);
	if (q->in_serv_agg == NULL) { /* no aggr. in service or scheduled */
		q->in_serv_agg = agg; /* start serving this aggregate */
		 /* update V: to be in service, agg must be eligible */
		q->oldV = q->V = agg->S;
	} else if (agg != q->in_serv_agg)
		qfq_schedule_agg(q, agg);
}

static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
			    struct qfq_aggregate *agg)
{
	unsigned int i, offset;
	u64 roundedS;

	roundedS = qfq_round_down(agg->S, grp->slot_shift);
	offset = (roundedS - grp->S) >> grp->slot_shift;

	i = (grp->front + offset) % QFQ_MAX_SLOTS;

	hlist_del(&agg->next);
	if (hlist_empty(&grp->slots[i]))
		__clear_bit(offset, &grp->full_slots);
}

/*
 * Called to forcibly deschedule an aggregate. If the aggregate is
 * not in the front bucket, or if the latter has other aggregates in
 * the front bucket, we can simply remove the aggregate with no other
 * side effects.
 * Otherwise we must propagate the event up.
 */
static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	struct qfq_group *grp = agg->grp;
	unsigned long mask;
	u64 roundedS;
	int s;

	if (agg == q->in_serv_agg) {
		charge_actual_service(agg);
		q->in_serv_agg = qfq_choose_next_agg(q);
		return;
	}

	agg->F = agg->S;
	qfq_slot_remove(q, grp, agg);

	if (!grp->full_slots) {
		__clear_bit(grp->index, &q->bitmaps[IR]);
		__clear_bit(grp->index, &q->bitmaps[EB]);
		__clear_bit(grp->index, &q->bitmaps[IB]);

		if (test_bit(grp->index, &q->bitmaps[ER]) &&
		    !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) {
			mask = q->bitmaps[ER] & ((1UL << grp->index) - 1);
			if (mask)
				mask = ~((1UL << __fls(mask)) - 1);
			else
				mask = ~0UL;
			qfq_move_groups(q, mask, EB, ER);
			qfq_move_groups(q, mask, IB, IR);
		}
		__clear_bit(grp->index, &q->bitmaps[ER]);
	} else if (hlist_empty(&grp->slots[grp->front])) {
		agg = qfq_slot_scan(grp);
		roundedS = qfq_round_down(agg->S, grp->slot_shift);
		if (grp->S != roundedS) {
			__clear_bit(grp->index, &q->bitmaps[ER]);
			__clear_bit(grp->index, &q->bitmaps[IR]);
			__clear_bit(grp->index, &q->bitmaps[EB]);
			__clear_bit(grp->index, &q->bitmaps[IB]);
			grp->S = roundedS;
			grp->F = roundedS + (2ULL << grp->slot_shift);
			s = qfq_calc_state(q, grp);
			__set_bit(grp->index, &q->bitmaps[s]);
		}
	}
}

static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)arg;

	qfq_deactivate_class(q, cl);
}
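/*
 * Worked example for the initialization below (assuming a device with
 * tx_queue_len >= 8): max_classes == QFQ_MAX_AGG_CLASSES == 8, so
 * max_cl_shift == 3 and maxbudg_shift == QFQ_MTU_SHIFT + 3 == 19,
 * giving
 *
 *	min_slot_shift = FRAC_BITS + 19 - QFQ_MAX_INDEX = 30 + 19 - 24 = 25,
 *
 * and group i therefore uses slot_shift == 25 + i.
 */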
static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
			  struct netlink_ext_ack *extack)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_group *grp;
	int i, j, err;
	u32 max_cl_shift, maxbudg_shift, max_classes;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;

	max_classes = min_t(u64, (u64)qdisc_dev(sch)->tx_queue_len + 1,
			    QFQ_MAX_AGG_CLASSES);
	/* max_cl_shift = floor(log_2(max_classes)) */
	max_cl_shift = __fls(max_classes);
	q->max_agg_classes = 1<<max_cl_shift;

	/* maxbudg_shift = log2(max_len * max_classes_per_agg) */
	maxbudg_shift = QFQ_MTU_SHIFT + max_cl_shift;
	q->min_slot_shift = FRAC_BITS + maxbudg_shift - QFQ_MAX_INDEX;

	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
		grp = &q->groups[i];
		grp->index = i;
		grp->slot_shift = q->min_slot_shift + i;
		for (j = 0; j < QFQ_MAX_SLOTS; j++)
			INIT_HLIST_HEAD(&grp->slots[j]);
	}

	INIT_HLIST_HEAD(&q->nonfull_aggs);

	return 0;
}

static void qfq_reset_qdisc(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen > 0)
				qfq_deactivate_class(q, cl);

			qdisc_reset(cl->qdisc);
		}
	}
}

static void qfq_destroy_qdisc(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct hlist_node *next;
	unsigned int i;

	tcf_block_put(q->block);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode) {
			qfq_destroy_class(sch, cl);
		}
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static const struct Qdisc_class_ops qfq_class_ops = {
	.change		= qfq_change_class,
	.delete		= qfq_delete_class,
	.find		= qfq_search_class,
	.tcf_block	= qfq_tcf_block,
	.bind_tcf	= qfq_bind_tcf,
	.unbind_tcf	= qfq_unbind_tcf,
	.graft		= qfq_graft_class,
	.leaf		= qfq_class_leaf,
	.qlen_notify	= qfq_qlen_notify,
	.dump		= qfq_dump_class,
	.dump_stats	= qfq_dump_class_stats,
	.walk		= qfq_walk,
};

static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
	.cl_ops		= &qfq_class_ops,
	.id		= "qfq",
	.priv_size	= sizeof(struct qfq_sched),
	.enqueue	= qfq_enqueue,
	.dequeue	= qfq_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= qfq_init_qdisc,
	.reset		= qfq_reset_qdisc,
	.destroy	= qfq_destroy_qdisc,
	.owner		= THIS_MODULE,
};

static int __init qfq_init(void)
{
	return register_qdisc(&qfq_qdisc_ops);
}

static void __exit qfq_exit(void)
{
	unregister_qdisc(&qfq_qdisc_ops);
}

module_init(qfq_init);
module_exit(qfq_exit);
MODULE_LICENSE("GPL");