Lines matching refs: cl — identifier cross-reference for the HTB scheduler (net/sched/sch_htb.c). Each entry shows the line number in the source file, the matching line of code, and the enclosing function; definition sites carry a trailing "local" or "argument" tag.

223 	struct htb_class *cl;  in htb_classify()  local
234 cl = htb_find(skb->priority, sch); in htb_classify()
235 if (cl) { in htb_classify()
236 if (cl->level == 0) in htb_classify()
237 return cl; in htb_classify()
239 tcf = rcu_dereference_bh(cl->filter_list); in htb_classify()
257 cl = (void *)res.class; in htb_classify()
258 if (!cl) { in htb_classify()
261 cl = htb_find(res.classid, sch); in htb_classify()
262 if (!cl) in htb_classify()
265 if (!cl->level) in htb_classify()
266 return cl; /* we hit leaf; return it */ in htb_classify()
269 tcf = rcu_dereference_bh(cl->filter_list); in htb_classify()
272 cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch); in htb_classify()
273 if (!cl || cl->level) in htb_classify()
275 return cl; in htb_classify()
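The htb_classify() hits above trace the lookup order: skb->priority is tried as a class handle first, then the filter chains are consulted, and finally the qdisc's default class (q->defcls) is used; only a level-0 (leaf) class is ever returned, otherwise the packet falls back to the direct queue. What follows is a minimal userspace sketch of that fallback order, not kernel code: toy_class, find_class and run_filters are invented stand-ins for the class hash and the tcf filter walk, and the per-class filter lists are collapsed into a single filter step.

/* Hypothetical userspace model of the htb_classify() fallback order.
 * Only the control flow is reproduced; filters and the class hash are stubbed.
 */
#include <stdio.h>

struct toy_class {
    unsigned int classid;
    int level;              /* 0 == leaf, >0 == inner class */
};

static struct toy_class classes[] = {
    { 0x10001, 1 },         /* 1:1  inner */
    { 0x10010, 0 },         /* 1:10 leaf  */
    { 0x10020, 0 },         /* 1:20 leaf (default) */
};

static struct toy_class *find_class(unsigned int classid)
{
    for (unsigned i = 0; i < sizeof(classes) / sizeof(classes[0]); i++)
        if (classes[i].classid == classid)
            return &classes[i];
    return NULL;
}

/* Stub for the filter-chain step: 0 means "no filter matched". */
static unsigned int run_filters(unsigned int skb_mark)
{
    return skb_mark == 42 ? 0x10010 : 0;
}

static struct toy_class *classify(unsigned int skb_priority,
                                  unsigned int skb_mark,
                                  unsigned int defcls)
{
    struct toy_class *cl;

    /* 1. skb->priority may already name a class of this qdisc. */
    cl = find_class(skb_priority);
    if (cl && cl->level == 0)
        return cl;                         /* direct hit on a leaf */

    /* 2. Otherwise ask the filter chain for a classid. */
    unsigned int res = run_filters(skb_mark);
    if (res) {
        cl = find_class(res);
        if (cl && cl->level == 0)
            return cl;                     /* we hit a leaf; return it */
    }

    /* 3. Fall back to the default class; it must be a leaf to be usable. */
    cl = find_class(defcls);
    if (!cl || cl->level)
        return NULL;                       /* caller would use the direct queue */
    return cl;
}

int main(void)
{
    struct toy_class *cl = classify(0, 42, 0x10020);
    printf("classified into %#x\n", cl ? cl->classid : 0);
    return 0;
}

Built with any C99 compiler this prints the classid the fake filter chose; changing the mark or defcls exercises the other branches.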
288 struct htb_class *cl, int prio) in htb_add_to_id_tree() argument
297 if (cl->common.classid > c->common.classid) in htb_add_to_id_tree()
302 rb_link_node(&cl->node[prio], parent, p); in htb_add_to_id_tree()
303 rb_insert_color(&cl->node[prio], root); in htb_add_to_id_tree()
317 struct htb_class *cl, s64 delay) in htb_add_to_wait_tree() argument
319 struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL; in htb_add_to_wait_tree()
321 cl->pq_key = q->now + delay; in htb_add_to_wait_tree()
322 if (cl->pq_key == q->now) in htb_add_to_wait_tree()
323 cl->pq_key++; in htb_add_to_wait_tree()
326 if (q->near_ev_cache[cl->level] > cl->pq_key) in htb_add_to_wait_tree()
327 q->near_ev_cache[cl->level] = cl->pq_key; in htb_add_to_wait_tree()
333 if (cl->pq_key >= c->pq_key) in htb_add_to_wait_tree()
338 rb_link_node(&cl->pq_node, parent, p); in htb_add_to_wait_tree()
339 rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq); in htb_add_to_wait_tree()
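htb_add_to_wait_tree() schedules a throttled class for a future wakeup: the key is pq_key = now + delay (bumped by one if it would equal now), and q->near_ev_cache[] remembers the earliest pending key per level so the dequeue path can skip scanning the tree when nothing is due. Below is a small sketch of just that bookkeeping; toy_sched and toy_class are invented types and the rb-tree insertion itself is left as a comment.

/* Simplified model of the pq_key / near_ev_cache bookkeeping done by
 * htb_add_to_wait_tree(); the actual rb-tree insertion is left out.
 */
#include <stdint.h>
#include <stdio.h>

#define TC_HTB_MAXDEPTH 8

struct toy_sched {
    int64_t now;                              /* cached time, ns */
    int64_t near_ev_cache[TC_HTB_MAXDEPTH];   /* soonest event per level */
};

struct toy_class {
    int level;
    int64_t pq_key;                           /* when the class becomes eligible */
};

static void add_to_wait_tree(struct toy_sched *q, struct toy_class *cl,
                             int64_t delay)
{
    cl->pq_key = q->now + delay;
    if (cl->pq_key == q->now)
        cl->pq_key++;          /* never schedule an event for "right now" */

    /* Remember the earliest pending event on this level so the dequeue
     * path can avoid walking the tree when nothing is due yet. */
    if (q->near_ev_cache[cl->level] > cl->pq_key)
        q->near_ev_cache[cl->level] = cl->pq_key;

    /* ...rb_link_node()/rb_insert_color() ordered by pq_key would go here... */
}

int main(void)
{
    struct toy_sched q = { .now = 1000 };
    for (int i = 0; i < TC_HTB_MAXDEPTH; i++)
        q.near_ev_cache[i] = INT64_MAX;

    struct toy_class cl = { .level = 0 };
    add_to_wait_tree(&q, &cl, 0);
    printf("pq_key=%lld near=%lld\n", (long long)cl.pq_key,
           (long long)q.near_ev_cache[0]);
    return 0;
}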
364 struct htb_class *cl, int mask) in htb_add_class_to_row() argument
366 q->row_mask[cl->level] |= mask; in htb_add_class_to_row()
370 htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio); in htb_add_class_to_row()
396 struct htb_class *cl, int mask) in htb_remove_class_from_row() argument
399 struct htb_level *hlevel = &q->hlevel[cl->level]; in htb_remove_class_from_row()
406 if (hprio->ptr == cl->node + prio) in htb_remove_class_from_row()
409 htb_safe_rb_erase(cl->node + prio, &hprio->row); in htb_remove_class_from_row()
413 q->row_mask[cl->level] &= ~m; in htb_remove_class_from_row()
425 static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl) in htb_activate_prios() argument
427 struct htb_class *p = cl->parent; in htb_activate_prios()
428 long m, mask = cl->prio_activity; in htb_activate_prios()
430 while (cl->cmode == HTB_MAY_BORROW && p && mask) { in htb_activate_prios()
445 htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio); in htb_activate_prios()
448 cl = p; in htb_activate_prios()
449 p = cl->parent; in htb_activate_prios()
452 if (cl->cmode == HTB_CAN_SEND && mask) in htb_activate_prios()
453 htb_add_class_to_row(q, cl, mask); in htb_activate_prios()
465 static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl) in htb_deactivate_prios() argument
467 struct htb_class *p = cl->parent; in htb_deactivate_prios()
468 long m, mask = cl->prio_activity; in htb_deactivate_prios()
470 while (cl->cmode == HTB_MAY_BORROW && p && mask) { in htb_deactivate_prios()
477 if (p->inner.clprio[prio].ptr == cl->node + prio) { in htb_deactivate_prios()
482 p->inner.clprio[prio].last_ptr_id = cl->common.classid; in htb_deactivate_prios()
486 htb_safe_rb_erase(cl->node + prio, in htb_deactivate_prios()
494 cl = p; in htb_deactivate_prios()
495 p = cl->parent; in htb_deactivate_prios()
498 if (cl->cmode == HTB_CAN_SEND && mask) in htb_deactivate_prios()
499 htb_remove_class_from_row(q, cl, mask); in htb_deactivate_prios()
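htb_activate_prios() and htb_deactivate_prios() walk from a class toward the root while the class is in HTB_MAY_BORROW mode, pushing a bitmask of its active priorities into each parent's inner feed; bits whose feed was already populated stop propagating, and whatever survives when an HTB_CAN_SEND ancestor is reached is added to the per-level row. Here is a rough model of the activation walk only, under the assumption that the per-priority rb-tree feeds can be summarized as a feed_mask bitmask per parent; feed_mask, row_mask and the toy types are inventions for the illustration.

/* Rough model of the prio-mask walk in htb_activate_prios().  Real HTB
 * links the class into per-priority rb-trees; here each parent merely
 * records which priorities currently feed it.
 */
#include <stdio.h>

enum cmode { CANT_SEND, MAY_BORROW, CAN_SEND };

struct toy_class {
    struct toy_class *parent;
    enum cmode cmode;
    unsigned long prio_activity;   /* bit p set => priority p is active */
    unsigned long feed_mask;       /* inner class: prios its children feed */
};

static unsigned long row_mask;     /* priorities active in the top-level row */

static void activate_prios(struct toy_class *cl)
{
    unsigned long mask = cl->prio_activity;
    struct toy_class *p = cl->parent;

    while (cl->cmode == MAY_BORROW && p && mask) {
        /* Prios the parent already feeds stop propagating upward... */
        unsigned long already = mask & p->feed_mask;

        /* ...but this class still gets linked into the parent's feed. */
        p->feed_mask |= mask;
        mask &= ~already;
        p->prio_activity |= mask;

        cl = p;
        p = cl->parent;
    }
    /* Whatever is left activates cl itself in the per-level row. */
    if (cl->cmode == CAN_SEND && mask)
        row_mask |= mask;          /* stands in for htb_add_class_to_row() */
}

int main(void)
{
    struct toy_class root  = { .cmode = CAN_SEND };
    struct toy_class inner = { .parent = &root, .cmode = CAN_SEND };
    struct toy_class leaf  = { .parent = &inner, .cmode = MAY_BORROW,
                               .prio_activity = 1ul << 0 };

    activate_prios(&leaf);
    printf("inner feed mask %#lx, row mask %#lx\n", inner.feed_mask, row_mask);
    return 0;
}

The deactivation path referenced above undoes the same walk, with extra care to fix up round-robin pointers that still point at the class being removed.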
502 static inline s64 htb_lowater(const struct htb_class *cl) in htb_lowater() argument
505 return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0; in htb_lowater()
509 static inline s64 htb_hiwater(const struct htb_class *cl) in htb_hiwater() argument
512 return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0; in htb_hiwater()
532 htb_class_mode(struct htb_class *cl, s64 *diff) in htb_class_mode() argument
536 if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) { in htb_class_mode()
541 if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl)) in htb_class_mode()
561 htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff) in htb_change_class_mode() argument
563 enum htb_cmode new_mode = htb_class_mode(cl, diff); in htb_change_class_mode()
565 if (new_mode == cl->cmode) in htb_change_class_mode()
569 cl->overlimits++; in htb_change_class_mode()
573 if (cl->prio_activity) { /* not necessary: speed optimization */ in htb_change_class_mode()
574 if (cl->cmode != HTB_CANT_SEND) in htb_change_class_mode()
575 htb_deactivate_prios(q, cl); in htb_change_class_mode()
576 cl->cmode = new_mode; in htb_change_class_mode()
578 htb_activate_prios(q, cl); in htb_change_class_mode()
580 cl->cmode = new_mode; in htb_change_class_mode()
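htb_class_mode() together with htb_lowater()/htb_hiwater() decides among the three modes with optional hysteresis: if even the ceil bucket (ctokens + diff) is below the low-water mark the class HTB_CANT_SEND; if the rate bucket (tokens + diff) reaches the high-water mark it HTB_CAN_SEND; otherwise it HTB_MAY_BORROW, and *diff is overwritten with the shortfall so the caller knows how long to wait. A standalone sketch of that decision follows, with simplified names; token values are signed nanoseconds as in the kernel, and hysteresis is forced on here to make the effect visible.

/* Standalone model of htb_class_mode() with the optional hysteresis
 * thresholds from htb_lowater()/htb_hiwater().
 */
#include <stdint.h>
#include <stdio.h>

enum cmode { CANT_SEND, MAY_BORROW, CAN_SEND };

struct toy_class {
    enum cmode cmode;
    int64_t tokens, ctokens;    /* rate and ceil token buckets, ns */
    int64_t buffer, cbuffer;    /* their maximum depths, ns */
};

static int hysteresis = 1;      /* a module parameter in the real qdisc */

static int64_t lowater(const struct toy_class *cl)
{
    return hysteresis && cl->cmode != CANT_SEND ? -cl->cbuffer : 0;
}

static int64_t hiwater(const struct toy_class *cl)
{
    return hysteresis && cl->cmode == CAN_SEND ? -cl->buffer : 0;
}

/* diff is the time earned since the last update; on a "blocked" result it
 * is overwritten with how much is still missing before the mode changes. */
static enum cmode class_mode(struct toy_class *cl, int64_t *diff)
{
    int64_t toks;

    if ((toks = cl->ctokens + *diff) < lowater(cl)) {
        *diff = -toks;
        return CANT_SEND;       /* over ceil: must wait */
    }

    if ((toks = cl->tokens + *diff) >= hiwater(cl))
        return CAN_SEND;        /* within rate: may send on its own */

    *diff = -toks;
    return MAY_BORROW;          /* over rate but under ceil: borrow */
}

int main(void)
{
    struct toy_class cl = { .cmode = CAN_SEND, .tokens = -5000, .ctokens = 20000,
                            .buffer = 10000, .cbuffer = 10000 };
    int64_t diff = 1000;

    /* Stays CAN_SEND despite negative tokens: that is the hysteresis. */
    printf("mode=%d diff=%lld\n", class_mode(&cl, &diff), (long long)diff);
    return 0;
}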
592 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl) in htb_activate() argument
594 WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen); in htb_activate()
596 if (!cl->prio_activity) { in htb_activate()
597 cl->prio_activity = 1 << cl->prio; in htb_activate()
598 htb_activate_prios(q, cl); in htb_activate()
610 static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl) in htb_deactivate() argument
612 WARN_ON(!cl->prio_activity); in htb_deactivate()
614 htb_deactivate_prios(q, cl); in htb_deactivate()
615 cl->prio_activity = 0; in htb_deactivate()
624 struct htb_class *cl = htb_classify(skb, sch, &ret); in htb_enqueue() local
626 if (cl == HTB_DIRECT) { in htb_enqueue()
635 } else if (!cl) { in htb_enqueue()
641 } else if ((ret = qdisc_enqueue(skb, cl->leaf.q, in htb_enqueue()
645 cl->drops++; in htb_enqueue()
649 htb_activate(q, cl); in htb_enqueue()
657 static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff) in htb_accnt_tokens() argument
659 s64 toks = diff + cl->tokens; in htb_accnt_tokens()
661 if (toks > cl->buffer) in htb_accnt_tokens()
662 toks = cl->buffer; in htb_accnt_tokens()
663 toks -= (s64) psched_l2t_ns(&cl->rate, bytes); in htb_accnt_tokens()
664 if (toks <= -cl->mbuffer) in htb_accnt_tokens()
665 toks = 1 - cl->mbuffer; in htb_accnt_tokens()
667 cl->tokens = toks; in htb_accnt_tokens()
670 static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff) in htb_accnt_ctokens() argument
672 s64 toks = diff + cl->ctokens; in htb_accnt_ctokens()
674 if (toks > cl->cbuffer) in htb_accnt_ctokens()
675 toks = cl->cbuffer; in htb_accnt_ctokens()
676 toks -= (s64) psched_l2t_ns(&cl->ceil, bytes); in htb_accnt_ctokens()
677 if (toks <= -cl->mbuffer) in htb_accnt_ctokens()
678 toks = 1 - cl->mbuffer; in htb_accnt_ctokens()
680 cl->ctokens = toks; in htb_accnt_ctokens()
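htb_accnt_tokens() and htb_accnt_ctokens() update the two buckets the same way: add the elapsed time diff, cap the result at the burst buffer, subtract the transmission time of the packet at the class rate (psched_l2t_ns() in the kernel), and clamp so the bucket never drops below 1 - mbuffer. A userspace sketch follows, assuming a hand-rolled len_to_ns() in place of psched_l2t_ns() and a plain bytes-per-second rate field.

/* Userspace model of htb_accnt_tokens(): tokens are nanoseconds of
 * transmission time; len_to_ns() stands in for psched_l2t_ns().
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

struct toy_class {
    int64_t tokens;        /* current rate bucket, ns */
    int64_t buffer;        /* burst allowance, ns */
    int64_t mbuffer;       /* hard lower clamp, ns (60 s in the kernel) */
    uint64_t rate_bps;     /* bytes per second */
};

/* Time needed to send 'bytes' at the class rate. */
static int64_t len_to_ns(uint64_t rate_bps, unsigned int bytes)
{
    /* bytes * 1e9 fits comfortably in 64 bits for any real packet size */
    return (int64_t)bytes * NSEC_PER_SEC / (int64_t)rate_bps;
}

static void accnt_tokens(struct toy_class *cl, unsigned int bytes, int64_t diff)
{
    int64_t toks = diff + cl->tokens;

    if (toks > cl->buffer)           /* can't accumulate more than a burst */
        toks = cl->buffer;
    toks -= len_to_ns(cl->rate_bps, bytes);
    if (toks <= -cl->mbuffer)        /* don't build unbounded debt either */
        toks = 1 - cl->mbuffer;

    cl->tokens = toks;
}

int main(void)
{
    struct toy_class cl = { .tokens = 0, .buffer = 2000000,
                            .mbuffer = 60LL * NSEC_PER_SEC,
                            .rate_bps = 125000 };   /* 1 Mbit/s */

    accnt_tokens(&cl, 1500, 500000);   /* 0.5 ms elapsed, one 1500-byte frame */
    printf("tokens now %lld ns\n", (long long)cl.tokens);
    return 0;
}

The ctokens variant referenced above is identical except that it uses the ceil rate and cbuffer.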
698 static void htb_charge_class(struct htb_sched *q, struct htb_class *cl, in htb_charge_class() argument
705 while (cl) { in htb_charge_class()
706 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); in htb_charge_class()
707 if (cl->level >= level) { in htb_charge_class()
708 if (cl->level == level) in htb_charge_class()
709 cl->xstats.lends++; in htb_charge_class()
710 htb_accnt_tokens(cl, bytes, diff); in htb_charge_class()
712 cl->xstats.borrows++; in htb_charge_class()
713 cl->tokens += diff; /* we moved t_c; update tokens */ in htb_charge_class()
715 htb_accnt_ctokens(cl, bytes, diff); in htb_charge_class()
716 cl->t_c = q->now; in htb_charge_class()
718 old_mode = cl->cmode; in htb_charge_class()
720 htb_change_class_mode(q, cl, &diff); in htb_charge_class()
721 if (old_mode != cl->cmode) { in htb_charge_class()
723 htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq); in htb_charge_class()
724 if (cl->cmode != HTB_CAN_SEND) in htb_charge_class()
725 htb_add_to_wait_tree(q, cl, diff); in htb_charge_class()
729 if (cl->level) in htb_charge_class()
730 bstats_update(&cl->bstats, skb); in htb_charge_class()
732 cl = cl->parent; in htb_charge_class()
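htb_charge_class() then walks from the sending class up to the root: ancestors at or above the level that actually supplied the bandwidth are charged against their own rate bucket (the one exactly at that level counts a lend), lower ancestors only refresh their bucket and count a borrow, and every class is always charged against its ceil bucket before its mode is recomputed. The following is a skeleton of that walk with the token and mode bookkeeping reduced to counters and comments; the toy types and counter fields are invented for the illustration.

/* Skeleton of the htb_charge_class() walk: which ancestors are charged
 * against their own rate ("lend") and which merely borrowed.
 */
#include <stdio.h>

struct toy_class {
    struct toy_class *parent;
    int level;
    unsigned long lends, borrows;
};

static void charge_class(struct toy_class *cl, int level /* level that lent */)
{
    while (cl) {
        if (cl->level >= level) {
            if (cl->level == level)
                cl->lends++;       /* this ancestor's own rate paid for it */
            /* htb_accnt_tokens(cl, bytes, diff) would run here */
        } else {
            cl->borrows++;         /* below the lender: only borrowed */
            /* cl->tokens += diff;  bucket merely refreshed */
        }
        /* htb_accnt_ctokens(): everyone is always charged against ceil,
         * then the class mode is recomputed and the wait tree updated. */
        cl = cl->parent;
    }
}

int main(void)
{
    struct toy_class root = { .level = 2 };
    struct toy_class mid  = { .parent = &root, .level = 1 };
    struct toy_class leaf = { .parent = &mid,  .level = 0 };

    charge_class(&leaf, 1);        /* packet sent thanks to level-1 bandwidth */
    printf("leaf borrows=%lu mid lends=%lu root lends=%lu\n",
           leaf.borrows, mid.lends, root.lends);
    return 0;
}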
757 struct htb_class *cl; in htb_do_events() local
764 cl = rb_entry(p, struct htb_class, pq_node); in htb_do_events()
765 if (cl->pq_key > q->now) in htb_do_events()
766 return cl->pq_key; in htb_do_events()
769 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); in htb_do_events()
770 htb_change_class_mode(q, cl, &diff); in htb_do_events()
771 if (cl->cmode != HTB_CAN_SEND) in htb_do_events()
772 htb_add_to_wait_tree(q, cl, diff); in htb_do_events()
792 struct htb_class *cl = in htb_id_find_next_upper() local
795 if (id > cl->common.classid) { in htb_id_find_next_upper()
797 } else if (id < cl->common.classid) { in htb_id_find_next_upper()
852 struct htb_class *cl; in htb_lookup_leaf() local
855 cl = rb_entry(*sp->pptr, struct htb_class, node[prio]); in htb_lookup_leaf()
856 if (!cl->level) in htb_lookup_leaf()
857 return cl; in htb_lookup_leaf()
858 clp = &cl->inner.clprio[prio]; in htb_lookup_leaf()
875 struct htb_class *cl, *start; in htb_dequeue_tree() local
880 start = cl = htb_lookup_leaf(hprio, prio); in htb_dequeue_tree()
884 if (unlikely(!cl)) in htb_dequeue_tree()
892 if (unlikely(cl->leaf.q->q.qlen == 0)) { in htb_dequeue_tree()
894 htb_deactivate(q, cl); in htb_dequeue_tree()
902 if (cl == start) /* fix start if we just deleted it */ in htb_dequeue_tree()
904 cl = next; in htb_dequeue_tree()
908 skb = cl->leaf.q->dequeue(cl->leaf.q); in htb_dequeue_tree()
912 qdisc_warn_nonwc("htb", cl->leaf.q); in htb_dequeue_tree()
913 htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr: in htb_dequeue_tree()
915 cl = htb_lookup_leaf(hprio, prio); in htb_dequeue_tree()
917 } while (cl != start); in htb_dequeue_tree()
920 bstats_update(&cl->bstats, skb); in htb_dequeue_tree()
921 cl->leaf.deficit[level] -= qdisc_pkt_len(skb); in htb_dequeue_tree()
922 if (cl->leaf.deficit[level] < 0) { in htb_dequeue_tree()
923 cl->leaf.deficit[level] += cl->quantum; in htb_dequeue_tree()
924 htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr : in htb_dequeue_tree()
930 if (!cl->leaf.q->q.qlen) in htb_dequeue_tree()
931 htb_deactivate(q, cl); in htb_dequeue_tree()
932 htb_charge_class(q, cl, level, skb); in htb_dequeue_tree()
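On a successful dequeue, htb_dequeue_tree() applies deficit round robin: the packet length is subtracted from the leaf's per-level deficit, and only when the deficit goes negative is it topped up by one quantum and the round-robin pointer advanced to the next class. Below is a tiny self-contained DRR loop in that style, with the rb-tree walk replaced by a two-element array of invented toy leaves and a fake packet source; it shows how a larger quantum yields proportionally more bytes per round.

/* Minimal deficit-round-robin loop in the style of htb_dequeue_tree():
 * the rb-tree walk is replaced by a plain array of leaves.
 */
#include <stdio.h>

struct toy_leaf {
    const char *name;
    int quantum;       /* bytes added when the deficit runs out */
    int deficit;
    long sent;         /* bytes dequeued so far */
};

static int next_pkt_len(void) { return 1500; }   /* fake full-size packets */

int main(void)
{
    struct toy_leaf leaves[] = {
        { "fast", 6000, 0, 0 },
        { "slow", 1500, 0, 0 },
    };
    int cur = 0;

    for (int i = 0; i < 100; i++) {            /* dequeue 100 packets */
        struct toy_leaf *cl = &leaves[cur];
        int len = next_pkt_len();

        cl->sent += len;
        cl->deficit -= len;
        if (cl->deficit < 0) {                 /* used up its share: */
            cl->deficit += cl->quantum;        /* refill... */
            cur = (cur + 1) % 2;               /* ...and move to the next class */
        }
    }
    printf("fast sent %ld bytes, slow sent %ld bytes\n",
           leaves[0].sent, leaves[1].sent);
    return 0;
}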
1000 struct htb_class *cl; in htb_reset() local
1004 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_reset()
1005 if (cl->level) in htb_reset()
1006 memset(&cl->inner, 0, sizeof(cl->inner)); in htb_reset()
1008 if (cl->leaf.q && !q->offload) in htb_reset()
1009 qdisc_reset(cl->leaf.q); in htb_reset()
1011 cl->prio_activity = 0; in htb_reset()
1012 cl->cmode = HTB_CAN_SEND; in htb_reset()
1243 struct htb_class *cl = (struct htb_class *)arg; in htb_dump_class() local
1251 tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT; in htb_dump_class()
1252 tcm->tcm_handle = cl->common.classid; in htb_dump_class()
1253 if (!cl->level && cl->leaf.q) in htb_dump_class()
1254 tcm->tcm_info = cl->leaf.q->handle; in htb_dump_class()
1262 psched_ratecfg_getrate(&opt.rate, &cl->rate); in htb_dump_class()
1263 opt.buffer = PSCHED_NS2TICKS(cl->buffer); in htb_dump_class()
1264 psched_ratecfg_getrate(&opt.ceil, &cl->ceil); in htb_dump_class()
1265 opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer); in htb_dump_class()
1266 opt.quantum = cl->quantum; in htb_dump_class()
1267 opt.prio = cl->prio; in htb_dump_class()
1268 opt.level = cl->level; in htb_dump_class()
1273 if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) && in htb_dump_class()
1274 nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps, in htb_dump_class()
1277 if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) && in htb_dump_class()
1278 nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps, in htb_dump_class()
1290 struct htb_class *cl) in htb_offload_aggregate_stats() argument
1296 gnet_stats_basic_sync_init(&cl->bstats); in htb_offload_aggregate_stats()
1302 while (p && p->level < cl->level) in htb_offload_aggregate_stats()
1305 if (p != cl) in htb_offload_aggregate_stats()
1316 _bstats_update(&cl->bstats, bytes, packets); in htb_offload_aggregate_stats()
1322 struct htb_class *cl = (struct htb_class *)arg; in htb_dump_class_stats() local
1325 .drops = cl->drops, in htb_dump_class_stats()
1326 .overlimits = cl->overlimits, in htb_dump_class_stats()
1330 if (!cl->level && cl->leaf.q) in htb_dump_class_stats()
1331 qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog); in htb_dump_class_stats()
1333 cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens), in htb_dump_class_stats()
1335 cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens), in htb_dump_class_stats()
1339 if (!cl->level) { in htb_dump_class_stats()
1340 if (cl->leaf.q) in htb_dump_class_stats()
1341 cl->bstats = cl->leaf.q->bstats; in htb_dump_class_stats()
1343 gnet_stats_basic_sync_init(&cl->bstats); in htb_dump_class_stats()
1344 _bstats_update(&cl->bstats, in htb_dump_class_stats()
1345 u64_stats_read(&cl->bstats_bias.bytes), in htb_dump_class_stats()
1346 u64_stats_read(&cl->bstats_bias.packets)); in htb_dump_class_stats()
1348 htb_offload_aggregate_stats(q, cl); in htb_dump_class_stats()
1352 if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 || in htb_dump_class_stats()
1353 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || in htb_dump_class_stats()
1357 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); in htb_dump_class_stats()
1398 static struct netdev_queue *htb_offload_get_queue(struct htb_class *cl) in htb_offload_get_queue() argument
1402 queue = cl->leaf.offload_queue; in htb_offload_get_queue()
1403 if (!(cl->leaf.q->flags & TCQ_F_BUILTIN)) in htb_offload_get_queue()
1404 WARN_ON(cl->leaf.q->dev_queue != queue); in htb_offload_get_queue()
1445 struct htb_class *cl = (struct htb_class *)arg; in htb_graft() local
1449 if (cl->level) in htb_graft()
1453 dev_queue = htb_offload_get_queue(cl); in htb_graft()
1457 cl->common.classid, extack); in htb_graft()
1468 *old = qdisc_replace(sch, new, &cl->leaf.q); in htb_graft()
1480 struct htb_class *cl = (struct htb_class *)arg; in htb_leaf() local
1481 return !cl->level ? cl->leaf.q : NULL; in htb_leaf()
1486 struct htb_class *cl = (struct htb_class *)arg; in htb_qlen_notify() local
1488 htb_deactivate(qdisc_priv(sch), cl); in htb_qlen_notify()
1491 static inline int htb_parent_last_child(struct htb_class *cl) in htb_parent_last_child() argument
1493 if (!cl->parent) in htb_parent_last_child()
1496 if (cl->parent->children > 1) in htb_parent_last_child()
1502 static void htb_parent_to_leaf(struct Qdisc *sch, struct htb_class *cl, in htb_parent_to_leaf() argument
1506 struct htb_class *parent = cl->parent; in htb_parent_to_leaf()
1508 WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity); in htb_parent_to_leaf()
1522 parent->leaf.offload_queue = cl->leaf.offload_queue; in htb_parent_to_leaf()
1538 static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl, in htb_destroy_class_offload() argument
1544 struct Qdisc *q = cl->leaf.q; in htb_destroy_class_offload()
1548 if (cl->level) in htb_destroy_class_offload()
1552 dev_queue = htb_offload_get_queue(cl); in htb_destroy_class_offload()
1565 if (cl->parent) { in htb_destroy_class_offload()
1566 _bstats_update(&cl->parent->bstats_bias, in htb_destroy_class_offload()
1575 .classid = cl->common.classid, in htb_destroy_class_offload()
1590 if (!err && offload_opt.classid != TC_H_MIN(cl->common.classid)) { in htb_destroy_class_offload()
1595 htb_offload_move_qdisc(sch, moved_cl, cl, destroying); in htb_destroy_class_offload()
1601 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) in htb_destroy_class() argument
1603 if (!cl->level) { in htb_destroy_class()
1604 WARN_ON(!cl->leaf.q); in htb_destroy_class()
1605 qdisc_put(cl->leaf.q); in htb_destroy_class()
1607 gen_kill_estimator(&cl->rate_est); in htb_destroy_class()
1608 tcf_block_put(cl->block); in htb_destroy_class()
1609 kfree(cl); in htb_destroy_class()
1619 struct htb_class *cl; in htb_destroy() local
1632 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_destroy()
1633 tcf_block_put(cl->block); in htb_destroy()
1634 cl->block = NULL; in htb_destroy()
1642 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], in htb_destroy()
1647 htb_destroy_class(sch, cl); in htb_destroy()
1653 if (cl->level) in htb_destroy()
1658 last_child = htb_parent_last_child(cl); in htb_destroy()
1659 htb_destroy_class_offload(sch, cl, last_child, in htb_destroy()
1662 &cl->common); in htb_destroy()
1663 if (cl->parent) in htb_destroy()
1664 cl->parent->children--; in htb_destroy()
1666 htb_parent_to_leaf(sch, cl, NULL); in htb_destroy()
1667 htb_destroy_class(sch, cl); in htb_destroy()
1694 struct htb_class *cl = (struct htb_class *)arg; in htb_delete() local
1703 if (cl->children || qdisc_class_in_use(&cl->common)) { in htb_delete()
1708 if (!cl->level && htb_parent_last_child(cl)) in htb_delete()
1712 err = htb_destroy_class_offload(sch, cl, last_child, false, in htb_delete()
1722 dev_queue = htb_offload_get_queue(cl); in htb_delete()
1725 cl->parent->common.classid, in htb_delete()
1733 if (!cl->level) in htb_delete()
1734 qdisc_purge_queue(cl->leaf.q); in htb_delete()
1737 qdisc_class_hash_remove(&q->clhash, &cl->common); in htb_delete()
1738 if (cl->parent) in htb_delete()
1739 cl->parent->children--; in htb_delete()
1741 if (cl->prio_activity) in htb_delete()
1742 htb_deactivate(q, cl); in htb_delete()
1744 if (cl->cmode != HTB_CAN_SEND) in htb_delete()
1745 htb_safe_rb_erase(&cl->pq_node, in htb_delete()
1746 &q->hlevel[cl->level].wait_pq); in htb_delete()
1749 htb_parent_to_leaf(sch, cl, new_q); in htb_delete()
1753 htb_destroy_class(sch, cl); in htb_delete()
1763 struct htb_class *cl = (struct htb_class *)*arg, *parent; in htb_change_class() local
1816 if (!cl) { /* new class */ in htb_change_class()
1846 cl = kzalloc(sizeof(*cl), GFP_KERNEL); in htb_change_class()
1847 if (!cl) in htb_change_class()
1850 gnet_stats_basic_sync_init(&cl->bstats); in htb_change_class()
1851 gnet_stats_basic_sync_init(&cl->bstats_bias); in htb_change_class()
1853 err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack); in htb_change_class()
1855 kfree(cl); in htb_change_class()
1859 err = gen_new_estimator(&cl->bstats, NULL, in htb_change_class()
1860 &cl->rate_est, in htb_change_class()
1868 cl->children = 0; in htb_change_class()
1869 RB_CLEAR_NODE(&cl->pq_node); in htb_change_class()
1872 RB_CLEAR_NODE(&cl->node[prio]); in htb_change_class()
1874 cl->common.classid = classid; in htb_change_class()
1891 .classid = cl->common.classid, in htb_change_class()
1914 .classid = cl->common.classid, in htb_change_class()
1964 cl->leaf.q = new_q ? new_q : &noop_qdisc; in htb_change_class()
1966 cl->leaf.offload_queue = dev_queue; in htb_change_class()
1968 cl->parent = parent; in htb_change_class()
1971 cl->tokens = PSCHED_TICKS2NS(hopt->buffer); in htb_change_class()
1972 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer); in htb_change_class()
1973 cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */ in htb_change_class()
1974 cl->t_c = ktime_get_ns(); in htb_change_class()
1975 cl->cmode = HTB_CAN_SEND; in htb_change_class()
1978 qdisc_class_hash_insert(&q->clhash, &cl->common); in htb_change_class()
1981 if (cl->leaf.q != &noop_qdisc) in htb_change_class()
1982 qdisc_hash_add(cl->leaf.q, true); in htb_change_class()
1985 err = gen_replace_estimator(&cl->bstats, NULL, in htb_change_class()
1986 &cl->rate_est, in htb_change_class()
1999 .classid = cl->common.classid, in htb_change_class()
2020 psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64); in htb_change_class()
2021 psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64); in htb_change_class()
2026 if (!cl->level) { in htb_change_class()
2027 u64 quantum = cl->rate.rate_bytes_ps; in htb_change_class()
2030 cl->quantum = min_t(u64, quantum, INT_MAX); in htb_change_class()
2032 if (!hopt->quantum && cl->quantum < 1000) { in htb_change_class()
2034 cl->quantum = 1000; in htb_change_class()
2036 if (!hopt->quantum && cl->quantum > 200000) { in htb_change_class()
2038 cl->quantum = 200000; in htb_change_class()
2041 cl->quantum = hopt->quantum; in htb_change_class()
2042 if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO) in htb_change_class()
2043 cl->prio = TC_HTB_NUMPRIO - 1; in htb_change_class()
2046 cl->buffer = PSCHED_TICKS2NS(hopt->buffer); in htb_change_class()
2047 cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer); in htb_change_class()
2055 cl->common.classid, (warn == -1 ? "small" : "big")); in htb_change_class()
2059 *arg = (unsigned long)cl; in htb_change_class()
2063 gen_kill_estimator(&cl->rate_est); in htb_change_class()
2065 tcf_block_put(cl->block); in htb_change_class()
2066 kfree(cl); in htb_change_class()
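For leaf classes, the htb_change_class() lines above derive the DRR quantum from the configured rate (in the kernel the byte rate is first divided by the qdisc-wide r2q factor), clamp it to the [1000, 200000] byte range with a "small"/"big" warning unless the user passed an explicit quantum, and cap prio at TC_HTB_NUMPRIO - 1. A small sketch of the quantum part follows; compute_quantum() and its parameters are invented names for the illustration.

/* Model of the quantum fixup done for leaf classes in htb_change_class();
 * warn mirrors the "quantum of class X is small/big" message.
 */
#include <stdint.h>
#include <stdio.h>
#include <limits.h>

static int compute_quantum(uint64_t rate_bytes_ps, unsigned int rate2quantum,
                           unsigned int user_quantum, int *warn)
{
    uint64_t quantum = rate_bytes_ps / rate2quantum;
    int q = quantum > INT_MAX ? INT_MAX : (int)quantum;

    *warn = 0;
    if (!user_quantum && q < 1000) {
        *warn = -1;               /* "quantum ... is small" */
        q = 1000;
    }
    if (!user_quantum && q > 200000) {
        *warn = 1;                /* "quantum ... is big" */
        q = 200000;
    }
    if (user_quantum)
        q = user_quantum;         /* explicit value always wins */
    return q;
}

int main(void)
{
    int warn;
    /* 1 Gbit/s = 125000000 B/s with an r2q of 10 */
    int q = compute_quantum(125000000ULL, 10, 0, &warn);
    printf("quantum=%d warn=%d\n", q, warn);    /* clamped to 200000, warn=1 */
    return 0;
}

With the commonly used r2q of 10 the two clamps correspond to roughly 80 kbit/s and 16 Mbit/s, which is why explicit quantum settings are usual on very slow or very fast classes.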
2075 struct htb_class *cl = (struct htb_class *)arg; in htb_tcf_block() local
2077 return cl ? cl->block : q->block; in htb_tcf_block()
2083 struct htb_class *cl = htb_find(classid, sch); in htb_bind_filter() local
2094 if (cl) in htb_bind_filter()
2095 qdisc_class_get(&cl->common); in htb_bind_filter()
2096 return (unsigned long)cl; in htb_bind_filter()
2101 struct htb_class *cl = (struct htb_class *)arg; in htb_unbind_filter() local
2103 qdisc_class_put(&cl->common); in htb_unbind_filter()
2109 struct htb_class *cl; in htb_walk() local
2116 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_walk()
2117 if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg)) in htb_walk()