Lines matching references to q in the HTB packet scheduler (net/sched/sch_htb.c):

126 			struct Qdisc	*q;  member
188 struct htb_sched *q = qdisc_priv(sch); in htb_find() local
191 clc = qdisc_class_find(&q->clhash, handle); in htb_find()
222 struct htb_sched *q = qdisc_priv(sch); in htb_classify() local
241 tcf = rcu_dereference_bh(q->filter_list); in htb_classify()
272 cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch); in htb_classify()
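The htb_find()/htb_classify() lines above share one pattern: qdisc_priv(sch) recovers the HTB-private state (struct htb_sched) co-allocated with the generic struct Qdisc, and when no filter matches, htb_classify() falls back to the default class built from the qdisc's major handle and q->defcls. A minimal user-space sketch of that layout and fallback, using illustrative stand-in types (fake_qdisc and fake_htb_sched are not the kernel's structures):

```c
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for struct Qdisc / struct htb_sched. */
struct fake_qdisc {
	unsigned int handle;          /* e.g. qdisc 1: encoded as 0x00010000 */
	/* ... generic qdisc fields ... */
	char priv[];                  /* scheduler-private area follows      */
};

struct fake_htb_sched {
	unsigned int defcls;          /* minor number of the default class   */
};

/* Sketch of qdisc_priv(): private data lives right after the header. */
static void *fake_qdisc_priv(struct fake_qdisc *sch)
{
	return sch->priv;
}

/* Sketch of the defcls fallback in htb_classify():
 * TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls)
 */
static unsigned int default_classid(struct fake_qdisc *sch)
{
	struct fake_htb_sched *q = fake_qdisc_priv(sch);

	return (sch->handle & 0xffff0000U) | (q->defcls & 0x0000ffffU);
}

int main(void)
{
	struct fake_qdisc *sch = calloc(1, sizeof(*sch) + sizeof(struct fake_htb_sched));
	struct fake_htb_sched *q = fake_qdisc_priv(sch);

	sch->handle = 0x00010000;     /* qdisc 1:           */
	q->defcls = 0x30;             /* default class 1:30 */

	printf("default classid: %x:%x\n",
	       default_classid(sch) >> 16, default_classid(sch) & 0xffff);
	free(sch);
	return 0;
}
```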
316 static void htb_add_to_wait_tree(struct htb_sched *q, in htb_add_to_wait_tree() argument
319 struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL; in htb_add_to_wait_tree()
321 cl->pq_key = q->now + delay; in htb_add_to_wait_tree()
322 if (cl->pq_key == q->now) in htb_add_to_wait_tree()
326 if (q->near_ev_cache[cl->level] > cl->pq_key) in htb_add_to_wait_tree()
327 q->near_ev_cache[cl->level] = cl->pq_key; in htb_add_to_wait_tree()
339 rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq); in htb_add_to_wait_tree()
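htb_add_to_wait_tree() keys a class in the per-level wait queue on pq_key = q->now + delay, bumping the key by one when the delay rounds to zero so the event never lands exactly at q->now, and keeps q->near_ev_cache[level] as the earliest pending wakeup so the dequeue path can skip event processing cheaply. A sketch of that bookkeeping, assuming a flat array in place of the kernel's rbtree (names are illustrative):

```c
#include <stdint.h>
#include <stdio.h>

#define MAXCLS 8

/* Illustrative stand-in for one level's wait queue: pq_key values only. */
struct wait_level {
	int64_t keys[MAXCLS];
	int     nkeys;
	int64_t near_ev_cache;   /* earliest pending pq_key on this level */
};

/* Mirrors the pq_key/near_ev_cache logic in htb_add_to_wait_tree(). */
static void add_to_wait(struct wait_level *lvl, int64_t now, int64_t delay)
{
	int64_t pq_key = now + delay;

	if (pq_key == now)       /* never schedule an event for "right now" */
		pq_key++;

	if (lvl->near_ev_cache > pq_key)
		lvl->near_ev_cache = pq_key;

	lvl->keys[lvl->nkeys++] = pq_key;   /* the kernel inserts into an rbtree */
}

int main(void)
{
	struct wait_level lvl = { .near_ev_cache = INT64_MAX };

	add_to_wait(&lvl, 1000, 0);      /* zero delay still lands in the future */
	add_to_wait(&lvl, 1000, 250);
	printf("earliest event: %lld\n", (long long)lvl.near_ev_cache);
	return 0;
}
```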
363 static inline void htb_add_class_to_row(struct htb_sched *q, in htb_add_class_to_row() argument
366 q->row_mask[cl->level] |= mask; in htb_add_class_to_row()
370 htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio); in htb_add_class_to_row()
395 static inline void htb_remove_class_from_row(struct htb_sched *q, in htb_remove_class_from_row() argument
399 struct htb_level *hlevel = &q->hlevel[cl->level]; in htb_remove_class_from_row()
413 q->row_mask[cl->level] &= ~m; in htb_remove_class_from_row()
425 static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl) in htb_activate_prios() argument
453 htb_add_class_to_row(q, cl, mask); in htb_activate_prios()
465 static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl) in htb_deactivate_prios() argument
499 htb_remove_class_from_row(q, cl, mask); in htb_deactivate_prios()
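htb_add_class_to_row() and htb_remove_class_from_row() maintain q->row_mask[level] as a bitmap of priorities that currently have at least one active class; htb_activate_prios()/htb_deactivate_prios() walk the class tree and feed the resulting masks into those helpers so the dequeue loop only visits non-empty rows. A minimal sketch of the mask updates, with an assumed depth constant (the kernel defines its own limits):

```c
#include <stdio.h>

#define MAX_LEVELS 8   /* illustrative depth, not the kernel constant */

static unsigned int row_mask[MAX_LEVELS];

/* Roughly what htb_add_class_to_row() does with its priority mask. */
static void add_class_to_row(int level, unsigned int mask)
{
	row_mask[level] |= mask;
}

/* Roughly what htb_remove_class_from_row() does once the per-priority
 * trees become empty: clear those priorities from the row mask.
 */
static void remove_class_from_row(int level, unsigned int emptied_mask)
{
	row_mask[level] &= ~emptied_mask;
}

int main(void)
{
	add_class_to_row(0, 1u << 3);          /* class active at prio 3, level 0 */
	add_class_to_row(0, 1u << 1);
	printf("row_mask[0] = %#x\n", row_mask[0]);

	remove_class_from_row(0, 1u << 3);     /* prio-3 tree went empty */
	printf("row_mask[0] = %#x\n", row_mask[0]);
	return 0;
}
```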
561 htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff) in htb_change_class_mode() argument
570 q->overlimits++; in htb_change_class_mode()
575 htb_deactivate_prios(q, cl); in htb_change_class_mode()
578 htb_activate_prios(q, cl); in htb_change_class_mode()
592 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl) in htb_activate() argument
594 WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen); in htb_activate()
598 htb_activate_prios(q, cl); in htb_activate()
610 static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl) in htb_deactivate() argument
614 htb_deactivate_prios(q, cl); in htb_deactivate()
623 struct htb_sched *q = qdisc_priv(sch); in htb_enqueue() local
628 if (q->direct_queue.qlen < q->direct_qlen) { in htb_enqueue()
629 __qdisc_enqueue_tail(skb, &q->direct_queue); in htb_enqueue()
630 q->direct_pkts++; in htb_enqueue()
641 } else if ((ret = qdisc_enqueue(skb, cl->leaf.q, in htb_enqueue()
649 htb_activate(q, cl); in htb_enqueue()
653 sch->q.qlen++; in htb_enqueue()
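htb_enqueue() has two destinations: unclassified ("direct") packets go to q->direct_queue as long as it stays below q->direct_qlen, while classified packets are handed to the leaf class's own qdisc, activating the class on its first backlogged packet; either way sch->q.qlen is incremented on success. A hedged control-flow sketch under stand-in types (not the kernel API):

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the structures involved in htb_enqueue(). */
struct fake_leaf  { int qlen; };
struct fake_class { struct fake_leaf leaf; bool active; };
struct fake_sched {
	int direct_qlen;        /* limit for the direct (unclassified) queue */
	int direct_queue_len;
	int direct_pkts;
	int qlen;               /* sch->q.qlen equivalent                    */
};

/* Sketch of the enqueue decision: direct queue vs. leaf qdisc. */
static bool enqueue(struct fake_sched *q, struct fake_class *cl)
{
	if (!cl) {                               /* "direct" traffic */
		if (q->direct_queue_len >= q->direct_qlen)
			return false;            /* direct queue full: drop      */
		q->direct_queue_len++;
		q->direct_pkts++;
	} else {
		cl->leaf.qlen++;                 /* hand off to the leaf qdisc   */
		if (!cl->active)
			cl->active = true;       /* first packet: activate class */
	}
	q->qlen++;
	return true;
}

int main(void)
{
	struct fake_sched q = { .direct_qlen = 1 };
	struct fake_class cl = { 0 };

	enqueue(&q, NULL);       /* goes to the direct queue   */
	enqueue(&q, NULL);       /* direct queue full: dropped */
	enqueue(&q, &cl);        /* leaf enqueue + activation  */
	printf("qlen=%d direct_pkts=%d active=%d\n", q.qlen, q.direct_pkts, cl.active);
	return 0;
}
```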
698 static void htb_charge_class(struct htb_sched *q, struct htb_class *cl, in htb_charge_class() argument
706 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); in htb_charge_class()
716 cl->t_c = q->now; in htb_charge_class()
720 htb_change_class_mode(q, cl, &diff); in htb_charge_class()
723 htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq); in htb_charge_class()
725 htb_add_to_wait_tree(q, cl, diff); in htb_charge_class()
746 static s64 htb_do_events(struct htb_sched *q, const int level, in htb_do_events() argument
754 struct rb_root *wait_pq = &q->hlevel[level].wait_pq; in htb_do_events()
765 if (cl->pq_key > q->now) in htb_do_events()
769 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); in htb_do_events()
770 htb_change_class_mode(q, cl, &diff); in htb_do_events()
772 htb_add_to_wait_tree(q, cl, diff); in htb_do_events()
776 if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) { in htb_do_events()
778 q->warned |= HTB_WARN_TOOMANYEVENTS; in htb_do_events()
781 return q->now; in htb_do_events()
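htb_charge_class() and htb_do_events() start from the same accounting step: the time elapsed since the class was last charged, clamped to cl->mbuffer, is fed to htb_change_class_mode() and may become the delay for re-inserting the class into the wait tree. A small sketch of that clamp-and-charge step; the token arithmetic is deliberately simplified (one token per nanosecond), and only the clamping mirrors the listed lines:

```c
#include <stdint.h>
#include <stdio.h>

/* min_t(s64, a, b) equivalent. */
static int64_t min_s64(int64_t a, int64_t b)
{
	return a < b ? a : b;
}

struct fake_class {
	int64_t t_c;       /* time of last token update            */
	int64_t mbuffer;   /* cap on how much idle time can accrue */
	int64_t tokens;
};

/* Sketch of the shared step: diff = min(now - t_c, mbuffer), then tokens
 * are updated and t_c advanced.  The real code converts diff to tokens per
 * rate, may change the class mode, and can re-arm the wait tree with the
 * resulting delay.
 */
static int64_t charge(struct fake_class *cl, int64_t now)
{
	int64_t diff = min_s64(now - cl->t_c, cl->mbuffer);

	cl->tokens += diff;        /* simplified: 1 token per ns of idle time */
	cl->t_c = now;
	return diff;
}

int main(void)
{
	struct fake_class cl = { .t_c = 0, .mbuffer = 1000 };

	printf("diff=%lld\n", (long long)charge(&cl, 500));    /* 500           */
	printf("diff=%lld\n", (long long)charge(&cl, 5000));   /* clamped: 1000 */
	return 0;
}
```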
871 static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio, in htb_dequeue_tree() argument
876 struct htb_level *hlevel = &q->hlevel[level]; in htb_dequeue_tree()
892 if (unlikely(cl->leaf.q->q.qlen == 0)) { in htb_dequeue_tree()
894 htb_deactivate(q, cl); in htb_dequeue_tree()
897 if ((q->row_mask[level] & (1 << prio)) == 0) in htb_dequeue_tree()
908 skb = cl->leaf.q->dequeue(cl->leaf.q); in htb_dequeue_tree()
912 qdisc_warn_nonwc("htb", cl->leaf.q); in htb_dequeue_tree()
914 &q->hlevel[0].hprio[prio].ptr); in htb_dequeue_tree()
925 &q->hlevel[0].hprio[prio].ptr); in htb_dequeue_tree()
930 if (!cl->leaf.q->q.qlen) in htb_dequeue_tree()
931 htb_deactivate(q, cl); in htb_dequeue_tree()
932 htb_charge_class(q, cl, level, skb); in htb_dequeue_tree()
940 struct htb_sched *q = qdisc_priv(sch); in htb_dequeue() local
946 skb = __qdisc_dequeue_head(&q->direct_queue); in htb_dequeue()
951 sch->q.qlen--; in htb_dequeue()
955 if (!sch->q.qlen) in htb_dequeue()
957 q->now = ktime_get_ns(); in htb_dequeue()
960 next_event = q->now + 5LLU * NSEC_PER_SEC; in htb_dequeue()
965 s64 event = q->near_ev_cache[level]; in htb_dequeue()
967 if (q->now >= event) { in htb_dequeue()
968 event = htb_do_events(q, level, start_at); in htb_dequeue()
970 event = q->now + NSEC_PER_SEC; in htb_dequeue()
971 q->near_ev_cache[level] = event; in htb_dequeue()
977 m = ~q->row_mask[level]; in htb_dequeue()
982 skb = htb_dequeue_tree(q, prio, level); in htb_dequeue()
987 if (likely(next_event > q->now)) in htb_dequeue()
988 qdisc_watchdog_schedule_ns(&q->watchdog, next_event); in htb_dequeue()
990 schedule_work(&q->work); in htb_dequeue()
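htb_dequeue() first drains q->direct_queue, then per level compares q->now against q->near_ev_cache[level] to decide whether htb_do_events() must run, scans only the priorities set in q->row_mask[level], and if nothing could be sent arms the watchdog for the next event (bounded by q->now + 5 s), deferring to a workqueue when that event is already overdue. A heavily simplified sketch of the scan-and-schedule skeleton; do_events() and dequeue_tree() below are placeholders, not the kernel functions:

```c
#include <stdint.h>
#include <stdio.h>

#define LEVELS 8
#define PRIOS  8
#define NSEC_PER_SEC 1000000000LL

struct fake_htb {
	int64_t      now;
	int64_t      near_ev_cache[LEVELS];
	unsigned int row_mask[LEVELS];
};

/* Placeholder for htb_do_events(): pretend the next event is 1 ms away. */
static int64_t do_events(struct fake_htb *q, int level)
{
	(void)level;
	return q->now + 1000000;
}

/* Placeholder for htb_dequeue_tree(): nothing to send in this sketch. */
static int dequeue_tree(struct fake_htb *q, int prio, int level)
{
	(void)q; (void)prio; (void)level;
	return 0;
}

/* Skeleton of the level/priority scan in htb_dequeue(). */
static int64_t dequeue_scan(struct fake_htb *q)
{
	int64_t next_event = q->now + 5LL * NSEC_PER_SEC;   /* upper bound */
	int level, prio;

	for (level = 0; level < LEVELS; level++) {
		int64_t event = q->near_ev_cache[level];

		if (q->now >= event) {
			event = do_events(q, level);         /* process due events  */
			q->near_ev_cache[level] = event;
		}
		if (next_event > event)
			next_event = event;

		for (prio = 0; prio < PRIOS; prio++) {
			if (!(q->row_mask[level] & (1u << prio)))
				continue;                    /* no active class here */
			if (dequeue_tree(q, prio, level))
				return 0;                    /* packet sent          */
		}
	}
	return next_event;       /* when to wake the watchdog */
}

int main(void)
{
	struct fake_htb q = { .now = 0 };

	printf("next event at %lld ns\n", (long long)dequeue_scan(&q));
	return 0;
}
```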
999 struct htb_sched *q = qdisc_priv(sch); in htb_reset() local
1003 for (i = 0; i < q->clhash.hashsize; i++) { in htb_reset()
1004 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_reset()
1008 if (cl->leaf.q && !q->offload) in htb_reset()
1009 qdisc_reset(cl->leaf.q); in htb_reset()
1015 qdisc_watchdog_cancel(&q->watchdog); in htb_reset()
1016 __qdisc_reset_queue(&q->direct_queue); in htb_reset()
1017 memset(q->hlevel, 0, sizeof(q->hlevel)); in htb_reset()
1018 memset(q->row_mask, 0, sizeof(q->row_mask)); in htb_reset()
1034 struct htb_sched *q = container_of(work, struct htb_sched, work); in htb_work_func() local
1035 struct Qdisc *sch = q->watchdog.qdisc; in htb_work_func()
1052 struct htb_sched *q = qdisc_priv(sch); in htb_init() local
1059 qdisc_watchdog_init(&q->watchdog, sch); in htb_init()
1060 INIT_WORK(&q->work, htb_work_func); in htb_init()
1065 err = tcf_block_get(&q->block, &q->filter_list, sch, extack); in htb_init()
1094 q->num_direct_qdiscs = dev->real_num_tx_queues; in htb_init()
1095 q->direct_qdiscs = kcalloc(q->num_direct_qdiscs, in htb_init()
1096 sizeof(*q->direct_qdiscs), in htb_init()
1098 if (!q->direct_qdiscs) in htb_init()
1102 err = qdisc_class_hash_init(&q->clhash); in htb_init()
1107 q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]); in htb_init()
1109 q->direct_qlen = qdisc_dev(sch)->tx_queue_len; in htb_init()
1111 if ((q->rate2quantum = gopt->rate2quantum) < 1) in htb_init()
1112 q->rate2quantum = 1; in htb_init()
1113 q->defcls = gopt->defcls; in htb_init()
1118 for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) { in htb_init()
1128 q->direct_qdiscs[ntx] = qdisc; in htb_init()
1137 .classid = TC_H_MIN(q->defcls), in htb_init()
1147 q->offload = true; in htb_init()
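The htb_init() lines show the defaults: q->direct_qlen comes from TCA_HTB_DIRECT_QLEN when supplied, otherwise from the device's tx_queue_len; q->rate2quantum is clamped to at least 1; q->defcls is copied from the global options. A small sketch of that defaulting, with stand-in types in place of the netlink attributes:

```c
#include <stdio.h>

struct fake_gopt { unsigned int rate2quantum, defcls; };

struct fake_htb_sched {
	unsigned int direct_qlen;
	unsigned int rate2quantum;
	unsigned int defcls;
};

/* Sketch of the defaulting in htb_init(): direct_qlen falls back to the
 * device tx queue length, and rate2quantum is clamped to at least 1 so
 * later divisions by it are safe.
 */
static void init_defaults(struct fake_htb_sched *q,
			  const struct fake_gopt *gopt,
			  int have_direct_qlen, unsigned int direct_qlen_attr,
			  unsigned int dev_tx_queue_len)
{
	if (have_direct_qlen)
		q->direct_qlen = direct_qlen_attr;
	else
		q->direct_qlen = dev_tx_queue_len;

	q->rate2quantum = gopt->rate2quantum;
	if (q->rate2quantum < 1)
		q->rate2quantum = 1;

	q->defcls = gopt->defcls;
}

int main(void)
{
	struct fake_gopt gopt = { .rate2quantum = 0, .defcls = 0x30 };
	struct fake_htb_sched q;

	init_defaults(&q, &gopt, 0, 0, 1000);
	printf("direct_qlen=%u rate2quantum=%u defcls=%#x\n",
	       q.direct_qlen, q.rate2quantum, q.defcls);
	return 0;
}
```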
1155 struct htb_sched *q = qdisc_priv(sch); in htb_attach_offload() local
1158 for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) { in htb_attach_offload()
1159 struct Qdisc *old, *qdisc = q->direct_qdiscs[ntx]; in htb_attach_offload()
1165 for (ntx = q->num_direct_qdiscs; ntx < dev->num_tx_queues; ntx++) { in htb_attach_offload()
1172 kfree(q->direct_qdiscs); in htb_attach_offload()
1173 q->direct_qdiscs = NULL; in htb_attach_offload()
1194 struct htb_sched *q = qdisc_priv(sch); in htb_attach() local
1196 if (q->offload) in htb_attach()
1204 struct htb_sched *q = qdisc_priv(sch); in htb_dump() local
1208 if (q->offload) in htb_dump()
1213 sch->qstats.overlimits = q->overlimits; in htb_dump()
1218 gopt.direct_pkts = q->direct_pkts; in htb_dump()
1220 gopt.rate2quantum = q->rate2quantum; in htb_dump()
1221 gopt.defcls = q->defcls; in htb_dump()
1228 nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen)) in htb_dump()
1230 if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD)) in htb_dump()
1244 struct htb_sched *q = qdisc_priv(sch); in htb_dump_class() local
1253 if (!cl->level && cl->leaf.q) in htb_dump_class()
1254 tcm->tcm_info = cl->leaf.q->handle; in htb_dump_class()
1271 if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD)) in htb_dump_class()
1289 static void htb_offload_aggregate_stats(struct htb_sched *q, in htb_offload_aggregate_stats() argument
1298 for (i = 0; i < q->clhash.hashsize; i++) { in htb_offload_aggregate_stats()
1299 hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) { in htb_offload_aggregate_stats()
1311 bytes += u64_stats_read(&c->leaf.q->bstats.bytes); in htb_offload_aggregate_stats()
1312 packets += u64_stats_read(&c->leaf.q->bstats.packets); in htb_offload_aggregate_stats()
1323 struct htb_sched *q = qdisc_priv(sch); in htb_dump_class_stats() local
1330 if (!cl->level && cl->leaf.q) in htb_dump_class_stats()
1331 qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog); in htb_dump_class_stats()
1338 if (q->offload) { in htb_dump_class_stats()
1340 if (cl->leaf.q) in htb_dump_class_stats()
1341 cl->bstats = cl->leaf.q->bstats; in htb_dump_class_stats()
1348 htb_offload_aggregate_stats(q, cl); in htb_dump_class_stats()
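With offload enabled, htb_dump_class_stats() takes a leaf's byte/packet counters straight from its leaf qdisc's bstats, and for inner classes htb_offload_aggregate_stats() sums the bstats of every leaf below them. A trimmed sketch of that aggregation; the descendant test is omitted and the types are stand-ins:

```c
#include <stdint.h>
#include <stdio.h>

struct fake_leaf_stats { uint64_t bytes, packets; };

/* Sketch of htb_offload_aggregate_stats(): an inner class has no counters
 * of its own under offload, so its stats are the sum over the leaf qdiscs
 * beneath it (here simply every entry of the array).
 */
static void aggregate(const struct fake_leaf_stats *leaves, int nleaves,
		      uint64_t *bytes, uint64_t *packets)
{
	int i;

	*bytes = 0;
	*packets = 0;
	for (i = 0; i < nleaves; i++) {
		*bytes += leaves[i].bytes;
		*packets += leaves[i].packets;
	}
}

int main(void)
{
	struct fake_leaf_stats leaves[] = { { 1500, 1 }, { 3000, 2 } };
	uint64_t b, p;

	aggregate(leaves, 2, &b, &p);
	printf("bytes=%llu packets=%llu\n",
	       (unsigned long long)b, (unsigned long long)p);
	return 0;
}
```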
1365 struct htb_sched *q = qdisc_priv(sch); in htb_select_queue() local
1368 if (!q->offload) in htb_select_queue()
1403 if (!(cl->leaf.q->flags & TCQ_F_BUILTIN)) in htb_offload_get_queue()
1404 WARN_ON(cl->leaf.q->dev_queue != queue); in htb_offload_get_queue()
1424 WARN_ON(qdisc != cl_old->leaf.q); in htb_offload_move_qdisc()
1427 if (!(cl_old->leaf.q->flags & TCQ_F_BUILTIN)) in htb_offload_move_qdisc()
1428 cl_old->leaf.q->dev_queue = queue_new; in htb_offload_move_qdisc()
1434 qdisc = dev_graft_qdisc(queue_new, cl_old->leaf.q); in htb_offload_move_qdisc()
1446 struct htb_sched *q = qdisc_priv(sch); in htb_graft() local
1452 if (q->offload) in htb_graft()
1462 if (q->offload) { in htb_graft()
1468 *old = qdisc_replace(sch, new, &cl->leaf.q); in htb_graft()
1470 if (q->offload) { in htb_graft()
1481 return !cl->level ? cl->leaf.q : NULL; in htb_leaf()
1505 struct htb_sched *q = qdisc_priv(sch); in htb_parent_to_leaf() local
1508 WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity); in htb_parent_to_leaf()
1512 &q->hlevel[parent->level].wait_pq); in htb_parent_to_leaf()
1516 parent->leaf.q = new_q ? new_q : &noop_qdisc; in htb_parent_to_leaf()
1521 if (q->offload) in htb_parent_to_leaf()
1544 struct Qdisc *q = cl->leaf.q; in htb_destroy_class_offload() local
1551 WARN_ON(!q); in htb_destroy_class_offload()
1562 WARN_ON(old != q); in htb_destroy_class_offload()
1567 u64_stats_read(&q->bstats.bytes), in htb_destroy_class_offload()
1568 u64_stats_read(&q->bstats.packets)); in htb_destroy_class_offload()
1604 WARN_ON(!cl->leaf.q); in htb_destroy_class()
1605 qdisc_put(cl->leaf.q); in htb_destroy_class()
1616 struct htb_sched *q = qdisc_priv(sch); in htb_destroy() local
1622 cancel_work_sync(&q->work); in htb_destroy()
1623 qdisc_watchdog_cancel(&q->watchdog); in htb_destroy()
1629 tcf_block_put(q->block); in htb_destroy()
1631 for (i = 0; i < q->clhash.hashsize; i++) { in htb_destroy()
1632 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_destroy()
1641 for (i = 0; i < q->clhash.hashsize; i++) { in htb_destroy()
1642 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], in htb_destroy()
1646 if (!q->offload) { in htb_destroy()
1661 qdisc_class_hash_remove(&q->clhash, in htb_destroy()
1673 qdisc_class_hash_destroy(&q->clhash); in htb_destroy()
1674 __qdisc_reset_queue(&q->direct_queue); in htb_destroy()
1676 if (q->offload) { in htb_destroy()
1683 if (!q->direct_qdiscs) in htb_destroy()
1685 for (i = 0; i < q->num_direct_qdiscs && q->direct_qdiscs[i]; i++) in htb_destroy()
1686 qdisc_put(q->direct_qdiscs[i]); in htb_destroy()
1687 kfree(q->direct_qdiscs); in htb_destroy()
1693 struct htb_sched *q = qdisc_priv(sch); in htb_delete() local
1711 if (q->offload) { in htb_delete()
1721 if (q->offload) in htb_delete()
1727 if (q->offload) in htb_delete()
1734 qdisc_purge_queue(cl->leaf.q); in htb_delete()
1737 qdisc_class_hash_remove(&q->clhash, &cl->common); in htb_delete()
1742 htb_deactivate(q, cl); in htb_delete()
1746 &q->hlevel[cl->level].wait_pq); in htb_delete()
1762 struct htb_sched *q = qdisc_priv(sch); in htb_change_class() local
1792 if (q->offload) { in htb_change_class()
1885 if (!q->offload) { in htb_change_class()
1911 WARN_ON(old_q != parent->leaf.q); in htb_change_class()
1937 if (q->offload) { in htb_change_class()
1948 qdisc_purge_queue(parent->leaf.q); in htb_change_class()
1949 parent_qdisc = parent->leaf.q; in htb_change_class()
1951 htb_deactivate(q, parent); in htb_change_class()
1955 htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq); in htb_change_class()
1964 cl->leaf.q = new_q ? new_q : &noop_qdisc; in htb_change_class()
1965 if (q->offload) in htb_change_class()
1978 qdisc_class_hash_insert(&q->clhash, &cl->common); in htb_change_class()
1981 if (cl->leaf.q != &noop_qdisc) in htb_change_class()
1982 qdisc_hash_add(cl->leaf.q, true); in htb_change_class()
1994 if (q->offload) { in htb_change_class()
2029 do_div(quantum, q->rate2quantum); in htb_change_class()
2057 qdisc_class_hash_grow(sch, &q->clhash); in htb_change_class()
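In htb_change_class(), when the user supplies no quantum it is derived from the class rate divided by q->rate2quantum (the do_div() call above) and then kept within sane bounds. The sketch below assumes 1000/200000-byte bounds, which are illustrative here rather than quoted from the source:

```c
#include <stdint.h>
#include <stdio.h>

/* Sketch of deriving a class quantum from its rate when none is given:
 * rate in bytes/s divided by rate2quantum (r2q), as in the do_div() call
 * listed above.  The clamp bounds are illustrative.
 */
static uint32_t quantum_from_rate(uint64_t rate_bytes_ps, uint32_t rate2quantum)
{
	uint64_t quantum = rate_bytes_ps / rate2quantum;   /* do_div() equivalent */

	if (quantum < 1000)
		quantum = 1000;        /* avoid a uselessly small quantum */
	if (quantum > 200000)
		quantum = 200000;      /* avoid starving sibling classes  */
	return (uint32_t)quantum;
}

int main(void)
{
	/* 1 Mbit/s = 125000 bytes/s with r2q = 10 */
	printf("quantum = %u bytes\n", quantum_from_rate(125000ULL, 10));
	return 0;
}
```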
2074 struct htb_sched *q = qdisc_priv(sch); in htb_tcf_block() local
2077 return cl ? cl->block : q->block; in htb_tcf_block()
2108 struct htb_sched *q = qdisc_priv(sch); in htb_walk() local
2115 for (i = 0; i < q->clhash.hashsize; i++) { in htb_walk()
2116 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_walk()