Lines Matching +full:static +full:- +full:beta (net/sched/sch_fq_pie.c; elided spans are marked with /* ... */)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Principles:
 * - Packets are classified on flows.
 * - This is a Stochastic model (as we use a hash, several flows might
 *   be hashed to the same slot)
 * - Each flow has a PIE managed queue.
 * - Flows are linked onto two (Round Robin) lists,
 *   so that new flows have priority on old ones.
 * - For a given flow, packets are not reordered.
 * - Drops during enqueue only.
 * - ECN capability is off by default.
 * - ECN threshold (if ECN is enabled) is at 10% by default.
 * - Uses timestamps to calculate queue delay by default.
 */
/**
 * struct fq_pie_flow - contains data for each flow
 * ...
 */
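/* Illustration (not the verbatim kernel definition): the struct body is not
 * among the matched lines, so the sketch below is reconstructed from the
 * fields that the functions in this listing actually touch; take the field
 * order and exact types as assumptions.
 */
struct fq_pie_flow {
	struct pie_vars vars;		/* per-flow PIE state (prob, accu_prob, ...) */
	s32 deficit;			/* DRR byte credits, refilled from q->quantum */
	u32 backlog;			/* bytes currently queued in this flow */
	u32 qlen;			/* packets currently queued in this flow */
	struct sk_buff *head;		/* singly linked packet queue, ... */
	struct sk_buff *tail;		/* ... appended at the tail */
	struct list_head flowchain;	/* links the flow into new_flows/old_flows */
};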
static unsigned int fq_pie_hash(const struct fq_pie_sched_data *q,
				struct sk_buff *skb)
{
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}
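/* Illustration (not part of sch_fq_pie.c): reciprocal_scale() maps a 32-bit
 * hash uniformly onto [0, flows_cnt) with a multiply and a shift instead of
 * a modulo. A minimal userspace sketch of the same trick; scale() is a
 * local stand-in for the kernel helper.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t scale(uint32_t hash, uint32_t n)
{
	return (uint32_t)(((uint64_t)hash * n) >> 32);	/* in [0, n) for n > 0 */
}

int main(void)
{
	/* An arbitrary hash value scaled onto 1024 slots (the FQ-PIE default). */
	printf("flow index: %u\n", scale(0xdeadbeefu, 1024));
	return 0;
}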
static unsigned int fq_pie_classify(struct sk_buff *skb, struct Qdisc *sch,
				    int *qerr)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	/* ... */

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	/* ... no filters attached: fall back to fq_pie_hash(); otherwise run
	 * tcf_classify() and, on a verdict with a usable class id: ...
	 */
	if (TC_H_MIN(res.classid) <= q->flows_cnt)
		return TC_H_MIN(res.classid);
	/* ... */
}
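/* Illustration (not part of sch_fq_pie.c): a tc handle packs major:minor
 * into 32 bits and the macros just mask out the halves (the definitions
 * below are copied from include/uapi/linux/pkt_sched.h). So if the qdisc
 * handle is 0x00010000 ("1:"), a packet with skb->priority 0x00010003
 * ("1:3") is steered straight into flow 3, bypassing filters and the hash.
 */
#include <assert.h>
#include <stdint.h>

#define TC_H_MAJ(h) ((h) & 0xFFFF0000U)
#define TC_H_MIN(h) ((h) & 0x0000FFFFU)

int main(void)
{
	uint32_t handle = 0x00010000, priority = 0x00010003;

	assert(TC_H_MAJ(priority) == handle);
	assert(TC_H_MIN(priority) == 3);	/* 1-based flow index */
	return 0;
}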
static inline void flow_queue_add(struct fq_pie_flow *flow,
				  struct sk_buff *skb)
{
	if (!flow->head)
		flow->head = skb;
	else
		flow->tail->next = skb;

	flow->tail = skb;
	skb->next = NULL;
}
static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	/* ... local declarations, then classify the packet into a flow;
	 * an index of 0 means "drop" ...
	 */
	idx--;

	sel_flow = &q->flows[idx];
	/* Account the packet against the memory limit */
	get_pie_cb(skb)->mem_usage = skb->truesize;
	memory_limited = q->memory_usage > q->memory_limit + skb->truesize;

	/* Drop if the qdisc is at its packet limit */
	if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
		q->stats.overlimit++;
		goto out;
	} else if (unlikely(memory_limited)) {
		q->overmemory++;
	}

	if (!pie_drop_early(sch, &q->p_params, &sel_flow->vars,
			    sel_flow->backlog, skb->len)) {
		enqueue = true;
	} else if (q->p_params.ecn &&
		   sel_flow->vars.prob <= (MAX_PROB / 100) * q->ecn_prob &&
		   INET_ECN_set_ce(skb)) {
		/* ECN-capable and below the marking threshold: mark, don't drop */
		q->stats.ecn_mark++;
		enqueue = true;
	}

	if (enqueue) {
		/* Set enqueue time only when dq_rate_estimator is disabled */
		if (!q->p_params.dq_rate_estimator)
			pie_set_enqueue_time(skb);

		pkt_len = qdisc_pkt_len(skb);
		q->stats.packets_in++;
		q->memory_usage += skb->truesize;
		sch->qstats.backlog += pkt_len;
		sch->q.qlen++;
		flow_queue_add(sel_flow, skb);
		if (list_empty(&sel_flow->flowchain)) {
			list_add_tail(&sel_flow->flowchain, &q->new_flows);
			q->new_flow_count++;
			sel_flow->deficit = q->quantum;
			sel_flow->qlen = 0;
			sel_flow->backlog = 0;
		}
		sel_flow->qlen++;
		sel_flow->backlog += pkt_len;
		return NET_XMIT_SUCCESS;
	}

out:
	q->stats.dropped++;
	sel_flow->vars.accu_prob = 0;
	/* ... free the skb and return NET_XMIT_CN ... */
}
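/* Illustration (not part of sch_fq_pie.c): the marking test above compares
 * the flow's PIE drop probability against ecn_prob percent of full scale.
 * MAX_PROB below mirrors include/net/pie.h (U64_MAX >> 8); treat that
 * constant as an assumption of this sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_PROB (UINT64_MAX >> 8)

int main(void)
{
	uint64_t ecn_prob = 10;				/* default: mark below 10% */
	uint64_t threshold = (MAX_PROB / 100) * ecn_prob;
	uint64_t prob = MAX_PROB / 20;			/* a flow currently at 5% */

	/* Below the threshold an ECN-capable packet is marked, not dropped. */
	printf("mark instead of drop: %s\n", prob <= threshold ? "yes" : "no");
	return 0;
}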
static struct netlink_range_validation fq_pie_q_range = {
	/* ... */
};

static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
	/* ... one entry per TCA_FQ_PIE_* attribute ... */
};
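/* Illustration (not part of sch_fq_pie.c): a userspace model of what a
 * netlink_range_validation entry enforces, i.e. a u32 attribute is rejected
 * unless min <= value <= max. The bounds below are invented for the demo,
 * not the kernel's actual fq_pie limits.
 */
#include <stdint.h>
#include <stdio.h>

struct range { uint32_t min, max; };

static int in_range(uint32_t val, const struct range *r)
{
	return val >= r->min && val <= r->max;
}

int main(void)
{
	struct range limit_range = { 1, 1 << 20 };	/* assumed bounds */

	printf("0 valid? %d, 10240 valid? %d\n",
	       in_range(0, &limit_range), in_range(10240, &limit_range));
	return 0;
}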
static inline struct sk_buff *dequeue_head(struct fq_pie_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}
static struct sk_buff *fq_pie_qdisc_dequeue(struct Qdisc *sch)
{
	/* ... */
begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		/* ... return NULL if this list is empty too ... */
	}
	/* ... take the first flow on the chosen list ... */

	/* Flow has exhausted its byte credits: refill and demote it */
	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	if (flow->head) {
		skb = dequeue_head(flow);
		pkt_len = qdisc_pkt_len(skb);
		sch->qstats.backlog -= pkt_len;
		sch->q.qlen--;
	}

	if (!skb) {	/* empty flow: demote it or drop it from the lists */
		if (head == &q->new_flows && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}

	flow->qlen--;
	flow->deficit -= pkt_len;
	flow->backlog -= pkt_len;
	q->memory_usage -= get_pie_cb(skb)->mem_usage;
	pie_process_dequeue(skb, &q->p_params, &flow->vars, flow->backlog);
	return skb;
}
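/* Illustration (not part of sch_fq_pie.c): one flow's deficit across a few
 * dequeues, with a 1514-byte quantum (what psched_mtu() yields on common
 * Ethernet) and made-up packet sizes. As above, the deficit is checked
 * before dequeueing: once it drops to zero or below, the flow is refilled
 * and demoted to old_flows before it may send again.
 */
#include <stdio.h>

int main(void)
{
	int quantum = 1514, deficit = 1514;
	int pkts[] = { 1000, 1000, 1000 }, sent = 0;

	while (sent < 3) {
		if (deficit <= 0) {		/* credits exhausted */
			deficit += quantum;	/* refill; flow -> old_flows */
			printf("refill -> %d\n", deficit);
			continue;
		}
		deficit -= pkts[sent++];
		printf("sent %d bytes, deficit now %d\n", pkts[sent - 1], deficit);
	}
	return 0;
}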
static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	/* ... parse the TCA_FQ_PIE_* attributes into tb[] and take the tree
	 * lock; every assignment below is guarded by if (tb[<attribute>]) ...
	 */
	u32 limit = nla_get_u32(tb[TCA_FQ_PIE_LIMIT]);

	q->p_params.limit = limit;
	sch->limit = limit;

	if (q->flows)		/* the flow table cannot be resized while active */
		goto flows_error;
	q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]);
	if (!q->flows_cnt || q->flows_cnt > 65536)	/* must be in [1..65536] */
		goto flows_error;

	q->p_params.target =	/* usecs on the wire, psched ticks internally */
		PSCHED_NS2TICKS((u64)nla_get_u32(tb[TCA_FQ_PIE_TARGET]) *
				NSEC_PER_USEC);
	q->p_params.tupdate =	/* usecs on the wire, jiffies internally */
		usecs_to_jiffies(nla_get_u32(tb[TCA_FQ_PIE_TUPDATE]));
	q->p_params.alpha = nla_get_u32(tb[TCA_FQ_PIE_ALPHA]);
	q->p_params.beta = nla_get_u32(tb[TCA_FQ_PIE_BETA]);
	q->quantum = nla_get_u32(tb[TCA_FQ_PIE_QUANTUM]);
	q->memory_limit = nla_get_u32(tb[TCA_FQ_PIE_MEMORY_LIMIT]);
	q->ecn_prob = nla_get_u32(tb[TCA_FQ_PIE_ECN_PROB]);
	q->p_params.ecn = nla_get_u32(tb[TCA_FQ_PIE_ECN]);
	q->p_params.bytemode = nla_get_u32(tb[TCA_FQ_PIE_BYTEMODE]);
	q->p_params.dq_rate_estimator =
		nla_get_u32(tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR]);

	/* Drop excess packets if the new limit is lower */
	while (sch->q.qlen > sch->limit) {
		/* ... dequeue, count, and free one packet ... */
	}
	/* ... unlock; return 0 on success ... */

flows_error:
	/* ... unlock ... */
	return -EINVAL;
}
static void fq_pie_timer(struct timer_list *t)
{
	struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
	unsigned long next, tupdate;
	struct Qdisc *sch = q->sch;
	int max_cnt, i;

	/* ... take the qdisc root lock ... */

	/* Limit this expensive loop to 2048 flows per round */
	max_cnt = min_t(int, q->flows_cnt - q->flows_cursor, 2048);
	for (i = 0; i < max_cnt; i++) {
		pie_calculate_probability(&q->p_params,
					  &q->flows[q->flows_cursor].vars,
					  q->flows[q->flows_cursor].backlog);
		q->flows_cursor++;
	}

	tupdate = q->p_params.tupdate;
	next = 0;
	if (q->flows_cursor >= q->flows_cnt) {
		q->flows_cursor = 0;
		next = tupdate;
	}
	if (tupdate)
		mod_timer(&q->adapt_timer, jiffies + next);
	/* ... drop the root lock ... */
}
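/* Illustration (not part of sch_fq_pie.c): with the 2048-flows-per-run cap,
 * a full probability-update pass over flows_cnt queues takes
 * ceil(flows_cnt / 2048) timer invocations; as reconstructed above, the
 * intermediate runs reschedule immediately (next == 0) and only the final
 * one waits a full tupdate interval.
 */
#include <stdio.h>

int main(void)
{
	unsigned int flows_cnt = 65536;		/* the maximum supported */
	unsigned int per_run = 2048;
	unsigned int runs = (flows_cnt + per_run - 1) / per_run;

	printf("%u timer runs per full pass\n", runs);	/* prints 32 */
	return 0;
}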
static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	int err;
	u32 idx;

	/* Defaults: 10240-packet limit, MTU-sized quantum, 10% ECN
	 * threshold, 1024 flow queues, 32 MB memory limit
	 */
	pie_params_init(&q->p_params);
	sch->limit = 10 * 1024;
	q->p_params.limit = sch->limit;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->sch = sch;
	q->ecn_prob = 10;
	q->flows_cnt = 1024;
	q->memory_limit = SZ_32M;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	timer_setup(&q->adapt_timer, fq_pie_timer, 0);

	/* ... apply netlink options via fq_pie_change(), if any ... */

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		goto init_failure;

	q->flows = kvcalloc(q->flows_cnt, sizeof(struct fq_pie_flow),
			    GFP_KERNEL);
	if (!q->flows) {
		err = -ENOMEM;
		goto init_failure;
	}
	for (idx = 0; idx < q->flows_cnt; idx++) {
		struct fq_pie_flow *flow = q->flows + idx;

		INIT_LIST_HEAD(&flow->flowchain);
		pie_vars_init(&flow->vars);
	}

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);
	return 0;

init_failure:
	q->flows_cnt = 0;
	return err;
}
static int fq_pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (!opts)
		return -EMSGSIZE;

	/* convert target from psched ticks back to usecs */
	if (nla_put_u32(skb, TCA_FQ_PIE_LIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_PIE_FLOWS, q->flows_cnt) ||
	    nla_put_u32(skb, TCA_FQ_PIE_TARGET,
			((u32)PSCHED_TICKS2NS(q->p_params.target)) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_FQ_PIE_TUPDATE,
			jiffies_to_usecs(q->p_params.tupdate)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ALPHA, q->p_params.alpha) ||
	    nla_put_u32(skb, TCA_FQ_PIE_BETA, q->p_params.beta) ||
	    nla_put_u32(skb, TCA_FQ_PIE_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_PIE_MEMORY_LIMIT, q->memory_limit) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ECN_PROB, q->ecn_prob) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ECN, q->p_params.ecn) ||
	    nla_put_u32(skb, TCA_FQ_PIE_BYTEMODE, q->p_params.bytemode) ||
	    nla_put_u32(skb, TCA_FQ_PIE_DQ_RATE_ESTIMATOR,
			q->p_params.dq_rate_estimator))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
static int fq_pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct tc_fq_pie_xstats st = {
		.packets_in	= q->stats.packets_in,
		.overlimit	= q->stats.overlimit,
		.overmemory	= q->overmemory,
		.dropped	= q->stats.dropped,
		.ecn_mark	= q->stats.ecn_mark,
		.new_flow_count = q->new_flow_count,
		.memory_usage	= q->memory_usage,
	};
	struct list_head *pos;

	/* ... under the tree lock, count the flows on each list ... */
	list_for_each(pos, &q->new_flows)
		st.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.old_flows_len++;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
static void fq_pie_reset(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	u32 idx;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (idx = 0; idx < q->flows_cnt; idx++) {
		struct fq_pie_flow *flow = q->flows + idx;

		/* Removes all packets from the flow */
		rtnl_kfree_skbs(flow->head, flow->tail);
		flow->head = NULL;

		INIT_LIST_HEAD(&flow->flowchain);
		pie_vars_init(&flow->vars);
	}
}
static void fq_pie_destroy(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	q->p_params.tupdate = 0;
	del_timer_sync(&q->adapt_timer);
	kvfree(q->flows);
}
static struct Qdisc_ops fq_pie_qdisc_ops __read_mostly = {
	.id	= "fq_pie",
	/* ... enqueue/dequeue/init/destroy/... hooks wired to the functions above ... */
};
static int __init fq_pie_module_init(void)
{
	return register_qdisc(&fq_pie_qdisc_ops);
}

static void __exit fq_pie_module_exit(void)
{
	unregister_qdisc(&fq_pie_qdisc_ops);
}
580 MODULE_DESCRIPTION("Flow Queue Proportional Integral controller Enhanced (FQ-PIE)");