// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_drr.c         Deficit Round Robin scheduler
 *
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 */

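/*
 * Example setup (a sketch only; "eth0" and the values below are
 * illustrative assumptions, see tc-drr(8) for details):
 *
 *	tc qdisc add dev eth0 root handle 1: drr
 *	tc class add dev eth0 parent 1: classid 1:1 drr quantum 1500
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
 *		match ip dst 10.0.0.1 flowid 1:1
 *
 * Packets matched by no filter (and carrying no valid classid in
 * skb->priority) are dropped, so a catch-all class and filter are
 * normally wanted.
 */
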
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

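/*
 * Per-class state.  @quantum is the number of bytes the class may send
 * per round of the scheduler; @deficit is the byte credit left in the
 * current round.  A class with packets queued in @qdisc is linked into
 * the scheduler's active list through @alist.
 */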
struct drr_class {
	struct Qdisc_class_common	common;
	unsigned int			filter_cnt;

	struct gnet_stats_basic_packed		bstats;
	struct gnet_stats_queue		qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct list_head		alist;
	struct Qdisc			*qdisc;

	u32				quantum;
	u32				deficit;
};

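/* Scheduler state: the list of backlogged classes served round-robin,
 * the attached filter chain and the classid hash table.
 */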
struct drr_sched {
	struct list_head		active;
	struct tcf_proto __rcu		*filter_list;
	struct tcf_block		*block;
	struct Qdisc_class_hash		clhash;
};

static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct drr_class, common);
}

static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
};

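/*
 * Create a new class, or change an existing one when *arg is non-NULL.
 * Without an explicit TCA_DRR_QUANTUM the quantum defaults to the
 * device MTU, i.e. one full-sized packet per round.
 */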
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DRR_MAX + 1];
	u32 quantum;
	int err;

	if (!opt) {
		NL_SET_ERR_MSG(extack, "DRR options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_DRR_MAX, opt, drr_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_DRR_QUANTUM]) {
		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
		if (quantum == 0) {
			NL_SET_ERR_MSG(extack, "Specified DRR quantum cannot be zero");
			return -EINVAL;
		}
	} else
		quantum = psched_mtu(qdisc_dev(sch));

	if (cl != NULL) {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err) {
				NL_SET_ERR_MSG(extack, "Failed to replace estimator");
				return err;
			}
		}

		sch_tree_lock(sch);
		if (tb[TCA_DRR_QUANTUM])
			cl->quantum = quantum;
		sch_tree_unlock(sch);

		return 0;
	}

	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->common.classid = classid;
	cl->quantum	   = quantum;
	cl->qdisc	   = qdisc_create_dflt(sch->dev_queue,
					       &pfifo_qdisc_ops, classid,
					       NULL);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	else
		qdisc_hash_add(cl->qdisc, true);

	if (tca[TCA_RATE]) {
		err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
					    NULL,
					    qdisc_root_sleeping_running(sch),
					    tca[TCA_RATE]);
		if (err) {
			NL_SET_ERR_MSG(extack, "Failed to replace estimator");
			qdisc_put(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
	gen_kill_estimator(&cl->rate_est);
	qdisc_put(cl->qdisc);
	kfree(cl);
}

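/* Deletion is refused while tc filters still reference the class. */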
static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	qdisc_purge_queue(cl->qdisc);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	sch_tree_unlock(sch);

	drr_destroy_class(sch, cl);
	return 0;
}

static unsigned long drr_search_class(struct Qdisc *sch, u32 classid)
{
	return (unsigned long)drr_find_class(sch, classid);
}

static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);

	if (cl) {
		NL_SET_ERR_MSG(extack, "DRR classid must be zero");
		return NULL;
	}

	return q->block;
}

static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->filter_cnt++;

	return (unsigned long)cl;
}

static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	cl->filter_cnt--;
}

static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old,
			   struct netlink_ext_ack *extack)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->common.classid, NULL);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	return cl->qdisc;
}

/* Called when the class' child qdisc becomes empty: leave the active list. */
static void drr_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	list_del(&cl->alist);
}

static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct drr_class *cl = (struct drr_class *)arg;
	__u32 qlen = qdisc_qlen_sum(cl->qdisc);
	struct Qdisc *cl_q = cl->qdisc;
	struct tc_drr_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	if (qlen)
		xstats.deficit = cl->deficit;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

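/*
 * Pick the class for @skb: a classid in skb->priority that names one of
 * our classes is honoured directly, otherwise the attached tc filters
 * decide.  Returns NULL (with *qerr set) when no class matches.
 */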
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		cl = drr_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct drr_class *)res.class;
		if (cl == NULL)
			cl = drr_find_class(sch, res.classid);
		return cl;
	}
	return NULL;
}

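/*
 * Enqueue: classify, then hand the packet to the class qdisc.  If the
 * class just became backlogged it is appended to the active list with
 * a fresh deficit of one quantum.
 */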
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	int err = 0;
	bool first;

	cl = drr_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	first = !cl->qdisc->q.qlen;
	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (first) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return err;
}

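/*
 * Core DRR loop: peek at the head packet of the first active class.
 * If it fits in the class' deficit, charge the deficit and dequeue it;
 * a class that runs empty leaves the active list.  Otherwise the class
 * is granted another quantum and rotated to the tail of the list, so
 * each class sends on average quantum bytes per round.
 */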
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct sk_buff *skb;
	unsigned int len;

	if (list_empty(&q->active))
		goto out;
	while (1) {
		cl = list_first_entry(&q->active, struct drr_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (skb == NULL) {
			qdisc_warn_nonwc(__func__, cl->qdisc);
			goto out;
		}

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (unlikely(skb == NULL))
				goto out;
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);

			bstats_update(&cl->bstats, skb);
			qdisc_bstats_update(sch, skb);
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}

static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
			  struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	int err;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	INIT_LIST_HEAD(&q->active);
	return 0;
}

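/* Drop all queued packets and take every class off the active list. */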
static void drr_reset_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen)
				list_del(&cl->alist);
			qdisc_reset(cl->qdisc);
		}
	}
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void drr_destroy_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *next;
	unsigned int i;

	tcf_block_put(q->block);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			drr_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static const struct Qdisc_class_ops drr_class_ops = {
	.change		= drr_change_class,
	.delete		= drr_delete_class,
	.find		= drr_search_class,
	.tcf_block	= drr_tcf_block,
	.bind_tcf	= drr_bind_tcf,
	.unbind_tcf	= drr_unbind_tcf,
	.graft		= drr_graft_class,
	.leaf		= drr_class_leaf,
	.qlen_notify	= drr_qlen_notify,
	.dump		= drr_dump_class,
	.dump_stats	= drr_dump_class_stats,
	.walk		= drr_walk,
};

static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
	.cl_ops		= &drr_class_ops,
	.id		= "drr",
	.priv_size	= sizeof(struct drr_sched),
	.enqueue	= drr_enqueue,
	.dequeue	= drr_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= drr_init_qdisc,
	.reset		= drr_reset_qdisc,
	.destroy	= drr_destroy_qdisc,
	.owner		= THIS_MODULE,
};

static int __init drr_init(void)
{
	return register_qdisc(&drr_qdisc_ops);
}

static void __exit drr_exit(void)
{
	unregister_qdisc(&drr_qdisc_ops);
}

module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");