// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_drr.c         Deficit Round Robin scheduler
 *
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 */
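
/*
 * Each class owns a child qdisc, a configured quantum and a running
 * deficit counter.  Backlogged classes sit on a round robin list; in
 * every round a class may send packets whose combined length fits its
 * deficit, which is topped up by one quantum per round (Shreedhar and
 * Varghese, "Efficient Fair Queuing Using Deficit Round-Robin").
 *
 * Illustrative userspace setup; device name, handles and the filter
 * match are examples only:
 *
 *	tc qdisc add dev eth0 root handle 1: drr
 *	tc class add dev eth0 parent 1: classid 1:1 drr quantum 1500
 *	tc filter add dev eth0 parent 1: protocol ip u32 \
 *		match ip dport 80 0xffff flowid 1:1
 */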

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

struct drr_class {
	struct Qdisc_class_common	common;
	unsigned int			filter_cnt;	/* active filter references */

	struct gnet_stats_basic_packed	bstats;
	struct gnet_stats_queue		qstats;
	struct net_rate_estimator __rcu	*rate_est;
	struct list_head		alist;		/* entry in drr_sched->active */
	struct Qdisc			*qdisc;		/* child qdisc holding the packets */

	u32				quantum;	/* bytes added per round */
	u32				deficit;	/* bytes left in the current round */
};

struct drr_sched {
	struct list_head		active;		/* backlogged classes, round robin order */
	struct tcf_proto __rcu		*filter_list;
	struct tcf_block		*block;
	struct Qdisc_class_hash		clhash;
};

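/* Look up a class by classid in this qdisc's class hash. */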
static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct drr_class, common);
}

static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
};

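/*
 * Create a new class or change an existing one.  When TCA_DRR_QUANTUM is
 * absent, the quantum defaults to the device MTU (psched_mtu()) so that a
 * full sized packet can always be sent within one round.
 */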
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DRR_MAX + 1];
	u32 quantum;
	int err;

	if (!opt) {
		NL_SET_ERR_MSG(extack, "DRR options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_DRR_MAX, opt, drr_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_DRR_QUANTUM]) {
		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
		if (quantum == 0) {
			NL_SET_ERR_MSG(extack, "Specified DRR quantum cannot be zero");
			return -EINVAL;
		}
	} else {
		quantum = psched_mtu(qdisc_dev(sch));
	}

	if (cl != NULL) {
		/* Modify an existing class. */
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err) {
				NL_SET_ERR_MSG(extack, "Failed to replace estimator");
				return err;
			}
		}

		sch_tree_lock(sch);
		if (tb[TCA_DRR_QUANTUM])
			cl->quantum = quantum;
		sch_tree_unlock(sch);

		return 0;
	}

	/* Create a new class with a default pfifo child qdisc. */
	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->common.classid = classid;
	cl->quantum	   = quantum;
	cl->qdisc	   = qdisc_create_dflt(sch->dev_queue,
					       &pfifo_qdisc_ops, classid,
					       NULL);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	else
		qdisc_hash_add(cl->qdisc, true);

	if (tca[TCA_RATE]) {
		err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
					    NULL,
					    qdisc_root_sleeping_running(sch),
					    tca[TCA_RATE]);
		if (err) {
			NL_SET_ERR_MSG(extack, "Failed to replace estimator");
			qdisc_put(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}
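
/*
 * Illustrative quantum change from userspace; handles are examples only:
 *
 *	tc class change dev eth0 parent 1: classid 1:1 drr quantum 3000
 */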

static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
	gen_kill_estimator(&cl->rate_est);
	qdisc_put(cl->qdisc);
	kfree(cl);
}

static int drr_delete_class(struct Qdisc *sch, unsigned long arg,
			    struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)arg;

	/* Refuse deletion while filters still reference this class. */
	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	qdisc_purge_queue(cl->qdisc);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	sch_tree_unlock(sch);

	drr_destroy_class(sch, cl);
	return 0;
}

static unsigned long drr_search_class(struct Qdisc *sch, u32 classid)
{
	return (unsigned long)drr_find_class(sch, classid);
}

static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);

	if (cl) {
		NL_SET_ERR_MSG(extack, "DRR classid must be zero");
		return NULL;
	}

	return q->block;
}

static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->filter_cnt++;

	return (unsigned long)cl;
}

static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	cl->filter_cnt--;
}

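/* Replace a class's child qdisc; NULL reverts to a default pfifo. */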
static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old,
			   struct netlink_ext_ack *extack)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->common.classid, NULL);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	return cl->qdisc;
}

/* Called when a class's child qdisc becomes empty: leave the round robin list. */
static void drr_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	list_del(&cl->alist);
}

static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct drr_class *cl = (struct drr_class *)arg;
	__u32 qlen = qdisc_qlen_sum(cl->qdisc);
	struct Qdisc *cl_q = cl->qdisc;
	struct tc_drr_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	/* The deficit is only meaningful while the class is backlogged. */
	if (qlen)
		xstats.deficit = cl->deficit;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

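/* Walk all classes for dumps and the like; standard walker bookkeeping. */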
static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

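/*
 * Map an skb to a class: a direct skb->priority match against one of our
 * classids wins, otherwise the attached tcf filters decide.
 */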
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		cl = drr_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct drr_class *)res.class;
		if (cl == NULL)
			cl = drr_find_class(sch, res.classid);
		return cl;
	}
	return NULL;
}

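/*
 * Enqueue into the class's child qdisc.  A class that just became
 * backlogged is appended to the active list with a deficit of one
 * quantum.
 */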
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	int err = 0;
	bool first;

	cl = drr_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	first = !cl->qdisc->q.qlen;
	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	/* The class just became backlogged: start a fresh round for it. */
	if (first) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return err;
}

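/*
 * DRR dequeue: serve the head of the active list while the head packet
 * fits into the class deficit; otherwise add one quantum and rotate the
 * class to the tail.  Worked example: with quantum 1500 and a 2000 byte
 * head packet, the class skips one round (2000 > 1500), its deficit
 * grows to 3000, and it sends on the next visit with 1000 bytes left.
 */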
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct sk_buff *skb;
	unsigned int len;

	if (list_empty(&q->active))
		goto out;
	while (1) {
		cl = list_first_entry(&q->active, struct drr_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (skb == NULL) {
			qdisc_warn_nonwc(__func__, cl->qdisc);
			goto out;
		}

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (unlikely(skb == NULL))
				goto out;
			/* Class emptied: leave the round robin list. */
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);

			bstats_update(&cl->bstats, skb);
			qdisc_bstats_update(sch, skb);
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		/* Head packet too large: top up and rotate to the tail. */
		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}

static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
			  struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	int err;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	INIT_LIST_HEAD(&q->active);
	return 0;
}

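/* Drop all queued packets and deactivate every class. */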
static void drr_reset_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			/* Only backlogged classes sit on the active list. */
			if (cl->qdisc->q.qlen)
				list_del(&cl->alist);
			qdisc_reset(cl->qdisc);
		}
	}
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void drr_destroy_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *next;
	unsigned int i;

	tcf_block_put(q->block);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			drr_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static const struct Qdisc_class_ops drr_class_ops = {
	.change		= drr_change_class,
	.delete		= drr_delete_class,
	.find		= drr_search_class,
	.tcf_block	= drr_tcf_block,
	.bind_tcf	= drr_bind_tcf,
	.unbind_tcf	= drr_unbind_tcf,
	.graft		= drr_graft_class,
	.leaf		= drr_class_leaf,
	.qlen_notify	= drr_qlen_notify,
	.dump		= drr_dump_class,
	.dump_stats	= drr_dump_class_stats,
	.walk		= drr_walk,
};

static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
	.cl_ops		= &drr_class_ops,
	.id		= "drr",
	.priv_size	= sizeof(struct drr_sched),
	.enqueue	= drr_enqueue,
	.dequeue	= drr_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= drr_init_qdisc,
	.reset		= drr_reset_qdisc,
	.destroy	= drr_destroy_qdisc,
	.owner		= THIS_MODULE,
};

static int __init drr_init(void)
{
	return register_qdisc(&drr_qdisc_ops);
}

static void __exit drr_exit(void)
{
	unregister_qdisc(&drr_qdisc_ops);
}

module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");