/*
 * net/sched/sch_drr.c         Deficit Round Robin scheduler
 *
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
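
/*
 * DRR (Deficit Round Robin, Shreedhar & Varghese, SIGCOMM '95) serves
 * active classes in round-robin order.  Each class has a configurable
 * quantum; on every pass a class may send up to its quantum in bytes
 * plus any deficit carried over from earlier passes, so bandwidth is
 * shared in proportion to the quanta regardless of packet sizes.
 *
 * Illustrative configuration from user space (a sketch assuming a
 * standard iproute2 build with drr support; device and addresses are
 * examples only):
 *
 *	tc qdisc add dev eth0 root handle 1: drr
 *	tc class add dev eth0 parent 1: classid 1:1 drr quantum 1500
 *	tc filter add dev eth0 parent 1: protocol ip u32 \
 *		match ip dst 192.0.2.1 flowid 1:1
 */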

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

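/*
 * Per-class state.  quantum is the byte allowance added on each
 * round-robin pass; deficit is the unused allowance carried over while
 * the class remains backlogged; alist links the class into
 * drr_sched->active while it has packets queued.
 */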
struct drr_class {
	struct Qdisc_class_common	common;
	unsigned int			filter_cnt;

	struct gnet_stats_basic_packed		bstats;
	struct gnet_stats_queue		qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct list_head		alist;
	struct Qdisc			*qdisc;

	u32				quantum;
	u32				deficit;
};

struct drr_sched {
	struct list_head		active;
	struct tcf_proto __rcu		*filter_list;
	struct tcf_block		*block;
	struct Qdisc_class_hash		clhash;
};

static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct drr_class, common);
}

static void drr_purge_queue(struct drr_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;
	unsigned int backlog = cl->qdisc->qstats.backlog;

	qdisc_reset(cl->qdisc);
	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
}

static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
};

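/*
 * Create a new class or update an existing one.  When no quantum is
 * specified, it defaults to the device MTU (psched_mtu()), so the
 * baseline share is one full-sized packet per round.
 */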
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DRR_MAX + 1];
	u32 quantum;
	int err;

	if (!opt) {
		NL_SET_ERR_MSG(extack, "DRR options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_DRR_QUANTUM]) {
		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
		if (quantum == 0) {
			NL_SET_ERR_MSG(extack, "Specified DRR quantum cannot be zero");
			return -EINVAL;
		}
	} else
		quantum = psched_mtu(qdisc_dev(sch));

	if (cl != NULL) {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err) {
				NL_SET_ERR_MSG(extack, "Failed to replace estimator");
				return err;
			}
		}

		sch_tree_lock(sch);
		if (tb[TCA_DRR_QUANTUM])
			cl->quantum = quantum;
		sch_tree_unlock(sch);

		return 0;
	}

	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->common.classid = classid;
	cl->quantum	   = quantum;
	cl->qdisc	   = qdisc_create_dflt(sch->dev_queue,
					       &pfifo_qdisc_ops, classid,
					       NULL);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	else
		qdisc_hash_add(cl->qdisc, true);

	if (tca[TCA_RATE]) {
		err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
					    NULL,
					    qdisc_root_sleeping_running(sch),
					    tca[TCA_RATE]);
		if (err) {
			NL_SET_ERR_MSG(extack, "Failed to replace estimator");
			qdisc_put(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
	gen_kill_estimator(&cl->rate_est);
	qdisc_put(cl->qdisc);
	kfree(cl);
}

static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	drr_purge_queue(cl);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	sch_tree_unlock(sch);

	drr_destroy_class(sch, cl);
	return 0;
}

static unsigned long drr_search_class(struct Qdisc *sch, u32 classid)
{
	return (unsigned long)drr_find_class(sch, classid);
}

static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);

	if (cl) {
		NL_SET_ERR_MSG(extack, "DRR classid must be zero");
		return NULL;
	}

	return q->block;
}

static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->filter_cnt++;

	return (unsigned long)cl;
}

static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	cl->filter_cnt--;
}

static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old,
			   struct netlink_ext_ack *extack)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->common.classid, NULL);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	return cl->qdisc;
}

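/*
 * The class's child qdisc emptied out underneath us (e.g. after drops
 * deeper in the tree), so take the class off the active list.
 */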
static void drr_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	list_del(&cl->alist);
}

static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct drr_class *cl = (struct drr_class *)arg;
	__u32 qlen = cl->qdisc->q.qlen;
	struct tc_drr_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	if (qlen)
		xstats.deficit = cl->deficit;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

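/*
 * Map a packet to a class: an skb->priority naming one of our classids
 * wins outright; otherwise fall back to the attached tcf filters.
 */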
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		cl = drr_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct drr_class *)res.class;
		if (cl == NULL)
			cl = drr_find_class(sch, res.classid);
		return cl;
	}
	return NULL;
}

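/*
 * A class joins the tail of the active list when its first packet
 * arrives, starting with a fresh deficit of one quantum.
 */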
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	int err = 0;

	cl = drr_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;
	return err;
}

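/*
 * Core DRR loop: if the head class's deficit covers its head packet,
 * dequeue it and charge the deficit; otherwise add one quantum and
 * rotate the class to the tail.  Worked example: with quantum 1000 and
 * a 1500-byte packet at the head, the class is skipped once (deficit
 * 1000 -> 2000) and sends on its next turn, leaving a deficit of 500.
 */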
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct sk_buff *skb;
	unsigned int len;

	if (list_empty(&q->active))
		goto out;
	while (1) {
		cl = list_first_entry(&q->active, struct drr_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (skb == NULL) {
			qdisc_warn_nonwc(__func__, cl->qdisc);
			goto out;
		}

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (unlikely(skb == NULL))
				goto out;
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);

			bstats_update(&cl->bstats, skb);
			qdisc_bstats_update(sch, skb);
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}

static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
			  struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	int err;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	INIT_LIST_HEAD(&q->active);
	return 0;
}

static void drr_reset_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen)
				list_del(&cl->alist);
			qdisc_reset(cl->qdisc);
		}
	}
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void drr_destroy_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *next;
	unsigned int i;

	tcf_block_put(q->block);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			drr_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static const struct Qdisc_class_ops drr_class_ops = {
	.change		= drr_change_class,
	.delete		= drr_delete_class,
	.find		= drr_search_class,
	.tcf_block	= drr_tcf_block,
	.bind_tcf	= drr_bind_tcf,
	.unbind_tcf	= drr_unbind_tcf,
	.graft		= drr_graft_class,
	.leaf		= drr_class_leaf,
	.qlen_notify	= drr_qlen_notify,
	.dump		= drr_dump_class,
	.dump_stats	= drr_dump_class_stats,
	.walk		= drr_walk,
};

static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
	.cl_ops		= &drr_class_ops,
	.id		= "drr",
	.priv_size	= sizeof(struct drr_sched),
	.enqueue	= drr_enqueue,
	.dequeue	= drr_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= drr_init_qdisc,
	.reset		= drr_reset_qdisc,
	.destroy	= drr_destroy_qdisc,
	.owner		= THIS_MODULE,
};

static int __init drr_init(void)
{
	return register_qdisc(&drr_qdisc_ops);
}

static void __exit drr_exit(void)
{
	unregister_qdisc(&drr_qdisc_ops);
}

module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");