xref: /openbmc/linux/net/sched/sch_drr.c (revision 96ac6d43)
/*
 * net/sched/sch_drr.c         Deficit Round Robin scheduler
 *
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
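
/*
 * Overview: DRR (Deficit Round Robin) is a classful, work-conserving
 * scheduler.  Backlogged classes are served in round-robin order and
 * each class may send up to its configured quantum of bytes per round;
 * unused allowance is carried over as a per-class deficit.  Packets are
 * mapped to classes via skb->priority or attached tc filters, and
 * packets that match no class are dropped.
 *
 * Illustrative configuration sketch (assumes standard iproute2 syntax;
 * the device name, handle and classids below are examples only):
 *
 *	tc qdisc add dev eth0 root handle 1: drr
 *	tc class add dev eth0 parent 1: classid 1:1 drr quantum 1500
 *	tc class add dev eth0 parent 1: classid 1:2 drr quantum 3000
 *	tc filter add dev eth0 parent 1: protocol ip u32 \
 *		match ip dport 80 0xffff flowid 1:1
 */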

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

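/*
 * Per-class state: @quantum is the number of bytes the class may send
 * each time it is visited, @deficit is the byte allowance left in the
 * current round, and @alist links the class into the scheduler's list
 * of active (backlogged) classes.
 */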
struct drr_class {
	struct Qdisc_class_common	common;
	unsigned int			filter_cnt;

	struct gnet_stats_basic_packed		bstats;
	struct gnet_stats_queue		qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct list_head		alist;
	struct Qdisc			*qdisc;

	u32				quantum;
	u32				deficit;
};

struct drr_sched {
	struct list_head		active;
	struct tcf_proto __rcu		*filter_list;
	struct tcf_block		*block;
	struct Qdisc_class_hash		clhash;
};

static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct drr_class, common);
}

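/* Netlink attribute policy: the only per-class option is a u32 quantum. */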
static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
};

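/*
 * Create a new class or modify an existing one.  When no quantum is
 * supplied, the device MTU (psched_mtu()) is used as the default, and
 * newly created classes get a pfifo child qdisc.
 */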
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DRR_MAX + 1];
	u32 quantum;
	int err;

	if (!opt) {
		NL_SET_ERR_MSG(extack, "DRR options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_DRR_MAX, opt, drr_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_DRR_QUANTUM]) {
		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
		if (quantum == 0) {
			NL_SET_ERR_MSG(extack, "Specified DRR quantum cannot be zero");
			return -EINVAL;
		}
	} else
		quantum = psched_mtu(qdisc_dev(sch));

	if (cl != NULL) {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err) {
				NL_SET_ERR_MSG(extack, "Failed to replace estimator");
				return err;
			}
		}

		sch_tree_lock(sch);
		if (tb[TCA_DRR_QUANTUM])
			cl->quantum = quantum;
		sch_tree_unlock(sch);

		return 0;
	}

	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->common.classid = classid;
	cl->quantum	   = quantum;
	cl->qdisc	   = qdisc_create_dflt(sch->dev_queue,
					       &pfifo_qdisc_ops, classid,
					       NULL);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	else
		qdisc_hash_add(cl->qdisc, true);

	if (tca[TCA_RATE]) {
		err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
					    NULL,
					    qdisc_root_sleeping_running(sch),
					    tca[TCA_RATE]);
		if (err) {
			NL_SET_ERR_MSG(extack, "Failed to replace estimator");
			qdisc_put(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
	gen_kill_estimator(&cl->rate_est);
	qdisc_put(cl->qdisc);
	kfree(cl);
}

static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	qdisc_purge_queue(cl->qdisc);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	sch_tree_unlock(sch);

	drr_destroy_class(sch, cl);
	return 0;
}

static unsigned long drr_search_class(struct Qdisc *sch, u32 classid)
{
	return (unsigned long)drr_find_class(sch, classid);
}

static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);

	if (cl) {
		NL_SET_ERR_MSG(extack, "DRR classid must be zero");
		return NULL;
	}

	return q->block;
}

static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->filter_cnt++;

	return (unsigned long)cl;
}

static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	cl->filter_cnt--;
}

static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old,
			   struct netlink_ext_ack *extack)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->common.classid, NULL);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	return cl->qdisc;
}

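/*
 * Called when a class's child queue becomes empty behind the
 * scheduler's back (e.g. via qdisc_tree_reduce_backlog()); remove the
 * class from the active list so dequeue does not keep visiting it.
 */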
static void drr_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	list_del(&cl->alist);
}

static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct drr_class *cl = (struct drr_class *)arg;
	__u32 qlen = qdisc_qlen_sum(cl->qdisc);
	struct Qdisc *cl_q = cl->qdisc;
	struct tc_drr_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	if (qlen)
		xstats.deficit = cl->deficit;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

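/*
 * Map a packet to a class: first try skb->priority as a direct classid
 * match, then fall back to the tc filters attached to the qdisc.
 * Returns NULL (and sets *qerr) if no class matches or a filter action
 * consumed the packet.
 */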
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		cl = drr_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct drr_class *)res.class;
		if (cl == NULL)
			cl = drr_find_class(sch, res.classid);
		return cl;
	}
	return NULL;
}

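/*
 * Enqueue into the class's child qdisc; when the class goes from idle
 * to backlogged it is appended to the active list with a full quantum
 * of deficit.
 */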
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	int err = 0;
	bool first;

	cl = drr_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	first = !cl->qdisc->q.qlen;
	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (first) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return err;
}

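/*
 * Core DRR loop: peek at the head of the first active class; if the
 * packet fits within the class's remaining deficit, charge it and hand
 * the packet out, otherwise grant the class another quantum and rotate
 * it to the tail of the active list.  A class that drains completely
 * is removed from the active list.
 */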
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct sk_buff *skb;
	unsigned int len;

	if (list_empty(&q->active))
		goto out;
	while (1) {
		cl = list_first_entry(&q->active, struct drr_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (skb == NULL) {
			qdisc_warn_nonwc(__func__, cl->qdisc);
			goto out;
		}

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (unlikely(skb == NULL))
				goto out;
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);

			bstats_update(&cl->bstats, skb);
			qdisc_bstats_update(sch, skb);
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}

static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
			  struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	int err;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	INIT_LIST_HEAD(&q->active);
	return 0;
}

static void drr_reset_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen)
				list_del(&cl->alist);
			qdisc_reset(cl->qdisc);
		}
	}
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void drr_destroy_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *next;
	unsigned int i;

	tcf_block_put(q->block);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			drr_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static const struct Qdisc_class_ops drr_class_ops = {
	.change		= drr_change_class,
	.delete		= drr_delete_class,
	.find		= drr_search_class,
	.tcf_block	= drr_tcf_block,
	.bind_tcf	= drr_bind_tcf,
	.unbind_tcf	= drr_unbind_tcf,
	.graft		= drr_graft_class,
	.leaf		= drr_class_leaf,
	.qlen_notify	= drr_qlen_notify,
	.dump		= drr_dump_class,
	.dump_stats	= drr_dump_class_stats,
	.walk		= drr_walk,
};

static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
	.cl_ops		= &drr_class_ops,
	.id		= "drr",
	.priv_size	= sizeof(struct drr_sched),
	.enqueue	= drr_enqueue,
	.dequeue	= drr_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= drr_init_qdisc,
	.reset		= drr_reset_qdisc,
	.destroy	= drr_destroy_qdisc,
	.owner		= THIS_MODULE,
};

static int __init drr_init(void)
{
	return register_qdisc(&drr_qdisc_ops);
}

static void __exit drr_exit(void)
{
	unregister_qdisc(&drr_qdisc_ops);
}

module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");