/*
 * net/sched/sch_drr.c         Deficit Round Robin scheduler
 *
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

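/*
 * Deficit Round Robin (Shreedhar & Varghese, "Efficient Fair Queueing
 * Using Deficit Round Robin", SIGCOMM '95): each class has a configurable
 * per-round byte allowance (the quantum) and a running deficit counter.
 * Backlogged classes are kept on an active list and served round-robin;
 * a class may dequeue as long as its head packet fits into the deficit,
 * otherwise the deficit is topped up by one quantum and the class moves
 * to the tail of the list.
 *
 * Illustrative setup from user space (syntax per tc(8); quantum values
 * are examples only; packets that match neither a filter nor a class via
 * skb->priority are dropped, as there is no default class):
 *
 *	tc qdisc add dev eth0 root handle 1: drr
 *	tc class add dev eth0 parent 1: classid 1:1 drr quantum 1500
 *	tc class add dev eth0 parent 1: classid 1:2 drr quantum 3000
 *	tc filter add dev eth0 parent 1: protocol ip u32 \
 *		match ip dport 80 0xffff flowid 1:1
 */

/*
 * Per-class state: the byte quantum added each round, the current deficit,
 * the child qdisc holding the class's packets, and alist linking the class
 * into the scheduler's list of active (backlogged) classes.
 */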
struct drr_class {
	struct Qdisc_class_common	common;
	unsigned int			filter_cnt;

	struct gnet_stats_basic_packed		bstats;
	struct gnet_stats_queue		qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct list_head		alist;
	struct Qdisc			*qdisc;

	u32				quantum;
	u32				deficit;
};

struct drr_sched {
	struct list_head		active;
	struct tcf_proto __rcu		*filter_list;
	struct tcf_block		*block;
	struct Qdisc_class_hash		clhash;
};

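/* Look up a class by its full classid in the qdisc's class hash. */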
static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct drr_class, common);
}

static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
};

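/*
 * Create a new class or update an existing one (tc class add/change).
 * The quantum must be non-zero; when none is given it defaults to the
 * device's MTU-sized allowance via psched_mtu().  New classes start out
 * with a default pfifo child qdisc.
 */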
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DRR_MAX + 1];
	u32 quantum;
	int err;

	if (!opt) {
		NL_SET_ERR_MSG(extack, "DRR options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_DRR_QUANTUM]) {
		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
		if (quantum == 0) {
			NL_SET_ERR_MSG(extack, "Specified DRR quantum cannot be zero");
			return -EINVAL;
		}
	} else
		quantum = psched_mtu(qdisc_dev(sch));

	if (cl != NULL) {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err) {
				NL_SET_ERR_MSG(extack, "Failed to replace estimator");
				return err;
			}
		}

		sch_tree_lock(sch);
		if (tb[TCA_DRR_QUANTUM])
			cl->quantum = quantum;
		sch_tree_unlock(sch);

		return 0;
	}

	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->common.classid = classid;
	cl->quantum	   = quantum;
	cl->qdisc	   = qdisc_create_dflt(sch->dev_queue,
					       &pfifo_qdisc_ops, classid,
					       NULL);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	else
		qdisc_hash_add(cl->qdisc, true);

	if (tca[TCA_RATE]) {
		err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
					    NULL,
					    qdisc_root_sleeping_running(sch),
					    tca[TCA_RATE]);
		if (err) {
			NL_SET_ERR_MSG(extack, "Failed to replace estimator");
			qdisc_put(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
	gen_kill_estimator(&cl->rate_est);
	qdisc_put(cl->qdisc);
	kfree(cl);
}

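/*
 * Delete a class (tc class del).  Refused with -EBUSY while filters still
 * reference it; otherwise the class's queue is purged and the class torn
 * down.
 */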
static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	qdisc_purge_queue(cl->qdisc);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	sch_tree_unlock(sch);

	drr_destroy_class(sch, cl);
	return 0;
}

static unsigned long drr_search_class(struct Qdisc *sch, u32 classid)
{
	return (unsigned long)drr_find_class(sch, classid);
}

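/*
 * Filters may only be attached to the qdisc itself; per-class filter
 * blocks are rejected.
 */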
static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);

	if (cl) {
		NL_SET_ERR_MSG(extack, "DRR classid must be zero");
		return NULL;
	}

	return q->block;
}

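/*
 * Filter binding: filter_cnt tracks how many filter results point at a
 * class, so drr_delete_class() can refuse to remove a class that is still
 * referenced.
 */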
static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->filter_cnt++;

	return (unsigned long)cl;
}

static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	cl->filter_cnt--;
}

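/*
 * Replace a class's child qdisc.  A NULL replacement falls back to a
 * default pfifo (or noop_qdisc if even that cannot be allocated).
 */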
static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old,
			   struct netlink_ext_ack *extack)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->common.classid, NULL);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	return cl->qdisc;
}

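/*
 * Called when a class's child queue drains to zero (e.g. after packets are
 * purged); the now-empty class is taken off the active list.
 */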
static void drr_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	list_del(&cl->alist);
}

static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

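/*
 * The deficit is only meaningful while a class is backlogged, so it is
 * reported as zero when the class's queue is empty.
 */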
static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct drr_class *cl = (struct drr_class *)arg;
	__u32 qlen = qdisc_qlen_sum(cl->qdisc);
	struct Qdisc *cl_q = cl->qdisc;
	struct tc_drr_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	if (qlen)
		xstats.deficit = cl->deficit;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

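/*
 * Map an skb to a class: a matching skb->priority (major number equal to
 * the qdisc handle) takes precedence, otherwise the attached filters are
 * consulted.  Returns NULL if no class matches or an action consumes the
 * packet.
 */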
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		cl = drr_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct drr_class *)res.class;
		if (cl == NULL)
			cl = drr_find_class(sch, res.classid);
		return cl;
	}
	return NULL;
}

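/*
 * Enqueue into the class's child qdisc.  When a class becomes backlogged
 * (its queue was empty before this packet) it is appended to the active
 * list with a fresh deficit of one quantum.
 */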
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	int err = 0;
	bool first;

	cl = drr_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	first = !cl->qdisc->q.qlen;
	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (first) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return err;
}

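/*
 * The core DRR round: look at the class at the head of the active list.
 * If its head-of-line packet fits into the remaining deficit, charge the
 * packet length against the deficit and hand the packet out (dropping the
 * class from the active list once it runs empty).  Otherwise add one
 * quantum to the deficit and rotate the class to the tail, giving the next
 * class a turn.
 */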
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct sk_buff *skb;
	unsigned int len;

	if (list_empty(&q->active))
		goto out;
	while (1) {
		cl = list_first_entry(&q->active, struct drr_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (skb == NULL) {
			qdisc_warn_nonwc(__func__, cl->qdisc);
			goto out;
		}

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (unlikely(skb == NULL))
				goto out;
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);

			bstats_update(&cl->bstats, skb);
			qdisc_bstats_update(sch, skb);
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}

static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
			  struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	int err;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	INIT_LIST_HEAD(&q->active);
	return 0;
}

static void drr_reset_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen)
				list_del(&cl->alist);
			qdisc_reset(cl->qdisc);
		}
	}
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void drr_destroy_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *next;
	unsigned int i;

	tcf_block_put(q->block);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			drr_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static const struct Qdisc_class_ops drr_class_ops = {
	.change		= drr_change_class,
	.delete		= drr_delete_class,
	.find		= drr_search_class,
	.tcf_block	= drr_tcf_block,
	.bind_tcf	= drr_bind_tcf,
	.unbind_tcf	= drr_unbind_tcf,
	.graft		= drr_graft_class,
	.leaf		= drr_class_leaf,
	.qlen_notify	= drr_qlen_notify,
	.dump		= drr_dump_class,
	.dump_stats	= drr_dump_class_stats,
	.walk		= drr_walk,
};

static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
	.cl_ops		= &drr_class_ops,
	.id		= "drr",
	.priv_size	= sizeof(struct drr_sched),
	.enqueue	= drr_enqueue,
	.dequeue	= drr_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= drr_init_qdisc,
	.reset		= drr_reset_qdisc,
	.destroy	= drr_destroy_qdisc,
	.owner		= THIS_MODULE,
};

static int __init drr_init(void)
{
	return register_qdisc(&drr_qdisc_ops);
}

static void __exit drr_exit(void)
{
	unregister_qdisc(&drr_qdisc_ops);
}

module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");