/*
 * net/sched/sch_prio.c	Simple 3-band priority "scheduler".
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 * Fixes:       19990609: J Hadi Salim <hadi@nortelnetworks.com>:
 *              Init --  EINVAL when opt undefined
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

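/*
 * Per-qdisc private data.  "bands" child qdiscs live in queues[];
 * prio2band[] maps the TC_PRIO_* value derived from skb->priority to a
 * band when no filter decides.  curband is only used by the round-robin
 * ("rr") dequeue variant, and mq flags that each band is bound to a
 * hardware transmit subqueue of a multiqueue device.
 */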
struct prio_sched_data
{
	int bands;
	int curband; /* for round-robin */
	struct tcf_proto *filter_list;
	u8  prio2band[TC_PRIO_MAX+1];
	struct Qdisc *queues[TCQ_PRIO_BANDS];
	int mq;
};

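/*
 * Pick the band (and thus the child qdisc) for a packet.  If the major
 * part of skb->priority already names this qdisc, TC_H_MIN(priority)-1
 * is used directly as the band.  Otherwise the attached filters are run;
 * on TC_ACT_STOLEN/TC_ACT_QUEUED the verdict falls through so that NULL
 * is returned with *qerr set to NET_XMIT_SUCCESS, and on a miss (or with
 * no filters at all) the prio2band[] map decides.  An out-of-range band
 * falls back to prio2band[0].  In multiqueue mode the chosen band is also
 * recorded as the skb's queue mapping.
 */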
static struct Qdisc *
prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	u32 band = skb->priority;
	struct tcf_result res;
	int err;

	*qerr = NET_XMIT_BYPASS;
	if (TC_H_MAJ(skb->priority) != sch->handle) {
		err = tc_classify(skb, q->filter_list, &res);
#ifdef CONFIG_NET_CLS_ACT
		switch (err) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if (!q->filter_list || err < 0) {
			if (TC_H_MAJ(band))
				band = 0;
			band = q->prio2band[band&TC_PRIO_MAX];
			goto out;
		}
		band = res.classid;
	}
	band = TC_H_MIN(band) - 1;
	if (band >= q->bands)
		band = q->prio2band[0];
out:
	if (q->mq)
		skb_set_queue_mapping(skb, band);
	return q->queues[band];
}

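/*
 * Classify the packet and hand it to the selected child qdisc.  On
 * success the byte/packet counters and the aggregate queue length are
 * updated; otherwise the drop counter is bumped and the child's error
 * code (or the classifier verdict) is propagated.
 */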
static int
prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = prio_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {
		if (ret == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
#endif

	if ((ret = qdisc->enqueue(skb, qdisc)) == NET_XMIT_SUCCESS) {
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	sch->qstats.drops++;
	return ret;
}

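/*
 * Requeue a packet the driver could not send: it is classified again and
 * given back to the resulting child qdisc via ->requeue(), with the
 * aggregate queue length and the requeue counter updated on success.
 */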
static int
prio_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = prio_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {
		if (ret == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
#endif

	if ((ret = qdisc->ops->requeue(skb, qdisc)) == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		sch->qstats.requeues++;
		return 0;
	}
	sch->qstats.drops++;
	return NET_XMIT_DROP;
}

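/*
 * Strict-priority dequeue: scan the bands from 0 (highest priority)
 * upwards and return the first packet found.  In multiqueue mode a band
 * whose hardware subqueue is stopped is skipped so that a stalled queue
 * cannot block the others; without mq, subqueue 0 is checked each time.
 */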
static struct sk_buff *
prio_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;
	struct Qdisc *qdisc;

	for (prio = 0; prio < q->bands; prio++) {
		/* Check if the target subqueue is available before
		 * pulling an skb.  This way we avoid excessive requeues
		 * for slower queues.
		 */
		if (!__netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) {
			qdisc = q->queues[prio];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				sch->q.qlen--;
				return skb;
			}
		}
	}
	return NULL;
}

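/*
 * Round-robin dequeue used by the "rr" qdisc: continue from curband and
 * take at most one pass over all bands, advancing curband past the band
 * that supplied a packet so the next call starts at its successor.
 */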
static struct sk_buff *rr_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc;
	int bandcount;

	/* Only take one pass through the queues.  If nothing is available,
	 * return nothing.
	 */
	for (bandcount = 0; bandcount < q->bands; bandcount++) {
		/* Check if the target subqueue is available before
		 * pulling an skb.  This way we avoid excessive requeues
		 * for slower queues.  If the queue is stopped, try the
		 * next queue.
		 */
		if (!__netif_subqueue_stopped(sch->dev,
					    (q->mq ? q->curband : 0))) {
			qdisc = q->queues[q->curband];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				sch->q.qlen--;
				q->curband++;
				if (q->curband >= q->bands)
					q->curband = 0;
				return skb;
			}
		}
		q->curband++;
		if (q->curband >= q->bands)
			q->curband = 0;
	}
	return NULL;
}

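/*
 * Drop one packet, preferring the lowest-priority band: walk the bands
 * from the last one backwards and use the first child qdisc that
 * implements ->drop() and can give up a packet.
 */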
static unsigned int prio_drop(struct Qdisc *sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;
	unsigned int len;
	struct Qdisc *qdisc;

	for (prio = q->bands-1; prio >= 0; prio--) {
		qdisc = q->queues[prio];
		if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) {
			sch->q.qlen--;
			return len;
		}
	}
	return 0;
}

static void
prio_reset(struct Qdisc *sch)
{
	int prio;
	struct prio_sched_data *q = qdisc_priv(sch);

	for (prio = 0; prio < q->bands; prio++)
		qdisc_reset(q->queues[prio]);
	sch->q.qlen = 0;
}

static void
prio_destroy(struct Qdisc *sch)
{
	int prio;
	struct prio_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(q->filter_list);
	for (prio = 0; prio < q->bands; prio++)
		qdisc_destroy(q->queues[prio]);
}

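/*
 * Apply (or re-apply) the parameters carried in a tc_prio_qopt: the band
 * count and the 16-entry priomap.  With TCA_PRIO_MQ set the qdisc must be
 * root and the band count must match the device's egress subqueue count
 * (0 means "use that count").  Bands that fall outside the new range are
 * torn down under the tree lock, and every active band that still points
 * at noop_qdisc gets a fresh default pfifo child.
 */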
static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct tc_prio_qopt *qopt;
	struct nlattr *tb[TCA_PRIO_MAX + 1];
	int err;
	int i;

	err = nla_parse_nested_compat(tb, TCA_PRIO_MAX, opt, NULL, qopt,
				      sizeof(*qopt));
	if (err < 0)
		return err;

	q->bands = qopt->bands;
	/* If we're multiqueue, make sure the number of incoming bands
	 * matches the number of queues on the device we're associating with.
	 * If the number of bands requested is zero, then set q->bands to
	 * dev->egress_subqueue_count.  Also, the root qdisc must be the
	 * only one that is enabled for multiqueue, since it's the only one
	 * that interacts with the underlying device.
	 */
	q->mq = nla_get_flag(tb[TCA_PRIO_MQ]);
	if (q->mq) {
		if (sch->parent != TC_H_ROOT)
			return -EINVAL;
		if (netif_is_multiqueue(sch->dev)) {
			if (q->bands == 0)
				q->bands = sch->dev->egress_subqueue_count;
			else if (q->bands != sch->dev->egress_subqueue_count)
				return -EINVAL;
		} else
			return -EOPNOTSUPP;
	}

	if (q->bands > TCQ_PRIO_BANDS || q->bands < 2)
		return -EINVAL;

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= q->bands)
			return -EINVAL;
	}

	sch_tree_lock(sch);
	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);

	for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
		struct Qdisc *child = xchg(&q->queues[i], &noop_qdisc);
		if (child != &noop_qdisc) {
			qdisc_tree_decrease_qlen(child, child->q.qlen);
			qdisc_destroy(child);
		}
	}
	sch_tree_unlock(sch);

	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child;
			child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle, i + 1));
			if (child) {
				sch_tree_lock(sch);
				child = xchg(&q->queues[i], child);

				if (child != &noop_qdisc) {
					qdisc_tree_decrease_qlen(child,
								 child->q.qlen);
					qdisc_destroy(child);
				}
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}

static int prio_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int i;

	for (i = 0; i < TCQ_PRIO_BANDS; i++)
		q->queues[i] = &noop_qdisc;

	if (opt == NULL) {
		return -EINVAL;
	} else {
		int err;

		if ((err = prio_tune(sch, opt)) != 0)
			return err;
	}
	return 0;
}

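/*
 * Dump the current configuration back to userspace: the band count and
 * priomap go out as the legacy tc_prio_qopt, wrapped in a compat nest so
 * that the TCA_PRIO_MQ flag can be appended when multiqueue mode is on.
 */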
static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_prio_qopt opt;

	opt.bands = q->bands;
	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1);

	nest = nla_nest_compat_start(skb, TCA_OPTIONS, sizeof(opt), &opt);
	if (nest == NULL)
		goto nla_put_failure;
	if (q->mq) {
		if (nla_put_flag(skb, TCA_PRIO_MQ) < 0)
			goto nla_put_failure;
	}
	nla_nest_compat_end(skb, nest);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

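/*
 * Class operations.  Each band is exposed as a class whose minor number
 * is band+1; prio_graft swaps in a new child qdisc for a band (noop_qdisc
 * when none is given) and hands the old one back to the caller.
 */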
static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		      struct Qdisc **old)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (band >= q->bands)
		return -EINVAL;

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->queues[band];
	q->queues[band] = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *
prio_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (band >= q->bands)
		return NULL;

	return q->queues[band];
}

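/*
 * Class handles are simply the band's minor number, so get/put do no
 * reference counting; change and delete only validate that the class
 * exists, since bands can only be created or removed through prio_tune.
 */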
static unsigned long prio_get(struct Qdisc *sch, u32 classid)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	if (band - 1 >= q->bands)
		return 0;
	return band;
}

static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	return prio_get(sch, classid);
}

static void prio_put(struct Qdisc *q, unsigned long cl)
{
	return;
}

static int prio_change(struct Qdisc *sch, u32 handle, u32 parent, struct nlattr **tca, unsigned long *arg)
{
	unsigned long cl = *arg;
	struct prio_sched_data *q = qdisc_priv(sch);

	if (cl - 1 > q->bands)
		return -ENOENT;
	return 0;
}

static int prio_delete(struct Qdisc *sch, unsigned long cl)
{
	struct prio_sched_data *q = qdisc_priv(sch);

	if (cl - 1 > q->bands)
		return -ENOENT;
	return 0;
}

static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
			   struct tcmsg *tcm)
{
	struct prio_sched_data *q = qdisc_priv(sch);

	if (cl - 1 > q->bands)
		return -ENOENT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	if (q->queues[cl-1])
		tcm->tcm_info = q->queues[cl-1]->handle;
	return 0;
}

static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				 struct gnet_dump *d)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
	    gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
		return -1;

	return 0;
}

static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	if (arg->stop)
		return;

	for (prio = 0; prio < q->bands; prio++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, prio+1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct tcf_proto **prio_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct prio_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static const struct Qdisc_class_ops prio_class_ops = {
	.graft		=	prio_graft,
	.leaf		=	prio_leaf,
	.get		=	prio_get,
	.put		=	prio_put,
	.change		=	prio_change,
	.delete		=	prio_delete,
	.walk		=	prio_walk,
	.tcf_chain	=	prio_find_tcf,
	.bind_tcf	=	prio_bind,
	.unbind_tcf	=	prio_put,
	.dump		=	prio_dump_class,
	.dump_stats	=	prio_dump_class_stats,
};

static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&prio_class_ops,
	.id		=	"prio",
	.priv_size	=	sizeof(struct prio_sched_data),
	.enqueue	=	prio_enqueue,
	.dequeue	=	prio_dequeue,
	.requeue	=	prio_requeue,
	.drop		=	prio_drop,
	.init		=	prio_init,
	.reset		=	prio_reset,
	.destroy	=	prio_destroy,
	.change		=	prio_tune,
	.dump		=	prio_dump,
	.owner		=	THIS_MODULE,
};

static struct Qdisc_ops rr_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&prio_class_ops,
	.id		=	"rr",
	.priv_size	=	sizeof(struct prio_sched_data),
	.enqueue	=	prio_enqueue,
	.dequeue	=	rr_dequeue,
	.requeue	=	prio_requeue,
	.drop		=	prio_drop,
	.init		=	prio_init,
	.reset		=	prio_reset,
	.destroy	=	prio_destroy,
	.change		=	prio_tune,
	.dump		=	prio_dump,
	.owner		=	THIS_MODULE,
};

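/*
 * Module registration: the same implementation is registered twice, as
 * "prio" (strict priority dequeue) and as "rr" (round-robin dequeue).
 * If registering "rr" fails, "prio" is unregistered again so the module
 * load fails cleanly.
 */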
static int __init prio_module_init(void)
{
	int err;

	err = register_qdisc(&prio_qdisc_ops);
	if (err < 0)
		return err;
	err = register_qdisc(&rr_qdisc_ops);
	if (err < 0)
		unregister_qdisc(&prio_qdisc_ops);
	return err;
}

static void __exit prio_module_exit(void)
{
	unregister_qdisc(&prio_qdisc_ops);
	unregister_qdisc(&rr_qdisc_ops);
}

module_init(prio_module_init)
module_exit(prio_module_exit)

MODULE_LICENSE("GPL");
MODULE_ALIAS("sch_rr");