/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

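/* Overview: mqprio is a classful root qdisc that maps skb priorities
 * to traffic classes, and traffic classes to contiguous ranges of
 * hardware transmit queues.  One child qdisc is created per tx queue
 * at init time; the tc-to-queue mapping is either programmed into the
 * netdev directly or, when the "hw" flag is set, delegated to the
 * driver via ndo_setup_tc().
 */
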
struct mqprio_sched {
	struct Qdisc		**qdiscs;	/* per-tx-queue children, used only between init and attach */
	int hw_owned;	/* non-zero when the driver owns the tc-to-queue mapping */
};

static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_destroy(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc)
		dev->netdev_ops->ndo_setup_tc(dev, 0);
	else
		netdev_set_num_tc(dev, 0);
}

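/* At this revision, ndo_setup_tc() takes only the device and the
 * number of traffic classes; calling it with 0 (as above) tears the
 * mapping down.  A minimal driver-side sketch, assuming a hypothetical
 * driver "foo" with a made-up FOO_MAX_TC limit:
 *
 *	static int foo_setup_tc(struct net_device *dev, u8 num_tc)
 *	{
 *		if (num_tc > FOO_MAX_TC)
 *			return -EINVAL;
 *		if (!num_tc) {
 *			netdev_reset_tc(dev);
 *			return 0;
 *		}
 *		return netdev_set_num_tc(dev, num_tc);
 *	}
 *
 * A real driver would also program its per-tc queue ranges with
 * netdev_set_tc_queue() and reconfigure its hardware accordingly.
 */
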
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
	int i, j;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc)
			return -EINVAL;
	}

	/* net_device does not support requested operation */
	if (qopt->hw && !dev->netdev_ops->ndo_setup_tc)
		return -EINVAL;

	/* If hardware owned, qcount and qoffset are taken from the LLD,
	 * so there is no reason to verify them here.
	 */
	if (qopt->hw)
		return 0;

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is in the tx range; last being
		 * equal to real_num_tx_queues indicates the last queue
		 * is in use.
		 */
		if (qopt->offset[i] >= dev->real_num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues)
			return -EINVAL;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j])
				return -EINVAL;
		}
	}

	return 0;
}

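/* Worked example of the checks above (illustrative values): with
 * real_num_tx_queues = 8, num_tc = 2, count = {4, 4}, offset = {0, 4}
 * is accepted: tc0 spans queues 0-3, tc1 spans queues 4-7, and
 * last = 8 == real_num_tx_queues.  The same counts with
 * offset = {0, 2} are rejected because tc0's last queue boundary (4)
 * exceeds tc1's offset (2), i.e. the ranges overlap.
 */
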
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt))
		return -EINVAL;

	/* Pre-allocate the qdiscs so attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (priv->qdiscs == NULL) {
		err = -ENOMEM;
		goto err;
	}

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)));
		if (qdisc == NULL) {
			err = -ENOMEM;
			goto err;
		}
		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE;
	}

	/* If the mqprio options indicate that hardware should own the
	 * queue mapping, run ndo_setup_tc; otherwise use the supplied
	 * and verified mapping.
	 */
	if (qopt->hw) {
		priv->hw_owned = 1;
		err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc);
		if (err)
			goto err;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;

err:
	mqprio_destroy(sch);
	return err;
}

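/* Example configuration via the tc(8) frontend (illustrative; the
 * device name and values are assumptions, and the device needs at
 * least eight real tx queues):
 *
 *	tc qdisc add dev eth0 root handle 1: mqprio num_tc 4 \
 *		map 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 \
 *		queues 2@0 2@2 2@4 2@6 hw 0
 *
 * This requests four traffic classes of two queues each; the 16-entry
 * map fills qopt->prio_tc_map, and "hw 1" would instead hand the
 * queue mapping to the driver via ndo_setup_tc() as in mqprio_init().
 */
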
static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	/* Attach the underlying qdiscs */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_destroy(old);
		if (ntx < dev->real_num_tx_queues)
			qdisc_list_add(qdisc);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1 - netdev_get_num_tc(dev);

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

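/* Class number layout assumed by mqprio_queue_get() and the class ops
 * below: classids 1 .. num_tc are the virtual traffic-class classes,
 * and num_tc + 1 .. num_tc + num_tx_queues address the per-queue
 * classes.  E.g. with 4 TCs on an 8-queue device, cl = 7 selects
 * tx queue 7 - 1 - 4 = 2.
 */
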
static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
		    struct Qdisc **old)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int i;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	for (i = 0; i < dev->num_tx_queues; i++) {
		qdisc = netdev_get_tx_queue(dev, i)->qdisc;
		spin_lock_bh(qdisc_lock(qdisc));
		sch->q.qlen		+= qdisc->q.qlen;
		sch->bstats.bytes	+= qdisc->bstats.bytes;
		sch->bstats.packets	+= qdisc->bstats.packets;
		sch->qstats.qlen	+= qdisc->qstats.qlen;
		sch->qstats.backlog	+= qdisc->qstats.backlog;
		sch->qstats.drops	+= qdisc->qstats.drops;
		sch->qstats.requeues	+= qdisc->qstats.requeues;
		sch->qstats.overlimits	+= qdisc->qstats.overlimits;
		spin_unlock_bh(qdisc_lock(qdisc));
	}

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
	opt.hw = priv->hw_owned;

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

static unsigned long mqprio_get(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	if (ntx > dev->num_tx_queues + netdev_get_num_tc(dev))
		return 0;
	return ntx;
}

static void mqprio_put(struct Qdisc *sch, unsigned long cl)
{
}

static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			 struct sk_buff *skb, struct tcmsg *tcm)
{
	struct net_device *dev = qdisc_dev(sch);

	if (cl <= netdev_get_num_tc(dev)) {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	} else {
		int i;
		struct netdev_queue *dev_queue;

		dev_queue = mqprio_queue_get(sch, cl);
		tcm->tcm_parent = 0;
		for (i = 0; i < netdev_get_num_tc(dev); i++) {
			struct netdev_tc_txq tc = dev->tc_to_txq[i];
			int q_idx = cl - netdev_get_num_tc(dev);

			if (q_idx > tc.offset &&
			    q_idx <= tc.offset + tc.count) {
				tcm->tcm_parent =
					TC_H_MAKE(TC_H_MAJ(sch->handle),
						  TC_H_MIN(i + 1));
				break;
			}
		}
		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

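/* Example of the tcm_parent computation above (illustrative values):
 * q_idx is the 1-based queue number (cl - num_tc).  If tc1 covers
 * queues 2-3 (offset = 2, count = 2), a queue class with q_idx = 3
 * satisfies 3 > 2 && 3 <= 4, so its parent is the virtual class for
 * tc1, TC_H_MIN(1 + 1).
 */
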
static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	struct net_device *dev = qdisc_dev(sch);

	if (cl <= netdev_get_num_tc(dev)) {
		int i;
		struct Qdisc *qdisc;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_packed bstats = {0};
		struct netdev_tc_txq tc = dev->tc_to_txq[cl - 1];

		/* Drop the lock here; it will be reclaimed before touching
		 * statistics.  This is required because the d->lock we hold
		 * here is the lock on dev_queue->qdisc_sleeping, which is
		 * also acquired below.
		 */
		spin_unlock_bh(d->lock);

		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			qdisc = netdev_get_tx_queue(dev, i)->qdisc;
			spin_lock_bh(qdisc_lock(qdisc));
			bstats.bytes      += qdisc->bstats.bytes;
			bstats.packets    += qdisc->bstats.packets;
			qstats.qlen       += qdisc->qstats.qlen;
			qstats.backlog    += qdisc->qstats.backlog;
			qstats.drops      += qdisc->qstats.drops;
			qstats.requeues   += qdisc->qstats.requeues;
			qstats.overlimits += qdisc->qstats.overlimits;
			spin_unlock_bh(qdisc_lock(qdisc));
		}
		/* Reclaim the root sleeping lock before completing stats */
		spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(d, &bstats) < 0 ||
		    gnet_stats_copy_queue(d, &qstats) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = dev_queue->qdisc_sleeping;
		sch->qstats.qlen = sch->q.qlen;
		if (gnet_stats_copy_basic(d, &sch->bstats) < 0 ||
		    gnet_stats_copy_queue(d, &sch->qstats) < 0)
			return -1;
	}
	return 0;
}

static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip;
	     ntx < dev->num_tx_queues + netdev_get_num_tc(dev);
	     ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.get		= mqprio_get,
	.put		= mqprio_put,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops		= &mqprio_class_ops,
	.id		= "mqprio",
	.priv_size	= sizeof(struct mqprio_sched),
	.init		= mqprio_init,
	.destroy	= mqprio_destroy,
	.attach		= mqprio_attach,
	.dump		= mqprio_dump,
	.owner		= THIS_MODULE,
};

static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");