// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>

#include "sch_mqprio_lib.h"

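/* mqprio maps skb priorities to hardware traffic classes and attaches one
 * child qdisc per TX queue.  As a purely illustrative example (interface
 * name and queue layout are made up), a configuration with three traffic
 * classes over eight TX queues could be requested from user space with:
 *
 *   tc qdisc add dev eth0 root handle 1: mqprio num_tc 3 \
 *      map 0 0 1 1 2 2 2 2 0 0 0 0 0 0 0 0 \
 *      queues 2@0 2@2 4@4 hw 0
 *
 * struct mqprio_sched below is the per-qdisc private state (see qdisc_priv()).
 */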
struct mqprio_sched {
	struct Qdisc		**qdiscs;
	u16 mode;
	u16 shaper;
	int hw_offload;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

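/* Hand the validated configuration to the driver via
 * ndo_setup_tc(TC_SETUP_QDISC_MQPRIO).  In channel mode the per-TC min/max
 * rates and the mode/shaper selection are copied into the offload request;
 * on success the offload level accepted by the driver is recorded in
 * priv->hw_offload.
 */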
static int mqprio_enable_offload(struct Qdisc *sch,
				 const struct tc_mqprio_qopt *qopt,
				 struct netlink_ext_ack *extack)
{
	struct tc_mqprio_qopt_offload mqprio = {.qopt = *qopt};
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int err, i;

	switch (priv->mode) {
	case TC_MQPRIO_MODE_DCB:
		if (priv->shaper != TC_MQPRIO_SHAPER_DCB)
			return -EINVAL;
		break;
	case TC_MQPRIO_MODE_CHANNEL:
		mqprio.flags = priv->flags;
		if (priv->flags & TC_MQPRIO_F_MODE)
			mqprio.mode = priv->mode;
		if (priv->flags & TC_MQPRIO_F_SHAPER)
			mqprio.shaper = priv->shaper;
		if (priv->flags & TC_MQPRIO_F_MIN_RATE)
			for (i = 0; i < mqprio.qopt.num_tc; i++)
				mqprio.min_rate[i] = priv->min_rate[i];
		if (priv->flags & TC_MQPRIO_F_MAX_RATE)
			for (i = 0; i < mqprio.qopt.num_tc; i++)
				mqprio.max_rate[i] = priv->max_rate[i];
		break;
	default:
		return -EINVAL;
	}

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQPRIO,
					    &mqprio);
	if (err)
		return err;

	priv->hw_offload = mqprio.qopt.hw;

	return 0;
}

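/* Tell the driver to tear down the previously offloaded configuration by
 * passing an all-zero tc_mqprio_qopt_offload to ndo_setup_tc().
 */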
static void mqprio_disable_offload(struct Qdisc *sch)
{
	struct tc_mqprio_qopt_offload mqprio = { { 0 } };
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);

	switch (priv->mode) {
	case TC_MQPRIO_MODE_DCB:
	case TC_MQPRIO_MODE_CHANNEL:
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQPRIO,
					      &mqprio);
		break;
	}
}

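/* Release any pre-allocated child qdiscs that were never attached, then
 * either undo the hardware offload or clear the software traffic class
 * mapping on the device.
 */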
static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_put(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc)
		mqprio_disable_offload(sch);
	else
		netdev_set_num_tc(dev, 0);
}

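/* Basic sanity checking of the legacy struct tc_mqprio_qopt: clamp the
 * requested offload level, validate the priority map and (depending on the
 * device capabilities) the queue counts, and reject hardware offload when
 * the driver has no ndo_setup_tc.
 */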
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt,
			    const struct tc_mqprio_caps *caps,
			    struct netlink_ext_ack *extack)
{
	int err;

	/* Limit qopt->hw to the maximum supported offload value.  Drivers
	 * have the option of overriding this later if they don't support a
	 * given offload type.
	 */
	if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
		qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;

	/* If hardware offload is requested, we will leave 3 options to the
	 * device driver:
	 * - populate the queue counts itself (and ignore what was requested)
	 * - validate the provided queue counts by itself (and apply them)
	 * - request queue count validation here (and apply them)
	 */
	err = mqprio_validate_qopt(dev, qopt,
				   !qopt->hw || caps->validate_queue_counts,
				   false, extack);
	if (err)
		return err;

	/* If ndo_setup_tc is not present then hardware doesn't support offload
	 * and we should return an error.
	 */
	if (qopt->hw && !dev->netdev_ops->ndo_setup_tc)
		return -EINVAL;

	return 0;
}

static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = {
	[TCA_MQPRIO_MODE]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_SHAPER]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_MIN_RATE64]	= { .type = NLA_NESTED },
	[TCA_MQPRIO_MAX_RATE64]	= { .type = NLA_NESTED },
};

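/* The TCA_OPTIONS payload for mqprio is a struct tc_mqprio_qopt optionally
 * followed by nested TCA_MQPRIO_* attributes.  parse_attr() skips over the
 * fixed-size struct and parses whatever attributes follow it, if any.
 */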
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len >= nla_attr_size(0))
		return nla_parse_deprecated(tb, maxtype,
					    nla_data(nla) + NLA_ALIGN(len),
					    nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

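/* Parse the extended attributes (mode, shaper, per-TC rates) that may follow
 * the legacy struct.  These only make sense together with hardware offload,
 * and the rate arrays are only accepted for the bandwidth-rate shaper.
 */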
static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt,
			       struct nlattr *opt)
{
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct nlattr *tb[TCA_MQPRIO_MAX + 1];
	struct nlattr *attr;
	int i, rem, err;

	err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy,
			 sizeof(*qopt));
	if (err < 0)
		return err;

	if (!qopt->hw)
		return -EINVAL;

	if (tb[TCA_MQPRIO_MODE]) {
		priv->flags |= TC_MQPRIO_F_MODE;
		priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]);
	}

	if (tb[TCA_MQPRIO_SHAPER]) {
		priv->flags |= TC_MQPRIO_F_SHAPER;
		priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]);
	}

	if (tb[TCA_MQPRIO_MIN_RATE64]) {
		if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
			return -EINVAL;
		i = 0;
		nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
				    rem) {
			if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64)
				return -EINVAL;
			/* reject undersized attributes so the u64 read below
			 * cannot run past the attribute payload
			 */
			if (nla_len(attr) != sizeof(u64))
				return -EINVAL;
			if (i >= qopt->num_tc)
				break;
			priv->min_rate[i] = *(u64 *)nla_data(attr);
			i++;
		}
		priv->flags |= TC_MQPRIO_F_MIN_RATE;
	}

	if (tb[TCA_MQPRIO_MAX_RATE64]) {
		if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
			return -EINVAL;
		i = 0;
		nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
				    rem) {
			if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64)
				return -EINVAL;
			/* reject undersized attributes so the u64 read below
			 * cannot run past the attribute payload
			 */
			if (nla_len(attr) != sizeof(u64))
				return -EINVAL;
			if (i >= qopt->num_tc)
				break;
			priv->max_rate[i] = *(u64 *)nla_data(attr);
			i++;
		}
		priv->flags |= TC_MQPRIO_F_MAX_RATE;
	}

	return 0;
}

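/* Qdisc init: mqprio must be the root qdisc of a multiqueue device.  It
 * parses and validates the user configuration, pre-allocates one default
 * child qdisc per TX queue (actually attached later in mqprio_attach()), and
 * then programs the traffic class layout either into the hardware via
 * ndo_setup_tc() or into the netdev's software tc mapping.
 */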
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;
	struct tc_mqprio_caps caps;
	int len;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* make certain we can allocate enough classids to handle the queues */
	if (dev->num_tx_queues >= TC_H_MIN_PRIORITY)
		return -ENOMEM;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qdisc_offload_query_caps(dev, TC_SETUP_QDISC_MQPRIO,
				 &caps, sizeof(caps));

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt, &caps, extack))
		return -EINVAL;

	len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
	if (len > 0) {
		err = mqprio_parse_nlattr(sch, qopt, opt);
		if (err)
			return err;
	}

	/* pre-allocate qdisc, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  get_default_qdisc_ops(dev, i),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)), extack);
		if (!qdisc)
			return -ENOMEM;

		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	/* If the mqprio options indicate that hardware should own the queue
	 * mapping, then run ndo_setup_tc; otherwise use the supplied and
	 * verified mapping.
	 */
	if (qopt->hw) {
		err = mqprio_enable_offload(sch, qopt, extack);
		if (err)
			return err;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;
}

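/* Graft the pre-allocated child qdiscs onto their TX queues.  Once attached,
 * the qdiscs are owned by the queues, so the temporary priv->qdiscs array is
 * freed.
 */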
static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_put(old);
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

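/* Class ids 1..num_tx_queues map directly onto TX queues (classid minus one
 * is the queue index); anything outside that range has no backing queue.
 */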
static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

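/* Replace the child qdisc of one TX queue.  The device is quiesced around
 * the graft if it is currently up.
 */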
static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
			struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

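/* Emit the per-TC minimum and maximum rates as nested
 * TCA_MQPRIO_MIN_RATE64 / TCA_MQPRIO_MAX_RATE64 attributes.
 */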
static int dump_rates(struct mqprio_sched *priv,
		      struct tc_mqprio_qopt *opt, struct sk_buff *skb)
{
	struct nlattr *nest;
	int i;

	if (priv->flags & TC_MQPRIO_F_MIN_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MIN_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MIN_RATE64,
				    sizeof(priv->min_rate[i]),
				    &priv->min_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}

	if (priv->flags & TC_MQPRIO_F_MAX_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MAX_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MAX_RATE64,
				    sizeof(priv->max_rate[i]),
				    &priv->max_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

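/* Dump the qdisc configuration back to user space.  The qdisc-level stats
 * are refreshed here by summing the per-queue child qdiscs before the
 * tc_mqprio_qopt and any extended attributes are emitted.
 */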
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int ntx;

	sch->q.qlen = 0;
	gnet_stats_basic_sync_init(&sch->bstats);
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to counters in-band, and locking
	 * qdisc totals are added at the end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));

		gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
				     &qdisc->bstats, false);
		gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
				     &qdisc->qstats);
		sch->q.qlen += qdisc_qlen(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}

	mqprio_qopt_reconstruct(dev, &opt);
	opt.hw = priv->hw_offload;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MODE) &&
	    nla_put_u16(skb, TCA_MQPRIO_MODE, priv->mode))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_SHAPER) &&
	    nla_put_u16(skb, TCA_MQPRIO_SHAPER, priv->shaper))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MIN_RATE ||
	     priv->flags & TC_MQPRIO_F_MAX_RATE) &&
	    (dump_rates(priv, &opt, skb) != 0))
		goto nla_put_failure;

	return nla_nest_end(skb, nla);
nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	/* There are essentially two regions here that have valid classid
	 * values. The first region will have a classid value of 1 through
	 * num_tx_queues. All of these are backed by actual Qdiscs.
	 */
	if (ntx < TC_H_MIN_PRIORITY)
		return (ntx <= dev->num_tx_queues) ? ntx : 0;

	/* The second region represents the hardware traffic classes. These
	 * are represented by classid values of TC_H_MIN_PRIORITY through
	 * TC_H_MIN_PRIORITY + netdev_get_num_tc - 1
	 */
	return ((ntx - TC_H_MIN_PRIORITY) < netdev_get_num_tc(dev)) ? ntx : 0;
}

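/* Dump one class.  Classes below TC_H_MIN_PRIORITY are the per-queue leaves
 * and report their hardware traffic class as parent; the remaining classes
 * are the traffic classes themselves, parented at the root.
 */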
static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			 struct sk_buff *skb, struct tcmsg *tcm)
{
	if (cl < TC_H_MIN_PRIORITY) {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
		struct net_device *dev = qdisc_dev(sch);
		int tc = netdev_txq_to_tc(dev, cl - 1);

		tcm->tcm_parent = (tc < 0) ? 0 :
			TC_H_MAKE(TC_H_MAJ(sch->handle),
				  TC_H_MIN(tc + TC_H_MIN_PRIORITY));
		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	} else {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

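/* Class statistics.  For a traffic class, the stats of all TX queues mapped
 * to that class are aggregated; for a per-queue leaf class, the child
 * qdisc's own counters are copied out.
 */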
static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	if (cl >= TC_H_MIN_PRIORITY) {
		int i;
		__u32 qlen;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_sync bstats;
		struct net_device *dev = qdisc_dev(sch);
		struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];

		gnet_stats_basic_sync_init(&bstats);
		/* Drop the lock here; it will be reclaimed before touching
		 * statistics.  This is required because the d->lock we hold
		 * here is the lock on dev_queue->qdisc_sleeping, which is
		 * also acquired below.
		 */
		if (d->lock)
			spin_unlock_bh(d->lock);

		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			struct netdev_queue *q = netdev_get_tx_queue(dev, i);
			struct Qdisc *qdisc = rtnl_dereference(q->qdisc);

			spin_lock_bh(qdisc_lock(qdisc));

			gnet_stats_add_basic(&bstats, qdisc->cpu_bstats,
					     &qdisc->bstats, false);
			gnet_stats_add_queue(&qstats, qdisc->cpu_qstats,
					     &qdisc->qstats);
			sch->q.qlen += qdisc_qlen(qdisc);

			spin_unlock_bh(qdisc_lock(qdisc));
		}
		qlen = qdisc_qlen(sch) + qstats.qlen;

		/* Reclaim root sleeping lock before completing stats */
		if (d->lock)
			spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(d, NULL, &bstats, false) < 0 ||
		    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = dev_queue->qdisc_sleeping;
		if (gnet_stats_copy_basic(d, sch->cpu_bstats,
					  &sch->bstats, true) < 0 ||
		    qdisc_qstats_copy(d, sch) < 0)
			return -1;
	}
	return 0;
}

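/* Walk all classes: first the virtual per-TC classes (offset by
 * TC_H_MIN_PRIORITY), then the per-queue leaf classes.
 */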
static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < netdev_get_num_tc(dev); ntx++) {
		if (!tc_qdisc_stats_dump(sch, ntx + TC_H_MIN_PRIORITY, arg))
			return;
	}

	/* Pad the values and skip over unused traffic classes */
	if (ntx < TC_MAX_QUEUE) {
		arg->count = TC_MAX_QUEUE;
		ntx = TC_MAX_QUEUE;
	}

	/* Reset offset, sort out remaining per-queue qdiscs */
	for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}
}

static struct netdev_queue *mqprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return mqprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.find		= mqprio_find,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
	.select_queue	= mqprio_select_queue,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops		= &mqprio_class_ops,
	.id		= "mqprio",
	.priv_size	= sizeof(struct mqprio_sched),
	.init		= mqprio_init,
	.destroy	= mqprio_destroy,
	.attach		= mqprio_attach,
	.change_real_num_tx = mq_change_real_num_tx,
	.dump		= mqprio_dump,
	.owner		= THIS_MODULE,
};

static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");