// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 */

#include <linux/ethtool_netlink.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>

#include "sch_mqprio_lib.h"

struct mqprio_sched {
	struct Qdisc **qdiscs;
	u16 mode;
	u16 shaper;
	int hw_offload;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
	u32 fp[TC_QOPT_MAX_QUEUE];
};

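/* Build a struct tc_mqprio_qopt_offload from the parsed options and hand it
 * to the driver via ndo_setup_tc(TC_SETUP_QDISC_MQPRIO). The driver may
 * adjust qopt.hw, and that adjusted value is what is later reported back to
 * user space.
 */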
static int mqprio_enable_offload(struct Qdisc *sch,
				 const struct tc_mqprio_qopt *qopt,
				 struct netlink_ext_ack *extack)
{
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mqprio_qopt_offload mqprio = {
		.qopt = *qopt,
		.extack = extack,
	};
	int err, i;

	switch (priv->mode) {
	case TC_MQPRIO_MODE_DCB:
		if (priv->shaper != TC_MQPRIO_SHAPER_DCB)
			return -EINVAL;
		break;
	case TC_MQPRIO_MODE_CHANNEL:
		mqprio.flags = priv->flags;
		if (priv->flags & TC_MQPRIO_F_MODE)
			mqprio.mode = priv->mode;
		if (priv->flags & TC_MQPRIO_F_SHAPER)
			mqprio.shaper = priv->shaper;
		if (priv->flags & TC_MQPRIO_F_MIN_RATE)
			for (i = 0; i < mqprio.qopt.num_tc; i++)
				mqprio.min_rate[i] = priv->min_rate[i];
		if (priv->flags & TC_MQPRIO_F_MAX_RATE)
			for (i = 0; i < mqprio.qopt.num_tc; i++)
				mqprio.max_rate[i] = priv->max_rate[i];
		break;
	default:
		return -EINVAL;
	}

	mqprio_fp_to_offload(priv->fp, &mqprio);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQPRIO,
					    &mqprio);
	if (err)
		return err;

	priv->hw_offload = mqprio.qopt.hw;

	return 0;
}

static void mqprio_disable_offload(struct Qdisc *sch)
{
	struct tc_mqprio_qopt_offload mqprio = { { 0 } };
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);

	switch (priv->mode) {
	case TC_MQPRIO_MODE_DCB:
	case TC_MQPRIO_MODE_CHANNEL:
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQPRIO,
					      &mqprio);
		break;
	}
}

static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_put(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc)
		mqprio_disable_offload(sch);
	else
		netdev_set_num_tc(dev, 0);
}

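/* Sanity-check the user-supplied struct tc_mqprio_qopt before anything is
 * committed: clamp the requested offload level, validate the queue counts
 * unless the driver prefers to do that itself, and reject hardware offload
 * when the device has no ndo_setup_tc.
 */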
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt,
			    const struct tc_mqprio_caps *caps,
			    struct netlink_ext_ack *extack)
{
	int err;

	/* Limit qopt->hw to maximum supported offload value. Drivers have
	 * the option of overriding this later if they don't support a
	 * given offload type.
	 */
	if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
		qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;

	/* If hardware offload is requested, we will leave 3 options to the
	 * device driver:
	 * - populate the queue counts itself (and ignore what was requested)
	 * - validate the provided queue counts by itself (and apply them)
	 * - request queue count validation here (and apply them)
	 */
	err = mqprio_validate_qopt(dev, qopt,
				   !qopt->hw || caps->validate_queue_counts,
				   false, extack);
	if (err)
		return err;

	/* If ndo_setup_tc is not present then hardware doesn't support offload
	 * and we should return an error.
	 */
	if (qopt->hw && !dev->netdev_ops->ndo_setup_tc) {
		NL_SET_ERR_MSG(extack,
			       "Device does not support hardware offload");
		return -EINVAL;
	}

	return 0;
}

static const struct
nla_policy mqprio_tc_entry_policy[TCA_MQPRIO_TC_ENTRY_MAX + 1] = {
	[TCA_MQPRIO_TC_ENTRY_INDEX]	= NLA_POLICY_MAX(NLA_U32,
							 TC_QOPT_MAX_QUEUE),
	[TCA_MQPRIO_TC_ENTRY_FP]	= NLA_POLICY_RANGE(NLA_U32,
							   TC_FP_EXPRESS,
							   TC_FP_PREEMPTIBLE),
};

static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = {
	[TCA_MQPRIO_MODE]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_SHAPER]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_MIN_RATE64]	= { .type = NLA_NESTED },
	[TCA_MQPRIO_MAX_RATE64]	= { .type = NLA_NESTED },
	[TCA_MQPRIO_TC_ENTRY]	= { .type = NLA_NESTED },
};

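/* Each TCA_MQPRIO_TC_ENTRY nest describes a single traffic class:
 * TCA_MQPRIO_TC_ENTRY_INDEX selects the tc (mandatory) and
 * TCA_MQPRIO_TC_ENTRY_FP marks it express or preemptible (optional).
 * Duplicate indices are rejected using the seen_tcs bitmap.
 */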
static int mqprio_parse_tc_entry(u32 fp[TC_QOPT_MAX_QUEUE],
				 struct nlattr *opt,
				 unsigned long *seen_tcs,
				 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_MQPRIO_TC_ENTRY_MAX + 1];
	int err, tc;

	err = nla_parse_nested(tb, TCA_MQPRIO_TC_ENTRY_MAX, opt,
			       mqprio_tc_entry_policy, extack);
	if (err < 0)
		return err;

	if (NL_REQ_ATTR_CHECK(extack, opt, tb, TCA_MQPRIO_TC_ENTRY_INDEX)) {
		NL_SET_ERR_MSG(extack, "TC entry index missing");
		return -EINVAL;
	}

	tc = nla_get_u32(tb[TCA_MQPRIO_TC_ENTRY_INDEX]);
	if (*seen_tcs & BIT(tc)) {
		NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_TC_ENTRY_INDEX],
				    "Duplicate tc entry");
		return -EINVAL;
	}

	*seen_tcs |= BIT(tc);

	if (tb[TCA_MQPRIO_TC_ENTRY_FP])
		fp[tc] = nla_get_u32(tb[TCA_MQPRIO_TC_ENTRY_FP]);

	return 0;
}

static int mqprio_parse_tc_entries(struct Qdisc *sch, struct nlattr *nlattr_opt,
				   int nlattr_opt_len,
				   struct netlink_ext_ack *extack)
{
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	bool have_preemption = false;
	unsigned long seen_tcs = 0;
	u32 fp[TC_QOPT_MAX_QUEUE];
	struct nlattr *n;
	int tc, rem;
	int err = 0;

	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
		fp[tc] = priv->fp[tc];

	nla_for_each_attr(n, nlattr_opt, nlattr_opt_len, rem) {
		if (nla_type(n) != TCA_MQPRIO_TC_ENTRY)
			continue;

		err = mqprio_parse_tc_entry(fp, n, &seen_tcs, extack);
		if (err)
			goto out;
	}

	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
		priv->fp[tc] = fp[tc];
		if (fp[tc] == TC_FP_PREEMPTIBLE)
			have_preemption = true;
	}

	if (have_preemption && !ethtool_dev_mm_supported(dev)) {
		NL_SET_ERR_MSG(extack, "Device does not support preemption");
		return -EOPNOTSUPP;
	}
out:
	return err;
}

/* Parse the other netlink attributes that represent the payload of
 * TCA_OPTIONS, which are appended right after struct tc_mqprio_qopt.
 */
static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt,
			       struct nlattr *opt,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *nlattr_opt = nla_data(opt) + NLA_ALIGN(sizeof(*qopt));
	int nlattr_opt_len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct nlattr *tb[TCA_MQPRIO_MAX + 1] = {};
	struct nlattr *attr;
	int i, rem, err;

	if (nlattr_opt_len >= nla_attr_size(0)) {
		err = nla_parse_deprecated(tb, TCA_MQPRIO_MAX, nlattr_opt,
					   nlattr_opt_len, mqprio_policy,
					   NULL);
		if (err < 0)
			return err;
	}

	if (!qopt->hw) {
		NL_SET_ERR_MSG(extack,
			       "mqprio TCA_OPTIONS can only contain netlink attributes in hardware mode");
		return -EINVAL;
	}

	if (tb[TCA_MQPRIO_MODE]) {
		priv->flags |= TC_MQPRIO_F_MODE;
		priv->mode = nla_get_u16(tb[TCA_MQPRIO_MODE]);
	}

	if (tb[TCA_MQPRIO_SHAPER]) {
		priv->flags |= TC_MQPRIO_F_SHAPER;
		priv->shaper = nla_get_u16(tb[TCA_MQPRIO_SHAPER]);
	}

	if (tb[TCA_MQPRIO_MIN_RATE64]) {
		if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
			NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MIN_RATE64],
					    "min_rate accepted only when shaper is in bw_rlimit mode");
			return -EINVAL;
		}
		i = 0;
		nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
				    rem) {
			if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64) {
				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute type expected to be TCA_MQPRIO_MIN_RATE64");
				return -EINVAL;
			}
			if (i >= qopt->num_tc)
				break;
			priv->min_rate[i] = nla_get_u64(attr);
			i++;
		}
		priv->flags |= TC_MQPRIO_F_MIN_RATE;
	}

	if (tb[TCA_MQPRIO_MAX_RATE64]) {
		if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
			NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MAX_RATE64],
					    "max_rate accepted only when shaper is in bw_rlimit mode");
			return -EINVAL;
		}
		i = 0;
		nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
				    rem) {
			if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64) {
				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute type expected to be TCA_MQPRIO_MAX_RATE64");
				return -EINVAL;
			}
			if (i >= qopt->num_tc)
				break;
			priv->max_rate[i] = nla_get_u64(attr);
			i++;
		}
		priv->flags |= TC_MQPRIO_F_MAX_RATE;
	}

	if (tb[TCA_MQPRIO_TC_ENTRY]) {
		err = mqprio_parse_tc_entries(sch, nlattr_opt, nlattr_opt_len,
					      extack);
		if (err)
			return err;
	}

	return 0;
}

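/* Qdisc setup: validate the qopt, parse the attributes that follow it,
 * pre-allocate one child qdisc per TX queue, then either hand the queue
 * mapping to the driver (hardware offload) or program it in software with
 * netdev_set_num_tc()/netdev_set_tc_queue().
 *
 * Illustrative iproute2 invocation (software mode, assumes a device with at
 * least four TX queues; "eth0" is a placeholder):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 3 \
 *	map 0 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 \
 *	queues 1@0 1@1 2@2 hw 0
 */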
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;
	struct tc_mqprio_caps caps;
	int len, tc;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* make certain we can allocate enough classids to handle queues */
	if (dev->num_tx_queues >= TC_H_MIN_PRIORITY)
		return -ENOMEM;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
		priv->fp[tc] = TC_FP_EXPRESS;

	qdisc_offload_query_caps(dev, TC_SETUP_QDISC_MQPRIO,
				 &caps, sizeof(caps));

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt, &caps, extack))
		return -EINVAL;

	len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
	if (len > 0) {
		err = mqprio_parse_nlattr(sch, qopt, opt, extack);
		if (err)
			return err;
	}

	/* pre-allocate qdisc, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  get_default_qdisc_ops(dev, i),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)), extack);
		if (!qdisc)
			return -ENOMEM;

		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	/* If the mqprio options indicate that hardware should own
	 * the queue mapping then run ndo_setup_tc; otherwise use the
	 * supplied and verified mapping.
	 */
	if (qopt->hw) {
		err = mqprio_enable_offload(sch, qopt, extack);
		if (err)
			return err;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;
}

static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_put(old);
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
			struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

static int dump_rates(struct mqprio_sched *priv,
		      struct tc_mqprio_qopt *opt, struct sk_buff *skb)
{
	struct nlattr *nest;
	int i;

	if (priv->flags & TC_MQPRIO_F_MIN_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MIN_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MIN_RATE64,
				    sizeof(priv->min_rate[i]),
				    &priv->min_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}

	if (priv->flags & TC_MQPRIO_F_MAX_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MAX_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MAX_RATE64,
				    sizeof(priv->max_rate[i]),
				    &priv->max_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int mqprio_dump_tc_entries(struct mqprio_sched *priv,
				  struct sk_buff *skb)
{
	struct nlattr *n;
	int tc;

	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
		n = nla_nest_start(skb, TCA_MQPRIO_TC_ENTRY);
		if (!n)
			return -EMSGSIZE;

		if (nla_put_u32(skb, TCA_MQPRIO_TC_ENTRY_INDEX, tc))
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_MQPRIO_TC_ENTRY_FP, priv->fp[tc]))
			goto nla_put_failure;

		nla_nest_end(skb, n);
	}

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, n);
	return -EMSGSIZE;
}

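/* Dump path: sum basic and queue statistics over every child qdisc, rebuild
 * a struct tc_mqprio_qopt from the current netdev tc configuration, report
 * the offload level negotiated with the driver, and append the mode/shaper,
 * rate and per-tc entry attributes.
 */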
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int ntx;

	sch->q.qlen = 0;
	gnet_stats_basic_sync_init(&sch->bstats);
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to counters in-band and locking
	 * qdisc totals are added at end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));

		gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
				     &qdisc->bstats, false);
		gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
				     &qdisc->qstats);
		sch->q.qlen += qdisc_qlen(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}

	mqprio_qopt_reconstruct(dev, &opt);
	opt.hw = priv->hw_offload;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MODE) &&
	    nla_put_u16(skb, TCA_MQPRIO_MODE, priv->mode))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_SHAPER) &&
	    nla_put_u16(skb, TCA_MQPRIO_SHAPER, priv->shaper))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MIN_RATE ||
	     priv->flags & TC_MQPRIO_F_MAX_RATE) &&
	    (dump_rates(priv, &opt, skb) != 0))
		goto nla_put_failure;

	if (mqprio_dump_tc_entries(priv, skb))
		goto nla_put_failure;

	return nla_nest_end(skb, nla);
nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	/* There are essentially two regions here that have valid classid
	 * values. The first region will have a classid value of 1 through
	 * num_tx_queues. All of these are backed by actual Qdiscs.
	 */
	if (ntx < TC_H_MIN_PRIORITY)
		return (ntx <= dev->num_tx_queues) ? ntx : 0;

	/* The second region represents the hardware traffic classes. These
	 * are represented by classid values of TC_H_MIN_PRIORITY through
	 * TC_H_MIN_PRIORITY + netdev_get_num_tc - 1
	 */
	return ((ntx - TC_H_MIN_PRIORITY) < netdev_get_num_tc(dev)) ? ntx : 0;
}

static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	if (cl < TC_H_MIN_PRIORITY) {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
		struct net_device *dev = qdisc_dev(sch);
		int tc = netdev_txq_to_tc(dev, cl - 1);

		tcm->tcm_parent = (tc < 0) ? 0 :
			TC_H_MAKE(TC_H_MAJ(sch->handle),
				  TC_H_MIN(tc + TC_H_MIN_PRIORITY));
		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	} else {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

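/* Per-class statistics. Class ids at or above TC_H_MIN_PRIORITY are the
 * virtual traffic-class classes: their counters are aggregated over every
 * TX queue belonging to that tc. Lower class ids map 1:1 to a TX queue and
 * report that queue's child qdisc directly.
 */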
static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	if (cl >= TC_H_MIN_PRIORITY) {
		int i;
		__u32 qlen;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_sync bstats;
		struct net_device *dev = qdisc_dev(sch);
		struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];

		gnet_stats_basic_sync_init(&bstats);
		/* Drop the lock here; it will be reclaimed before touching
		 * statistics. This is required because the d->lock we
		 * hold here is the lock on dev_queue->qdisc_sleeping,
		 * which is also acquired below.
		 */
		if (d->lock)
			spin_unlock_bh(d->lock);

		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			struct netdev_queue *q = netdev_get_tx_queue(dev, i);
			struct Qdisc *qdisc = rtnl_dereference(q->qdisc);

			spin_lock_bh(qdisc_lock(qdisc));

			gnet_stats_add_basic(&bstats, qdisc->cpu_bstats,
					     &qdisc->bstats, false);
			gnet_stats_add_queue(&qstats, qdisc->cpu_qstats,
					     &qdisc->qstats);
			sch->q.qlen += qdisc_qlen(qdisc);

			spin_unlock_bh(qdisc_lock(qdisc));
		}
		qlen = qdisc_qlen(sch) + qstats.qlen;

		/* Reclaim root sleeping lock before completing stats */
		if (d->lock)
			spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(d, NULL, &bstats, false) < 0 ||
		    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = dev_queue->qdisc_sleeping;
		if (gnet_stats_copy_basic(d, sch->cpu_bstats,
					  &sch->bstats, true) < 0 ||
		    qdisc_qstats_copy(d, sch) < 0)
			return -1;
	}
	return 0;
}

static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < netdev_get_num_tc(dev); ntx++) {
		if (!tc_qdisc_stats_dump(sch, ntx + TC_H_MIN_PRIORITY, arg))
			return;
	}

	/* Pad the values and skip over unused traffic classes */
	if (ntx < TC_MAX_QUEUE) {
		arg->count = TC_MAX_QUEUE;
		ntx = TC_MAX_QUEUE;
	}

	/* Reset offset, sort out remaining per-queue qdiscs */
	for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}
}

static struct netdev_queue *mqprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return mqprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.find		= mqprio_find,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
	.select_queue	= mqprio_select_queue,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops		= &mqprio_class_ops,
	.id		= "mqprio",
	.priv_size	= sizeof(struct mqprio_sched),
	.init		= mqprio_init,
	.destroy	= mqprio_destroy,
	.attach		= mqprio_attach,
	.change_real_num_tx = mq_change_real_num_tx,
	.dump		= mqprio_dump,
	.owner		= THIS_MODULE,
};

static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");