net/sched/sch_taprio.c — diff between commit faa5f5da809b690542e1108ba66886574ac57d2c (old) and commit 13511704f8d7591faf19fdb84f0902dff0535ccb (new)
1// SPDX-License-Identifier: GPL-2.0
2
3/* net/sched/sch_taprio.c Time Aware Priority Scheduler
4 *
5 * Authors: Vinicius Costa Gomes <vinicius.gomes@intel.com>
6 *
7 */
8

--- 421 unchanged lines hidden (view full) ---

430
431static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
432 struct sk_buff **to_free)
433{
434 struct taprio_sched *q = qdisc_priv(sch);
435 struct Qdisc *child;
436 int queue;
437
1// SPDX-License-Identifier: GPL-2.0
2
3/* net/sched/sch_taprio.c Time Aware Priority Scheduler
4 *
5 * Authors: Vinicius Costa Gomes <vinicius.gomes@intel.com>
6 *
7 */
8

--- 421 unchanged lines hidden (view full) ---

430
431static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
432 struct sk_buff **to_free)
433{
434 struct taprio_sched *q = qdisc_priv(sch);
435 struct Qdisc *child;
436 int queue;
437
438 if (unlikely(FULL_OFFLOAD_IS_ENABLED(q->flags))) {
439 WARN_ONCE(1, "Trying to enqueue skb into the root of a taprio qdisc configured with full offload\n");
440 return qdisc_drop(skb, sch, to_free);
441 }
442
438 queue = skb_get_queue_mapping(skb);
439
440 child = q->qdiscs[queue];
441 if (unlikely(!child))
442 return qdisc_drop(skb, sch, to_free);
443
444 /* Large packets might not be transmitted when the transmission duration
445 * exceeds any configured interval. Therefore, segment the skb into

--- 75 unchanged lines hidden (view full) ---

521 return skb;
522 }
523
524 return NULL;
525}
526
527static struct sk_buff *taprio_peek_offload(struct Qdisc *sch)
528{
443 queue = skb_get_queue_mapping(skb);
444
445 child = q->qdiscs[queue];
446 if (unlikely(!child))
447 return qdisc_drop(skb, sch, to_free);
448
449 /* Large packets might not be transmitted when the transmission duration
450 * exceeds any configured interval. Therefore, segment the skb into

--- 75 unchanged lines hidden (view full) ---

526 return skb;
527 }
528
529 return NULL;
530}
531
532static struct sk_buff *taprio_peek_offload(struct Qdisc *sch)
533{
529 struct taprio_sched *q = qdisc_priv(sch);
530 struct net_device *dev = qdisc_dev(sch);
531 struct sk_buff *skb;
532 int i;
534 WARN_ONCE(1, "Trying to peek into the root of a taprio qdisc configured with full offload\n");
533
535
534 for (i = 0; i < dev->num_tx_queues; i++) {
535 struct Qdisc *child = q->qdiscs[i];
536
537 if (unlikely(!child))
538 continue;
539
540 skb = child->ops->peek(child);
541 if (!skb)
542 continue;
543
544 return skb;
545 }
546
547 return NULL;
548}
549
550static struct sk_buff *taprio_peek(struct Qdisc *sch)
551{
552 struct taprio_sched *q = qdisc_priv(sch);
553
554 return q->peek(sch);

--- 91 unchanged lines hidden (view full) ---

646done:
647 rcu_read_unlock();
648
649 return skb;
650}
651
652static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch)
653{
536 return NULL;
537}
538
539static struct sk_buff *taprio_peek(struct Qdisc *sch)
540{
541 struct taprio_sched *q = qdisc_priv(sch);
542
543 return q->peek(sch);

--- 91 unchanged lines hidden (view full) ---

635done:
636 rcu_read_unlock();
637
638 return skb;
639}
640
641static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch)
642{
654 struct taprio_sched *q = qdisc_priv(sch);
655 struct net_device *dev = qdisc_dev(sch);
656 struct sk_buff *skb;
657 int i;
643 WARN_ONCE(1, "Trying to dequeue from the root of a taprio qdisc configured with full offload\n");
658
644
659 for (i = 0; i < dev->num_tx_queues; i++) {
660 struct Qdisc *child = q->qdiscs[i];
661
662 if (unlikely(!child))
663 continue;
664
665 skb = child->ops->dequeue(child);
666 if (unlikely(!skb))
667 continue;
668
669 qdisc_bstats_update(sch, skb);
670 qdisc_qstats_backlog_dec(sch, skb);
671 sch->q.qlen--;
672
673 return skb;
674 }
675
676 return NULL;
677}
678
679static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
680{
681 struct taprio_sched *q = qdisc_priv(sch);
682
683 return q->dequeue(sch);

--- 1067 unchanged lines hidden (view full) ---

1751 qdisc_hash_add(qdisc, false);
1752
1753 q->qdiscs[i] = qdisc;
1754 }
1755
1756 return taprio_change(sch, opt, extack);
1757}
1758
645 return NULL;
646}
647
648static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
649{
650 struct taprio_sched *q = qdisc_priv(sch);
651
652 return q->dequeue(sch);

--- 1067 unchanged lines hidden (view full) ---

1720 qdisc_hash_add(qdisc, false);
1721
1722 q->qdiscs[i] = qdisc;
1723 }
1724
1725 return taprio_change(sch, opt, extack);
1726}
1727
1728static void taprio_attach(struct Qdisc *sch)
1729{
1730 struct taprio_sched *q = qdisc_priv(sch);
1731 struct net_device *dev = qdisc_dev(sch);
1732 unsigned int ntx;
1733
1734 /* Attach underlying qdisc */
1735 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
1736 struct Qdisc *qdisc = q->qdiscs[ntx];
1737 struct Qdisc *old;
1738
1739 if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
1740 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1741 old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
1742 if (ntx < dev->real_num_tx_queues)
1743 qdisc_hash_add(qdisc, false);
1744 } else {
1745 old = dev_graft_qdisc(qdisc->dev_queue, sch);
1746 qdisc_refcount_inc(sch);
1747 }
1748 if (old)
1749 qdisc_put(old);
1750 }
1751
1752 /* access to the child qdiscs is not needed in offload mode */
1753 if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
1754 kfree(q->qdiscs);
1755 q->qdiscs = NULL;
1756 }
1757}
1758
1759static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
1760 unsigned long cl)
1761{
1762 struct net_device *dev = qdisc_dev(sch);
1763 unsigned long ntx = cl - 1;
1764
1765 if (ntx >= dev->num_tx_queues)
1766 return NULL;

--- 10 unchanged lines hidden (view full) ---

1777 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
1778
1779 if (!dev_queue)
1780 return -EINVAL;
1781
1782 if (dev->flags & IFF_UP)
1783 dev_deactivate(dev);
1784
1759static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
1760 unsigned long cl)
1761{
1762 struct net_device *dev = qdisc_dev(sch);
1763 unsigned long ntx = cl - 1;
1764
1765 if (ntx >= dev->num_tx_queues)
1766 return NULL;

--- 10 unchanged lines hidden (view full) ---

1777 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
1778
1779 if (!dev_queue)
1780 return -EINVAL;
1781
1782 if (dev->flags & IFF_UP)
1783 dev_deactivate(dev);
1784
1785 *old = q->qdiscs[cl - 1];
1786 q->qdiscs[cl - 1] = new;
1785 if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
1786 *old = dev_graft_qdisc(dev_queue, new);
1787 } else {
1788 *old = q->qdiscs[cl - 1];
1789 q->qdiscs[cl - 1] = new;
1790 }
1787
1788 if (new)
1789 new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1790
1791 if (dev->flags & IFF_UP)
1792 dev_activate(dev);
1793
1794 return 0;

--- 217 unchanged lines hidden (view full) ---

2012static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
2013 .cl_ops = &taprio_class_ops,
2014 .id = "taprio",
2015 .priv_size = sizeof(struct taprio_sched),
2016 .init = taprio_init,
2017 .change = taprio_change,
2018 .destroy = taprio_destroy,
2019 .reset = taprio_reset,
1791
1792 if (new)
1793 new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1794
1795 if (dev->flags & IFF_UP)
1796 dev_activate(dev);
1797
1798 return 0;

--- 217 unchanged lines hidden (view full) ---

2016static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
2017 .cl_ops = &taprio_class_ops,
2018 .id = "taprio",
2019 .priv_size = sizeof(struct taprio_sched),
2020 .init = taprio_init,
2021 .change = taprio_change,
2022 .destroy = taprio_destroy,
2023 .reset = taprio_reset,
2024 .attach = taprio_attach,
2020 .peek = taprio_peek,
2021 .dequeue = taprio_dequeue,
2022 .enqueue = taprio_enqueue,
2023 .dump = taprio_dump,
2024 .owner = THIS_MODULE,
2025};
2026
2027static struct notifier_block taprio_device_notifier = {

--- 22 unchanged lines hidden ---
2025 .peek = taprio_peek,
2026 .dequeue = taprio_dequeue,
2027 .enqueue = taprio_enqueue,
2028 .dump = taprio_dump,
2029 .owner = THIS_MODULE,
2030};
2031
2032static struct notifier_block taprio_device_notifier = {

--- 22 unchanged lines hidden ---