/openbmc/linux/net/sched/
sch_mq.c
    72: struct netdev_queue *dev_queue;	// in mq_init(), local
    89: dev_queue = netdev_get_tx_queue(dev, ntx);	// in mq_init()
    90: qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),	// in mq_init()
    115: old = dev_graft_qdisc(qdisc->dev_queue, qdisc);	// in mq_attach()
    178: struct netdev_queue *dev_queue = mq_queue_get(sch, cl);	// in mq_graft(), local
    185: *old = dev_graft_qdisc(dev_queue, new);	// in mq_graft()
    203: struct netdev_queue *dev_queue = mq_queue_get(sch, cl);	// in mq_leaf(), local
    205: return rtnl_dereference(dev_queue->qdisc_sleeping);	// in mq_leaf()
    220: struct netdev_queue *dev_queue = mq_queue_get(sch, cl);	// in mq_dump_class(), local
    224: tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;	// in mq_dump_class()
    [all …]
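The sch_mq.c hits above show the recurring pattern for root qdiscs on multiqueue devices: ->init() creates one default child qdisc per hardware TX queue with qdisc_create_dflt(), and ->attach() grafts each child onto its netdev_queue with dev_graft_qdisc(). A minimal sketch of that shape follows; the "toy" names, the private struct and the choice of pfifo_qdisc_ops are illustrative assumptions, not the real sch_mq internals.

/*
 * Hedged sketch of the per-TX-queue init/attach pattern, NOT sch_mq itself.
 */
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>

struct toy_sched {
	struct Qdisc **qdiscs;	/* one child per hardware TX queue */
};

static int toy_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct toy_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(*priv->qdiscs),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
		struct Qdisc *qdisc;

		qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(ntx + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;	/* children released in ->destroy() */
		priv->qdiscs[ntx] = qdisc;
	}
	return 0;
}

static void toy_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct toy_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		struct Qdisc *qdisc = priv->qdiscs[ntx];
		/* Replace whatever was attached to this TX queue before. */
		struct Qdisc *old = dev_graft_qdisc(qdisc->dev_queue, qdisc);

		if (old)
			qdisc_put(old);
	}
}

A complete qdisc would also free the children in ->destroy(); the sketch only covers the dev_queue handling shown in the hits.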
sch_generic.c
    74: const struct netdev_queue *txq = q->dev_queue;	// in __skb_dequeue_bad_txq()
    231: const struct netdev_queue *txq = q->dev_queue;	// in dequeue_skb()
    670: .dev_queue = &noop_netdev_queue,
    932: struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,	// in qdisc_alloc(), argument
    941: if (!dev_queue) {	// in qdisc_alloc()
    947: dev = dev_queue->dev;	// in qdisc_alloc()
    948: sch = kzalloc_node(size, GFP_KERNEL, netdev_queue_numa_node_read(dev_queue));	// in qdisc_alloc()
    985: sch->dev_queue = dev_queue;	// in qdisc_alloc()
    997: struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,	// in qdisc_create_dflt(), argument
    1009: sch = qdisc_alloc(dev_queue, ops, extack);	// in qdisc_create_dflt()
    [all …]
sch_htb.c
    1119: struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);	// in htb_init(), local
    1122: qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,	// in htb_init()
    1161: old = dev_graft_qdisc(qdisc->dev_queue, qdisc);	// in htb_attach_offload()
    1166: struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);	// in htb_attach_offload(), local
    1167: struct Qdisc *old = dev_graft_qdisc(dev_queue, NULL);	// in htb_attach_offload()
    1183: struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);	// in htb_attach_software(), local
    1184: struct Qdisc *old = dev_graft_qdisc(dev_queue, sch);	// in htb_attach_software()
    1369: return sch->dev_queue;	// in htb_select_queue()
    1382: htb_graft_helper(struct netdev_queue *dev_queue, struct Qdisc *new_q)	// in htb_graft_helper(), argument
    1384: struct net_device *dev = dev_queue->dev;	// in htb_graft_helper()
    [all …]
sch_mqprio.c
    352: struct netdev_queue *dev_queue;	// in mqprio_init(), local
    399: dev_queue = netdev_get_tx_queue(dev, i);	// in mqprio_init()
    400: qdisc = qdisc_create_dflt(dev_queue,	// in mqprio_init()
    444: old = dev_graft_qdisc(qdisc->dev_queue, qdisc);	// in mqprio_attach()
    469: struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);	// in mqprio_graft(), local
    471: if (!dev_queue)	// in mqprio_graft()
    477: *old = dev_graft_qdisc(dev_queue, new);	// in mqprio_graft()
    616: struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);	// in mqprio_leaf(), local
    618: if (!dev_queue)	// in mqprio_leaf()
    621: return rtnl_dereference(dev_queue->qdisc_sleeping);	// in mqprio_leaf()
    [all …]
sch_api.c
    1081: struct netdev_queue *dev_queue;	// in qdisc_graft(), local
    1088: dev_queue = dev_ingress_queue(dev);	// in qdisc_graft()
    1089: if (!dev_queue) {	// in qdisc_graft()
    1094: q = rtnl_dereference(dev_queue->qdisc_sleeping);	// in qdisc_graft()
    1116: dev_queue = netdev_get_tx_queue(dev, i);	// in qdisc_graft()
    1117: old = dev_graft_qdisc(dev_queue, new);	// in qdisc_graft()
    1124: old = dev_graft_qdisc(dev_queue, NULL);	// in qdisc_graft()
    1133: dev_graft_qdisc(dev_queue, new);	// in qdisc_graft()
    1229: struct netdev_queue *dev_queue,	// in qdisc_create(), argument
    1275: sch = qdisc_alloc(dev_queue, ops, extack);	// in qdisc_create()
    [all …]
sch_plug.c
    184: netif_schedule_queue(sch->dev_queue);	// in plug_change()
    192: netif_schedule_queue(sch->dev_queue);	// in plug_change()
sch_cbs.c
    412: q->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,	// in cbs_init()
    423: q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);	// in cbs_init()
    498: new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,	// in cbs_graft()
sch_prio.c
    198: queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,	// in prio_tune()
    294: new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,	// in prio_graft()
sch_taprio.c
    2114: struct netdev_queue *dev_queue;	// in taprio_init(), local
    2117: dev_queue = netdev_get_tx_queue(dev, i);	// in taprio_init()
    2118: qdisc = qdisc_create_dflt(dev_queue,	// in taprio_init()
    2148: struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);	// in taprio_attach(), local
    2166: old = dev_graft_qdisc(dev_queue, dev_queue_qdisc);	// in taprio_attach()
    2194: struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);	// in taprio_graft(), local
    2196: if (!dev_queue)	// in taprio_graft()
    2211: WARN_ON_ONCE(dev_graft_qdisc(dev_queue, new) != *old);	// in taprio_graft()
sch_drr.c
    110: cl->qdisc = qdisc_create_dflt(sch->dev_queue,	// in drr_change_class()
    211: new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,	// in drr_graft_class()
sch_fifo.c
    262: q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1),	// in fifo_create_dflt()
sch_ets.c
    256: new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,	// in ets_class_graft()
    639: queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,	// in ets_qdisc_change()
sch_multiq.c
    214: child = qdisc_create_dflt(sch->dev_queue,	// in multiq_tune()
sch_hfsc.c
    1061: cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,	// in hfsc_change_class()
    1193: new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,	// in hfsc_graft_class()
    1419: q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,	// in hfsc_init_qdisc()
sch_etf.c
    379: q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);	// in etf_init()
/openbmc/linux/include/linux/
netdevice.h
    3322: static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)	// in netif_tx_start_queue(), argument
    3324: clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);	// in netif_tx_start_queue()
    3348: void netif_tx_wake_queue(struct netdev_queue *dev_queue);
    3372: static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)	// in netif_tx_stop_queue(), argument
    3375: WRITE_ONCE(dev_queue->trans_start, jiffies);	// in netif_tx_stop_queue()
    3381: set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);	// in netif_tx_stop_queue()
    3398: static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)	// in netif_tx_queue_stopped(), argument
    3400: return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);	// in netif_tx_queue_stopped()
    3414: static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)	// in netif_xmit_stopped(), argument
    3416: return dev_queue->state & QUEUE_STATE_ANY_XOFF;	// in netif_xmit_stopped()
    [all …]
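The netif_tx_* helpers above manipulate the per-queue __QUEUE_STATE_DRV_XOFF bit. The usual driver pattern is to stop a queue from ndo_start_xmit() when the ring is nearly full and to wake it from the TX completion path once descriptors are reclaimed. A hedged sketch, where the my_ring type and my_ring_free_slots() are hypothetical stand-ins for a driver's real TX ring bookkeeping:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_ring {
	unsigned int free;	/* free descriptor slots (hypothetical) */
};

static unsigned int my_ring_free_slots(const struct my_ring *ring)
{
	return ring->free;
}

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev,
			   struct my_ring *ring)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, skb->queue_mapping);

	/* ... post skb to the hardware ring here ... */

	if (my_ring_free_slots(ring) < MAX_SKB_FRAGS + 1)
		netif_tx_stop_queue(txq);	/* sets __QUEUE_STATE_DRV_XOFF */

	return NETDEV_TX_OK;
}

static void my_tx_complete(struct net_device *dev, unsigned int qid,
			   struct my_ring *ring)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qid);

	/* ... reclaim finished descriptors, growing ring->free ... */

	if (netif_tx_queue_stopped(txq) &&
	    my_ring_free_slots(ring) > MAX_SKB_FRAGS + 1)
		netif_tx_wake_queue(txq);	/* clears XOFF, reschedules TX */
}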
/openbmc/linux/include/net/
netdev_queues.h
    89: netdev_txq_completed_mb(struct netdev_queue *dev_queue,	// in netdev_txq_completed_mb(), argument
    93: netdev_tx_completed_queue(dev_queue, pkts, bytes);	// in netdev_txq_completed_mb()
sch_generic.h
    104: struct netdev_queue *dev_queue;	// member
    548: struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);	// in qdisc_root()
    555: return rcu_dereference_bh(qdisc->dev_queue->qdisc);	// in qdisc_root_bh()
    560: return rcu_dereference_rtnl(qdisc->dev_queue->qdisc_sleeping);	// in qdisc_root_sleeping()
    573: return qdisc->dev_queue->dev;	// in qdisc_dev()
    690: struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
    724: struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
    728: struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
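As the sch_generic.h accessors show, a qdisc's dev_queue pointer is its link back to the device and to the root qdisc attached to that TX queue. A small hypothetical helper as a hedged sketch, assuming it runs under RTNL since qdisc_root_sleeping() expects that:

#include <net/sch_generic.h>

static void toy_report(const struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);	/* sch->dev_queue->dev */
	struct Qdisc *root = qdisc_root_sleeping(sch);	/* qdisc_sleeping of that queue */

	pr_info("%s: qdisc %08x sits under root %08x\n",
		netdev_name(dev), sch->handle, root->handle);
}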
pkt_sched.h
    142: return dev_net(q->dev_queue->dev);	// in qdisc_net()
/openbmc/linux/drivers/net/ethernet/hisilicon/hns/
hns_enet.c
    333: struct netdev_queue *dev_queue;	// in hns_nic_net_xmit_hw(), local
    386: dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);	// in hns_nic_net_xmit_hw()
    387: netdev_tx_sent_queue(dev_queue, skb->len);	// in hns_nic_net_xmit_hw()
    976: struct netdev_queue *dev_queue;	// in hns_nic_tx_poll_one(), local
    1005: dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);	// in hns_nic_tx_poll_one()
    1006: netdev_tx_completed_queue(dev_queue, pkts, bytes);	// in hns_nic_tx_poll_one()
    1017: if (netif_tx_queue_stopped(dev_queue) &&	// in hns_nic_tx_poll_one()
    1019: netif_tx_wake_queue(dev_queue);	// in hns_nic_tx_poll_one()
    1060: struct netdev_queue *dev_queue;	// in hns_nic_tx_clr_all_bufs(), local
    1070: dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);	// in hns_nic_tx_clr_all_bufs()
    [all …]
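The hns hits above (and the reset call in hns_ethtool.c below) show byte queue limit (BQL) accounting: bytes are reported with netdev_tx_sent_queue() when descriptors are posted, reported again with netdev_tx_completed_queue() when the hardware finishes them, and the counter is cleared with netdev_tx_reset_queue() when a ring is flushed. A hedged sketch of the three call sites, with hypothetical my_* function names; only the netdev_tx_* calls are real API:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void my_tx_post(struct net_device *ndev, struct sk_buff *skb,
		       unsigned int qid)
{
	struct netdev_queue *dev_queue = netdev_get_tx_queue(ndev, qid);

	/* ... descriptors handed to hardware ... */
	netdev_tx_sent_queue(dev_queue, skb->len);	/* BQL: bytes now in flight */
}

static void my_tx_poll(struct net_device *ndev, unsigned int qid,
		       unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *dev_queue = netdev_get_tx_queue(ndev, qid);

	/* BQL: report completions; may lift a BQL-induced stall */
	netdev_tx_completed_queue(dev_queue, pkts, bytes);
}

static void my_tx_ring_flush(struct net_device *ndev, unsigned int qid)
{
	struct netdev_queue *dev_queue = netdev_get_tx_queue(ndev, qid);

	/* BQL: forget in-flight bytes when the ring is torn down */
	netdev_tx_reset_queue(dev_queue);
}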
hns_ethtool.c
    376: struct netdev_queue *dev_queue;	// in __lb_other_process(), local
    407: dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);	// in __lb_other_process()
    408: netdev_tx_reset_queue(dev_queue);	// in __lb_other_process()
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/en/
qos.c
    364: struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);	// in mlx5e_reset_qdisc(), local
    365: struct Qdisc *qdisc = dev_queue->qdisc_sleeping;	// in mlx5e_reset_qdisc()
/openbmc/linux/drivers/net/xen-netback/
xenbus.c
    31: struct netdev_queue *dev_queue;	// in xenvif_read_io_ring(), local
    92: dev_queue = netdev_get_tx_queue(queue->vif->dev, queue->id);	// in xenvif_read_io_ring()
    97: netif_tx_queue_stopped(dev_queue) ? "stopped" : "running");	// in xenvif_read_io_ring()
/openbmc/linux/drivers/net/ethernet/hisilicon/hns3/
hns3_enet.c
    813: struct netdev_queue *dev_queue;	// in hns3_reset_tx_queue(), local
    817: dev_queue = netdev_get_tx_queue(ndev,	// in hns3_reset_tx_queue()
    819: netdev_tx_reset_queue(dev_queue);	// in hns3_reset_tx_queue()
    2303: struct netdev_queue *dev_queue;	// in hns3_nic_net_xmit(), local
    2347: dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);	// in hns3_nic_net_xmit()
    2348: doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes,	// in hns3_nic_net_xmit()
    3637: struct netdev_queue *dev_queue;	// in hns3_clean_tx_ring(), local
    3654: dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);	// in hns3_clean_tx_ring()
    3655: netdev_tx_completed_queue(dev_queue, pkts, bytes);	// in hns3_clean_tx_ring()
    3663: if (netif_tx_queue_stopped(dev_queue) &&	// in hns3_clean_tx_ring()
    [all …]
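hns3 uses the batching variant: the return value of __netdev_tx_sent_queue() folds the BQL update together with the decision of whether the doorbell must be rung now or can be deferred while more packets are pending (xmit_more). A hedged sketch of just that tail of the xmit path; my_xmit_tail() and the doorbell comment are hypothetical:

#include <linux/netdevice.h>

static void my_xmit_tail(struct net_device *ndev, unsigned int qid,
			 unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, qid);
	bool kick;

	/* BQL accounting plus "ring doorbell now?" decision in one call */
	kick = __netdev_tx_sent_queue(txq, bytes, netdev_xmit_more());
	if (kick) {
		/* ... write the hardware doorbell register ... */
	}
}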
/openbmc/linux/tools/perf/Documentation/
perf-trace.txt
    326: next block:*_unplug and the next three net:*dev_queue events, this last one
    329: …# perf trace -e sched:*switch/nr=2/,block:*_plug/nr=4/,block:*_unplug/nr=1/,net:*dev_queue/nr=3,ma…