/openbmc/linux/drivers/net/ethernet/broadcom/bnxt/
bnxt_xdp.c
    26  struct bnxt_tx_ring_info *txr,    in bnxt_xmit_bd() (argument)
    44  prod = txr->tx_prod;    in bnxt_xmit_bd()
    45  tx_buf = &txr->tx_buf_ring[prod];    in bnxt_xmit_bd()
    50  txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];    in bnxt_xmit_bd()
    66  WRITE_ONCE(txr->tx_prod, prod);    in bnxt_xmit_bd()
    69  frag_tx_buf = &txr->tx_buf_ring[prod];    in bnxt_xmit_bd()
    72  txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];    in bnxt_xmit_bd()
    90  WRITE_ONCE(txr->tx_prod, prod);    in bnxt_xmit_bd()
    95  static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,    in __bnxt_xmit_xdp() (argument)
   101  tx_buf = bnxt_xmit_bd(bp, txr, mappin    in __bnxt_xmit_xdp()
   108  __bnxt_xmit_xdp_redirect(struct bnxt *bp, struct bnxt_tx_ring_info *txr, dma_addr_t mapping, u32 len, struct xdp_frame *xdpf)    in __bnxt_xmit_xdp_redirect() (argument)
   123  struct bnxt_tx_ring_info *txr = bnapi->tx_ring;    in bnxt_tx_int_xdp() (local)
   229  struct bnxt_tx_ring_info *txr;    in bnxt_rx_xdp() (local)
   337  struct bnxt_tx_ring_info *txr;    in bnxt_xdp_xmit() (local)
   [all …]
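
The bnxt_xmit_bd() hits above show the usual producer-side pattern: pick the software buffer and the hardware descriptor slot indexed by tx_prod, fill them, then publish the advanced producer index (the driver uses WRITE_ONCE() for that last store). A minimal userspace sketch of the same indexing, assuming a power-of-two ring; the struct names, ring size and NEXT_IDX() macro are illustrative, not the driver's definitions:

    #include <stdint.h>

    #define RING_SIZE 256                     /* must be a power of two */
    #define NEXT_IDX(i) (((i) + 1) & (RING_SIZE - 1))

    struct tx_bd  { uint64_t addr; uint32_t len; uint32_t flags; };
    struct tx_buf { void *data; };

    struct tx_ring {
        struct tx_bd  desc[RING_SIZE];        /* hardware-visible descriptors */
        struct tx_buf buf[RING_SIZE];         /* driver-private bookkeeping */
        uint16_t      prod;                   /* producer index, driver-owned */
    };

    /* Fill the slot at the current producer index and advance it.
     * The real driver additionally handles fragment descriptors and the
     * doorbell; this only shows the indexing and publication step. */
    static void xmit_bd(struct tx_ring *txr, uint64_t dma_addr, uint32_t len, void *cookie)
    {
        uint16_t prod = txr->prod;

        txr->buf[prod].data   = cookie;
        txr->desc[prod].addr  = dma_addr;
        txr->desc[prod].len   = len;
        txr->desc[prod].flags = 1;            /* e.g. "start + end of packet" */

        txr->prod = NEXT_IDX(prod);           /* kernel code uses WRITE_ONCE() here */
    }
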
bnxt.c
   334  void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,    in bnxt_sched_reset_txr() (argument)
   337  struct bnxt_napi *bnapi = txr->bnapi;    in bnxt_sched_reset_txr()
   343  txr->txq_index, bnapi->tx_pkts,    in bnxt_sched_reset_txr()
   344  txr->tx_cons, txr->tx_prod, idx);    in bnxt_sched_reset_txr()
   382  static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,    in bnxt_txr_db_kick() (argument)
   385  bnxt_db_write(bp, &txr->tx_db, prod);    in bnxt_txr_db_kick()
   386  txr->kick_pending = 0;    in bnxt_txr_db_kick()
   401  struct bnxt_tx_ring_info *txr;    in bnxt_start_xmit() (local)
   413  txr    in bnxt_start_xmit()
   691  struct bnxt_tx_ring_info *txr = bnapi->tx_ring;    in bnxt_tx_int() (local)
  2561  struct bnxt_tx_ring_info *txr = bnapi->tx_ring;    in __bnxt_poll_work() (local)
  2851  struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];    in bnxt_free_tx_skbs() (local)
  3288  struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];    in bnxt_free_tx_rings() (local)
  3324  struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];    in bnxt_alloc_tx_rings() (local)
  3552  struct bnxt_tx_ring_info *txr;    in bnxt_init_ring_struct() (local)
  3765  struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];    in bnxt_init_tx_rings() (local)
  4410  struct bnxt_tx_ring_info *txr;    in bnxt_clear_ring_indices() (local)
  4580  struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];    in bnxt_alloc_mem() (local)
  5156  bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)    in bnxt_cp_ring_for_tx() (argument)
  5724  struct bnxt_tx_ring_info *txr;    in hwrm_ring_alloc_send_msg() (local)
  5920  struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];    in bnxt_hwrm_ring_alloc() (local)
  6047  struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];    in bnxt_hwrm_ring_free() (local)
  9448  struct bnxt_tx_ring_info *txr;    in bnxt_tx_disable() (local)
  9467  struct bnxt_tx_ring_info *txr;    in bnxt_tx_enable() (local)
 11547  struct bnxt_tx_ring_info *txr = bnapi->tx_ring;    in bnxt_dump_tx_sw_state() (local)
   [all …]
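
The bnxt_txr_db_kick() hits (lines 385-386) write the producer index to the ring's doorbell and clear kick_pending, which is the "post now, kick later" scheme the flag name implies: several descriptors can be queued without touching MMIO, and one doorbell write flushes the batch. A simplified standalone model of that flow; the doorbell write is stubbed with a printf, and everything except the two field names taken from the hits is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    struct tx_ring {
        uint16_t prod;          /* producer index */
        int      kick_pending;  /* doorbell write still owed to hardware */
    };

    /* Stand-in for bnxt_db_write(): the real driver writes prod to an MMIO
     * doorbell register so the NIC starts fetching the new descriptors. */
    static void db_write(struct tx_ring *txr, uint16_t prod)
    {
        (void)txr;
        printf("doorbell <- %u\n", (unsigned)prod);
    }

    static void txr_db_kick(struct tx_ring *txr)
    {
        db_write(txr, txr->prod);
        txr->kick_pending = 0;
    }

    static void queue_packet(struct tx_ring *txr)
    {
        txr->prod++;            /* descriptor filled elsewhere */
        txr->kick_pending = 1;  /* defer the MMIO write */
    }

    int main(void)
    {
        struct tx_ring txr = { 0 };

        for (int i = 0; i < 4; i++)
            queue_packet(&txr);     /* no doorbell traffic yet */
        if (txr.kick_pending)
            txr_db_kick(&txr);      /* one MMIO write for the whole batch */
        return 0;
    }
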
bnxt_xdp.h
    16  struct bnxt_tx_ring_info *txr,
bnxt.h
  2258  const struct bnxt_tx_ring_info *txr)    in bnxt_tx_avail() (argument)
  2260  u32 used = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);    in bnxt_tx_avail()
  2355  void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
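
The bnxt_tx_avail() hit shows the standard free-running index trick: tx_prod and tx_cons are never masked before being compared, so an unsigned subtraction yields the number of in-flight descriptors even after either counter has wrapped. A small self-contained check of that arithmetic; the ring size and the 16-bit index type are illustrative:

    #include <assert.h>
    #include <stdint.h>

    #define RING_SIZE 256u

    /* Entries currently in flight; correct across wraparound because the
     * subtraction is performed in unsigned (modular) arithmetic. */
    static uint16_t tx_used(uint16_t prod, uint16_t cons)
    {
        return (uint16_t)(prod - cons);
    }

    static uint16_t tx_avail(uint16_t prod, uint16_t cons)
    {
        return (uint16_t)(RING_SIZE - tx_used(prod, cons));
    }

    int main(void)
    {
        /* producer has wrapped past 0xffff, consumer has not yet */
        uint16_t prod = 0x0005, cons = 0xfffe;

        assert(tx_used(prod, cons) == 7);
        assert(tx_avail(prod, cons) == RING_SIZE - 7);
        return 0;
    }
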
bnxt_ethtool.c
  3622  struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];    in bnxt_run_loopback() (local)
  3652  bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);    in bnxt_run_loopback()
  3657  bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);    in bnxt_run_loopback()
/openbmc/linux/drivers/net/ethernet/qualcomm/
qca_spi.c
   288  if (qca->txr.skb[qca->txr.head] == NULL)    in qcaspi_transmit()
   301  while (qca->txr.skb[qca->txr.head]) {    in qcaspi_transmit()
   302  pkt_len = qca->txr.skb[qca->txr.head]->len + QCASPI_HW_PKT_LEN;    in qcaspi_transmit()
   310  if (qcaspi_tx_frame(qca, qca->txr.skb[qca->txr.head]) == -1) {    in qcaspi_transmit()
   317  n_stats->tx_bytes += qca->txr.skb[qca->txr.head]->len;    in qcaspi_transmit()
   325  dev_kfree_skb(qca->txr.skb[qca->txr.head]);    in qcaspi_transmit()
   326  qca->txr.skb[qca->txr.head] = NULL;    in qcaspi_transmit()
   327  qca->txr.size -= pkt_len;    in qcaspi_transmit()
   328  new_head = qca->txr.head + 1;    in qcaspi_transmit()
   329  if (new_head >= qca->txr.count)    in qcaspi_transmit()
   [all …]
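
qcaspi_transmit() drains a simple array-of-skb ring from the head: while there is a frame at txr.head, push it to the hardware, account and free it, clear the slot, and advance head with wraparound. A stripped-down userspace model of that loop; the transmit step is stubbed, the field names mirror the hits, and the ring length and byte accounting are simplified (the driver subtracts the real packet length, this model just counts frames):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define TX_RING_LEN 8

    struct tx_ring {
        void   *skb[TX_RING_LEN];   /* queued frames, NULL = empty slot */
        int     head, tail, count;
        size_t  size;               /* driver: queued bytes; here: queued frames */
    };

    /* Stand-in for qcaspi_tx_frame(): pretend the SPI transfer succeeded. */
    static int tx_frame(void *frame) { (void)frame; return 0; }

    static void transmit(struct tx_ring *txr)
    {
        while (txr->skb[txr->head]) {
            void *frame = txr->skb[txr->head];

            if (tx_frame(frame) == -1)
                break;                      /* hardware busy, retry later */

            free(frame);                    /* dev_kfree_skb() in the driver */
            txr->skb[txr->head] = NULL;
            txr->size -= 1;

            if (++txr->head >= txr->count)  /* wrap the head index */
                txr->head = 0;
        }
    }

    int main(void)
    {
        struct tx_ring txr = { .count = TX_RING_LEN };

        txr.skb[0] = malloc(64);
        txr.skb[1] = malloc(64);
        txr.size = 2;
        transmit(&txr);
        printf("frames still queued: %zu\n", txr.size);
        return 0;
    }
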
qca_debug.c
    82  if (qca->txr.skb[qca->txr.head] == NULL)    in qcaspi_info_show()
    84  else if (qca->txr.skb[qca->txr.tail])    in qcaspi_info_show()
    92  qca->txr.size);    in qcaspi_info_show()
   258  ring->tx_pending = qca->txr.count;    in qcaspi_get_ringparam()
   276  qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN);    in qcaspi_set_ringparam()
   277  qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN);    in qcaspi_set_ringparam()
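
qcaspi_set_ringparam() clamps the user-requested tx_pending between the driver's minimum and maximum ring length with the max_t()/min_t() pair, the common pattern behind "ethtool -G". The same clamp spelled out in plain C; the limit values here are illustrative, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    #define TX_RING_MIN_LEN 2
    #define TX_RING_MAX_LEN 16

    static uint16_t clamp_tx_ring(uint32_t requested)
    {
        uint32_t count = requested;

        if (count < TX_RING_MIN_LEN)    /* max_t(u32, requested, MIN) */
            count = TX_RING_MIN_LEN;
        if (count > TX_RING_MAX_LEN)    /* min_t(u16, count, MAX) */
            count = TX_RING_MAX_LEN;
        return (uint16_t)count;
    }

    int main(void)
    {
        printf("%u %u %u\n", clamp_tx_ring(0), clamp_tx_ring(8), clamp_tx_ring(1000));
        return 0;   /* prints: 2 8 16 */
    }
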
qca_spi.h
    86  struct tx_ring txr;    (member)
/openbmc/linux/drivers/net/ethernet/broadcom/
bnx2.c
   246  static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)    in bnx2_tx_avail() (argument)
   253  diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);    in bnx2_tx_avail()
   696  struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;    in bnx2_free_tx_mem() (local)
   698  if (txr->tx_desc_ring) {    in bnx2_free_tx_mem()
   700  txr->tx_desc_ring,    in bnx2_free_tx_mem()
   701  txr->tx_desc_mapping);    in bnx2_free_tx_mem()
   702  txr->tx_desc_ring = NULL;    in bnx2_free_tx_mem()
   704  kfree(txr->tx_buf_ring);    in bnx2_free_tx_mem()
   705  txr->tx_buf_ring = NULL;    in bnx2_free_tx_mem()
   748  struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;    in bnx2_alloc_tx_mem() (local)
   [all …]
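
The bnx2_free_tx_mem() hits show the usual split between the hardware descriptor ring, which lives in DMA-coherent memory, and the software tx_buf_ring, which is ordinary kernel memory; teardown therefore pairs a coherent-DMA free (the call itself is truncated out of the hits above, but this is the standard pairing) with kfree(). A kernel-style sketch of that allocation/free pairing; the struct layout, size macro and function names are illustrative and error handling is trimmed:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    #define TXBD_RING_SIZE 4096    /* illustrative: one page of descriptors */

    struct my_tx_ring {
        void        *tx_desc_ring;      /* seen by the NIC via DMA */
        dma_addr_t   tx_desc_mapping;
        void        *tx_buf_ring;       /* driver-only bookkeeping */
    };

    static int my_alloc_tx_mem(struct device *dev, struct my_tx_ring *txr, int entries)
    {
        txr->tx_buf_ring = kcalloc(entries, sizeof(void *), GFP_KERNEL);
        if (!txr->tx_buf_ring)
            return -ENOMEM;

        txr->tx_desc_ring = dma_alloc_coherent(dev, TXBD_RING_SIZE,
                                               &txr->tx_desc_mapping, GFP_KERNEL);
        if (!txr->tx_desc_ring) {
            kfree(txr->tx_buf_ring);
            txr->tx_buf_ring = NULL;
            return -ENOMEM;
        }
        return 0;
    }

    static void my_free_tx_mem(struct device *dev, struct my_tx_ring *txr)
    {
        if (txr->tx_desc_ring) {
            dma_free_coherent(dev, TXBD_RING_SIZE,
                              txr->tx_desc_ring, txr->tx_desc_mapping);
            txr->tx_desc_ring = NULL;
        }
        kfree(txr->tx_buf_ring);
        txr->tx_buf_ring = NULL;
    }
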
bcmsysport.c
  1142  struct bcm_sysport_tx_ring *txr;    in bcm_sysport_rx_isr() (local)
  1177  txr = &priv->tx_rings[ring];    in bcm_sysport_rx_isr()
  1179  if (likely(napi_schedule_prep(&txr->napi))) {    in bcm_sysport_rx_isr()
  1181  __napi_schedule(&txr->napi);    in bcm_sysport_rx_isr()
  1193  struct bcm_sysport_tx_ring *txr;    in bcm_sysport_tx_isr() (local)
  1209  txr = &priv->tx_rings[ring];    in bcm_sysport_tx_isr()
  1211  if (likely(napi_schedule_prep(&txr->napi))) {    in bcm_sysport_tx_isr()
  1213  __napi_schedule_irqoff(&txr->napi);    in bcm_sysport_tx_isr()
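
Both ISRs in bcmsysport.c hand per-ring TX work off to NAPI the same way: napi_schedule_prep() atomically claims the NAPI instance (so a ring cannot be scheduled twice), and only then is __napi_schedule() or __napi_schedule_irqoff() called, typically after masking the ring's interrupt. A hedged kernel-style fragment of that hand-off; the interrupt-masking helper and the use of the NAPI pointer as dev_id are placeholders, not the driver's code:

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    /* Placeholder: the real driver masks the per-ring TX interrupt here. */
    static void ring_intr_disable(struct napi_struct *napi) { (void)napi; }

    static irqreturn_t my_tx_isr(int irq, void *dev_id)
    {
        struct napi_struct *napi = dev_id;

        /* Claim the NAPI instance first; if it is already scheduled,
         * napi_schedule_prep() returns false and nothing more is done. */
        if (likely(napi_schedule_prep(napi))) {
            ring_intr_disable(napi);
            /* _irqoff variant: hard-irq context, interrupts already off. */
            __napi_schedule_irqoff(napi);
        }
        return IRQ_HANDLED;
    }
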
/openbmc/qemu/hw/net/
e1000e_core.c
   869  e1000e_tx_ring_init(E1000ECore *core, E1000E_TxRing *txr, int idx)    in e1000e_tx_ring_init() (argument)
   878  txr->i = &i[idx];    in e1000e_tx_ring_init()
   879  txr->tx = &core->tx[idx];    in e1000e_tx_ring_init()
   900  e1000e_start_xmit(E1000ECore *core, const E1000E_TxRing *txr)    in e1000e_start_xmit() (argument)
   905  const E1000ERingInfo *txi = txr->i;    in e1000e_start_xmit()
   921  e1000e_process_tx_desc(core, txr->tx, &desc, txi->idx);    in e1000e_start_xmit()
   931  net_tx_pkt_reset(txr->tx->tx_pkt, net_tx_pkt_unmap_frag_pci, core->owner);    in e1000e_start_xmit()
  2407  E1000E_TxRing txr;    in e1000e_set_tctl() (local)
  2411  e1000e_tx_ring_init(core, &txr, 0);    in e1000e_set_tctl()
  2412  e1000e_start_xmit(core, &txr);    in e1000e_set_tctl()
   [all …]
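
e1000e_tx_ring_init() copies nothing: it binds a stack-allocated E1000E_TxRing to the per-queue ring-register info and the per-queue TX state by index, so e1000e_set_tctl() can build that view on demand and hand it to e1000e_start_xmit(). A small standalone illustration of this "index in, pair of pointers out" idiom; the types and names are illustrative, not QEMU's:

    #include <stdio.h>

    #define NUM_TX_QUEUES 2

    struct ring_info { int head, tail, idx; };   /* mirrors the queue registers */
    struct tx_state  { int bytes_queued; };      /* per-queue software state */

    struct core {
        struct ring_info ring[NUM_TX_QUEUES];
        struct tx_state  tx[NUM_TX_QUEUES];
    };

    /* View over one queue: no copies, just two pointers picked by index. */
    struct tx_ring_view {
        struct ring_info *i;
        struct tx_state  *tx;
    };

    static void tx_ring_init(struct core *core, struct tx_ring_view *txr, int idx)
    {
        txr->i  = &core->ring[idx];
        txr->tx = &core->tx[idx];
    }

    int main(void)
    {
        struct core core = { .ring = { { .idx = 0 }, { .idx = 1 } } };
        struct tx_ring_view txr;

        tx_ring_init(&core, &txr, 0);    /* queue 0, as in e1000e_set_tctl() */
        txr.tx->bytes_queued += 64;
        printf("queue %d queued %d bytes\n", txr.i->idx, txr.tx->bytes_queued);
        return 0;
    }
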
igb_core.c
   769  igb_tx_ring_init(IGBCore *core, IGB_TxRing *txr, int idx)    in igb_tx_ring_init() (argument)
   792  txr->i = &i[idx];    in igb_tx_ring_init()
   793  txr->tx = &core->tx[idx];    in igb_tx_ring_init()
   875  igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)    in igb_start_xmit() (argument)
   880  const E1000ERingInfo *txi = txr->i;    in igb_start_xmit()
   901  igb_process_tx_desc(core, d, txr->tx, &desc, txi->idx);    in igb_start_xmit()
   911  net_tx_pkt_reset(txr->tx->tx_pkt, net_tx_pkt_unmap_frag_pci, d);    in igb_start_xmit()
  2804  IGB_TxRing txr;    in igb_set_tdt() (local)
  2809  igb_tx_ring_init(core, &txr, qn);    in igb_set_tdt()
  2810  igb_start_xmit(core, &txr);    in igb_set_tdt()
/openbmc/linux/drivers/net/ethernet/sgi/
ioc3-eth.c
    86  struct ioc3_etxd *txr;    (member)
   615  desc = &ip->txr[entry];    in ioc3_tx_unmap()
   642  ip->txr[i].cmd = 0;    in ioc3_clean_tx_ring()
   902  ip->txr = PTR_ALIGN(ip->tx_ring, SZ_16K);    in ioc3eth_probe()
  1041  desc = &ip->txr[produce];    in ioc3_start_xmit()
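
The ioc3eth_probe() hit aligns the TX descriptor ring to a 16 KiB boundary by over-allocating and rounding the pointer up with PTR_ALIGN(ip->tx_ring, SZ_16K). The same rounding in plain C; the macro below is a simplified stand-in for the kernel's PTR_ALIGN, and the ring size is illustrative:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define SZ_16K 0x4000u

    /* Round a pointer up to the next 'align' boundary (align: power of two). */
    #define PTR_ALIGN_UP(p, align) \
        ((void *)(((uintptr_t)(p) + ((align) - 1)) & ~((uintptr_t)(align) - 1)))

    int main(void)
    {
        /* Over-allocate by one alignment unit so the aligned region still fits. */
        size_t ring_bytes = 8192;
        void *raw = malloc(ring_bytes + SZ_16K - 1);
        void *txr = PTR_ALIGN_UP(raw, SZ_16K);

        printf("raw=%p aligned=%p\n", raw, txr);
        free(raw);                 /* free the original pointer, not the aligned one */
        return 0;
    }
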
/openbmc/linux/drivers/net/ethernet/amazon/ena/
ena_netdev.c
   184  struct ena_ring *txr, *rxr;    in ena_init_io_rings() (local)
   190  txr = &adapter->tx_ring[i];    in ena_init_io_rings()
   194  ena_init_io_rings_common(adapter, txr, i);    in ena_init_io_rings()
   197  txr->ring_size = adapter->requested_tx_ring_size;    in ena_init_io_rings()
   198  txr->tx_max_header_size = ena_dev->tx_max_header_size;    in ena_init_io_rings()
   199  txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;    in ena_init_io_rings()
   200  txr->sgl_size = adapter->max_tx_sgl_size;    in ena_init_io_rings()
   201  txr->smoothed_interval =    in ena_init_io_rings()
   203  txr->disable_meta_caching = adapter->disable_meta_caching;    in ena_init_io_rings()
   204  spin_lock_init(&txr->xdp_tx_lock);    in ena_init_io_rings()
   [all …]
/openbmc/linux/tools/testing/selftests/bpf/
xskxceiver.c
   250  struct xsk_ring_prod *txr;    in __xsk_configure_socket() (local)
   261  txr = ifobject->tx_on ? &xsk->tx : NULL;    in __xsk_configure_socket()
   263  return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg);    in __xsk_configure_socket()
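
__xsk_configure_socket() passes &xsk->tx only when the interface object is set up to transmit, and NULL otherwise, so xsk_socket__create() only maps the rings it was actually given. A hedged fragment of the same idea written against the ordinary libbpf/libxdp xsk API, which takes an interface name (the selftests' local helper shown above takes an ifindex instead); the UMEM, rings and config are assumed to be set up elsewhere:

    #include <stdbool.h>
    #include <bpf/xsk.h>    /* or <xdp/xsk.h> when building against libxdp */

    static int configure_socket(struct xsk_socket **xsk, struct xsk_umem *umem,
                                struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
                                const struct xsk_socket_config *cfg,
                                const char *ifname, bool tx_on)
    {
        /* Only hand a TX ring to the kernel if this socket will transmit;
         * passing NULL skips the TX ring mapping entirely. */
        struct xsk_ring_prod *txr = tx_on ? tx : NULL;

        return xsk_socket__create(xsk, ifname, 0 /* queue id */, umem, rx, txr, cfg);
    }
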