/openbmc/linux/drivers/net/ethernet/aquantia/atlantic/
  aq_vec.c
    18: unsigned int tx_rings;  (member)
    41: for (i = 0U; self->tx_rings > i; ++i) {  in aq_vec_poll()
    119: self->tx_rings = 0;  in aq_vec_alloc()
    144: ++self->tx_rings;  in aq_vec_ring_alloc()
    190: for (i = 0U; self->tx_rings > i; ++i) {  in aq_vec_init()
    232: for (i = 0U; self->tx_rings > i; ++i) {  in aq_vec_start()
    256: for (i = 0U; self->tx_rings > i; ++i) {  in aq_vec_stop()
    276: for (i = 0U; self->tx_rings > i; ++i) {  in aq_vec_deinit()
    305: for (i = 0U; self->tx_rings > i; ++i) {  in aq_vec_ring_free()
    314: self->tx_rings = 0;  in aq_vec_ring_free()
    [all …]
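The aq_vec.c matches above use tx_rings as a per-vector count: it starts at zero, is bumped in aq_vec_ring_alloc() as each ring is attached, and bounds every walk of the rings. A minimal sketch of that counted-set idea, with invented names (this is not the atlantic driver's actual layout):

    /* Illustrative only: tx_rings counts the rings actually attached. */
    #include <linux/errno.h>
    #include <linux/kernel.h>

    struct demo_ring {
            int id;
    };

    struct demo_vec {
            struct demo_ring ring[8];
            unsigned int tx_rings;  /* number of valid entries in ring[] */
    };

    static int demo_vec_ring_add(struct demo_vec *self, int id)
    {
            if (self->tx_rings >= ARRAY_SIZE(self->ring))
                    return -ENOSPC;
            self->ring[self->tx_rings].id = id;
            ++self->tx_rings;       /* count only fully attached rings */
            return 0;
    }

    static void demo_vec_ring_free_all(struct demo_vec *self)
    {
            unsigned int i;

            /* Walk exactly the rings that were added, then forget them. */
            for (i = 0U; self->tx_rings > i; ++i)
                    self->ring[i].id = -1;
            self->tx_rings = 0;
    }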
  aq_hw.h
    66: u8 tx_rings;  (member)
/openbmc/linux/drivers/net/ethernet/netronome/nfp/
  nfp_net_dp.c
    187: dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),  in nfp_net_tx_rings_prepare()
    189: if (!dp->tx_rings)  in nfp_net_tx_rings_prepare()
    206: nfp_net_tx_ring_init(&dp->tx_rings[r], dp,  in nfp_net_tx_rings_prepare()
    209: if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))  in nfp_net_tx_rings_prepare()
    212: if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))  in nfp_net_tx_rings_prepare()
    220: nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);  in nfp_net_tx_rings_prepare()
    222: nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);  in nfp_net_tx_rings_prepare()
    228: kfree(dp->tx_rings);  in nfp_net_tx_rings_prepare()
    237: nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);  in nfp_net_tx_rings_free()
    238: nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);  in nfp_net_tx_rings_free()
    [all …]
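The nfp_net_tx_rings_prepare()/nfp_net_tx_rings_free() hits follow a common shape: kcalloc() an array of ring structures, set each ring up in turn, and on failure unwind only the rings already initialized before freeing the array. A self-contained sketch of that shape, assuming invented demo_* types and helpers rather than the nfp ones:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_tx_ring {
            void *descs;
            unsigned int cnt;
    };

    struct demo_dp {
            struct demo_tx_ring *tx_rings;
            unsigned int num_tx_rings;
    };

    static int demo_tx_ring_alloc(struct demo_tx_ring *ring)
    {
            ring->descs = kcalloc(ring->cnt, sizeof(u64), GFP_KERNEL);
            return ring->descs ? 0 : -ENOMEM;
    }

    static void demo_tx_ring_free(struct demo_tx_ring *ring)
    {
            kfree(ring->descs);
    }

    static int demo_tx_rings_prepare(struct demo_dp *dp)
    {
            unsigned int r;

            dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
                                   GFP_KERNEL);
            if (!dp->tx_rings)
                    return -ENOMEM;

            for (r = 0; r < dp->num_tx_rings; r++) {
                    dp->tx_rings[r].cnt = 1024;
                    if (demo_tx_ring_alloc(&dp->tx_rings[r]))
                            goto err_unwind;
            }
            return 0;

    err_unwind:
            /* Free only the rings that were successfully set up. */
            while (r--)
                    demo_tx_ring_free(&dp->tx_rings[r]);
            kfree(dp->tx_rings);
            dp->tx_rings = NULL;
            return -ENOMEM;
    }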
  nfp_net_common.c
    797: idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL;  in nfp_net_vector_assign_rings()
    800: &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;  in nfp_net_vector_assign_rings()
    968: nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);  in nfp_net_clear_config_and_disable()
    1001: nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r);  in nfp_net_set_config_and_enable()
    1572: new->tx_rings = NULL;  in nfp_net_clone_dp()
/openbmc/linux/drivers/net/wireless/realtek/rtw88/
  pci.c
    167: tx_ring = &rtwpci->tx_rings[i];  in rtw_pci_free_trx_ring()
    328: tx_ring = &rtwpci->tx_rings[i];  in rtw_pci_init_trx_ring()
    350: tx_ring = &rtwpci->tx_rings[i];  in rtw_pci_init_trx_ring()
    404: dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;  in rtw_pci_reset_buf_desc()
    408: len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;  in rtw_pci_reset_buf_desc()
    409: dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;  in rtw_pci_reset_buf_desc()
    410: rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;  in rtw_pci_reset_buf_desc()
    411: rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;  in rtw_pci_reset_buf_desc()
    416: len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;  in rtw_pci_reset_buf_desc()
    417: dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;  in rtw_pci_reset_buf_desc()
    [all …]
  pci.h
    223: struct rtw_pci_tx_ring tx_rings[RTK_MAX_TX_QUEUE_NUM];  (member)
/openbmc/linux/drivers/net/ethernet/intel/ice/
  ice_ethtool.c
    880: tx_ring = test_vsi->tx_rings[0];  in ice_loopback_test()
    1547: tx_ring = READ_ONCE(vsi->tx_rings[j]);  in __ice_get_ethtool_stats()
    2684: if (vsi->tx_rings && vsi->rx_rings) {  in ice_get_ringparam()
    2686: ring->tx_pending = vsi->tx_rings[0]->count;  in ice_get_ringparam()
    2706: struct ice_tx_ring *tx_rings = NULL;  in ice_set_ringparam()  (local)
    2725: if (!vsi->tx_rings || !vsi->rx_rings)  in ice_set_ringparam()
    2738: if (new_tx_cnt == vsi->tx_rings[0]->count &&  in ice_set_ringparam()
    2761: vsi->tx_rings[i]->count = new_tx_cnt;  in ice_set_ringparam()
    2773: if (new_tx_cnt == vsi->tx_rings[0]->count)  in ice_set_ringparam()
    2778: vsi->tx_rings[0]->count, new_tx_cnt);  in ice_set_ringparam()
    [all …]
  ice_lib.c
    83: vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,  in ice_vsi_alloc_arrays()
    84: sizeof(*vsi->tx_rings), GFP_KERNEL);  in ice_vsi_alloc_arrays()
    85: if (!vsi->tx_rings)  in ice_vsi_alloc_arrays()
    129: devm_kfree(dev, vsi->tx_rings);  in ice_vsi_alloc_arrays()
    321: devm_kfree(dev, vsi->tx_rings);  in ice_vsi_free_arrays()
    322: vsi->tx_rings = NULL;  in ice_vsi_free_arrays()
    391: ring = vsi->tx_rings[i];  in ice_vsi_alloc_ring_stats()
    1385: if (vsi->tx_rings) {  in ice_vsi_clear_rings()
    1387: if (vsi->tx_rings[i]) {  in ice_vsi_clear_rings()
    1388: kfree_rcu(vsi->tx_rings[i], rcu);  in ice_vsi_clear_rings()
    [all …]
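ice_vsi_alloc_arrays() and ice_vsi_clear_rings() above pair a devm-managed pointer array with per-ring frees via kfree_rcu(), so a ring object stays valid for readers that already fetched its pointer until a grace period passes. A rough sketch of that pairing, using made-up demo_* names rather than the ice structures:

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct demo_ring {
            u16 count;
            struct rcu_head rcu;    /* needed by kfree_rcu() below */
    };

    struct demo_vsi {
            struct demo_ring **tx_rings;
            u16 alloc_txq;
    };

    static int demo_vsi_alloc_arrays(struct device *dev, struct demo_vsi *vsi)
    {
            vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
                                         sizeof(*vsi->tx_rings), GFP_KERNEL);
            return vsi->tx_rings ? 0 : -ENOMEM;
    }

    static void demo_vsi_clear_rings(struct demo_vsi *vsi)
    {
            u16 i;

            for (i = 0; i < vsi->alloc_txq; i++) {
                    if (vsi->tx_rings[i]) {
                            /* Defer the free past the current RCU grace
                             * period so lockless readers never touch freed
                             * memory. */
                            kfree_rcu(vsi->tx_rings[i], rcu);
                            vsi->tx_rings[i] = NULL;
                    }
            }
    }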
  ice_dcb_lib.c
    208: return vsi->tx_rings[queue_index]->dcb_tc;  in ice_dcb_get_tc()
    225: tx_ring = vsi->tx_rings[i];  in ice_vsi_cfg_dcb_rings()
    242: vsi->tx_rings[i]->dcb_tc = n;  in ice_vsi_cfg_dcb_rings()
    264: vsi->tx_rings[i]->dcb_tc = first_droptc;  in ice_vsi_cfg_dcb_rings()
  ice_lib.h
    59: int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx);
  ice_xsk.c
    54: ice_clean_tx_ring(vsi->tx_rings[q_idx]);  in ice_qp_clean_rings()
    170: tx_ring = vsi->tx_rings[q_idx];  in ice_qp_dis()
    235: tx_ring = vsi->tx_rings[q_idx];  in ice_qp_ena()
  ice_base.c
    285: if (vsi->tx_rings[i] == ring)  in ice_eswitch_calc_txq_handle()
    788: struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];  in ice_vsi_map_rings_to_vectors()
  ice_repr.c
    158: tx_ring = np->vsi->tx_rings[vf_id];  in ice_repr_sp_stats64()
/openbmc/linux/drivers/net/ethernet/intel/i40e/
  i40e_ethtool.c
    2031: ring->tx_pending = vsi->tx_rings[0]->count;  in i40e_get_ringparam()
    2052: struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;  in i40e_set_ringparam()  (local)
    2080: if ((new_tx_count == vsi->tx_rings[0]->count) &&  in i40e_set_ringparam()
    2101: vsi->tx_rings[i]->count = new_tx_count;  in i40e_set_ringparam()
    2119: if (new_tx_count != vsi->tx_rings[0]->count) {  in i40e_set_ringparam()
    2122: vsi->tx_rings[0]->count, new_tx_count);  in i40e_set_ringparam()
    2123: tx_rings = kcalloc(tx_alloc_queue_pairs,  in i40e_set_ringparam()
    2125: if (!tx_rings) {  in i40e_set_ringparam()
    2134: tx_rings[i] = *vsi->tx_rings[i];  in i40e_set_ringparam()
    2135: tx_rings[i].count = new_tx_count;  in i40e_set_ringparam()
    [all …]
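The i40e_set_ringparam() matches show the usual ethtool resize strategy: if the requested count differs from tx_rings[0]->count, a shadow array of ring structures is kcalloc()'d, each entry is copied from the live ring and given the new count, and only after every allocation succeeds is anything swapped into the live rings. A simplified sketch of that staging idea (the demo_* names and the swap step are stand-ins, not the i40e code):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_ring {
            u16 count;
            void *desc;
    };

    struct demo_vsi {
            struct demo_ring **tx_rings;    /* live rings, one per queue */
            u16 num_queues;
    };

    static int demo_ring_setup(struct demo_ring *ring)
    {
            ring->desc = kcalloc(ring->count, sizeof(u64), GFP_KERNEL);
            return ring->desc ? 0 : -ENOMEM;
    }

    static int demo_set_tx_count(struct demo_vsi *vsi, u16 new_tx_count)
    {
            struct demo_ring *staged;
            int i, err;

            if (new_tx_count == vsi->tx_rings[0]->count)
                    return 0;       /* nothing to do */

            /* Build the resized rings off to the side first. */
            staged = kcalloc(vsi->num_queues, sizeof(*staged), GFP_KERNEL);
            if (!staged)
                    return -ENOMEM;

            for (i = 0; i < vsi->num_queues; i++) {
                    staged[i] = *vsi->tx_rings[i];  /* copy config ... */
                    staged[i].count = new_tx_count; /* ... with new length */
                    err = demo_ring_setup(&staged[i]);
                    if (err)
                            goto err_free;
            }

            /* Only now (with the queues stopped in a real driver) swap the
             * staged descriptor memory into the live rings. */
            for (i = 0; i < vsi->num_queues; i++) {
                    kfree(vsi->tx_rings[i]->desc);
                    *vsi->tx_rings[i] = staged[i];
            }
            kfree(staged);
            return 0;

    err_free:
            while (i--)
                    kfree(staged[i].desc);
            kfree(staged);
            return err;
    }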
  i40e_main.c
    358: if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {  in i40e_tx_timeout()
    360: vsi->tx_rings[i]->queue_index) {  in i40e_tx_timeout()
    361: tx_ring = vsi->tx_rings[i];  in i40e_tx_timeout()
    471: if (!vsi->tx_rings)  in i40e_get_netdev_stats_struct()
    479: ring = READ_ONCE(vsi->tx_rings[i]);  in i40e_get_netdev_stats_struct()
    539: memset(&vsi->tx_rings[i]->stats, 0,  in i40e_vsi_reset_stats()
    540: sizeof(vsi->tx_rings[i]->stats));  in i40e_vsi_reset_stats()
    541: memset(&vsi->tx_rings[i]->tx_stats, 0,  in i40e_vsi_reset_stats()
    542: sizeof(vsi->tx_rings[i]->tx_stats));  in i40e_vsi_reset_stats()
    915: p = READ_ONCE(vsi->tx_rings[q]);  in i40e_update_vsi_stats()
    [all …]
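The i40e_get_netdev_stats_struct() and i40e_update_vsi_stats() hits read each ring pointer once with READ_ONCE() and guard against a NULL array, since stats can be requested while the rings are being torn down or resized. A minimal sketch of that lockless walk, with illustrative types rather than the i40e ones:

    #include <linux/compiler.h>
    #include <linux/types.h>

    struct demo_ring {
            u64 packets;
            u64 bytes;
    };

    struct demo_vsi {
            struct demo_ring **tx_rings;
            int num_queues;
    };

    static void demo_sum_tx_stats(struct demo_vsi *vsi, u64 *pkts, u64 *bytes)
    {
            int i;

            *pkts = 0;
            *bytes = 0;

            if (!vsi->tx_rings)     /* rings may not be allocated yet */
                    return;

            for (i = 0; i < vsi->num_queues; i++) {
                    /* Sample the pointer once; a concurrent reconfiguration
                     * may be replacing rings while we walk the array. */
                    struct demo_ring *ring = READ_ONCE(vsi->tx_rings[i]);

                    if (!ring)
                            continue;
                    *pkts += ring->packets;
                    *bytes += ring->bytes;
            }
    }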
  i40e_debugfs.c
    292: struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);  in i40e_dbg_dump_vsi_seid()
    574: if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) {  in i40e_dbg_dump_desc()
    586: ring = kmemdup(vsi->tx_rings[ring_id], sizeof(*ring), GFP_KERNEL);  in i40e_dbg_dump_desc()
/openbmc/linux/drivers/thunderbolt/
  nhi.c
    529: if (!nhi->tx_rings[i]) {  in nhi_alloc_hop()
    552: if (ring->is_tx && nhi->tx_rings[ring->hop]) {  in nhi_alloc_hop()
    566: nhi->tx_rings[ring->hop] = ring;  in nhi_alloc_hop()
    817: ring->nhi->tx_rings[ring->hop] = NULL;  in tb_ring_free()
    939: ring = nhi->tx_rings[hop];  in nhi_interrupt_work()
    1137: if (nhi->tx_rings[i])  in nhi_shutdown()
    1361: nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,  in nhi_probe()
    1362: sizeof(*nhi->tx_rings), GFP_KERNEL);  in nhi_probe()
    1365: if (!nhi->tx_rings || !nhi->rx_rings)  in nhi_probe()
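The nhi_alloc_hop() / tb_ring_free() hits suggest a fixed table of ring pointers indexed by hop ID, sized at probe time with devm_kcalloc(): allocating a ring claims an empty slot, freeing it clears the slot. A simplified sketch of that slot bookkeeping; names and locking below are stand-ins, not the Thunderbolt implementation:

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct demo_ring {
            int hop;
    };

    struct demo_nhi {
            spinlock_t lock;
            struct demo_ring **tx_rings;    /* one slot per hop */
            unsigned int hop_count;
    };

    static int demo_nhi_init(struct device *dev, struct demo_nhi *nhi)
    {
            spin_lock_init(&nhi->lock);
            nhi->tx_rings = devm_kcalloc(dev, nhi->hop_count,
                                         sizeof(*nhi->tx_rings), GFP_KERNEL);
            return nhi->tx_rings ? 0 : -ENOMEM;
    }

    /* Claim the first unused hop for @ring; -EBUSY if all are taken. */
    static int demo_alloc_hop(struct demo_nhi *nhi, struct demo_ring *ring)
    {
            unsigned int i;
            int ret = -EBUSY;

            spin_lock_irq(&nhi->lock);
            for (i = 0; i < nhi->hop_count; i++) {
                    if (!nhi->tx_rings[i]) {
                            ring->hop = i;
                            nhi->tx_rings[i] = ring;
                            ret = 0;
                            break;
                    }
            }
            spin_unlock_irq(&nhi->lock);
            return ret;
    }

    static void demo_ring_free(struct demo_nhi *nhi, struct demo_ring *ring)
    {
            spin_lock_irq(&nhi->lock);
            nhi->tx_rings[ring->hop] = NULL;        /* release the slot */
            spin_unlock_irq(&nhi->lock);
    }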
/openbmc/linux/drivers/net/ethernet/broadcom/genet/
  bcmgenet.c
    1010: tx_rings[num].packets), \
    1012: tx_rings[num].bytes), \
    1971: bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);  in bcmgenet_tx_reclaim_all()
    1974: bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);  in bcmgenet_tx_reclaim_all()
    2073: ring = &priv->tx_rings[index];  in bcmgenet_xmit()
    2637: struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];  in bcmgenet_init_tx_ring()
    2755: ring = &priv->tx_rings[i];  in bcmgenet_enable_tx_napi()
    2760: ring = &priv->tx_rings[DESC_INDEX];  in bcmgenet_enable_tx_napi()
    2771: ring = &priv->tx_rings[i];  in bcmgenet_disable_tx_napi()
    2775: ring = &priv->tx_rings[DESC_INDEX];  in bcmgenet_disable_tx_napi()
    [all …]
/openbmc/linux/drivers/net/ethernet/broadcom/
  bcmsysport.c
    469: ring = &priv->tx_rings[q];  in bcm_sysport_update_tx_stats()
    535: ring = &priv->tx_rings[i];  in bcm_sysport_get_stats()
    667: bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);  in bcm_sysport_set_coalesce()
    1023: bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);  in bcm_sysport_tx_reclaim_all()
    1177: txr = &priv->tx_rings[ring];  in bcm_sysport_rx_isr()
    1209: txr = &priv->tx_rings[ring];  in bcm_sysport_tx_isr()
    1334: ring = &priv->tx_rings[queue];  in bcm_sysport_xmit()
    1516: struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];  in bcm_sysport_init_tx_ring()
    1607: struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];  in bcm_sysport_fini_tx_ring()
    2373: ring = &priv->tx_rings[q];  in bcm_sysport_map_queues()
    [all …]
/openbmc/linux/drivers/net/wireless/realtek/rtw89/
  pci.c
    111: struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];  in rtw89_pci_reclaim_tx_fwcmd()
    492: tx_ring = &rtwpci->tx_rings[txch];  in rtw89_pci_release_rpp()
    939: struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];  in __rtw89_pci_check_and_reclaim_tx_fwcmd_resource()
    955: struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];  in __rtw89_pci_check_and_reclaim_tx_resource_noio()
    972: struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];  in __rtw89_pci_check_and_reclaim_tx_resource()
    1065: struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];  in rtw89_pci_ops_tx_kick_off()
    1085: tx_ring = &rtwpci->tx_rings[txch];  in rtw89_pci_tx_kick_off_pending()
    1093: struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];  in __pci_flush_txch()
    1350: tx_ring = &rtwpci->tx_rings[txch];  in rtw89_pci_tx_write()
    1436: tx_ring = &rtwpci->tx_rings[i];  in rtw89_pci_reset_trx_rings()
    [all …]
/openbmc/linux/drivers/net/ethernet/intel/iavf/
  iavf_main.c
    478: struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];  in iavf_map_vector_to_txq()
    707: adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);  in iavf_configure_tx()
    1497: kfree(adapter->tx_rings);  in iavf_free_queues()
    1498: adapter->tx_rings = NULL;  in iavf_free_queues()
    1517: struct iavf_ring *tx_ring = &adapter->tx_rings[i];  in iavf_set_queue_vlan_tag_loc()
    1611: adapter->tx_rings = kcalloc(num_active_queues,  in iavf_alloc_queues()
    1613: if (!adapter->tx_rings)  in iavf_alloc_queues()
    1624: tx_ring = &adapter->tx_rings[i];  in iavf_alloc_queues()
    3369: if (!adapter->tx_rings)  in iavf_free_all_tx_resources()
    3373: if (adapter->tx_rings[i].desc)  in iavf_free_all_tx_resources()
    [all …]
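iavf_alloc_queues() and iavf_configure_tx() above allocate the ring array with kcalloc() and then point each ring's tail at its per-queue doorbell register inside the mapped BAR (hw->hw_addr + IAVF_QTX_TAIL1(i)). A sketch of that tail-register wiring; the register layout, stride and names below are invented for illustration only:

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    #define DEMO_QTX_TAIL(q)        (0x2000 + (q) * 4)      /* hypothetical */

    struct demo_ring {
            u8 __iomem *tail;
            u16 next_to_use;
    };

    struct demo_adapter {
            u8 __iomem *hw_addr;            /* ioremapped BAR0 */
            struct demo_ring *tx_rings;
            unsigned int num_queues;
    };

    static int demo_alloc_queues(struct demo_adapter *adapter)
    {
            adapter->tx_rings = kcalloc(adapter->num_queues,
                                        sizeof(*adapter->tx_rings),
                                        GFP_KERNEL);
            return adapter->tx_rings ? 0 : -ENOMEM;
    }

    static void demo_configure_tx(struct demo_adapter *adapter)
    {
            unsigned int i;

            for (i = 0; i < adapter->num_queues; i++)
                    adapter->tx_rings[i].tail =
                            adapter->hw_addr + DEMO_QTX_TAIL(i);
    }

    /* Hot path: bump the tail to hand new descriptors to the hardware. */
    static void demo_ring_doorbell(struct demo_ring *ring)
    {
            writel(ring->next_to_use, ring->tail);
    }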
/openbmc/linux/drivers/net/ethernet/sun/
  niu.c
    3610: index = (rp - np->tx_rings);  in niu_tx_work()
    3757: struct tx_ring_info *rp = &np->tx_rings[i];  in niu_poll_core()
    4109: struct tx_ring_info *rp = &np->tx_rings[i];  in niu_slowpath_interrupt()
    4187: struct tx_ring_info *rp = &np->tx_rings[i];  in __niu_fastpath_interrupt()
    4327: if (np->tx_rings) {  in niu_free_channels()
    4329: struct tx_ring_info *rp = &np->tx_rings[i];  in niu_free_channels()
    4333: kfree(np->tx_rings);  in niu_free_channels()
    4334: np->tx_rings = NULL;  in niu_free_channels()
    4475: struct tx_ring_info *tx_rings;  in niu_alloc_channels()  (local)
    4528: tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),  in niu_alloc_channels()
    [all …]
/openbmc/linux/Documentation/networking/
  driver.rst
    66: dr = dp->tx_rings[idx];
/openbmc/linux/include/linux/
  thunderbolt.h
    496: struct tb_ring **tx_rings;  (member)
/openbmc/linux/drivers/net/ethernet/broadcom/bnxt/
  bnxt.c
    6198: int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)  in __bnxt_hwrm_get_tx_rings()  (argument)
    6215: *tx_rings = le16_to_cpu(resp->alloc_tx_rings);  in __bnxt_hwrm_get_tx_rings()
    6224: __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,  in __bnxt_hwrm_reserve_pf_rings()  (argument)
    6234: enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;  in __bnxt_hwrm_reserve_pf_rings()
    6235: req->num_tx_rings = cpu_to_le16(tx_rings);  in __bnxt_hwrm_reserve_pf_rings()
    6241: enables |= tx_rings + ring_grps ?  in __bnxt_hwrm_reserve_pf_rings()
    6256: req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);  in __bnxt_hwrm_reserve_pf_rings()
    6277: __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,  in __bnxt_hwrm_reserve_vf_rings()  (argument)
    6286: enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;  in __bnxt_hwrm_reserve_vf_rings()
    6291: enables |= tx_rings + ring_grps ?  in __bnxt_hwrm_reserve_vf_rings()
    [all …]
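The __bnxt_hwrm_reserve_*_rings() matches show how the ring-reservation firmware request is built: each count is stored little-endian with cpu_to_le16(), and a corresponding bit is set in an enables mask only when that field carries a meaningful request, so the firmware ignores the rest. A sketch of that request-building pattern; the request layout and flag values below are invented, not the real HWRM ABI:

    #include <asm/byteorder.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    #define DEMO_ENABLES_NUM_TX_RINGS       BIT(0)
    #define DEMO_ENABLES_NUM_CMPL_RINGS     BIT(1)

    struct demo_func_cfg_req {
            __le32 enables;
            __le16 num_tx_rings;
            __le16 num_cmpl_rings;
    };

    static void demo_build_reserve_req(struct demo_func_cfg_req *req,
                                       int tx_rings, int ring_grps)
    {
            u32 enables = 0;

            /* Advertise a field only when a non-zero reservation is asked for. */
            enables |= tx_rings ? DEMO_ENABLES_NUM_TX_RINGS : 0;
            req->num_tx_rings = cpu_to_le16(tx_rings);

            enables |= tx_rings + ring_grps ? DEMO_ENABLES_NUM_CMPL_RINGS : 0;
            req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);

            req->enables = cpu_to_le32(enables);
    }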