/openbmc/linux/drivers/net/ethernet/intel/ice/

ice_base.c
  in ice_vsi_alloc_q_vector():
    105  struct ice_q_vector *q_vector;  (local)
    109  q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
    110  if (!q_vector)
    113  q_vector->vsi = vsi;
    114  q_vector->v_idx = v_idx;
    115  q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
    116  q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
    117  q_vector->tx.itr_mode = ITR_DYNAMIC;
    118  q_vector->rx.itr_mode = ITR_DYNAMIC;
    119  q_vector->tx.type = ICE_TX_CONTAINER;
  [all …]
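The ice_base.c hits above all come from the per-vector allocator. The recurring pattern in these drivers is a zeroed per-interrupt-vector object that carries the NAPI context, back-pointers, and default interrupt-moderation (ITR) settings. A minimal sketch of that shape, assuming simplified struct and constant names rather than the ice driver's exact definitions:

    /* Hedged sketch of the per-vector allocation pattern; my_q_vector, my_vsi
     * and the MY_DFLT_*_ITR constants are illustrative assumptions. */
    static struct my_q_vector *my_alloc_q_vector(struct my_vsi *vsi, u16 v_idx)
    {
        struct my_q_vector *q_vector;

        q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);  /* zeroed, may sleep */
        if (!q_vector)
            return NULL;

        q_vector->vsi = vsi;                        /* back-pointer to the owning VSI */
        q_vector->v_idx = v_idx;                    /* slot in vsi->q_vectors[] */
        q_vector->tx.itr_setting = MY_DFLT_TX_ITR;  /* default Tx moderation */
        q_vector->rx.itr_setting = MY_DFLT_RX_ITR;  /* default Rx moderation */
        return q_vector;
    }
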
ice_trace.h
     64  TP_PROTO(struct ice_q_vector *q_vector, struct dim *dim),
     65  TP_ARGS(q_vector, dim),
     66  TP_STRUCT__entry(__field(struct ice_q_vector *, q_vector)
     68  __string(devname, q_vector->rx.rx_ring->netdev->name)),
     70  TP_fast_assign(__entry->q_vector = q_vector;
     72  __assign_str(devname, q_vector->rx.rx_ring->netdev->name);),
     76  __entry->q_vector->rx.rx_ring->q_index,
     86  TP_PROTO(struct ice_q_vector *q_vector, struct dim *dim),
     87  TP_ARGS(q_vector, dim)
     91  TP_PROTO(struct ice_q_vector *q_vector, struct dim *dim),
  [all …]
ice_xsk.c
  in ice_qvec_toggle_napi():
     67  ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,  (argument)
     70  if (!vsi->netdev || !q_vector)
     74  napi_enable(&q_vector->napi);
     76  napi_disable(&q_vector->napi);
  in ice_qvec_dis_irq():
     87  struct ice_q_vector *q_vector)  (argument)
    102  if (q_vector) {
    103  wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
    105  synchronize_irq(q_vector->irq.virq);
  in ice_qvec_cfg_msix():
    115  ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)  (argument)
    117  u16 reg_idx = q_vector->reg_idx;
  [all …]
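ice_xsk.c is the AF_XDP queue-pair path, which has to quiesce a single vector before its queues are reconfigured. A hedged sketch of the steps visible above, keeping the register and field names from the excerpt but assuming the helper boundary and exact ordering:

    /* Sketch: stop the poll loop, disarm the vector, then wait for any handler
     * still executing on another CPU. Locking and the re-enable path are omitted. */
    static void my_qvec_quiesce(struct ice_hw *hw, struct ice_q_vector *q_vector)
    {
        napi_disable(&q_vector->napi);                  /* stop NAPI polling */
        wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);  /* mask this vector's interrupt */
        synchronize_irq(q_vector->irq.virq);            /* flush in-flight handlers */
    }
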
ice_eswitch.c
  in ice_eswitch_remap_rings_to_vectors():
    151  struct ice_q_vector *q_vector;  (local)
    162  q_vector = repr->q_vector;
    166  q_vector->vsi = vsi;
    167  q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;
    169  q_vector->num_ring_tx = 1;
    170  q_vector->tx.tx_ring = tx_ring;
    171  tx_ring->q_vector = q_vector;
    179  q_vector->num_ring_rx = 1;
    180  q_vector->rx.rx_ring = rx_ring;
    181  rx_ring->q_vector = q_vector;
  [all …]
ice_repr.c
  in ice_repr_add():
    287  struct ice_q_vector *q_vector;  (local)
    313  q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
    314  if (!q_vector) {
    318  repr->q_vector = q_vector;
    340  kfree(repr->q_vector);
    341  vf->repr->q_vector = NULL;
  in ice_repr_rem():
    360  kfree(vf->repr->q_vector);
    361  vf->repr->q_vector = NULL;
ice_lib.c
  in ice_msix_clean_ctrl_vsi():
    480  struct ice_q_vector *q_vector = (struct ice_q_vector *)data;  (local)
    482  if (!q_vector->tx.tx_ring)
    486  ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
    487  ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);
  in ice_msix_clean_rings():
    499  struct ice_q_vector *q_vector = (struct ice_q_vector *)data;  (local)
    501  if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
    504  q_vector->total_events++;
    506  napi_schedule(&q_vector->napi);
  in ice_eswitch_msix_clean_rings():
    513  struct ice_q_vector *q_vector = (struct ice_q_vector *)data;  (local)
    514  struct ice_pf *pf = q_vector->vsi->back;
  [all …]
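The ice_lib.c matches are the per-vector MSI-X handlers, and the same shape repeats in the other drivers below: the hard interrupt does almost nothing except count the event and schedule NAPI, so the actual ring cleaning runs later in the poll loop. A minimal sketch of that pattern, with my_q_vector and its fields as illustrative assumptions:

    /* Sketch of an "interrupt only schedules NAPI" MSI-X handler. The q_vector
     * pointer is the dev_id that was passed to request_irq(). */
    static irqreturn_t my_msix_clean_rings(int irq, void *data)
    {
        struct my_q_vector *q_vector = data;

        if (!q_vector->tx.ring && !q_vector->rx.ring)   /* nothing mapped to this vector */
            return IRQ_HANDLED;

        q_vector->total_events++;                       /* later fed to interrupt moderation */
        napi_schedule(&q_vector->napi);                 /* ring cleanup happens in the poll loop */

        return IRQ_HANDLED;
    }
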
ice_txrx.c
  in ice_construct_skb():
   1021  skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
  in __ice_update_sample():
   1315  static void __ice_update_sample(struct ice_q_vector *q_vector,  (argument)
   1348  dim_update_sample(q_vector->total_events, packets, bytes, sample);
  in ice_net_dim():
   1369  static void ice_net_dim(struct ice_q_vector *q_vector)  (argument)
   1371  struct ice_ring_container *tx = &q_vector->tx;
   1372  struct ice_ring_container *rx = &q_vector->rx;
   1377  __ice_update_sample(q_vector, tx, &dim_sample, true);
   1384  __ice_update_sample(q_vector, rx, &dim_sample, false);
  in ice_enable_interrupt():
   1418  static void ice_enable_interrupt(struct ice_q_vector *q_vector)  (argument)
   1420  struct ice_vsi *vsi = q_vector->vsi;
  [all …]
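ice_net_dim() above is where per-vector counters feed the kernel's generic dynamic interrupt moderation (DIM) library. A hedged sketch of just the sampling step, assuming simplified field names; the aggregation across rings and the later hand-off to the DIM state machine are left out:

    #include <linux/dim.h>

    /* Sketch: package a vector's accumulated Rx counters into a dim_sample. */
    static void my_update_rx_sample(struct my_q_vector *q_vector,
                                    struct dim_sample *sample)
    {
        dim_update_sample(q_vector->total_events,     /* interrupts seen on this vector */
                          q_vector->rx.total_packets,
                          q_vector->rx.total_bytes,
                          sample);                    /* DIM compares successive samples */
    }
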
ice_base.h
     20  void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);
     25  void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);
/openbmc/linux/drivers/net/ethernet/intel/fm10k/

fm10k_debugfs.c
  in fm10k_dbg_desc_open():
    116  struct fm10k_q_vector *q_vector = ring->q_vector;  (local)
    120  if (ring < q_vector->rx.ring)
  in fm10k_dbg_q_vector_init():
    150  void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector)  (argument)
    152  struct fm10k_intfc *interface = q_vector->interface;
    160  snprintf(name, sizeof(name), "q_vector.%03d", q_vector->v_idx);
    162  q_vector->dbg_q_vector = debugfs_create_dir(name, interface->dbg_intfc);
    165  for (i = 0; i < q_vector->tx.count; i++) {
    166  struct fm10k_ring *ring = &q_vector->tx.ring[i];
    171  q_vector->dbg_q_vector, ring,
    176  for (i = 0; i < q_vector->rx.count; i++) {
  [all …]
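fm10k exposes each vector under debugfs as a "q_vector.NNN" directory with one entry per ring. A hedged sketch of that layout; the parent dentry, fops and field names here are assumptions, not fm10k's exact ones:

    #include <linux/debugfs.h>

    /* Sketch: one debugfs directory per vector, one file per Tx ring. */
    static void my_dbg_q_vector_init(struct my_q_vector *q_vector,
                                     struct dentry *parent,
                                     const struct file_operations *ring_fops)
    {
        char name[24];
        int i;

        snprintf(name, sizeof(name), "q_vector.%03d", q_vector->v_idx);
        q_vector->dbg_dir = debugfs_create_dir(name, parent);

        for (i = 0; i < q_vector->tx.count; i++) {
            struct my_ring *ring = &q_vector->tx.ring[i];

            snprintf(name, sizeof(name), "tx_ring_%d", ring->queue_index);
            debugfs_create_file(name, 0400, q_vector->dbg_dir, ring, ring_fops);
        }
    }
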
fm10k_main.c
  in fm10k_fetch_rx_buffer():
    319  skb = napi_alloc_skb(&rx_ring->q_vector->napi,
  in fm10k_receive_skb():
    556  static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,  (argument)
    559  napi_gro_receive(&q_vector->napi, skb);
  in fm10k_clean_rx_irq():
    562  static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,  (argument)
    612  fm10k_receive_skb(q_vector, skb);
    628  q_vector->rx.total_packets += total_packets;
    629  q_vector->rx.total_bytes += total_bytes;
  in fm10k_get_tx_pending():
   1115  struct fm10k_intfc *interface = ring->q_vector->interface;
  in fm10k_clean_tx_irq():
   1179  static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,  (argument)
   1182  struct fm10k_intfc *interface = q_vector->interface;
  [all …]
fm10k_pci.c
  in fm10k_check_hang_subtask():
    720  struct fm10k_q_vector *qv = interface->q_vector[i];
  in fm10k_configure_tx_ring():
    900  if (ring->q_vector) {
    901  txint = ring->q_vector->v_idx + NON_Q_VECTORS;
    913  ring->q_vector)
    915  &ring->q_vector->affinity_mask,
  in fm10k_configure_rx_ring():
   1039  if (ring->q_vector) {
   1040  rxint = ring->q_vector->v_idx + NON_Q_VECTORS;
  in fm10k_napi_enable_all():
   1174  struct fm10k_q_vector *q_vector;  (local)
   1178  q_vector = interface->q_vector[q_idx];
   1179  napi_enable(&q_vector->napi);
  [all …]
/openbmc/linux/drivers/net/ethernet/intel/iavf/

iavf_txrx.c
  in iavf_force_wb():
    136  static void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)  (argument)
    145  IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx),
  in iavf_detect_recover_hung():
    188  iavf_force_wb(vsi, tx_ring->q_vector);
  in iavf_clean_tx_irq():
    307  tx_ring->q_vector->tx.total_bytes += total_bytes;
    308  tx_ring->q_vector->tx.total_packets += total_packets;
  in iavf_enable_wb_on_itr():
    355  struct iavf_q_vector *q_vector)  (argument)
    357  u16 flags = q_vector->tx.ring[0].flags;
    363  if (q_vector->arm_wb_state)
    370  IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
    371  q_vector->arm_wb_state = true;
  [all …]
iavf_main.c
  in iavf_msix_clean_rings():
    432  struct iavf_q_vector *q_vector = data;  (local)
    434  if (!q_vector->tx.ring && !q_vector->rx.ring)
    437  napi_schedule_irqoff(&q_vector->napi);
  in iavf_map_vector_to_rxq():
    451  struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];  (local)
    455  rx_ring->q_vector = q_vector;
    456  rx_ring->next = q_vector->rx.ring;
    458  q_vector->rx.ring = rx_ring;
    459  q_vector->rx.count++;
    460  q_vector->rx.next_update = jiffies + 1;
    461  q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
  [all …]
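iavf_map_vector_to_rxq() shows how several rings can share one vector: the ring gets a back-pointer to its q_vector and is pushed onto a singly linked list rooted in the vector's rx container. A hedged sketch of that wiring with simplified, assumed types:

    /* Sketch: attach an Rx ring to a queue vector. */
    static void my_map_vector_to_rxq(struct my_adapter *adapter, int v_idx, int r_idx)
    {
        struct my_q_vector *q_vector = &adapter->q_vectors[v_idx];
        struct my_ring *rx_ring = &adapter->rx_rings[r_idx];

        rx_ring->q_vector = q_vector;       /* ring -> vector back-pointer */
        rx_ring->next = q_vector->rx.ring;  /* push onto the vector's ring list */
        q_vector->rx.ring = rx_ring;
        q_vector->rx.count++;               /* rings now served by this vector */
    }
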
/openbmc/linux/drivers/net/ethernet/intel/ixgbe/

ixgbe_lib.c
  in ixgbe_alloc_q_vector():
    840  struct ixgbe_q_vector *q_vector;  (local)
    858  q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count),
    860  if (!q_vector)
    861  q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
    863  if (!q_vector)
    868  cpumask_set_cpu(cpu, &q_vector->affinity_mask);
    869  q_vector->numa_node = node;
    873  q_vector->cpu = -1;
    877  netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll);
    880  adapter->q_vector[v_idx] = q_vector;
  [all …]
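ixgbe embeds its rings in a flexible array at the end of the vector and prefers memory on the local NUMA node, falling back to a plain allocation if that fails. A hedged sketch of that allocation idiom; the struct layout is an assumption, while struct_size() and kzalloc_node() are the generic kernel helpers seen above:

    #include <linux/slab.h>
    #include <linux/overflow.h>

    struct my_ring { void *desc; };              /* placeholder ring, assumption */

    struct my_q_vector_fa {
        int numa_node;
        struct my_ring ring[];                   /* flexible array sized at allocation time */
    };

    static struct my_q_vector_fa *my_alloc_q_vector(int node, int ring_count)
    {
        struct my_q_vector_fa *q_vector;

        q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count),
                                GFP_KERNEL, node);      /* try the local node first */
        if (!q_vector)
            q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
                               GFP_KERNEL);             /* fall back to any node */
        return q_vector;
    }
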
ixgbe_xsk.c
  in ixgbe_construct_skb_zc():
    223  skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
  in ixgbe_clean_rx_irq_zc():
    248  int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,  (argument)
    253  struct ixgbe_adapter *adapter = q_vector->adapter;
    350  ixgbe_rx_skb(q_vector, skb);
    366  q_vector->rx.total_packets += total_rx_packets;
    367  q_vector->rx.total_bytes += total_rx_bytes;
  in ixgbe_clean_xdp_tx_irq():
    461  bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,  (argument)
    507  q_vector->tx.total_bytes += total_bytes;
    508  q_vector->tx.total_packets += total_packets;
    516  return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
  [all …]
ixgbe_txrx_common.h
     24  void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
     40  int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
     44  bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
ixgbe_main.c
  in ixgbe_clean_tx_irq():
   1114  static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,  (argument)
   1117  struct ixgbe_adapter *adapter = q_vector->adapter;
   1121  unsigned int budget = q_vector->tx.work_limit;
   1214  q_vector->tx.total_bytes += total_bytes;
   1215  q_vector->tx.total_packets += total_packets;
  in ixgbe_update_dca():
   1336  static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)  (argument)
   1338  struct ixgbe_adapter *adapter = q_vector->adapter;
   1342  if (q_vector->cpu == cpu)
   1345  ixgbe_for_each_ring(ring, q_vector->tx)
   1348  ixgbe_for_each_ring(ring, q_vector->rx)
  [all …]
/openbmc/linux/drivers/net/ethernet/wangxun/libwx/

wx_lib.c
  in wx_build_skb():
    259  skb = napi_alloc_skb(&rx_ring->q_vector->napi, WX_RXBUFFER_256);
  in wx_rx_vlan():
    581  ethertype = ring->q_vector->wx->tpid[idx];
  in wx_clean_rx_irq():
    621  static int wx_clean_rx_irq(struct wx_q_vector *q_vector,  (argument)
    676  napi_gro_receive(&q_vector->napi, skb);
    686  q_vector->rx.total_packets += total_rx_packets;
    687  q_vector->rx.total_bytes += total_rx_bytes;
  in wx_clean_tx_irq():
    703  static bool wx_clean_tx_irq(struct wx_q_vector *q_vector,  (argument)
    706  unsigned int budget = q_vector->wx->tx_work_limit;
    796  q_vector->tx.total_bytes += total_bytes;
    797  q_vector->tx.total_packets += total_packets;
  [all …]
/openbmc/linux/drivers/net/ethernet/intel/igc/

igc_main.c
  in igc_disable_tx_ring_hw():
    321  struct igc_hw *hw = &ring->q_vector->adapter->hw;
  in igc_setup_rx_resources():
    545  rx_ring->q_vector->napi.napi_id);
  in igc_construct_skb():
   1977  skb = napi_alloc_skb(&rx_ring->q_vector->napi,
  in igc_rx_offset():
   2165  struct igc_adapter *adapter = rx_ring->q_vector->adapter;
  in igc_update_rx_stats():
   2559  static void igc_update_rx_stats(struct igc_q_vector *q_vector,  (argument)
   2562  struct igc_ring *ring = q_vector->rx.ring;
   2569  q_vector->rx.total_packets += packets;
   2570  q_vector->rx.total_bytes += bytes;
  in igc_clean_rx_irq():
   2573  static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)  (argument)
   2576  struct igc_adapter *adapter = q_vector->adapter;
  [all …]
/openbmc/linux/drivers/net/ethernet/intel/ixgbevf/

ixgbevf_main.c
    109  static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
  in ixgbevf_clean_tx_irq():
    264  static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,  (argument)
    267  struct ixgbevf_adapter *adapter = q_vector->adapter;
    363  q_vector->tx.total_bytes += total_bytes;
    364  q_vector->tx.total_packets += total_packets;
  in ixgbevf_rx_skb():
    429  static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,  (argument)
    432  napi_gro_receive(&q_vector->napi, skb);
  in ixgbevf_construct_skb():
    883  skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE);
  in ixgbevf_clean_rx_irq():
   1116  static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,  (argument)
   1121  struct ixgbevf_adapter *adapter = q_vector->adapter;
  [all …]
ethtool.c
  in ixgbevf_get_coalesce():
    803  if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
  in ixgbevf_set_coalesce():
    821  struct ixgbevf_q_vector *q_vector;  (local)
    826  if (adapter->q_vector[0]->tx.count &&
    827  adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs)
    857  q_vector = adapter->q_vector[i];
    858  if (q_vector->tx.count && !q_vector->rx.count)
    860  q_vector->itr = tx_itr_param;
    863  q_vector->itr = rx_itr_param;
    864  ixgbevf_write_eitr(q_vector);
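ixgbevf_set_coalesce() applies new ethtool coalescing values per vector: Tx-only vectors take the Tx ITR, everything else takes the Rx ITR, and the result is then written to the interrupt throttle register. A hedged sketch of that loop; my_adapter, my_q_vector and my_write_eitr() are assumed names, not the driver's:

    /* Sketch: push new ITR values from ethtool -C to every queue vector. */
    static void my_apply_coalesce(struct my_adapter *adapter,
                                  u32 tx_itr_param, u32 rx_itr_param)
    {
        int i;

        for (i = 0; i < adapter->num_q_vectors; i++) {
            struct my_q_vector *q_vector = adapter->q_vector[i];

            if (q_vector->tx.count && !q_vector->rx.count)
                q_vector->itr = tx_itr_param;   /* Tx-only vector */
            else
                q_vector->itr = rx_itr_param;   /* Rx or combined vector */

            my_write_eitr(q_vector);            /* program the hardware register */
        }
    }
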
/openbmc/linux/drivers/net/ethernet/intel/i40e/

i40e_txrx.c
  in i40e_detect_recover_hung():
    906  i40e_force_wb(vsi, tx_ring->q_vector);
  in i40e_enable_wb_on_itr():
   1063  struct i40e_q_vector *q_vector)  (argument)
   1065  u16 flags = q_vector->tx.ring[0].flags;
   1071  if (q_vector->arm_wb_state)
   1079  I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
   1087  q_vector->arm_wb_state = true;
  in i40e_force_wb():
   1096  void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)  (argument)
   1106  I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
  in i40e_container_is_rx():
   1118  static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,  (argument)
   1121  return &q_vector->rx == rc;
  [all …]
/openbmc/linux/drivers/net/ethernet/intel/igb/

igb_main.c
  in igb_assign_vector():
    784  static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)  (argument)
    786  struct igb_adapter *adapter = q_vector->adapter;
    792  if (q_vector->rx.ring)
    793  rx_queue = q_vector->rx.ring->reg_idx;
    794  if (q_vector->tx.ring)
    795  tx_queue = q_vector->tx.ring->reg_idx;
    811  q_vector->eims_value = msixbm;
    827  q_vector->eims_value = BIT(msix_vector);
    848  q_vector->eims_value = BIT(msix_vector);
    856  adapter->eims_enable_mask |= q_vector->eims_value;
  [all …]
/openbmc/linux/drivers/net/ethernet/wangxun/txgbe/

txgbe_main.c
  in txgbe_intr():
    101  struct wx_q_vector *q_vector;  (local)
    106  q_vector = wx->q_vector[0];
    124  napi_schedule_irqoff(&q_vector->napi);
  in txgbe_request_msix_irqs():
    147  struct wx_q_vector *q_vector = wx->q_vector[vector];  (local)
    150  if (q_vector->tx.ring && q_vector->rx.ring)
    151  snprintf(q_vector->name, sizeof(q_vector->name) - 1,
    158  q_vector->name, q_vector);
    161  q_vector->name, err);
    172  wx->q_vector[vector]);
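txgbe_request_msix_irqs() (and the nearly identical ngbe version below) names each vector after the queues it serves before requesting its MSI-X interrupt, so it shows up readably in /proc/interrupts, and passes the q_vector as the dev_id that the handler later gets back. A hedged sketch of that loop, using an MSI-X handler like the one sketched after the ice_lib.c entry; the device fields are assumptions and the error unwinding is trimmed:

    #include <linux/interrupt.h>

    /* Sketch: one request_irq() per queue vector, named after the queues it serves. */
    static int my_request_msix_irqs(struct my_dev *wx)
    {
        int vector, err;

        for (vector = 0; vector < wx->num_q_vectors; vector++) {
            struct my_q_vector *q_vector = wx->q_vector[vector];
            int irq = wx->msix_entry[vector].vector;

            snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                     "%s-TxRx-%d", wx->netdev->name, vector);

            err = request_irq(irq, my_msix_clean_rings, 0,
                              q_vector->name, q_vector);   /* q_vector is the dev_id */
            if (err)
                return err;
        }

        return 0;
    }
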
/openbmc/linux/drivers/net/ethernet/wangxun/ngbe/

ngbe_main.c
  in ngbe_intr():
    188  struct wx_q_vector *q_vector;  (local)
    193  q_vector = wx->q_vector[0];
    211  napi_schedule_irqoff(&q_vector->napi);
  in ngbe_request_msix_irqs():
    243  struct wx_q_vector *q_vector = wx->q_vector[vector];  (local)
    246  if (q_vector->tx.ring && q_vector->rx.ring)
    247  snprintf(q_vector->name, sizeof(q_vector->name) - 1,
    254  q_vector->name, q_vector);
    257  q_vector->name, err);
    276  wx->q_vector[vector]);