Searched refs: q_idx (Results 1 – 25 of 58) sorted by relevance

/openbmc/linux/drivers/infiniband/hw/hfi1/
vnic_main.c
124 u8 q_idx, struct sk_buff *skb, int err) in hfi1_vnic_update_tx_counters() argument
127 struct opa_vnic_stats *stats = &vinfo->stats[q_idx]; in hfi1_vnic_update_tx_counters()
153 u8 q_idx, struct sk_buff *skb, int err) in hfi1_vnic_update_rx_counters() argument
156 struct opa_vnic_stats *stats = &vinfo->stats[q_idx]; in hfi1_vnic_update_rx_counters()
205 u8 q_idx) in hfi1_vnic_maybe_stop_tx() argument
207 netif_stop_subqueue(vinfo->netdev, q_idx); in hfi1_vnic_maybe_stop_tx()
208 if (!hfi1_vnic_sdma_write_avail(vinfo, q_idx)) in hfi1_vnic_maybe_stop_tx()
211 netif_start_subqueue(vinfo->netdev, q_idx); in hfi1_vnic_maybe_stop_tx()
218 u8 pad_len, q_idx = skb->queue_mapping; in hfi1_netdev_start_xmit() local
225 v_dbg("xmit: queue %d skb len %d\n", q_idx, skb->len); in hfi1_netdev_start_xmit()
[all …]
vnic_sdma.c
126 int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx, in hfi1_vnic_send_dma() argument
130 struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx]; in hfi1_vnic_send_dma()
224 if (__netif_subqueue_stopped(vinfo->netdev, vnic_sdma->q_idx)) in hfi1_vnic_sdma_wakeup()
225 netif_wake_subqueue(vinfo->netdev, vnic_sdma->q_idx); in hfi1_vnic_sdma_wakeup()
229 u8 q_idx) in hfi1_vnic_sdma_write_avail() argument
231 struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx]; in hfi1_vnic_sdma_write_avail()
249 vnic_sdma->q_idx = i; in hfi1_vnic_sdma_init()
vnic.h
49 u8 q_idx; member
113 u8 q_idx);
122 int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
ipoib_tx.c
57 netif_stop_subqueue(txq->priv->netdev, txq->q_idx); in hfi1_ipoib_stop_txq()
64 netif_wake_subqueue(txq->priv->netdev, txq->q_idx); in hfi1_ipoib_wake_txq()
125 le64_to_cpu(tx->sdma_hdr->pbc), tx->txq->q_idx, in hfi1_ipoib_free_tx()
723 txq->q_idx = i; in hfi1_ipoib_txreq_init()
789 txq->q_idx, in hfi1_ipoib_drain_tx_list()
848 __netif_subqueue_stopped(dev, txq->q_idx), in hfi1_ipoib_tx_timeout()
ipoib.h
114 u8 q_idx; member
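
Across the hfi1 vnic and ipoib matches above, q_idx names a TX subqueue: it indexes the per-queue stats slot and the SDMA ring, and it is the handle passed to netif_stop_subqueue()/netif_start_subqueue() when the ring runs low. Below is a minimal userspace sketch of that stop/re-check/restart shape; all types, fields, and values are invented for illustration, not the driver's own.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_QUEUES 8

struct vnic_queue {
        bool stopped;           /* models the netif_stop_subqueue() state */
        unsigned int tx_avail;  /* models SDMA descriptor availability    */
        uint64_t tx_packets;    /* models the per-q_idx stats counters    */
};

static struct vnic_queue queues[NUM_QUEUES];

/* Same shape as hfi1_vnic_maybe_stop_tx(): stop first, re-check, restart. */
static void maybe_stop_tx(uint8_t q_idx)
{
        struct vnic_queue *q = &queues[q_idx];

        q->stopped = true;
        if (q->tx_avail != 0)       /* stand-in for hfi1_vnic_sdma_write_avail() */
                q->stopped = false; /* stand-in for netif_start_subqueue()       */
}

static void update_tx_counters(uint8_t q_idx)
{
        queues[q_idx].tx_packets++; /* counters are per queue, never shared */
}

int main(void)
{
        uint8_t q_idx = 3; /* the driver takes this from skb->queue_mapping */

        queues[q_idx].tx_avail = 1;
        maybe_stop_tx(q_idx);
        update_tx_counters(q_idx);
        printf("queue %u: stopped=%d packets=%llu\n", (unsigned)q_idx,
               (int)queues[q_idx].stopped,
               (unsigned long long)queues[q_idx].tx_packets);
        return 0;
}
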
/openbmc/linux/drivers/net/ethernet/intel/ice/
ice_xsk.c
25 static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx) in ice_qp_reset_stats() argument
38 memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0, in ice_qp_reset_stats()
39 sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats)); in ice_qp_reset_stats()
40 memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0, in ice_qp_reset_stats()
41 sizeof(vsi_stat->tx_ring_stats[q_idx]->stats)); in ice_qp_reset_stats()
43 memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0, in ice_qp_reset_stats()
44 sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats)); in ice_qp_reset_stats()
52 static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx) in ice_qp_clean_rings() argument
54 ice_clean_tx_ring(vsi->tx_rings[q_idx]); in ice_qp_clean_rings()
56 ice_clean_tx_ring(vsi->xdp_rings[q_idx]); in ice_qp_clean_rings()
[all …]
ice_lib.h
57 int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
59 int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx);
ice_lib.c
1820 int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx) in ice_vsi_cfg_single_rxq() argument
1822 if (q_idx >= vsi->num_rxq) in ice_vsi_cfg_single_rxq()
1825 return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]); in ice_vsi_cfg_single_rxq()
1828 int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx) in ice_vsi_cfg_single_txq() argument
1833 if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx]) in ice_vsi_cfg_single_txq()
1842 err = ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf); in ice_vsi_cfg_single_txq()
1887 u16 q_idx = 0; in ice_vsi_cfg_txqs() local
1896 for (q_idx = 0; q_idx < coun in ice_vsi_cfg_txqs()
2124 u16 q_idx; ice_vsi_stop_tx_rings() local
[all …]
ice_virtchnl.c
1175 * @q_idx: VF queue index used to determine the queue in the PF's space
1177 static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx) in ice_vf_ena_txq_interrupt() argument
1180 u32 pfq = vsi->txq_map[q_idx]; in ice_vf_ena_txq_interrupt()
1198 * @q_idx: VF queue index used to determine the queue in the PF's space
1200 static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx) in ice_vf_ena_rxq_interrupt() argument
1203 u32 pfq = vsi->rxq_map[q_idx]; in ice_vf_ena_rxq_interrupt()
1593 int i = -1, q_idx; in ice_vc_cfg_qs_msg() local
1637 q_idx = qpi->rxq.queue_id; in ice_vc_cfg_qs_msg()
1639 /* make sure selected "q_idx" is in valid range of queues in ice_vc_cfg_qs_msg()
1642 if (q_idx > in ice_vc_cfg_qs_msg()
[all …]
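
The ice helpers above share one convention: a caller-supplied q_idx is validated against the VSI's queue count before it ever indexes a ring array. A small standalone sketch of that guard, using simplified placeholder structs rather than the driver's real ice_vsi layout:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct ring { int id; };

struct vsi {
        struct ring **rx_rings;
        uint16_t num_rxq;
};

/* Mirrors the guard in ice_vsi_cfg_single_rxq(): reject a q_idx that falls
 * outside the VSI's allocated RX queues before touching the ring array.
 */
static int cfg_single_rxq(struct vsi *vsi, uint16_t q_idx)
{
        if (q_idx >= vsi->num_rxq || !vsi->rx_rings[q_idx])
                return -EINVAL;

        printf("configuring rx ring %d\n", vsi->rx_rings[q_idx]->id);
        return 0;
}

int main(void)
{
        struct ring r0 = { .id = 0 };
        struct ring *rings[] = { &r0 };
        struct vsi vsi = { .rx_rings = rings, .num_rxq = 1 };

        cfg_single_rxq(&vsi, 0);                /* in range: configured */
        if (cfg_single_rxq(&vsi, 5) == -EINVAL)
                printf("q_idx 5 rejected\n");
        return 0;
}
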
/openbmc/linux/drivers/accel/habanalabs/common/
hw_queue.c
409 u32 q_idx; in init_signal_cs() local
412 q_idx = job->hw_queue_id; in init_signal_cs()
413 prop = &hdev->kernel_queues[q_idx].sync_stream_prop; in init_signal_cs()
421 cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx, in init_signal_cs()
430 rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, 1, in init_signal_cs()
469 u32 q_idx; in init_wait_cs() local
471 q_idx = job->hw_queue_id; in init_wait_cs()
472 prop = &hdev->kernel_queues[q_idx].sync_stream_prop; in init_wait_cs()
486 cs->encaps_sig_hdl->q_idx, in init_wait_cs()
522 prop->base_mon_id, q_idx, cs->sequence); in init_wait_cs()
[all …]
command_submission.c
138 hw_sob->q_idx, hw_sob->sob_id); in hl_sob_reset_error()
1785 int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx, in hl_cs_signal_sob_wraparound_handler() argument
1793 prop = &hdev->kernel_queues[q_idx].sync_stream_prop; in hl_cs_signal_sob_wraparound_handler()
1816 q_idx); in hl_cs_signal_sob_wraparound_handler()
1857 prop->curr_sob_offset, q_idx); in hl_cs_signal_sob_wraparound_handler()
1925 enum hl_queue_type q_type, u32 q_idx, u32 encaps_signal_offset) in cs_ioctl_signal_wait_create_jobs() argument
1960 job->hw_queue_id = q_idx; in cs_ioctl_signal_wait_create_jobs()
1989 u32 q_idx, u32 count, in cs_ioctl_reserve_signals() argument
2009 if (q_idx >= hdev->asic_prop.max_queues) { in cs_ioctl_reserve_signals()
2011 q_idx); in cs_ioctl_reserve_signals()
[all …]
/openbmc/linux/drivers/net/ethernet/microsoft/mana/
mana_bpf.c
36 u16 q_idx) in mana_xdp_xmit_fm() argument
44 skb_set_queue_mapping(skb, q_idx); in mana_xdp_xmit_fm()
57 u16 q_idx; in mana_xdp_xmit() local
62 q_idx = smp_processor_id() % ndev->real_num_tx_queues; in mana_xdp_xmit()
65 if (mana_xdp_xmit_fm(ndev, frames[i], q_idx)) in mana_xdp_xmit()
71 tx_stats = &apc->tx_qp[q_idx].txq.stats; in mana_xdp_xmit()
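
mana_xdp_xmit() above derives q_idx from the executing CPU, folded onto the device's real TX queue count, so redirected frames from one CPU stay on one queue. A self-contained sketch of that selection rule, with current_cpu and num_tx_queues standing in for smp_processor_id() and ndev->real_num_tx_queues:

#include <stdint.h>
#include <stdio.h>

/* Same rule as mana_xdp_xmit(): q_idx = current CPU modulo the number of
 * real TX queues, so one CPU's redirected frames keep using one queue.
 */
static uint16_t pick_xdp_tx_queue(unsigned int current_cpu,
                                  unsigned int num_tx_queues)
{
        return (uint16_t)(current_cpu % num_tx_queues);
}

int main(void)
{
        unsigned int num_tx_queues = 4;

        for (unsigned int cpu = 0; cpu < 8; cpu++)
                printf("cpu %u -> q_idx %u\n", cpu,
                       (unsigned)pick_xdp_tx_queue(cpu, num_tx_queues));
        return 0;
}
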
/openbmc/linux/drivers/net/hyperv/
netvsc_bpf.c
230 struct xdp_frame *frame, u16 q_idx) in netvsc_ndoxdp_xmit_fm() argument
240 skb_record_rx_queue(skb, q_idx); in netvsc_ndoxdp_xmit_fm()
256 u16 q_idx; in netvsc_ndoxdp_xmit() local
276 q_idx = smp_processor_id() % ndev->real_num_tx_queues; in netvsc_ndoxdp_xmit()
279 if (netvsc_ndoxdp_xmit_fm(ndev, frames[i], q_idx)) in netvsc_ndoxdp_xmit()
285 tx_stats = &nvsc_dev->chan_table[q_idx].tx_stats; in netvsc_ndoxdp_xmit()
netvsc.c
321 int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) in netvsc_alloc_recv_comp_ring() argument
323 struct netvsc_channel *nvchan = &net_device->chan_table[q_idx]; in netvsc_alloc_recv_comp_ring()
760 u16 q_idx = 0; in netvsc_send_tx_complete() local
781 q_idx = packet->q_idx; in netvsc_send_tx_complete()
783 tx_stats = &net_device->chan_table[q_idx].tx_stats; in netvsc_send_tx_complete()
795 atomic_dec_return(&net_device->chan_table[q_idx].queue_sends); in netvsc_send_tx_complete()
801 struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); in netvsc_send_tx_complete()
1061 &net_device->chan_table[packet->q_idx]; in netvsc_send_pkt()
1065 struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx); in netvsc_send_pkt()
1202 nvchan = &net_device->chan_table[packet->q_idx]; in netvsc_send()
[all …]
netvsc_drv.c
254 int q_idx; in netvsc_get_tx_queue() local
256 q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) & in netvsc_get_tx_queue()
260 if (q_idx != old_idx && in netvsc_get_tx_queue()
262 sk_tx_queue_set(sk, q_idx); in netvsc_get_tx_queue()
264 return q_idx; in netvsc_get_tx_queue()
280 int q_idx = sk_tx_queue_get(skb->sk); in netvsc_pick_tx() local
282 if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) { in netvsc_pick_tx()
287 q_idx = skb_get_rx_queue(skb); in netvsc_pick_tx()
289 q_idx = netvsc_get_tx_queue(ndev, skb, q_idx); in netvsc_pick_tx()
292 return q_idx; in netvsc_pick_tx()
[all …]
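
netvsc_pick_tx() and netvsc_get_tx_queue() above cache the chosen q_idx on the socket and only recompute it when the cached value is missing, out of range, or the packet is marked ooo_okay. A simplified stand-in for that cache-then-recompute flow; the hash and the per-socket cache are invented, where the real code uses sk_tx_queue_get()/sk_tx_queue_set() and the driver's indirection table:

#include <stdint.h>
#include <stdio.h>

struct fake_sock {
        int cached_q;   /* stands in for sk_tx_queue_get()/sk_tx_queue_set() */
};

static int pick_tx_queue(struct fake_sock *sk, uint32_t flow_hash,
                         int num_tx_queues, int ooo_okay)
{
        int q_idx = sk->cached_q;

        /* Recompute only when the cached index is missing, stale, or the
         * packet explicitly allows reordering (ooo_okay).
         */
        if (q_idx < 0 || ooo_okay || q_idx >= num_tx_queues) {
                q_idx = (int)(flow_hash % (uint32_t)num_tx_queues);
                sk->cached_q = q_idx;   /* remember for the next packet */
        }
        return q_idx;
}

int main(void)
{
        struct fake_sock sk = { .cached_q = -1 };

        printf("first pick:  %d\n", pick_tx_queue(&sk, 0xdeadbeefu, 8, 0));
        printf("cached pick: %d\n", pick_tx_queue(&sk, 0x12345678u, 8, 0));
        return 0;
}
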
/openbmc/linux/drivers/net/ethernet/cavium/thunder/
nicvf_queues.h
354 void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
355 void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
356 void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
357 int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
nicvf_queues.c
1723 static u64 nicvf_int_type_to_mask(int int_type, int q_idx) in nicvf_int_type_to_mask() argument
1729 reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); in nicvf_int_type_to_mask()
1732 reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); in nicvf_int_type_to_mask()
1735 reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); in nicvf_int_type_to_mask()
1757 void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx) in nicvf_enable_intr() argument
1759 u64 mask = nicvf_int_type_to_mask(int_type, q_idx); in nicvf_enable_intr()
1771 void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx) in nicvf_disable_intr() argument
1773 u64 mask = nicvf_int_type_to_mask(int_type, q_idx); in nicvf_disable_intr()
1785 void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx) in nicvf_clear_intr() argument
1787 u64 mask = nicvf_int_type_to_mask(int_type, q_idx); in nicvf_clear_intr()
[all …]
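
In the nicvf matches above, q_idx selects a bit within an interrupt mask and the interrupt type selects which field of the register that bit lands in. A compilable sketch of that mask construction with made-up shift values (the real NICVF_INTR_*_SHIFT constants differ):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum int_type { INT_CQ, INT_SQ, INT_RBDR };

/* Placeholder field offsets; illustrative only. */
#define CQ_SHIFT   0
#define SQ_SHIFT   8
#define RBDR_SHIFT 16

/* Same shape as nicvf_int_type_to_mask(): q_idx picks the bit, the interrupt
 * type picks the field the bit is shifted into.
 */
static uint64_t int_type_to_mask(enum int_type type, int q_idx)
{
        switch (type) {
        case INT_CQ:
                return (1ULL << q_idx) << CQ_SHIFT;
        case INT_SQ:
                return (1ULL << q_idx) << SQ_SHIFT;
        case INT_RBDR:
                return (1ULL << q_idx) << RBDR_SHIFT;
        }
        return 0;
}

int main(void)
{
        printf("SQ queue 3 mask: 0x%" PRIx64 "\n",
               int_type_to_mask(INT_SQ, 3));
        return 0;
}
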
/openbmc/linux/drivers/net/ethernet/intel/fm10k/
fm10k_pf.c
503 u16 vsi, queue, pc, q_idx; in fm10k_configure_dglort_map_pf() local
520 q_idx = dglort->queue_b; in fm10k_configure_dglort_map_pf()
524 for (queue = 0; queue < queue_count; queue++, q_idx++) { in fm10k_configure_dglort_map_pf()
525 if (q_idx >= FM10K_MAX_QUEUES) in fm10k_configure_dglort_map_pf()
528 fm10k_write_reg(hw, FM10K_TX_SGLORT(q_idx), glort); in fm10k_configure_dglort_map_pf()
529 fm10k_write_reg(hw, FM10K_RX_SGLORT(q_idx), glort); in fm10k_configure_dglort_map_pf()
539 q_idx = pc + dglort->queue_b; in fm10k_configure_dglort_map_pf()
541 if (q_idx >= FM10K_MAX_QUEUES) in fm10k_configure_dglort_map_pf()
544 txqctl = fm10k_read_reg(hw, FM10K_TXQCTL(q_idx)); in fm10k_configure_dglort_map_pf()
547 fm10k_write_reg(hw, FM10K_TXQCTL(q_idx), txqctl); in fm10k_configure_dglort_map_pf()
[all …]
fm10k_pci.c
1175 int q_idx; in fm10k_napi_enable_all() local
1177 for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) { in fm10k_napi_enable_all()
1178 q_vector = interface->q_vector[q_idx]; in fm10k_napi_enable_all()
1872 int q_idx; in fm10k_napi_disable_all() local
1874 for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) { in fm10k_napi_disable_all()
1875 q_vector = interface->q_vector[q_idx]; in fm10k_napi_disable_all()
/openbmc/linux/net/sched/
sch_api.c
1783 int ret = 0, q_idx = *q_idx_p; in tc_dump_qdisc_root() local
1791 if (q_idx < s_q_idx) { in tc_dump_qdisc_root()
1792 q_idx++; in tc_dump_qdisc_root()
1799 q_idx++; in tc_dump_qdisc_root()
1812 if (q_idx < s_q_idx) { in tc_dump_qdisc_root()
1813 q_idx++; in tc_dump_qdisc_root()
1821 q_idx++; in tc_dump_qdisc_root()
1825 *q_idx_p = q_idx; in tc_dump_qdisc_root()
1835 int idx, q_idx; in tc_dump_qdisc() local
1843 s_q_idx = q_idx = cb->args[1]; in tc_dump_qdisc()
[all …]
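
tc_dump_qdisc_root() above uses q_idx together with the saved s_q_idx to make netlink dumps resumable: entries below the saved index are skipped, and the final q_idx is written back for the next call. A small userspace model of that pattern, with an invented item list and batch size:

#include <stdio.h>

#define BATCH 3

/* Same shape as tc_dump_qdisc_root(): q_idx counts every entry, entries
 * below the saved resume point are skipped, and the final count is written
 * back so the next batch continues where this one stopped.
 */
static int dump_batch(const char **items, int n, int *q_idx_p)
{
        int emitted = 0, q_idx = 0;
        int s_q_idx = *q_idx_p;          /* resume point from the last call */

        for (int i = 0; i < n; i++) {
                if (q_idx < s_q_idx) {   /* already dumped in a prior batch */
                        q_idx++;
                        continue;
                }
                if (emitted == BATCH)    /* batch full: stop here */
                        break;
                printf("dump %s\n", items[i]);
                emitted++;
                q_idx++;
        }
        *q_idx_p = q_idx;
        return emitted;
}

int main(void)
{
        const char *items[] = { "pfifo", "fq_codel", "htb", "tbf", "noqueue" };
        int q_idx = 0;

        while (dump_batch(items, 5, &q_idx) > 0)
                ;                        /* each call resumes at saved q_idx */
        return 0;
}
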
/openbmc/linux/drivers/net/ethernet/intel/iavf/
iavf_main.c
1218 int q_idx; in iavf_napi_enable_all() local
1222 for (q_idx = 0; q_idx < q_vectors; q_idx++) { in iavf_napi_enable_all()
1225 q_vector = &adapter->q_vectors[q_idx]; in iavf_napi_enable_all()
1237 int q_idx; in iavf_napi_disable_all() local
1241 for (q_idx = 0; q_idx < q_vectors; q_idx++) { in iavf_napi_disable_all()
1242 q_vector = &adapter->q_vectors[q_idx]; in iavf_napi_disable_all()
1834 int q_idx = 0, num_q_vectors; in iavf_alloc_q_vectors() local
1843 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { in iavf_alloc_q_vectors()
1844 q_vector = &adapter->q_vectors[q_idx]; in iavf_alloc_q_vectors()
1847 q_vector->v_idx = q_idx; in iavf_alloc_q_vectors()
[all …]
/openbmc/linux/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/
trx.c
534 u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx) in rtl92ee_get_available_desc() argument
542 get_desc_addr_fr_q_idx(q_idx)); in rtl92ee_get_available_desc()
889 u8 q_idx = *val; in rtl92ee_set_desc() local
900 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[q_idx]; in rtl92ee_set_desc()
903 if (q_idx == BEACON_QUEUE) { in rtl92ee_set_desc()
914 get_desc_addr_fr_q_idx(q_idx), in rtl92ee_set_desc()
/openbmc/linux/drivers/gpu/drm/amd/amdkfd/
kfd_device_queue_manager.c
2871 int q_idx = QUEUE_NOT_FOUND; in resume_queues() local
2874 q_idx = q_array_get_index( in resume_queues()
2879 if (!queue_ids || q_idx != QUEUE_NOT_FOUND) { in resume_queues()
2884 queue_ids[q_idx] &= in resume_queues()
2887 queue_ids[q_idx] |= in resume_queues()
2916 int q_idx = q_array_get_index( in resume_queues() local
2922 if (q_idx != QUEUE_NOT_FOUND) in resume_queues()
2923 queue_ids[q_idx] |= in resume_queues()
2974 int q_idx = q_array_get_index(q->properties.queue_id, in suspend_queues() local
2978 if (q_idx != QUEUE_NOT_FOUND) { in suspend_queues()
[all …]
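
resume_queues() and suspend_queues() above translate a queue ID into an index with q_array_get_index() and only update the caller's queue_ids entry when the lookup does not return QUEUE_NOT_FOUND. A rough, self-contained sketch of that lookup-with-sentinel pattern; the flag value and array contents are illustrative:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define QUEUE_NOT_FOUND (-1)
#define QUEUE_RESUMED   (1u << 31)   /* illustrative status flag */

/* Linear lookup returning a sentinel, like q_array_get_index() above. */
static int q_array_get_index(uint32_t queue_id, int n, const uint32_t *ids)
{
        for (int i = 0; i < n; i++)
                if (ids[i] == queue_id)
                        return i;
        return QUEUE_NOT_FOUND;
}

int main(void)
{
        uint32_t queue_ids[] = { 12, 7, 42 };
        int q_idx = q_array_get_index(42, 3, queue_ids);

        if (q_idx != QUEUE_NOT_FOUND)
                queue_ids[q_idx] |= QUEUE_RESUMED;   /* flag only when found */

        printf("q_idx=%d queue_ids[2]=0x%" PRIx32 "\n", q_idx, queue_ids[2]);
        return 0;
}
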
/openbmc/linux/drivers/scsi/mpi3mr/
mpi3mr_fw.c
1588 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) in mpi3mr_free_op_req_q_segments() argument
1594 segments = mrioc->req_qinfo[q_idx].q_segments; in mpi3mr_free_op_req_q_segments()
1600 if (mrioc->req_qinfo[q_idx].q_segment_list) { in mpi3mr_free_op_req_q_segments()
1603 mrioc->req_qinfo[q_idx].q_segment_list, in mpi3mr_free_op_req_q_segments()
1604 mrioc->req_qinfo[q_idx].q_segment_list_dma); in mpi3mr_free_op_req_q_segments()
1605 mrioc->req_qinfo[q_idx].q_segment_list = NULL; in mpi3mr_free_op_req_q_segments()
1608 size = mrioc->req_qinfo[q_idx].segment_qd * in mpi3mr_free_op_req_q_segments()
1611 for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) { in mpi3mr_free_op_req_q_segments()
1618 kfree(mrioc->req_qinfo[q_idx].q_segments); in mpi3mr_free_op_req_q_segments()
1619 mrioc->req_qinfo[q_idx].q_segments = NULL; in mpi3mr_free_op_req_q_segments()
[all …]
/openbmc/linux/drivers/net/ethernet/fungible/funeth/
funeth_tx.c
576 int i, q_idx; in fun_xdp_xmit_frames() local
585 q_idx = smp_processor_id(); in fun_xdp_xmit_frames()
586 if (unlikely(q_idx >= fp->num_xdpqs)) in fun_xdp_xmit_frames()
589 for (q = xdpqs[q_idx], i = 0; i < n; i++) in fun_xdp_xmit_frames()
