Searched refs:qidx (Results 1 – 25 of 72) sorted by relevance

/openbmc/linux/drivers/net/ethernet/cavium/thunder/
nicvf_queues.c
32 static int nicvf_poll_reg(struct nicvf *nic, int qidx, in nicvf_poll_reg() argument
43 reg_val = nicvf_queue_reg_read(nic, reg, qidx); in nicvf_poll_reg()
505 struct snd_queue *sq, int q_len, int qidx) in nicvf_init_snd_queue() argument
525 qidx += ((nic->sqs_id + 1) * MAX_SND_QUEUES_PER_QS); in nicvf_init_snd_queue()
526 if (qidx < nic->pnicvf->xdp_tx_queues) { in nicvf_init_snd_queue()
628 struct queue_set *qs, int qidx) in nicvf_reclaim_snd_queue() argument
631 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0); in nicvf_reclaim_snd_queue()
633 if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01)) in nicvf_reclaim_snd_queue()
636 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); in nicvf_reclaim_snd_queue()
640 struct queue_set *qs, int qidx) in nicvf_reclaim_rcv_queue() argument
[all …]
nicvf_main.c
75 static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) in nicvf_netdev_qidx() argument
78 return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS); in nicvf_netdev_qidx()
80 return qidx; in nicvf_netdev_qidx()
104 u64 qidx, u64 val) in nicvf_queue_reg_write() argument
108 writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT)); in nicvf_queue_reg_write()
111 u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx) in nicvf_queue_reg_read() argument
115 return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT)); in nicvf_queue_reg_read()
989 int qidx; in nicvf_handle_qs_err() local
995 for (qidx = 0; qidx < qs->cq_cnt; qidx++) { in nicvf_handle_qs_err()
997 qidx); in nicvf_handle_qs_err()
[all …]
nicvf_ethtool.c
214 int stats, qidx; in nicvf_get_qset_strings() local
217 for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) { in nicvf_get_qset_strings()
219 sprintf(*data, "rxq%d: %s", qidx + start_qidx, in nicvf_get_qset_strings()
225 for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) { in nicvf_get_qset_strings()
227 sprintf(*data, "txq%d: %s", qidx + start_qidx, in nicvf_get_qset_strings()
301 int stat, qidx; in nicvf_get_qset_stats() local
306 for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) { in nicvf_get_qset_stats()
307 nicvf_update_rq_stats(nic, qidx); in nicvf_get_qset_stats()
309 *((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats) in nicvf_get_qset_stats()
313 for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) { in nicvf_get_qset_stats()
[all …]
nicvf_queues.h
336 int qidx, bool enable);
338 void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
339 void nicvf_sq_disable(struct nicvf *nic, int qidx);
342 struct snd_queue *sq, int qidx);
365 u64 qidx, u64 val);
367 u64 offset, u64 qidx);
/openbmc/linux/drivers/net/ethernet/marvell/octeontx2/nic/
qos_sq.c
33 static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx) in otx2_qos_sq_aura_pool_init() argument
56 pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); in otx2_qos_sq_aura_pool_init()
76 sq = &qset->sq[qidx]; in otx2_qos_sq_aura_pool_init()
116 static void otx2_qos_sq_free_sqbs(struct otx2_nic *pfvf, int qidx) in otx2_qos_sq_free_sqbs() argument
124 sq = &qset->sq[qidx]; in otx2_qos_sq_free_sqbs()
140 sq = &qset->sq[qidx]; in otx2_qos_sq_free_sqbs()
151 static void otx2_qos_sqb_flush(struct otx2_nic *pfvf, int qidx) in otx2_qos_sqb_flush() argument
157 incr = (u64)qidx << 32; in otx2_qos_sqb_flush()
165 static int otx2_qos_ctx_disable(struct otx2_nic *pfvf, u16 qidx, int aura_id) in otx2_qos_ctx_disable() argument
176 cn10k_sq_aq->qidx = qidx; in otx2_qos_ctx_disable()
[all …]
otx2_common.c
20 struct otx2_nic *pfvf, int qidx) in otx2_nix_rq_op_stats() argument
22 u64 incr = (u64)qidx << 32; in otx2_nix_rq_op_stats()
33 struct otx2_nic *pfvf, int qidx) in otx2_nix_sq_op_stats() argument
35 u64 incr = (u64)qidx << 32; in otx2_nix_sq_op_stats()
76 int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx) in otx2_update_rq_stats() argument
78 struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx]; in otx2_update_rq_stats()
83 otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx); in otx2_update_rq_stats()
87 int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx) in otx2_update_sq_stats() argument
89 struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx]; in otx2_update_sq_stats()
94 if (qidx >= pfvf->hw.non_qos_queues) { in otx2_update_sq_stats()
[all …]
cn10k.c
75 int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) in cn10k_sq_aq_init() argument
85 aq->sq.cq = pfvf->hw.rx_queues + qidx; in cn10k_sq_aq_init()
89 aq->sq.smq = otx2_get_smq_idx(pfvf, qidx); in cn10k_sq_aq_init()
102 aq->qidx = qidx; in cn10k_sq_aq_init()
138 void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx) in cn10k_sqe_flush() argument
326 aq->qidx = rq_idx; in cn10k_map_unmap_rq_policer()
350 int qidx, rc; in cn10k_free_matchall_ipolicer() local
355 for (qidx = 0; qidx < hw->rx_queues; qidx++) in cn10k_free_matchall_ipolicer()
356 cn10k_map_unmap_rq_policer(pfvf, qidx, in cn10k_free_matchall_ipolicer()
462 aq->qidx = profile; in cn10k_set_ipolicer_rate()
[all …]
otx2_txrx.c
196 int qidx) in otx2_skb_add_frag() argument
227 pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL); in otx2_skb_add_frag()
255 int qidx) in otx2_free_rcv_seg() argument
268 pfvf->hw_ops->aura_freeptr(pfvf, qidx, in otx2_free_rcv_seg()
275 struct nix_cqe_rx_s *cqe, int qidx) in otx2_check_rcv_errors() argument
283 qidx, parse->errlev, parse->errcode); in otx2_check_rcv_errors()
334 otx2_free_rcv_seg(pfvf, cqe, qidx); in otx2_check_rcv_errors()
450 int tx_pkts = 0, tx_bytes = 0, qidx; in otx2_tx_napi_handler() local
462 qidx = cq->cq_idx - pfvf->hw.rx_queues; in otx2_tx_napi_handler()
463 sq = &pfvf->qset.sq[qidx]; in otx2_tx_napi_handler()
[all …]
otx2_pf.c
1275 u64 qidx = 0; in otx2_q_intr_handler() local
1278 for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) { in otx2_q_intr_handler()
1280 val = otx2_atomic64_add((qidx << 44), ptr); in otx2_q_intr_handler()
1282 otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) | in otx2_q_intr_handler()
1290 qidx, otx2_read64(pf, NIX_LF_ERR_INT)); in otx2_q_intr_handler()
1294 qidx); in otx2_q_intr_handler()
1298 qidx); in otx2_q_intr_handler()
1305 for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) { in otx2_q_intr_handler()
1309 sq = &pf->qset.sq[qidx]; in otx2_q_intr_handler()
1319 val = otx2_atomic64_add((qidx << 44), ptr); in otx2_q_intr_handler()
[all …]
otx2_dcbnl.c
161 cn10k_sq_aq->qidx = prio; in otx2_pfc_update_sq_smq_mapping()
176 sq_aq->qidx = prio; in otx2_pfc_update_sq_smq_mapping()
335 void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, in otx2_update_bpid_in_rqctx() argument
343 if (pfvf->queue_to_pfc_map[qidx] && pfc_enable) { in otx2_update_bpid_in_rqctx()
346 pfvf->queue_to_pfc_map[qidx], qidx); in otx2_update_bpid_in_rqctx()
355 pfvf->queue_to_pfc_map[qidx] = vlan_prio; in otx2_update_bpid_in_rqctx()
367 aq->qidx = qidx; in otx2_update_bpid_in_rqctx()
382 npa_aq->aura_id = qidx; in otx2_update_bpid_in_rqctx()
396 qidx, err); in otx2_update_bpid_in_rqctx()
otx2_ethtool.c
86 int qidx, stats; in otx2_get_qset_strings() local
88 for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) { in otx2_get_qset_strings()
90 sprintf(*data, "rxq%d: %s", qidx + start_qidx, in otx2_get_qset_strings()
96 for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) { in otx2_get_qset_strings()
98 if (qidx >= pfvf->hw.non_qos_queues) in otx2_get_qset_strings()
100 qidx + start_qidx - pfvf->hw.non_qos_queues, in otx2_get_qset_strings()
103 sprintf(*data, "txq%d: %s", qidx + start_qidx, in otx2_get_qset_strings()
153 int stat, qidx; in otx2_get_qset_stats() local
157 for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) { in otx2_get_qset_stats()
158 if (!otx2_update_rq_stats(pfvf, qidx)) { in otx2_get_qset_stats()
[all …]
qos.h
27 void otx2_qos_free_qid(struct otx2_nic *pfvf, int qidx);
28 int otx2_qos_enable_sq(struct otx2_nic *pfvf, int qidx);
29 void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx);
otx2_common.h
371 int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
373 int size, int qidx);
913 static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx) in otx2_get_smq_idx() argument
917 if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx]) in otx2_get_smq_idx()
918 return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx]; in otx2_get_smq_idx()
921 if (qidx >= pfvf->hw.non_qos_queues) in otx2_get_smq_idx()
922 smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues]; in otx2_get_smq_idx()
961 void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
988 void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx);
990 int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
[all …]
otx2_txrx.h
168 struct sk_buff *skb, u16 qidx);
170 int size, int qidx);
172 int size, int qidx);
cn10k.h
28 void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
29 int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
/openbmc/linux/drivers/net/ethernet/fungible/funeth/
funeth_trace.h
23 __field(u32, qidx)
31 __entry->qidx = txq->qidx;
39 __get_str(devname), __entry->qidx, __entry->sqe_idx,
53 __field(u32, qidx)
61 __entry->qidx = txq->qidx;
69 __get_str(devname), __entry->qidx, __entry->sqe_idx,
84 __field(u32, qidx)
94 __entry->qidx = rxq->qidx;
104 __get_str(devname), __entry->qidx, __entry->cq_head,
funeth_tx.c
624 unsigned int qidx, in fun_txq_create_sw() argument
635 numa_node = cpu_to_node(qidx); /* XDP Tx queue */ in fun_txq_create_sw()
651 q->qidx = qidx; in fun_txq_create_sw()
661 irq ? "Tx" : "XDP", qidx); in fun_txq_create_sw()
709 q->ndq = netdev_get_tx_queue(q->netdev, q->qidx); in fun_txq_create_dev()
718 irq ? "Tx" : "XDP", q->qidx, ndesc, q->hw_qid, irq_idx, in fun_txq_create_dev()
727 irq ? "Tx" : "XDP", q->qidx, err); in fun_txq_create_dev()
740 q->irq ? "Tx" : "XDP", q->qidx, q->hw_qid, in fun_txq_free_dev()
759 int funeth_txq_create(struct net_device *dev, unsigned int qidx, in funeth_txq_create() argument
767 q = fun_txq_create_sw(dev, qidx, ndesc, irq); in funeth_txq_create()
funeth_rx.c
432 skb_record_rx_queue(skb, q->qidx); in fun_handle_cqe_pkt()
614 unsigned int qidx, in fun_rxq_create_sw() argument
629 q->qidx = qidx; in fun_rxq_create_sw()
673 netdev_err(dev, "Unable to allocate memory for Rx queue %u\n", qidx); in fun_rxq_create_sw()
704 err = xdp_rxq_info_reg(&q->xdp_rxq, q->netdev, q->qidx, in fun_rxq_create_dev()
750 q->qidx, ncqe, nrqe, q->hw_cqid, q->hw_sqid, irq->irq_idx, in fun_rxq_create_dev()
761 q->qidx, err); in fun_rxq_create_dev()
776 q->qidx, q->hw_cqid, q->hw_sqid, irq->irq_idx); in fun_rxq_free_dev()
788 int funeth_rxq_create(struct net_device *dev, unsigned int qidx, in funeth_rxq_create() argument
796 q = fun_rxq_create_sw(dev, qidx, ncqe, nrqe, irq); in funeth_rxq_create()
funeth_txrx.h
117 u16 qidx; /* queue index within net_device */ member
173 u16 qidx; /* queue index within net_device */ member
254 int funeth_txq_create(struct net_device *dev, unsigned int qidx,
259 int funeth_rxq_create(struct net_device *dev, unsigned int qidx,
/openbmc/qemu/tests/
vhost-user-bridge.c
173 vubr_handle_tx(VuDev *dev, int qidx) in vubr_handle_tx() argument
175 VuVirtq *vq = vu_get_queue(dev, qidx); in vubr_handle_tx()
180 assert(qidx % 2); in vubr_handle_tx()
462 vubr_queue_set_started(VuDev *dev, int qidx, bool started) in vubr_queue_set_started() argument
465 VuVirtq *vq = vu_get_queue(dev, qidx); in vubr_queue_set_started()
470 qidx * qemu_real_host_page_size()); in vubr_queue_set_started()
473 if (qidx % 2 == 1) { in vubr_queue_set_started()
490 vubr_queue_is_processed_in_order(VuDev *dev, int qidx) in vubr_queue_is_processed_in_order() argument
603 int qidx; in notifier_thread() local
606 for (qidx = 0; qidx < VHOST_USER_BRIDGE_MAX_QUEUES; qidx++) { in notifier_thread()
[all …]
/openbmc/linux/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/
chcr_ipsec.c
424 u32 qidx; in copy_esn_pktxt() local
432 qidx = skb->queue_mapping; in copy_esn_pktxt()
433 q = &adap->sge.ethtxq[qidx + pi->first_qset]; in copy_esn_pktxt()
474 u32 ctrl0, qidx; in copy_cpltx_pktxt() local
480 qidx = skb->queue_mapping; in copy_cpltx_pktxt()
481 q = &adap->sge.ethtxq[qidx + pi->first_qset]; in copy_cpltx_pktxt()
519 unsigned int qidx; in copy_key_cpltx_pktxt() local
523 qidx = skb->queue_mapping; in copy_key_cpltx_pktxt()
524 q = &adap->sge.ethtxq[qidx + pi->first_qset]; in copy_key_cpltx_pktxt()
579 int qidx = skb_get_queue_mapping(skb); in ch_ipsec_crypto_wreq() local
[all …]
/openbmc/qemu/hw/net/
vmxnet3.c
387 vmxnet3_inc_tx_consumption_counter(VMXNET3State *s, int qidx) in vmxnet3_inc_tx_consumption_counter() argument
389 vmxnet3_ring_inc(&s->txq_descr[qidx].tx_ring); in vmxnet3_inc_tx_consumption_counter()
393 vmxnet3_inc_rx_consumption_counter(VMXNET3State *s, int qidx, int ridx) in vmxnet3_inc_rx_consumption_counter() argument
395 vmxnet3_ring_inc(&s->rxq_descr[qidx].rx_ring[ridx]); in vmxnet3_inc_rx_consumption_counter()
399 vmxnet3_inc_tx_completion_counter(VMXNET3State *s, int qidx) in vmxnet3_inc_tx_completion_counter() argument
401 vmxnet3_ring_inc(&s->txq_descr[qidx].comp_ring); in vmxnet3_inc_tx_completion_counter()
405 vmxnet3_inc_rx_completion_counter(VMXNET3State *s, int qidx) in vmxnet3_inc_rx_completion_counter() argument
407 vmxnet3_ring_inc(&s->rxq_descr[qidx].comp_ring); in vmxnet3_inc_rx_completion_counter()
411 vmxnet3_dec_rx_completion_counter(VMXNET3State *s, int qidx) in vmxnet3_dec_rx_completion_counter() argument
413 vmxnet3_ring_dec(&s->rxq_descr[qidx].comp_ring); in vmxnet3_dec_rx_completion_counter()
[all …]
/openbmc/linux/drivers/dma/ptdma/
ptdma-dev.c
72 u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx]; in pt_core_execute_cmd()
84 cmd_q->qidx = (cmd_q->qidx + 1) % CMD_Q_LEN; in pt_core_execute_cmd()
90 tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); in pt_core_execute_cmd()
136 tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); in pt_do_cmd_complete()
216 cmd_q->qidx = 0; in pt_core_init()
/openbmc/qemu/contrib/vhost-user-input/
main.c
137 static void vi_handle_sts(VuDev *dev, int qidx) in vi_handle_sts() argument
140 VuVirtq *vq = vu_get_queue(dev, qidx); in vi_handle_sts()
172 vi_queue_set_started(VuDev *dev, int qidx, bool started) in vi_queue_set_started() argument
175 VuVirtq *vq = vu_get_queue(dev, qidx); in vi_queue_set_started()
177 g_debug("queue started %d:%d", qidx, started); in vi_queue_set_started()
179 if (qidx == 1) { in vi_queue_set_started()
/openbmc/linux/drivers/net/ethernet/broadcom/bnxt/
bnxt_dcb.c
55 u8 qidx; in bnxt_hwrm_queue_pri2cos_cfg() local
60 qidx = bp->tc_to_qidx[ets->prio_tc[i]]; in bnxt_hwrm_queue_pri2cos_cfg()
61 pri2cos[i] = bp->q_info[qidx].queue_id; in bnxt_hwrm_queue_pri2cos_cfg()
108 u8 qidx = bp->tc_to_qidx[i]; in bnxt_hwrm_queue_cos2bw_cfg() local
112 qidx); in bnxt_hwrm_queue_cos2bw_cfg()
115 cos2bw.queue_id = bp->q_info[qidx].queue_id; in bnxt_hwrm_queue_cos2bw_cfg()
131 if (qidx == 0) { in bnxt_hwrm_queue_cos2bw_cfg()
277 u8 qidx = bp->tc_to_qidx[i]; in bnxt_hwrm_queue_pfc_cfg() local
279 if (!BNXT_LLQ(bp->q_info[qidx].queue_profile)) { in bnxt_hwrm_queue_pfc_cfg()
