
Searched refs:qset (Results 1 – 19 of 19) sorted by relevance

/openbmc/linux/drivers/net/ethernet/marvell/octeontx2/nic/
qos_sq.c
23 if (!pfvf->qset.pool) in otx2_qos_aura_pool_free()
26 pool = &pfvf->qset.pool[pool_id]; in otx2_qos_aura_pool_free()
35 struct otx2_qset *qset = &pfvf->qset; in otx2_qos_sq_aura_pool_init() local
50 num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs; in otx2_qos_sq_aura_pool_init()
57 pool = &pfvf->qset.pool[pool_id]; in otx2_qos_sq_aura_pool_init()
76 sq = &qset->sq[qidx]; in otx2_qos_sq_aura_pool_init()
118 struct otx2_qset *qset = &pfvf->qset; in otx2_qos_sq_free_sqbs() local
124 sq = &qset->sq[qidx]; in otx2_qos_sq_free_sqbs()
140 sq = &qset->sq[qidx]; in otx2_qos_sq_free_sqbs()
261 struct otx2_qset *qset = &pfvf->qset; in otx2_qos_disable_sq() local
[all …]
otx2_common.c
78 struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx]; in otx2_update_rq_stats()
80 if (!pfvf->qset.rq) in otx2_update_rq_stats()
89 struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx]; in otx2_update_sq_stats()
91 if (!pfvf->qset.sq) in otx2_update_sq_stats()
824 sq = &pfvf->qset.sq[qidx]; in otx2_sqb_flush()
855 struct otx2_qset *qset = &pfvf->qset; in otx2_rq_init() local
873 aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); in otx2_rq_init()
874 aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); in otx2_rq_init()
892 sq = &pfvf->qset.sq[qidx]; in otx2_sq_aq_init()
913 aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt)); in otx2_sq_aq_init()
[all …]
otx2_pf.c
1278 for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) { in otx2_q_intr_handler()
1309 sq = &pf->qset.sq[qidx]; in otx2_q_intr_handler()
1408 struct otx2_qset *qset = &pf->qset; in otx2_disable_napi() local
1413 cq_poll = &qset->napi[qidx]; in otx2_disable_napi()
1422 struct otx2_qset *qset = &pf->qset; in otx2_free_cq_res() local
1428 for (qidx = 0; qidx < qset->cq_cnt; qidx++) { in otx2_free_cq_res()
1429 cq = &qset->cq[qidx]; in otx2_free_cq_res()
1436 struct otx2_qset *qset = &pf->qset; in otx2_free_sq_res() local
1445 sq = &qset->sq[qidx]; in otx2_free_sq_res()
1611 struct otx2_qset *qset = &pf->qset; in otx2_free_hw_resources() local
[all …]
otx2_txrx.c
463 sq = &pfvf->qset.sq[qidx]; in otx2_tx_napi_handler()
478 otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx], in otx2_tx_napi_handler()
536 struct otx2_qset *qset; in otx2_napi_handler() local
542 qset = &pfvf->qset; in otx2_napi_handler()
548 cq = &qset->cq[cq_idx]; in otx2_napi_handler()
1228 pool = &pfvf->qset.pool[pool_id]; in otx2_cleanup_rx_cqes()
1263 sq = &pfvf->qset.sq[qidx]; in otx2_cleanup_tx_cqes()
1328 sq = &pfvf->qset.sq[sq_idx]; in otx2_free_pending_sqe()
1376 sq = &pfvf->qset.sq[qidx]; in otx2_xdp_sq_append_pkt()
otx2_ethtool.c
83 static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset) in otx2_get_qset_strings() argument
85 int start_qidx = qset * pfvf->hw.rx_queues; in otx2_get_qset_strings()
164 *((*data)++) = ((u64 *)&pfvf->qset.rq[qidx].stats) in otx2_get_qset_stats()
175 *((*data)++) = ((u64 *)&pfvf->qset.sq[qidx].stats) in otx2_get_qset_stats()
387 struct otx2_qset *qs = &pfvf->qset; in otx2_get_ringparam()
407 struct otx2_qset *qs = &pfvf->qset; in otx2_set_ringparam()
cn10k.c
99 aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt)); in cn10k_sq_aq_init()
otx2_common.h
472 struct otx2_qset qset; member
599 pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K); in otx2_setup_dev_hw_settings()
otx2_vf.c
395 sq = &vf->qset.sq[qidx]; in otx2vf_xmit()
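
Taken together, the octeontx2 hits above all go through a single qset member embedded in the per-device structure (the "struct otx2_qset qset; member" hit in otx2_common.h) and index its per-queue arrays by queue index. Below is a minimal, compilable sketch of that access pattern; every type and field name other than the qset member itself is a simplified assumption, not the kernel's actual definition.

/* Hedged sketch: simplified stand-ins for the kernel types, used only to
 * illustrate the pfvf->qset.sq[qidx] / qset->cq_cnt access pattern seen
 * in the hits above.  Field and type names here are assumptions.
 */
struct example_snd_queue {
	int head;
	int tail;
};

struct example_qset {
	int sqe_cnt;                  /* entries per send queue */
	int cq_cnt;                   /* number of completion queues */
	struct example_snd_queue *sq; /* per-queue state, indexed by qidx */
};

struct example_nic {
	struct example_qset qset;     /* embedded, mirroring the otx2_common.h member hit */
};

/* Walk every send queue the way the otx2_sqb_flush()/otx2_free_sq_res()
 * hits do: take the per-device qset and index its sq array by queue index.
 */
static void example_for_each_sq(struct example_nic *pfvf, int num_sqs)
{
	int qidx;

	for (qidx = 0; qidx < num_sqs; qidx++) {
		struct example_snd_queue *sq = &pfvf->qset.sq[qidx];

		sq->head = sq->tail = 0;  /* placeholder per-queue work */
	}
}
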
/openbmc/linux/drivers/net/ethernet/intel/ice/
ice_idc.c
60 int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset) in ice_add_rdma_qset() argument
70 if (WARN_ON(!pf || !qset)) in ice_add_rdma_qset()
87 max_rdmaqs[qset->tc]++; in ice_add_rdma_qset()
88 qs_handle = qset->qs_handle; in ice_add_rdma_qset()
97 status = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx, qset->tc, in ice_add_rdma_qset()
103 vsi->qset_handle[qset->tc] = qset->qs_handle; in ice_add_rdma_qset()
104 qset->teid = qset_teid; in ice_add_rdma_qset()
115 int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset) in ice_del_rdma_qset() argument
121 if (WARN_ON(!pf || !qset)) in ice_del_rdma_qset()
124 vsi = ice_find_vsi(pf, qset->vport_id); in ice_del_rdma_qset()
[all …]
/openbmc/linux/drivers/net/ethernet/fungible/funeth/
funeth_main.c
441 static void fun_free_rings(struct net_device *netdev, struct fun_qset *qset) in fun_free_rings() argument
444 struct funeth_txq **xdpqs = qset->xdpqs; in fun_free_rings()
445 struct funeth_rxq **rxqs = qset->rxqs; in fun_free_rings()
453 qset->txqs = fp->txqs; in fun_free_rings()
454 qset->nrxqs = netdev->real_num_rx_queues; in fun_free_rings()
455 qset->ntxqs = netdev->real_num_tx_queues; in fun_free_rings()
456 qset->nxdpqs = fp->num_xdpqs; in fun_free_rings()
468 free_rxqs(rxqs, qset->nrxqs, qset->rxq_start, qset->state); in fun_free_rings()
469 free_txqs(qset->txqs, qset->ntxqs, qset->txq_start, qset->state); in fun_free_rings()
470 free_xdpqs(xdpqs, qset->nxdpqs, qset->xdpq_start, qset->state); in fun_free_rings()
[all …]
/openbmc/linux/drivers/infiniband/hw/irdma/
main.c
170 struct iidc_rdma_qset_params qset = {}; in irdma_lan_register_qset() local
173 qset.qs_handle = tc_node->qs_handle; in irdma_lan_register_qset()
174 qset.tc = tc_node->traffic_class; in irdma_lan_register_qset()
175 qset.vport_id = vsi->vsi_idx; in irdma_lan_register_qset()
176 ret = ice_add_rdma_qset(pf, &qset); in irdma_lan_register_qset()
182 tc_node->l2_sched_node_id = qset.teid; in irdma_lan_register_qset()
183 vsi->qos[tc_node->user_pri].l2_sched_node_id = qset.teid; in irdma_lan_register_qset()
198 struct iidc_rdma_qset_params qset = {}; in irdma_lan_unregister_qset() local
200 qset.qs_handle = tc_node->qs_handle; in irdma_lan_unregister_qset()
201 qset.tc = tc_node->traffic_class; in irdma_lan_unregister_qset()
[all …]
i40iw_if.c
113 u16 qset; in i40iw_open() local
136 qset = cdev_info->params.qos.prio_qos[i].qs_handle; in i40iw_open()
138 l2params.qs_handle_list[i] = qset; in i40iw_open()
140 last_qset = qset; in i40iw_open()
141 else if ((qset != last_qset) && (qset != IRDMA_NO_QSET)) in i40iw_open()
/openbmc/linux/include/linux/net/intel/
iidc.h
76 int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset);
77 int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset);
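
The iidc.h prototypes above are the hook an RDMA auxiliary driver uses to attach a queue set to the ice LAN scheduler; the irdma main.c hits earlier in this list show the call pattern. The sketch below is hedged: only the fields visible in those hits (qs_handle, tc, vport_id, teid) are touched, and the surrounding helper is hypothetical, not taken from the kernel tree.

#include <linux/net/intel/iidc.h>

/* Hypothetical helper illustrating the ice_add_rdma_qset() call pattern
 * seen in the irdma hits above.
 */
static int example_register_rdma_qset(struct ice_pf *pf, u16 qs_handle,
				      u8 tc, u16 vport_id, u32 *teid)
{
	struct iidc_rdma_qset_params qset = {};
	int ret;

	qset.qs_handle = qs_handle;  /* scheduler handle for this queue set */
	qset.tc = tc;                /* traffic class the qset attaches to  */
	qset.vport_id = vport_id;    /* owning VSI                          */

	ret = ice_add_rdma_qset(pf, &qset);
	if (ret)
		return ret;

	*teid = qset.teid;           /* TEID filled in by the LAN driver    */
	return 0;
}
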
/openbmc/linux/drivers/net/ethernet/cavium/thunder/
nic_main.c
478 u8 qset, rq_idx = 0; in nic_config_cpi() local
504 qset = cfg->vf_id; in nic_config_cpi()
508 (qset << 3) | rq_idx); in nic_config_cpi()
567 u8 qset, idx = 0; in nic_config_rss() local
579 qset = nic->vf_sqs[cfg->vf_id][svf - 1]; in nic_config_rss()
581 qset = cfg->vf_id; in nic_config_rss()
583 (qset << 3) | (cfg->ind_tbl[idx] & 0x7)); in nic_config_rss()
nicvf_ethtool.c
212 static void nicvf_get_qset_strings(struct nicvf *nic, u8 **data, int qset) in nicvf_get_qset_strings() argument
215 int start_qidx = qset * MAX_RCV_QUEUES_PER_QS; in nicvf_get_qset_strings()
/openbmc/linux/drivers/net/ethernet/chelsio/cxgb3/
cxgb3_main.c
670 &adap->params.sge.qset[qset_idx], ntxq, dev, in setup_sge_qsets()
1957 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset]; in get_sge_param()
1991 q = &adapter->params.sge.qset[pi->first_qset]; in set_sge_param()
2017 qsp = &adapter->params.sge.qset[i]; in set_coalesce()
2032 struct qset_params *q = adapter->params.sge.qset; in get_coalesce()
2202 q = &adapter->params.sge.qset[t.qset_idx]; in cxgb_siocdevprivate()
2236 qset[i]; in cxgb_siocdevprivate()
2278 q = &adapter->params.sge.qset[q1 + t.qset_idx]; in cxgb_siocdevprivate()
common.h
329 struct qset_params qset[SGE_QSETS]; member
sge.c
3369 struct qset_params *q = p->qset + i; in t3_sge_prep()
/openbmc/linux/drivers/net/ethernet/chelsio/cxgb4/
cxgb4_debugfs.c
2601 static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset) in ethqset2pinfo() argument
2608 if (qset >= pi->first_qset && in ethqset2pinfo()
2609 qset < pi->first_qset + pi->nqsets) in ethqset2pinfo()