Searched refs:cq (Results 51 – 75 of 365), sorted by relevance

/openbmc/linux/drivers/infiniband/hw/ocrdma/
ocrdma_hw.c
133 dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1); in ocrdma_mcq_inc_tail()
538 cmd->pgsz_pgcnt |= PAGES_4K_SPANNED(cq->va, cq->size); in ocrdma_mbx_mq_cq_create()
657 cq = &dev->mq.cq; in ocrdma_destroy_mq()
721 ib_evt.element.cq = &cq->ibcq; in ocrdma_dispatch_ibevent()
727 ib_evt.element.cq = &cq->ibcq; in ocrdma_dispatch_ibevent()
788 cq->ibcq.event_handler(&ib_evt, cq->ibcq.cq_context); in ocrdma_dispatch_ibevent()
908 struct list_head *head = sq?(&cq->sq_head):(&cq->rq_head); in _ocrdma_qp_buddy_cq_handler()
979 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); in ocrdma_qp_cq_handler()
1815 cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL); in ocrdma_mbx_create_cq()
1878 dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa); in ocrdma_mbx_create_cq()
[all …]
ocrdma_verbs.c
938 uresp.cq_id = cq->id; in ocrdma_copy_cq_uresp()
960 cq->ucontext = uctx; in ocrdma_copy_cq_uresp()
1005 dev->cq_tbl[cq->id] = cq; in ocrdma_create_cq()
1036 cqe = cq->va; in ocrdma_flush_cq()
1067 ocrdma_flush_cq(cq); in ocrdma_destroy_cq()
1070 if (cq->ucontext) { in ocrdma_destroy_cq()
1072 ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, in ocrdma_destroy_cq()
1605 cur_getp = cq->getp; in ocrdma_discard_cqes()
2744 cq->phase = (~cq->phase & OCRDMA_CQE_VALID); in ocrdma_change_cq_phase()
2763 cur_getp = cq->getp; in ocrdma_poll_hwcq()
[all …]
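
Two patterns recur in the ocrdma hits: the tail update at ocrdma_hw.c:133 masks with (OCRDMA_MQ_CQ_LEN - 1), i.e. a power-of-two ring, and ocrdma_verbs.c:2744 flips a phase/valid bit when the consumer wraps. A minimal standalone sketch of that combination (all demo_* and DEMO_* names are hypothetical, not the driver's):

#define DEMO_CQ_LEN     256             /* ring size; must be a power of two */
#define DEMO_CQE_VALID  0x80000000u     /* ownership/valid bit in the CQE */

struct demo_cq {
        unsigned int tail;              /* consumer index */
        unsigned int phase;             /* expected valid bit for this lap */
};

static void demo_cq_inc_tail(struct demo_cq *cq)
{
        /* masking replaces '% DEMO_CQ_LEN'; correct only for 2^n sizes */
        cq->tail = (cq->tail + 1) & (DEMO_CQ_LEN - 1);
        if (cq->tail == 0)
                cq->phase = ~cq->phase & DEMO_CQE_VALID;  /* wrapped a lap */
}

The phase bit is how hardware and driver agree on which CQEs are new without a shared producer index: entries written this lap carry the current phase value, so a stale entry is detected by its mismatched bit.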
/openbmc/linux/drivers/net/ethernet/pensando/ionic/
ionic_debugfs.c
97 struct ionic_cq *cq = seq->private; in cq_tail_show() local
99 seq_printf(seq, "%d\n", cq->tail_idx); in cq_tail_show()
123 struct ionic_cq *cq = &qcq->cq; in ionic_debugfs_add_qcq() local
169 debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa); in ionic_debugfs_add_qcq()
170 debugfs_create_u32("num_descs", 0400, cq_dentry, &cq->num_descs); in ionic_debugfs_add_qcq()
171 debugfs_create_u32("desc_size", 0400, cq_dentry, &cq->desc_size); in ionic_debugfs_add_qcq()
172 debugfs_create_bool("done_color", 0400, cq_dentry, &cq->done_color); in ionic_debugfs_add_qcq()
174 debugfs_create_file("tail", 0400, cq_dentry, cq, &cq_tail_fops); in ionic_debugfs_add_qcq()
179 desc_blob->data = cq->base; in ionic_debugfs_add_qcq()
180 desc_blob->size = (unsigned long)cq->num_descs * cq->desc_size; in ionic_debugfs_add_qcq()
ionic_txrx.h
7 void ionic_tx_flush(struct ionic_cq *cq);
17 bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
18 bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
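
The ionic_debugfs.c hits (and the pds_core debugfs.c ones further down) show the same recipe: each CQ field is exported as a read-only (0400) debugfs attribute under a per-queue directory. A minimal sketch using the stock debugfs helpers; struct demo_cq and demo_dbg_add_cq are hypothetical stand-ins:

#include <linux/debugfs.h>
#include <linux/types.h>

struct demo_cq {
        u64 base_pa;            /* DMA address of the CQ ring */
        u32 num_descs;
        u32 desc_size;
        bool done_color;
};

static void demo_dbg_add_cq(struct demo_cq *cq, struct dentry *parent)
{
        struct dentry *cq_dentry = debugfs_create_dir("cq", parent);

        /* each helper binds a file directly to the field's storage */
        debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa);
        debugfs_create_u32("num_descs", 0400, cq_dentry, &cq->num_descs);
        debugfs_create_u32("desc_size", 0400, cq_dentry, &cq->desc_size);
        debugfs_create_bool("done_color", 0400, cq_dentry, &cq->done_color);
}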
/openbmc/linux/drivers/infiniband/sw/rxe/
rxe.h
54 #define rxe_dbg_cq(cq, fmt, ...) ibdev_dbg((cq)->ibcq.device, \ argument
55 "cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__)
75 #define rxe_err_cq(cq, fmt, ...) ibdev_err_ratelimited((cq)->ibcq.device, \ argument
76 "cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__)
96 #define rxe_info_cq(cq, fmt, ...) ibdev_info_ratelimited((cq)->ibcq.device, \ argument
97 "cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__)
/openbmc/linux/drivers/net/ethernet/amd/pds_core/
core.c
155 if (qcq->cq.info) in pdsc_qcq_free()
156 vfree(qcq->cq.info); in pdsc_qcq_free()
181 cq->base = base; in pdsc_cq_map()
182 cq->base_pa = base_pa; in pdsc_cq_map()
184 for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++) in pdsc_cq_map()
222 qcq->cq.info = vcalloc(num_descs, sizeof(*qcq->cq.info)); in pdsc_qcq_alloc()
223 if (!qcq->cq.info) { in pdsc_qcq_alloc()
231 qcq->cq.tail_idx = 0; in pdsc_qcq_alloc()
232 qcq->cq.done_color = 1; in pdsc_qcq_alloc()
286 qcq->cq.bound_q = &qcq->q; in pdsc_qcq_alloc()
[all …]
debugfs.c
114 struct pdsc_cq *cq = &qcq->cq; in pdsc_debugfs_add_qcq() local
143 debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa); in pdsc_debugfs_add_qcq()
144 debugfs_create_u32("num_descs", 0400, cq_dentry, &cq->num_descs); in pdsc_debugfs_add_qcq()
145 debugfs_create_u32("desc_size", 0400, cq_dentry, &cq->desc_size); in pdsc_debugfs_add_qcq()
146 debugfs_create_bool("done_color", 0400, cq_dentry, &cq->done_color); in pdsc_debugfs_add_qcq()
147 debugfs_create_u16("tail", 0400, cq_dentry, &cq->tail_idx); in pdsc_debugfs_add_qcq()
/openbmc/linux/drivers/infiniband/hw/efa/
efa_com.c
164 struct efa_com_admin_cq *cq = &aq->cq; in efa_com_admin_init_cq() local
170 cq->entries = in efa_com_admin_init_cq()
172 if (!cq->entries) in efa_com_admin_init_cq()
177 cq->cc = 0; in efa_com_admin_init_cq()
178 cq->phase = 1; in efa_com_admin_init_cq()
444 phase = aq->cq.phase; in efa_com_handle_admin_completion()
468 aq->cq.cc += comp_num; in efa_com_handle_admin_completion()
469 aq->cq.phase = phase; in efa_com_handle_admin_completion()
663 struct efa_com_admin_cq *cq = &aq->cq; in efa_com_admin_destroy() local
676 dma_free_coherent(edev->dmadev, size, cq->entries, cq->dma_addr); in efa_com_admin_destroy()
[all …]
efa_verbs.c
1018 cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr); in efa_destroy_cq()
1022 if (cq->eq) { in efa_destroy_cq()
1026 efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, in efa_destroy_cq()
1145 cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size, in efa_create_cq()
1177 cq->cq_idx); in efa_create_cq()
1181 if (cq->eq) { in efa_create_cq()
1182 err = xa_err(xa_store(&dev->cqs_xa, cq->cq_idx, cq, GFP_KERNEL)); in efa_create_cq()
1185 cq->cq_idx); in efa_create_cq()
1201 cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr); in efa_create_cq()
1206 if (cq->eq) in efa_create_cq()
[all …]
/openbmc/linux/drivers/tee/optee/
call.c
53 mutex_lock(&cq->mutex); in optee_cq_wait_init()
62 list_add_tail(&w->list_node, &cq->waiters); in optee_cq_wait_init()
64 mutex_unlock(&cq->mutex); in optee_cq_wait_init()
72 mutex_lock(&cq->mutex); in optee_cq_wait_for_completion()
77 list_add_tail(&w->list_node, &cq->waiters); in optee_cq_wait_for_completion()
79 mutex_unlock(&cq->mutex); in optee_cq_wait_for_completion()
86 list_for_each_entry(w, &cq->waiters, list_node) { in optee_cq_complete_one()
102 mutex_lock(&cq->mutex); in optee_cq_wait_final()
108 optee_cq_complete_one(cq); in optee_cq_wait_final()
117 optee_cq_complete_one(cq); in optee_cq_wait_final()
[all …]
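
The optee call.c hits outline a call queue whose waiters sit on a mutex-protected list and are completed one at a time. A rough, self-contained rendering of that shape; the demo_* names are hypothetical stand-ins for the optee structs:

#include <linux/completion.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct demo_cq {
        struct mutex mutex;
        struct list_head waiters;       /* FIFO of demo_waiter */
};

struct demo_waiter {
        struct list_head list_node;
        struct completion c;
};

static void demo_cq_init(struct demo_cq *cq)
{
        mutex_init(&cq->mutex);
        INIT_LIST_HEAD(&cq->waiters);
}

static void demo_cq_wait_init(struct demo_cq *cq, struct demo_waiter *w)
{
        init_completion(&w->c);
        mutex_lock(&cq->mutex);
        list_add_tail(&w->list_node, &cq->waiters);     /* queue in arrival order */
        mutex_unlock(&cq->mutex);
}

/* called with cq->mutex held, as in optee_cq_wait_final() above */
static void demo_cq_complete_one(struct demo_cq *cq)
{
        struct demo_waiter *w;

        list_for_each_entry(w, &cq->waiters, list_node) {
                if (!completion_done(&w->c)) {
                        complete(&w->c);        /* wake the oldest blocked waiter */
                        break;
                }
        }
}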
/openbmc/linux/drivers/vfio/pci/mlx5/
cmd.c
941 tracker.cq.mcq); in mlx5vf_cq_complete()
952 struct mlx5_vhca_cq *cq; in mlx5vf_create_cq() local
958 cq = &tracker->cq; in mlx5vf_create_cq()
964 cq->ncqe = ncqe; in mlx5vf_create_cq()
965 cq->mcq.set_ci_db = cq->db.db; in mlx5vf_create_cq()
966 cq->mcq.arm_db = cq->db.db + 1; in mlx5vf_create_cq()
975 cq->buf.frag_buf.npages; in mlx5vf_create_cq()
1526 void *cqe = get_cqe(cq, n & (cq->ncqe - 1)); in get_sw_cqe()
1546 cqe = get_sw_cqe(cq, cq->mcq.cons_index); in mlx5vf_cq_poll_one()
1550 ++cq->mcq.cons_index; in mlx5vf_cq_poll_one()
[all …]
/openbmc/linux/drivers/infiniband/hw/erdma/
erdma_verbs.c
213 cq->kern_cq.qbuf_dma_addr + (cq->depth << CQE_SHIFT); in create_cq_cmd()
1233 req.cqn = cq->cqn; in erdma_destroy_cq()
1242 cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr); in erdma_destroy_cq()
1602 cq->kern_cq.qbuf = in erdma_init_kernel_cq()
1606 if (!cq->kern_cq.qbuf) in erdma_init_kernel_cq()
1609 cq->kern_cq.db_record = in erdma_init_kernel_cq()
1610 (u64 *)(cq->kern_cq.qbuf + (cq->depth << CQE_SHIFT)); in erdma_init_kernel_cq()
1632 cq->ibcq.cqe = depth; in erdma_create_cq()
1633 cq->depth = depth; in erdma_create_cq()
1636 ret = xa_alloc_cyclic(&dev->cq_xa, &cq->cqn, cq, in erdma_create_cq()
[all …]
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_send.c
1073 cq = kzalloc(sizeof(*cq), GFP_KERNEL); in dr_create_cq()
1074 if (!cq) in dr_create_cq()
1125 cq->mcq.cqe_sz = 64; in dr_create_cq()
1126 cq->mcq.set_ci_db = cq->wq_ctrl.db.db; in dr_create_cq()
1127 cq->mcq.arm_db = cq->wq_ctrl.db.db + 1; in dr_create_cq()
1135 cq->mcq.vector = 0; in dr_create_cq()
1136 cq->mcq.uar = uar; in dr_create_cq()
1137 cq->mdev = mdev; in dr_create_cq()
1139 return cq; in dr_create_cq()
1144 kfree(cq); in dr_create_cq()
[all …]
/openbmc/linux/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_common.c
863 aq->rq.cq = qidx; in otx2_rq_init()
992 cq = &qset->cq[qidx]; in otx2_cq_init()
993 cq->cq_idx = qidx; in otx2_cq_init()
1021 err = qmem_alloc(pfvf->dev, &cq->cqe, cq->cqe_cnt, cq->cqe_size); in otx2_cq_init()
1026 cq->cqe_base = cq->cqe->base; in otx2_cq_init()
1040 aq->cq.ena = 1; in otx2_cq_init()
1041 aq->cq.qsize = Q_SIZE(cq->cqe_cnt, 4); in otx2_cq_init()
1043 aq->cq.base = cq->cqe->iova; in otx2_cq_init()
1044 aq->cq.cint_idx = cq->cint_idx; in otx2_cq_init()
1050 aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt); in otx2_cq_init()
[all …]
/openbmc/linux/drivers/infiniband/hw/qedr/
qedr_roce_cm.c
75 struct qedr_cq *cq = dev->gsi_sqcq; in qedr_ll2_complete_tx_packet() local
92 if (cq->ibcq.comp_handler) in qedr_ll2_complete_tx_packet()
93 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); in qedr_ll2_complete_tx_packet()
121 if (cq->ibcq.comp_handler) in qedr_ll2_complete_rx_packet()
122 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); in qedr_ll2_complete_rx_packet()
137 struct qedr_cq *cq; in qedr_destroy_gsi_cq() local
139 cq = get_qedr_cq(attrs->send_cq); in qedr_destroy_gsi_cq()
140 iparams.icid = cq->icid; in qedr_destroy_gsi_cq()
144 cq = get_qedr_cq(attrs->recv_cq); in qedr_destroy_gsi_cq()
146 if (iparams.icid != cq->icid) { in qedr_destroy_gsi_cq()
[all …]
/openbmc/linux/drivers/isdn/mISDN/
hwchannel.c
160 switch (cq->op) { in mISDN_ctrl_bchannel()
162 cq->op = MISDN_CTRL_RX_BUFFER | MISDN_CTRL_FILL_EMPTY | in mISDN_ctrl_bchannel()
166 if (cq->p1) { in mISDN_ctrl_bchannel()
175 cq->p2 = bch->dropcnt; in mISDN_ctrl_bchannel()
176 if (cq->p1) in mISDN_ctrl_bchannel()
183 if (cq->p2 > MISDN_CTRL_RX_SIZE_IGNORE) in mISDN_ctrl_bchannel()
184 bch->next_maxlen = cq->p2; in mISDN_ctrl_bchannel()
185 if (cq->p1 > MISDN_CTRL_RX_SIZE_IGNORE) in mISDN_ctrl_bchannel()
186 bch->next_minlen = cq->p1; in mISDN_ctrl_bchannel()
188 cq->p1 = bch->minlen; in mISDN_ctrl_bchannel()
[all …]
dsp_core.c
190 struct mISDN_ctrl_req cq; in dsp_rx_off_member() local
193 memset(&cq, 0, sizeof(cq)); in dsp_rx_off_member()
222 cq.op = MISDN_CTRL_RX_OFF; in dsp_rx_off_member()
223 cq.p1 = rx_off; in dsp_rx_off_member()
257 struct mISDN_ctrl_req cq; in dsp_fill_empty() local
259 memset(&cq, 0, sizeof(cq)); in dsp_fill_empty()
268 cq.p1 = 1; in dsp_fill_empty()
269 cq.p2 = dsp_silence; in dsp_fill_empty()
629 struct mISDN_ctrl_req cq; in get_features() local
637 memset(&cq, 0, sizeof(cq)); in get_features()
[all …]
/openbmc/linux/drivers/infiniband/hw/hfi1/
user_sdma.c
161 cq = kzalloc(sizeof(*cq), GFP_KERNEL); in hfi1_user_sdma_alloc_queues()
162 if (!cq) in hfi1_user_sdma_alloc_queues()
165 cq->comps = vmalloc_user(PAGE_ALIGN(sizeof(*cq->comps) in hfi1_user_sdma_alloc_queues()
167 if (!cq->comps) in hfi1_user_sdma_alloc_queues()
177 fd->cq = cq; in hfi1_user_sdma_alloc_queues()
184 kfree(cq); in hfi1_user_sdma_alloc_queues()
241 if (fd->cq) { in hfi1_user_sdma_free_queues()
243 kfree(fd->cq); in hfi1_user_sdma_free_queues()
285 struct hfi1_user_sdma_comp_q *cq = fd->cq; in hfi1_user_sdma_process_request() local
356 req->cq = cq; in hfi1_user_sdma_process_request()
[all …]
/openbmc/linux/drivers/accel/habanalabs/common/
irq.c
78 static void job_finish(struct hl_device *hdev, u32 cs_seq, struct hl_cq *cq, ktime_t timestamp) in job_finish() argument
83 queue = &hdev->kernel_queues[cq->hw_queue_id]; in job_finish()
86 queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work); in job_finish()
132 struct hl_cq *cq = arg; in hl_irq_handler_cq() local
133 struct hl_device *hdev = cq->hdev; in hl_irq_handler_cq()
142 irq, cq->hw_queue_id); in hl_irq_handler_cq()
146 cq_base = cq->kernel_address; in hl_irq_handler_cq()
149 cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci]; in hl_irq_handler_cq()
181 job_finish(hdev, shadow_index, cq, timestamp); in hl_irq_handler_cq()
188 cq->ci = hl_cq_inc_ptr(cq->ci); in hl_irq_handler_cq()
[all …]
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/en/
health.c
38 int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) in mlx5e_health_cq_diag_fmsg() argument
45 err = mlx5_core_query_cq(cq->mdev, &cq->mcq, out); in mlx5e_health_cq_diag_fmsg()
56 err = devlink_fmsg_u32_pair_put(fmsg, "cqn", cq->mcq.cqn); in mlx5e_health_cq_diag_fmsg()
64 err = devlink_fmsg_u32_pair_put(fmsg, "ci", mlx5_cqwq_get_ci(&cq->wq)); in mlx5e_health_cq_diag_fmsg()
68 err = devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&cq->wq)); in mlx5e_health_cq_diag_fmsg()
79 int mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) in mlx5e_health_cq_common_diag_fmsg() argument
85 cq_sz = mlx5_cqwq_get_size(&cq->wq); in mlx5e_health_cq_common_diag_fmsg()
86 cq_log_stride = mlx5_cqwq_get_log_stride_size(&cq->wq); in mlx5e_health_cq_common_diag_fmsg()
/openbmc/linux/drivers/infiniband/hw/irdma/
ctrl.c
2471 writel(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db); in irdma_sc_cq_ack()
2529 if (cq->ceq_id >= cq->dev->hmc_fpm_misc.max_ceqs) in irdma_sc_cq_create()
2532 ceq = cq->dev->ceq[cq->ceq_id]; in irdma_sc_cq_create()
2550 set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa)); in irdma_sc_cq_create()
2559 FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0), in irdma_sc_cq_create()
2601 ceq = cq->dev->ceq[cq->ceq_id]; in irdma_sc_cq_destroy()
2609 (cq->virtual_map ? cq->first_pm_pbl_idx : 0)); in irdma_sc_cq_destroy()
2612 FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0), in irdma_sc_cq_destroy()
3843 if (cq) in irdma_sc_process_ceq()
3845 return cq; in irdma_sc_process_ceq()
[all …]
/openbmc/linux/drivers/infiniband/hw/bnxt_re/
ib_verbs.c
1385 qp->scq = cq; in bnxt_re_init_qp_attr()
1391 qp->rcq = cq; in bnxt_re_init_qp_attr()
2924 kfree(cq->cql); in bnxt_re_destroy_cq()
2952 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq); in bnxt_re_create_cq()
2974 cq->qplib_cq.sg_info.umem = cq->umem; in bnxt_re_create_cq()
3004 cq->cq_period = cq->qplib_cq.period; in bnxt_re_create_cq()
3032 kfree(cq->cql); in bnxt_re_create_cq()
3042 cq->qplib_cq.max_wqe = cq->resize_cqe; in bnxt_re_resize_cq_complete()
3045 cq->umem = cq->resize_umem; in bnxt_re_resize_cq_complete()
3108 cq->qplib_cq.sg_info.umem = cq->resize_umem; in bnxt_re_resize_cq()
[all …]
/openbmc/linux/drivers/net/ethernet/microsoft/mana/
gdma_main.c
287 e.cq.id = qid; in mana_gd_ring_doorbell()
289 e.cq.arm = num_req; in mana_gd_ring_doorbell()
337 mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id, in mana_gd_ring_cq()
349 struct gdma_queue *cq; in mana_gd_process_eqe() local
364 if (WARN_ON_ONCE(!cq || cq->type != GDMA_CQ || cq->id != cq_id)) in mana_gd_process_eqe()
367 if (cq->cq.callback) in mana_gd_process_eqe()
368 cq->cq.callback(cq->cq.context, cq); in mana_gd_process_eqe()
634 queue->cq.parent = spec->cq.parent_eq; in mana_gd_create_cq()
635 queue->cq.context = spec->cq.context; in mana_gd_create_cq()
636 queue->cq.callback = spec->cq.callback; in mana_gd_create_cq()
[all …]
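
mana_gd_process_eqe() in the gdma_main.c hits resolves the cq_id carried in an event to a queue via gc->cq_table, sanity-checks it, and invokes the queue's completion callback. A generic sketch of that id-to-callback dispatch; the demo_* names are hypothetical:

#include <linux/kernel.h>
#include <linux/types.h>

struct demo_cq;
typedef void (*demo_cq_cb)(void *context, struct demo_cq *cq);

struct demo_cq {
        u32 id;
        demo_cq_cb callback;
        void *context;
};

struct demo_ctx {
        struct demo_cq *cq_table[64];   /* indexed by queue id */
};

static void demo_process_eqe(struct demo_ctx *gc, u32 cq_id)
{
        struct demo_cq *cq;

        if (cq_id >= ARRAY_SIZE(gc->cq_table))
                return;
        cq = gc->cq_table[cq_id];
        if (WARN_ON_ONCE(!cq || cq->id != cq_id))       /* stale or missing entry */
                return;
        if (cq->callback)
                cq->callback(cq->context, cq);
}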
hw_channel.c
126 hwc->cq->gdma_cq->id = val; in mana_hwc_init_event_handler()
283 spec.cq.context = ctx; in mana_hwc_create_gdma_cq()
284 spec.cq.callback = cb; in mana_hwc_create_gdma_cq()
356 struct gdma_queue *eq, *cq; in mana_hwc_create_cq() local
382 eq, &cq); in mana_hwc_create_cq()
387 hwc_cq->gdma_cq = cq; in mana_hwc_create_cq()
623 struct gdma_queue *cq = hwc->cq->gdma_cq; in mana_hwc_establish_channel() local
652 gc->cq_table[cq->id] = cq; in mana_hwc_establish_channel()
679 hwc->cq, &hwc->rxq); in mana_hwc_init_queues()
686 hwc->cq, &hwc->txq); in mana_hwc_init_queues()
[all …]
/openbmc/linux/drivers/infiniband/ulp/rtrs/
rtrs.c
236 struct ib_cq *cq; in create_cq() local
244 if (IS_ERR(cq)) { in create_cq()
246 PTR_ERR(cq)); in create_cq()
247 return PTR_ERR(cq); in create_cq()
249 con->cq = cq; in create_cq()
270 init_attr.send_cq = con->cq; in create_qp()
271 init_attr.recv_cq = con->cq; in create_qp()
286 if (con->cq) { in destroy_cq()
288 ib_free_cq(con->cq); in destroy_cq()
290 ib_cq_pool_put(con->cq, con->nr_cqe); in destroy_cq()
[all …]
