
Searched refs:cqes (Results 1 – 25 of 27) sorted by relevance


/openbmc/qemu/block/
io_uring.c
126 struct io_uring_cqe *cqes; in luring_process_completions() local
148 while (io_uring_peek_cqe(&s->ring, &cqes) == 0) { in luring_process_completions()
152 if (!cqes) { in luring_process_completions()
156 luringcb = io_uring_cqe_get_data(cqes); in luring_process_completions()
157 ret = cqes->res; in luring_process_completions()
158 io_uring_cqe_seen(&s->ring, cqes); in luring_process_completions()
159 cqes = NULL; in luring_process_completions()
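
The QEMU hits above show the canonical liburing drain loop: peek a pending CQE, recover the per-request context with io_uring_cqe_get_data(), read the result from res, then retire the entry with io_uring_cqe_seen(). A minimal sketch of that loop, assuming liburing is available; process_one() is a hypothetical callback:

```c
#include <liburing.h>

static void drain_completions(struct io_uring *ring,
                              void (*process_one)(void *user_data, int res))
{
    struct io_uring_cqe *cqe;

    /* io_uring_peek_cqe() returns 0 while a completion is available. */
    while (io_uring_peek_cqe(ring, &cqe) == 0) {
        if (!cqe) {
            break;                      /* ring empty: stop polling */
        }
        process_one(io_uring_cqe_get_data(cqe), cqe->res);
        io_uring_cqe_seen(ring, cqe);   /* advance the CQ head */
        cqe = NULL;                     /* defensive reset, as at line 159 */
    }
}
```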
/openbmc/u-boot/drivers/nvme/
nvme.c
36 struct nvme_completion *cqes; member
130 u64 start = (ulong)&nvmeq->cqes[index]; in nvme_read_completion_status()
135 return le16_to_cpu(readw(&(nvmeq->cqes[index].status))); in nvme_read_completion_status()
228 if (!nvmeq->cqes) in nvme_alloc_queue()
230 memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth)); in nvme_alloc_queue()
250 free((void *)nvmeq->cqes); in nvme_alloc_queue()
298 free((void *)nvmeq->cqes); in nvme_free_queue()
324 flush_dcache_range((ulong)nvmeq->cqes, in nvme_init_queue()
325 (ulong)nvmeq->cqes + NVME_CQ_SIZE(nvmeq->q_depth)); in nvme_init_queue()
376 nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq); in nvme_configure_admin_queue()
[all …]
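
The U-Boot driver keeps the CQ ring in memory that is not DMA-coherent, so accesses are bracketed by explicit cache maintenance: flush_dcache_range() after initializing the ring (lines 324–325) and an invalidate before the CPU reads a status the controller wrote. A sketch of the read side, reconstructed from the hits at lines 130–135 and relying on U-Boot's invalidate_dcache_range(), readw(), and le16_to_cpu() helpers:

```c
/* Read a completion status DMA'd by the controller; nvmeq->cqes and the
 * types are as in the hits above (driver context, not standalone). */
static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
{
    ulong start = (ulong)&nvmeq->cqes[index];
    ulong stop  = start + sizeof(struct nvme_completion);

    invalidate_dcache_range(start, stop);   /* drop stale CPU cache lines */
    return le16_to_cpu(readw(&nvmeq->cqes[index].status));
}
```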
nvme.h
61 __u8 cqes; member
/openbmc/linux/drivers/infiniband/hw/cxgb4/
restrack.c
334 struct t4_cqe *cqes) in fill_hwcqes() argument
339 if (fill_cqe(msg, cqes, idx, "hwcq_idx")) in fill_hwcqes()
342 if (fill_cqe(msg, cqes + 1, idx, "hwcq_idx")) in fill_hwcqes()
351 struct t4_cqe *cqes) in fill_swcqes() argument
359 if (fill_cqe(msg, cqes, idx, "swcq_idx")) in fill_swcqes()
364 if (fill_cqe(msg, cqes + 1, idx, "swcq_idx")) in fill_swcqes()
/openbmc/linux/tools/testing/selftests/net/
io_uring_zerocopy_tx.c
77 struct io_uring_cqe *cqes; member
102 struct io_uring_cqe *cqes; member
198 cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe); in io_uring_mmap()
211 cq->cqes = ptr + p->cq_off.cqes; in io_uring_mmap()
311 *cqe_ptr = &cq->cqes[head & mask]; in io_uring_wait_cqe()
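
This selftest drives io_uring without liburing: it computes the CQ mapping size from cq_off.cqes plus the CQE array (line 198), mmaps the ring, and indexes entries with head & mask (line 311). A minimal sketch of that setup against the same uapi, with error handling elided; the queue depth (64) and the cq_view/map_cq_ring names are illustrative:

```c
#define _GNU_SOURCE
#include <linux/io_uring.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

struct cq_view {
    unsigned *head, *tail, *ring_mask;
    struct io_uring_cqe *cqes;
    size_t ring_sz;
};

static int map_cq_ring(struct cq_view *cq)
{
    struct io_uring_params p;

    memset(&p, 0, sizeof(p));
    int fd = syscall(__NR_io_uring_setup, 64, &p);
    if (fd < 0)
        return -1;

    /* The CQE array sits cq_off.cqes bytes into the CQ ring mapping. */
    cq->ring_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
    char *ptr = mmap(NULL, cq->ring_sz, PROT_READ | PROT_WRITE,
                     MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
    if (ptr == MAP_FAILED)
        return -1;

    cq->head      = (unsigned *)(ptr + p.cq_off.head);
    cq->tail      = (unsigned *)(ptr + p.cq_off.tail);
    cq->ring_mask = (unsigned *)(ptr + p.cq_off.ring_mask);
    cq->cqes      = (struct io_uring_cqe *)(ptr + p.cq_off.cqes);
    return fd;  /* a ready CQE is then &cq->cqes[*cq->head & *cq->ring_mask] */
}
```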
/openbmc/linux/drivers/net/ethernet/fungible/funeth/
funeth_rx.c
353 q->next_cqe_info = cqe_to_info(q->cqes); in advance_cq()
644 q->cqes = fun_alloc_ring_mem(q->dma_dev, ncqe, FUNETH_CQE_SIZE, 0, in fun_rxq_create_sw()
647 if (!q->cqes) in fun_rxq_create_sw()
665 dma_free_coherent(q->dma_dev, ncqe * FUNETH_CQE_SIZE, q->cqes, in fun_rxq_create_sw()
686 q->cqes, q->cq_dma_addr); in fun_rxq_free_sw()
722 q->next_cqe_info = cqe_to_info(q->cqes); in fun_rxq_create_dev()
funeth_txrx.h
167 void *cqes; /* base of CQ descriptor ring */ member
/openbmc/linux/drivers/net/ethernet/fungible/funcore/
fun_queue.c
295 cqe = funq->cqes + (funq->cq_head << funq->cqe_size_log2); in __fun_process_cq()
366 funq->cqes = fun_alloc_ring_mem(funq->fdev->dev, funq->cq_depth, in fun_alloc_cqes()
370 return funq->cqes ? 0 : -ENOMEM; in fun_alloc_cqes()
389 funq->cqes, funq->cq_dma_addr, NULL); in fun_free_queue()
fun_queue.h
36 void *cqes; member
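
Both Fungible drivers keep cqes as an untyped void * base and store the entry size as a log2, so stepping to the CQE at cq_head is a shift rather than a multiply (fun_queue.c line 295). A sketch of that indexing, with illustrative names:

```c
#include <stddef.h>

/* Return the CQE at position head in a ring whose entries are
 * (1 << cqe_size_log2) bytes each; mirrors fun_queue.c line 295. */
static inline void *cq_entry(void *cqes, unsigned int head,
                             unsigned int cqe_size_log2)
{
    return (char *)cqes + ((size_t)head << cqe_size_log2);
}
```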
/openbmc/linux/tools/testing/selftests/x86/
lam.c
96 struct io_uring_cqe *cqes; member
390 cring->ring_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe); in mmap_io_uring()
445 cring->queue.cqes = cq_ptr + p.cq_off.cqes; in mmap_io_uring()
482 cqe = &cring->queue.cqes[head & *s->cq_ring.ring_mask]; in handle_uring_cq()
/openbmc/linux/drivers/net/ethernet/broadcom/
cnic.c
1432 cqes, num_cqes); in cnic_reply_bnx2x_kcqes()
1550 struct kcqe *cqes[1]; in cnic_bnx2x_iscsi_init2() local
1580 cqes[0] = (struct kcqe *) &kcqe; in cnic_bnx2x_iscsi_init2()
1879 struct kcqe *cqes[1]; in cnic_bnx2x_iscsi_ofld1() local
1997 struct kcqe *cqes[1]; in cnic_bnx2x_iscsi_destroy() local
2226 struct kcqe *cqes[1]; in cnic_bnx2x_offload_pg() local
2241 struct kcqe *cqes[1]; in cnic_bnx2x_update_pg() local
2351 struct kcqe *cqes[1]; in cnic_bnx2x_fcoe_ofld1() local
2498 struct kcqe *cqes[1]; in cnic_bnx2x_fcoe_destroy() local
2582 struct kcqe *cqes[1]; in cnic_bnx2x_kwqe_err() local
[all …]
cnic_if.h
369 void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
/openbmc/linux/drivers/nvme/host/
apple.c
135 struct nvme_completion *cqes; member
571 struct nvme_completion *hcqe = &q->cqes[q->cq_head]; in apple_nvme_cqe_pending()
589 struct nvme_completion *cqe = &q->cqes[idx]; in apple_nvme_handle_cqe()
976 memset(q->cqes, 0, depth * sizeof(struct nvme_completion)); in apple_nvme_init_queue()
1299 q->cqes = dmam_alloc_coherent(anv->dev, in apple_nvme_queue_alloc()
1302 if (!q->cqes) in apple_nvme_queue_alloc()
pci.c
195 struct nvme_completion *cqes; member
993 struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head]; in nvme_cqe_pending()
1017 struct nvme_completion *cqe = &nvmeq->cqes[idx]; in nvme_handle_cqe()
1411 (void *)nvmeq->cqes, nvmeq->cq_dma_addr); in nvme_free_queue()
1536 nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq), in nvme_alloc_queue()
1538 if (!nvmeq->cqes) in nvme_alloc_queue()
1556 dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes, in nvme_alloc_queue()
1585 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq)); in nvme_init_queue()
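
Both NVMe host drivers poll the CQ by testing the phase tag: the controller flips bit 0 of each CQE's status field every time it wraps the ring, so an entry belongs to the current pass only while that bit matches the driver's expected phase. A kernel-context sketch of the pending test behind the apple.c/pci.c hits; struct nvme_queue stands for the driver-private queue with the cqes/cq_head/cq_phase fields seen above:

```c
/* True if the CQE at cq_head was written during the current ring pass. */
static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
{
    struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];

    return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase;
}
```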
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/
en_stats.c
282 s->tx_xdp_cqes += xdpsq_red_stats->cqes; in mlx5e_stats_grp_sw_update_stats_xdp_red()
294 s->rx_xdp_tx_cqe += xdpsq_stats->cqes; in mlx5e_stats_grp_sw_update_stats_xdpsq()
305 s->tx_xsk_cqes += xsksq_stats->cqes; in mlx5e_stats_grp_sw_update_stats_xsksq()
440 s->tx_cqes += sq_stats->cqes; in mlx5e_stats_grp_sw_update_stats_sq()
2067 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
2079 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2089 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2119 { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2143 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
2218 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
en_stats.h
433 u64 cqes ____cacheline_aligned_in_smp;
446 u64 cqes ____cacheline_aligned_in_smp;
en_tx.c
875 stats->cqes += i; in mlx5e_poll_tx_cq()
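
In en_stats.h the cqes counters carry ____cacheline_aligned_in_smp, which starts a counter group on its own cache line; on SMP that presumably keeps updates to one group from bouncing the line holding its neighbors (false sharing). A userspace sketch of the same layout trick, assuming 64-byte lines and illustrative field names:

```c
#include <stdint.h>

/* Per-queue counters; cqes starts a new 64-byte line so writes to it do
 * not share a cache line with the preceding group. */
struct sq_stats_sketch {
    uint64_t packets;
    uint64_t bytes;
    uint64_t cqes __attribute__((aligned(64)));
};
```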
/openbmc/linux/drivers/nvme/target/
passthru.c
134 id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes); in nvmet_passthru_override_id_ctrl()
admin-cmd.c
428 id->cqes = (0x4 << 4) | 0x4; in nvmet_execute_identify_ctrl()
/openbmc/qemu/include/block/
nvme.h
1142 uint8_t cqes; member
1247 #define NVME_CTRL_CQES_MIN(cqes) ((cqes) & 0xf) argument
1248 #define NVME_CTRL_CQES_MAX(cqes) (((cqes) >> 4) & 0xf) argument
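
These hits all touch the CQES byte of the NVMe Identify Controller data: the low nibble is log2 of the minimum CQ entry size and the high nibble log2 of the maximum, which is why the target writes (0x4 << 4) | 0x4 to advertise exactly 16-byte entries. A standalone sketch using the QEMU macros from lines 1247–1248:

```c
#include <assert.h>
#include <stdint.h>

#define NVME_CTRL_CQES_MIN(cqes) ((cqes) & 0xf)
#define NVME_CTRL_CQES_MAX(cqes) (((cqes) >> 4) & 0xf)

int main(void)
{
    uint8_t cqes = (0x4 << 4) | 0x4;   /* as written by nvmet above */

    assert((1u << NVME_CTRL_CQES_MIN(cqes)) == 16);  /* min entry: 16 B */
    assert((1u << NVME_CTRL_CQES_MAX(cqes)) == 16);  /* max entry: 16 B */
    return 0;
}
```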
/openbmc/linux/io_uring/
fdinfo.c
126 struct io_uring_cqe *cqe = &r->cqes[(entry & cq_mask) << cq_shift]; in io_uring_show_fdinfo()
io_uring.c
872 ctx->cqe_cached = &rings->cqes[off]; in io_cqe_cache_refill()
2806 off = struct_size(rings, cqes, cq_entries); in rings_size()
3994 p->cq_off.cqes = offsetof(struct io_rings, cqes); in io_uring_create()
/openbmc/linux/include/uapi/linux/
io_uring.h
448 __u32 cqes; member
/openbmc/linux/include/linux/
io_uring_types.h
151 struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp; member
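
The io_uring core pieces fit together across these hits: cqes[] is a flexible array at the tail of struct io_rings (io_uring_types.h line 151), rings_size() sizes the allocation with struct_size(rings, cqes, cq_entries) (io_uring.c line 2806), and the array's offset inside the shared mapping is exported to userspace through cq_off.cqes via offsetof() (line 3994). A standalone sketch of that sizing arithmetic, with simplified types:

```c
#include <stddef.h>
#include <stdint.h>

struct cqe { uint64_t user_data; int32_t res; uint32_t flags; };

struct rings {
    uint32_t cq_head, cq_tail, cq_ring_mask;
    struct cqe cqes[];              /* flexible array member at the tail */
};

/* Bytes needed for the header plus n CQEs; the kernel computes the same
 * sum with the overflow-checked struct_size() helper. */
static size_t rings_bytes(unsigned int cq_entries)
{
    return offsetof(struct rings, cqes) + cq_entries * sizeof(struct cqe);
}
```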
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/en/
xdp.c
743 sq->stats->cqes += i; in mlx5e_poll_xdpsq_cq()
