/openbmc/linux/drivers/scsi/snic/

vnic_cq.c
    34  unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,  in svnic_cq_init() argument
    46  iowrite32(cq_head, &cq->ctrl->cq_head);  in svnic_cq_init()
    61  iowrite32(0, &cq->ctrl->cq_head);  in svnic_cq_clean()

vnic_cq.h
    19  u32 cq_head;  /* 0x20 */  member
    91  unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
/openbmc/linux/drivers/net/ethernet/cisco/enic/

vnic_cq.c
    39  unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,  in vnic_cq_init() argument
    51  iowrite32(cq_head, &cq->ctrl->cq_head);  in vnic_cq_init()
    68  iowrite32(0, &cq->ctrl->cq_head);  in vnic_cq_clean()

vnic_cq.h
    22  u32 cq_head;  /* 0x20 */  member
   104  unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
/openbmc/linux/drivers/scsi/fnic/

vnic_cq.c
    41  unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,  in vnic_cq_init() argument
    53  iowrite32(cq_head, &cq->ctrl->cq_head);  in vnic_cq_init()
    68  iowrite32(0, &cq->ctrl->cq_head);  in vnic_cq_clean()

vnic_cq.h
    31  u32 cq_head;  /* 0x20 */  member
   103  unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
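The snic, enic and fnic hits above are three copies of the same Cisco vNIC completion-queue library: *_cq_init() programs the head index by writing it straight into a memory-mapped control block with iowrite32(), and *_cq_clean() rewinds the head register to 0. A minimal userspace sketch of that idea, with a plain volatile struct standing in for the ioremap()ed control block; only cq_head's 0x20 offset comes from vnic_cq.h, everything else is assumed for illustration:

```c
#include <stdint.h>

/* Stand-in for the device's CQ control block. Only cq_head's 0x20
 * offset is taken from vnic_cq.h; the surrounding layout is made up
 * for the sketch. */
struct cq_ctrl_sketch {
	uint32_t reserved[8];   /* 0x00 - 0x1c */
	uint32_t cq_head;       /* 0x20: next entry the host will consume */
	uint32_t cq_tail;       /* assumed to follow cq_head */
};

/* Init: program the starting head/tail, as vnic_cq_init() does with
 * iowrite32(cq_head, &cq->ctrl->cq_head). */
static void cq_init_sketch(volatile struct cq_ctrl_sketch *ctrl,
			   uint32_t cq_head, uint32_t cq_tail)
{
	ctrl->cq_head = cq_head;
	ctrl->cq_tail = cq_tail;
}

/* Clean: rewind the head register to slot 0, as vnic_cq_clean() does. */
static void cq_clean_sketch(volatile struct cq_ctrl_sketch *ctrl)
{
	ctrl->cq_head = 0;
}
```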
/openbmc/linux/drivers/net/ethernet/fungible/funeth/

funeth_trace.h
    85  __field(u32, cq_head)
    95  __entry->cq_head = rxq->cq_head;
   104  __get_str(devname), __entry->qidx, __entry->cq_head,

funeth_rx.c
   350  if (unlikely(q->cq_head == q->cq_mask)) {  in advance_cq()
   351  q->cq_head = 0;  in advance_cq()
   355  q->cq_head++;  in advance_cq()
   515  u32 cq_db_val = q->cq_head;  in fun_rxq_napi_poll()
   716  q->cq_head = 0;  in fun_rxq_create_dev()

funeth_txrx.h
   170  unsigned int cq_head;  /* CQ head index */  member
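funeth keeps the CQ head as a plain index plus a mask (ring depth minus one, with the depth a power of two): advance_cq() wraps the head back to 0 when it reaches cq_mask, and fun_rxq_napi_poll() later feeds the head index itself to the CQ doorbell. A small sketch of that wrap logic, with a hypothetical cut-down queue struct:

```c
/* Hypothetical slice of the funeth RX queue state; the real struct in
 * funeth_txrx.h carries many more fields. */
struct rxq_sketch {
	unsigned int cq_head;   /* CQ head index */
	unsigned int cq_mask;   /* ring depth - 1, depth is a power of two */
};

/* Mirror of the advance_cq() wrap shown above: reset at the last slot,
 * otherwise increment. */
static void advance_cq_sketch(struct rxq_sketch *q)
{
	if (q->cq_head == q->cq_mask)
		q->cq_head = 0;
	else
		q->cq_head++;
}
```

After a poll pass the driver hands the resulting head index to the hardware (u32 cq_db_val = q->cq_head), which is the usual way of returning consumed CQ slots to the device.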
/openbmc/linux/io_uring/

fdinfo.c
    61  unsigned int cq_head = READ_ONCE(r->cq.head);  in io_uring_show_fdinfo() local
    86  seq_printf(m, "CqHead:\t%u\n", cq_head);  in io_uring_show_fdinfo()
   122  seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);  in io_uring_show_fdinfo()
   123  cq_entries = min(cq_tail - cq_head, ctx->cq_entries);  in io_uring_show_fdinfo()
   125  unsigned int entry = i + cq_head;  in io_uring_show_fdinfo()
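io_uring's CQ head and tail are free-running 32-bit counters, so fdinfo.c can report the number of pending CQEs as the plain unsigned difference cq_tail - cq_head, clamped to the ring size. A self-contained sketch of that arithmetic (the function name is made up):

```c
#include <stdint.h>
#include <stdio.h>

/* Pending CQE count from free-running counters: the unsigned
 * difference is well-defined even after the counters wrap. */
static unsigned int pending_cqes(uint32_t cq_head, uint32_t cq_tail,
				 unsigned int cq_entries)
{
	unsigned int n = cq_tail - cq_head;

	return n < cq_entries ? n : cq_entries;   /* clamp to ring size */
}

int main(void)
{
	/* Tail has wrapped past UINT32_MAX; the difference is still 3. */
	printf("%u\n", pending_cqes(0xfffffffeu, 0x00000001u, 4096));
	return 0;
}
```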
/openbmc/linux/drivers/net/ethernet/fungible/funcore/

fun_queue.c
   295  cqe = funq->cqes + (funq->cq_head << funq->cqe_size_log2);  in __fun_process_cq()
   305  if (++funq->cq_head == funq->cq_depth) {  in __fun_process_cq()
   306  funq->cq_head = 0;  in __fun_process_cq()
   331  funq->cqid, new_cqes, max, funq->cq_head, funq->cq_phase);  in __fun_process_cq()
   349  db = funq->cq_head | FUN_DB_IRQ_ARM_F;  in fun_process_cq()

fun_queue.h
    49  u16 cq_head;  member
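The funcore queue code scales the head index by the CQE size (cq_head << cqe_size_log2) to locate the current entry, resets the head when it reaches cq_depth, and later re-arms the interrupt by writing the head ORed with FUN_DB_IRQ_ARM_F to the doorbell. The listing also shows a cq_phase field; flipping it when the head wraps is the usual phase-tag convention and is assumed in this sketch:

```c
#include <stddef.h>
#include <stdint.h>

/* Hypothetical slice of struct fun_queue, just enough for CQ walking. */
struct funq_sketch {
	uint8_t *cqes;            /* CQE array, entries of 2^cqe_size_log2 bytes */
	uint32_t cqe_size_log2;
	uint32_t cq_depth;
	uint16_t cq_head;
	uint16_t cq_phase;        /* phase bit expected in valid CQEs */
};

/* Locate the CQE the head points at: index scaled by the entry size. */
static void *funq_current_cqe(struct funq_sketch *funq)
{
	return funq->cqes + ((size_t)funq->cq_head << funq->cqe_size_log2);
}

/* Advance the head; on wrap, flip the expected phase so last lap's
 * entries no longer look new (assumed, see lead-in). */
static void funq_advance_head(struct funq_sketch *funq)
{
	if (++funq->cq_head == funq->cq_depth) {
		funq->cq_head = 0;
		funq->cq_phase ^= 1;
	}
}
```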
/openbmc/u-boot/drivers/nvme/

nvme.c
    43  u16 cq_head;  member
   162  u16 head = nvmeq->cq_head;  in nvme_submit_sync_cmd()
   192  nvmeq->cq_head = head;  in nvme_submit_sync_cmd()
   206  nvmeq->cq_head = head;  in nvme_submit_sync_cmd()
   239  nvmeq->cq_head = 0;  in nvme_alloc_queue()
   320  nvmeq->cq_head = 0;  in nvme_init_queue()
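The U-Boot driver keeps cq_head in its per-queue struct, zeroes it when the queue is allocated or (re)initialised, and during a synchronous command works on a local copy (u16 head = nvmeq->cq_head) that is stored back only once the completion has been consumed. A sketch of that snapshot-and-commit pattern, with made-up helper and struct names:

```c
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical cut-down queue state, loosely after the U-Boot struct. */
struct nvme_queue_sketch {
	uint16_t cq_head;
	uint16_t q_depth;
};

/* Placeholder for "one completion entry was consumed"; the real driver
 * checks the CQE's phase/status before advancing. */
static uint16_t consume_one(uint16_t head, uint16_t depth)
{
	return (uint16_t)((head + 1) % depth);
}

/* Work on a local copy of the head and commit it back only after the
 * completion has actually been processed. */
static void poll_once_sketch(struct nvme_queue_sketch *nvmeq, bool got_cqe)
{
	uint16_t head = nvmeq->cq_head;

	if (got_cqe)
		head = consume_one(head, nvmeq->q_depth);

	nvmeq->cq_head = head;   /* committed, as at lines 192/206 above */
}
```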
/openbmc/linux/drivers/net/ethernet/marvell/octeontx2/nic/

otx2_txrx.c
    50  cq->cq_head = (status >> 20) & 0xFFFFF;  in otx2_nix_cq_op_status()
    51  if (cq->cq_tail < cq->cq_head)  in otx2_nix_cq_op_status()
    52  cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) +  in otx2_nix_cq_op_status()
    55  cq->pend_cqe = cq->cq_tail - cq->cq_head;  in otx2_nix_cq_op_status()
    64  cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head);  in otx2_get_next_cqe()
    68  cq->cq_head++;  in otx2_get_next_cqe()
    69  cq->cq_head &= (cq->cqe_cnt - 1);  in otx2_get_next_cqe()
   404  cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);  in otx2_rx_napi_handler()
   411  cq->cq_head++;  in otx2_rx_napi_handler()
   412  cq->cq_head &= (cq->cqe_cnt - 1);  in otx2_rx_napi_handler()

otx2_txrx.h
   135  u32 cq_head;  member
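otx2 reads the hardware CQ status word and extracts a 20-bit head ((status >> 20) & 0xFFFFF), then computes how many CQEs are pending, handling the case where the tail snapshot has wrapped below the head; consumption then advances cq_head with a power-of-two mask (cqe_cnt - 1). A sketch of both pieces of arithmetic:

```c
#include <stdint.h>

/* Pending CQE count from head/tail snapshots, as in
 * otx2_nix_cq_op_status(): if the tail has wrapped below the head,
 * the pending region spans the end of the ring. */
static uint32_t otx2_pend_cqe_sketch(uint32_t cq_head, uint32_t cq_tail,
				     uint32_t cqe_cnt)
{
	if (cq_tail < cq_head)
		return (cqe_cnt - cq_head) + cq_tail;
	return cq_tail - cq_head;
}

/* Head advance on a power-of-two ring, as in otx2_get_next_cqe(). */
static uint32_t otx2_next_head_sketch(uint32_t cq_head, uint32_t cqe_cnt)
{
	cq_head++;
	cq_head &= (cqe_cnt - 1);
	return cq_head;
}
```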
/openbmc/linux/drivers/dma/

hisi_dma.c
   152  u32 cq_head;  member
   472  chan->cq_head = 0;  in hisi_dma_free_chan_resources()
   738  cqe = chan->cq + chan->cq_head;  in hisi_dma_irq()
   741  chan->cq_head = (chan->cq_head + 1) % hdma_dev->chan_depth;  in hisi_dma_irq()
   743  chan->qp_num, chan->cq_head);  in hisi_dma_irq()
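hisi_dma indexes the completion ring directly with cq_head and advances it inside the IRQ handler with a modulo on the channel depth; the modulo form works for any depth, not just powers of two. A one-liner sketch of that step:

```c
#include <stdint.h>

/* Modulo advance as in hisi_dma_irq():
 * chan->cq_head = (chan->cq_head + 1) % hdma_dev->chan_depth; */
static uint32_t hisi_dma_next_head_sketch(uint32_t cq_head,
					  uint32_t chan_depth)
{
	return (cq_head + 1) % chan_depth;
}
```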
/openbmc/linux/drivers/nvme/host/

apple.c
   145  u16 cq_head;  member
   571  struct nvme_completion *hcqe = &q->cqes[q->cq_head];  in apple_nvme_cqe_pending()
   609  u32 tmp = q->cq_head + 1;  in apple_nvme_update_cq_head()
   612  q->cq_head = 0;  in apple_nvme_update_cq_head()
   615  q->cq_head = tmp;  in apple_nvme_update_cq_head()
   632  apple_nvme_handle_cqe(q, iob, q->cq_head);  in apple_nvme_poll_cq()
   637  writel(q->cq_head, q->cq_db);  in apple_nvme_poll_cq()
   972  q->cq_head = 0;  in apple_nvme_init_queue()

pci.c
   204  u16 cq_head;  member
   989  struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];  in nvme_cqe_pending()
   996  u16 head = nvmeq->cq_head;  in nvme_ring_cq_doorbell()
  1046  u32 tmp = nvmeq->cq_head + 1;  in nvme_update_cq_head()
  1049  nvmeq->cq_head = 0;  in nvme_update_cq_head()
  1052  nvmeq->cq_head = tmp;  in nvme_update_cq_head()
  1068  nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head);  in nvme_poll_cq()
  1543  nvmeq->cq_head = 0;  in nvme_alloc_queue()
  1578  nvmeq->cq_head = 0;  in nvme_init_queue()
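Both NVMe host drivers follow the same consumption loop: the CQE at cq_head is checked for freshness (apple_nvme_cqe_pending()/nvme_cqe_pending()), handled, the head is advanced with a reset to 0 at the queue depth, and the new head is written to the CQ doorbell with writel(). A sketch of the update and doorbell steps, with a volatile pointer standing in for the mapped doorbell register; the phase flip on wrap is not visible in the listing but is how these drivers track fresh entries:

```c
#include <stdint.h>

/* Hypothetical cut-down NVMe queue, loosely after the pci.c fields. */
struct nvmeq_sketch {
	uint16_t cq_head;
	uint16_t cq_phase;          /* expected phase of a fresh CQE */
	uint32_t q_depth;
	volatile uint32_t *cq_db;   /* stand-in for the mapped CQ doorbell */
};

/* Head update in the style of nvme_update_cq_head(): wrap to 0 at
 * q_depth and flip the expected phase (flip assumed, see lead-in). */
static void nvme_update_cq_head_sketch(struct nvmeq_sketch *nvmeq)
{
	uint32_t tmp = nvmeq->cq_head + 1;

	if (tmp == nvmeq->q_depth) {
		nvmeq->cq_head = 0;
		nvmeq->cq_phase ^= 1;
	} else {
		nvmeq->cq_head = tmp;
	}
}

/* Tell the controller how far we have read, as writel(q->cq_head,
 * q->cq_db) does in apple_nvme_poll_cq(). */
static void nvme_ring_cq_doorbell_sketch(struct nvmeq_sketch *nvmeq)
{
	*nvmeq->cq_db = nvmeq->cq_head;
}
```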
/openbmc/linux/drivers/infiniband/hw/irdma/

uk.c
  1495  u32 cq_head;  in irdma_uk_clean_cq() local
  1498  cq_head = cq->cq_ring.head;  in irdma_uk_clean_cq()
  1502  cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;  in irdma_uk_clean_cq()
  1504  cqe = cq->cq_base[cq_head].buf;  in irdma_uk_clean_cq()
  1518  cq_head = (cq_head + 1) % cq->cq_ring.size;  in irdma_uk_clean_cq()
  1519  if (!cq_head)  in irdma_uk_clean_cq()

verbs.h
   116  u16 cq_head;  member
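irdma_uk_clean_cq() walks the CQ starting from cq_ring.head, picking either the extended or the base CQE layout, and advances with a modulo on the ring size; the `if (!cq_head)` test fires exactly when the walk wraps back to slot 0 (the listing truncates what happens there, but toggling the expected valid bit at that point is the usual convention and is assumed below). A sketch of the walk skeleton with hypothetical types:

```c
#include <stdint.h>

/* Hypothetical CQ walk state after irdma_uk_clean_cq(): the index
 * wraps with a modulo on the ring size, and the expected valid bit is
 * assumed to flip whenever the walk crosses slot 0. */
struct cq_walk_sketch {
	uint32_t size;        /* number of CQEs in the ring */
	uint32_t head;        /* current walk position */
	uint8_t  polarity;    /* expected valid bit for this lap (assumed) */
};

static void cq_walk_advance_sketch(struct cq_walk_sketch *w)
{
	w->head = (w->head + 1) % w->size;
	if (!w->head)
		w->polarity ^= 1;    /* wrapped back to slot 0 */
}
```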
/openbmc/linux/drivers/crypto/hisilicon/

qm.c
   873  if (qp->qp_status.cq_head == qp->cq_depth - 1) {  in qm_cq_head_update()
   875  qp->qp_status.cq_head = 0;  in qm_cq_head_update()
   877  qp->qp_status.cq_head++;  in qm_cq_head_update()
   883  struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;  in qm_poll_req_cb()
   891  cqe = qp->cqe + qp->qp_status.cq_head;  in qm_poll_req_cb()
   893  qp->qp_status.cq_head, 0);  in qm_poll_req_cb()
   900  qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);  in qm_poll_req_cb()
  1101  qp_status->cq_head = 0;  in qm_init_qp_status()
  2439  struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;  in hisi_qm_is_q_updated()
  2446  cqe = qp->cqe + qp->qp_status.cq_head;  in hisi_qm_is_q_updated()
/openbmc/linux/include/linux/

hisi_acc_qm.h
   393  u16 cq_head;  member
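The HiSilicon QM code keeps cq_head in the per-queue status (declared in hisi_acc_qm.h), indexes the CQE array with it, wraps it one slot before cq_depth in qm_cq_head_update(), and reports progress with a CQ doorbell whose last argument re-arms the interrupt. A sketch of the update step with a hypothetical cut-down status struct; the phase-flag toggle sits on a line the listing skips and is assumed here:

```c
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical cut-down queue status, loosely after hisi_acc_qm.h. */
struct qp_status_sketch {
	uint16_t cq_head;
	bool     cqc_phase;    /* expected phase of fresh CQEs (assumed name) */
};

/* Head update in the style of qm_cq_head_update(): wrap at the last
 * slot and toggle the phase flag (toggle assumed, see lead-in). */
static void qm_cq_head_update_sketch(struct qp_status_sketch *qs,
				     uint16_t cq_depth)
{
	if (qs->cq_head == cq_depth - 1) {
		qs->cqc_phase = !qs->cqc_phase;
		qs->cq_head = 0;
	} else {
		qs->cq_head++;
	}
}
```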
/openbmc/linux/drivers/net/ethernet/cavium/thunder/

nicvf_main.c
   958  u64 cq_head;  in nicvf_poll() local
   971  cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,  in nicvf_poll()
   975  cq->cq_idx, cq_head);  in nicvf_poll()
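Unlike the software-tracked heads above, the ThunderX nicvf poll loop reads the CQ head back from a per-queue hardware register (NIC_QSET_CQ_0_7_HEAD) through nicvf_queue_reg_read(). A sketch of the per-queue register addressing such helpers typically implement; the stride and layout here are purely illustrative, not the real ThunderX register map:

```c
#include <stdint.h>

/* Purely illustrative per-queue register read: the register block is
 * replicated once per queue at a fixed stride, so the address is
 * base + offset + (qidx << stride_shift). The stride value is made up. */
#define QSET_STRIDE_SHIFT 18   /* hypothetical per-queue stride */

static uint64_t queue_reg_read_sketch(volatile uint8_t *base,
				      uint64_t offset, uint64_t qidx)
{
	volatile uint64_t *reg =
		(volatile uint64_t *)(base + offset +
				      (qidx << QSET_STRIDE_SHIFT));

	return *reg;
}
```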
/openbmc/qemu/block/

nvme.c
   105  uint32_t cq_head;  member
   275  q->cq.doorbell = &s->doorbells[idx * s->doorbell_scale].cq_head;  in nvme_create_queue_pair()
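The QEMU userspace NVMe driver maps the controller's doorbell area and indexes it as an array of (SQ tail, CQ head) pairs scaled by doorbell_scale, which is derived from the controller's doorbell stride. Per the NVMe spec, queue y's submission-tail doorbell sits at 0x1000 + (2y) * (4 << CAP.DSTRD) and its completion-head doorbell at 0x1000 + (2y + 1) * (4 << CAP.DSTRD); a sketch of that offset math:

```c
#include <stdint.h>

/* Standard NVMe doorbell layout: doorbells start at BAR0 + 0x1000 and
 * each queue owns an (SQ tail, CQ head) pair spaced by the doorbell
 * stride 4 << CAP.DSTRD. */
static uint64_t nvme_sq_tail_doorbell_off(uint32_t qid, uint32_t dstrd)
{
	uint64_t stride = 4ull << dstrd;

	return 0x1000 + (2ull * qid) * stride;
}

static uint64_t nvme_cq_head_doorbell_off(uint32_t qid, uint32_t dstrd)
{
	uint64_t stride = 4ull << dstrd;

	return 0x1000 + (2ull * qid + 1) * stride;
}
```

The doorbell_scale indexing in nvme_create_queue_pair() expresses that same stride in units of the mapped doorbell-pair struct.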