/openbmc/linux/drivers/infiniband/hw/erdma/

erdma_cmdq.c
     90  struct erdma_cmdq *cmdq = &dev->cmdq;  in erdma_cmdq_sq_init() local
    122  struct erdma_cmdq *cmdq = &dev->cmdq;  in erdma_cmdq_cq_init() local
    153  struct erdma_cmdq *cmdq = &dev->cmdq;  in erdma_cmdq_eq_init() local
    185  struct erdma_cmdq *cmdq = &dev->cmdq;  in erdma_cmdq_init() local
    217  cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);  in erdma_cmdq_init()
    223  cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);  in erdma_cmdq_init()
    237  struct erdma_cmdq *cmdq = &dev->cmdq;  in erdma_cmdq_destroy() local
    244  cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr);  in erdma_cmdq_destroy()
    275  wqe = get_queue_entry(cmdq->sq.qbuf, cmdq->sq.pi, cmdq->sq.depth,  in push_cmdq_sqe()
    279  cmdq->sq.pi += cmdq->sq.wqebb_cnt;  in push_cmdq_sqe()
    [all …]
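
The push_cmdq_sqe() hits show the producer side of the command SQ: the slot address comes from a free-running producer index mapped onto the ring, and the index then advances by the command's size in basic blocks (wqebbs). A minimal sketch of that indexing scheme, assuming a power-of-two depth — the struct layout and helpers below are illustrative, not erdma's actual definitions:

    #include <stdint.h>

    struct cmd_sq {
        void     *qbuf;       /* queue buffer */
        uint32_t  pi;         /* free-running producer index, in wqebbs */
        uint32_t  depth;      /* ring size in wqebbs, power of two */
        uint32_t  wqebb_size; /* bytes per basic block */
    };

    /* Map a free-running index onto the ring by masking with depth - 1. */
    static void *get_queue_entry(void *qbuf, uint32_t idx, uint32_t depth,
                                 uint32_t entry_size)
    {
        return (char *)qbuf + (uint64_t)(idx & (depth - 1)) * entry_size;
    }

    /* Reserve room for one command spanning wqebb_cnt basic blocks. */
    static void *push_sqe(struct cmd_sq *sq, uint32_t wqebb_cnt)
    {
        void *wqe = get_queue_entry(sq->qbuf, sq->pi, sq->depth,
                                    sq->wqebb_size);

        sq->pi += wqebb_cnt;  /* wraps naturally through the mask */
        return wqe;
    }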

erdma.h
    201  struct erdma_cmdq cmdq;  member
    264  int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
    266  void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq);

/openbmc/linux/drivers/gpu/drm/nouveau/nvkm/falcon/

cmdq.c
     28  u32 head = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->head_reg);  in nvkm_falcon_cmdq_has_room()
     29  u32 tail = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->tail_reg);  in nvkm_falcon_cmdq_has_room()
     35  free = cmdq->offset + cmdq->size - head;  in nvkm_falcon_cmdq_has_room()
     67  cmdq->position = cmdq->offset;  in nvkm_falcon_cmdq_rewind()
     84  cmdq->position = nvkm_falcon_rd32(falcon, cmdq->head_reg);  in nvkm_falcon_cmdq_open()
     95  nvkm_falcon_wr32(cmdq->qmgr->falcon, cmdq->head_reg, cmdq->position);  in nvkm_falcon_cmdq_close()
    180  cmdq->head_reg = func->cmdq.head + index * func->cmdq.stride;  in nvkm_falcon_cmdq_init()
    181  cmdq->tail_reg = func->cmdq.tail + index * func->cmdq.stride;  in nvkm_falcon_cmdq_init()
    187  index, cmdq->offset, cmdq->size);  in nvkm_falcon_cmdq_init()
    194  if (cmdq) {  in nvkm_falcon_cmdq_del()
    [all …]
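
nvkm_falcon_cmdq_has_room() reads the head and tail pointers from per-queue falcon registers (whose addresses nvkm_falcon_cmdq_init() computes as base + index * stride) and checks for contiguous free space, rewinding the write position to the start of the window when the run at the end is too small. A sketch of that check with illustrative names, assuming the queue occupies [offset, offset + size):

    #include <stdint.h>
    #include <stdbool.h>

    struct fal_cmdq {
        uint32_t offset;  /* start of the queue window */
        uint32_t size;    /* window size in bytes */
    };

    /* 'head' is the host write position, 'tail' the falcon read position. */
    static bool cmdq_has_room(const struct fal_cmdq *q, uint32_t head,
                              uint32_t tail, uint32_t need, bool *rewind)
    {
        uint32_t free;

        *rewind = false;
        if (head >= tail) {
            free = q->offset + q->size - head;   /* run up to the end */
            if (free >= need)
                return true;
            /* not enough at the end: wrap and retry at the front */
            *rewind = true;
            head = q->offset;
        }
        /* head behind tail: must stay strictly behind it */
        return tail - head > need;
    }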

/openbmc/linux/drivers/mailbox/

mtk-cmdq-mailbox.c
     67  struct cmdq *cmdq;  member
    107  struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);  in cmdq_get_shift_pa() local
    138  static void cmdq_init(struct cmdq *cmdq)  in cmdq_init() argument
    227  struct cmdq *cmdq = task->cmdq;  in cmdq_task_handle_error() local
    294  struct cmdq *cmdq = dev;  in cmdq_irq_handler() local
    315  struct cmdq *cmdq = dev_get_drvdata(dev);  in cmdq_suspend() local
    343  struct cmdq *cmdq = dev_get_drvdata(dev);  in cmdq_resume() local
    356  struct cmdq *cmdq = platform_get_drvdata(pdev);  in cmdq_remove() local
    380  task->cmdq = cmdq;  in cmdq_mbox_send_data()
    545  struct cmdq *cmdq;  in cmdq_probe() local
    [all …]
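
The cmdq_get_shift_pa() hit recovers the driver context from a generic mailbox pointer with container_of(): given a pointer to an embedded member, step back to the enclosing structure. A simplified form of the kernel macro (the real one adds type checking) with an illustrative use:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct mbox_controller { int dummy; };     /* stand-in type */
    struct cmdq {
        int irq;
        struct mbox_controller mbox;           /* embedded member */
    };

    /* Recover the enclosing struct cmdq from a pointer to its mbox field. */
    static struct cmdq *cmdq_from_mbox(struct mbox_controller *mbox)
    {
        return container_of(mbox, struct cmdq, mbox);
    }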

/openbmc/linux/drivers/crypto/cavium/nitrox/

nitrox_lib.c
     29  cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;  in nitrox_cmdq_init()
     30  cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize,  in nitrox_cmdq_init()
     36  cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes);  in nitrox_cmdq_init()
     37  cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma);  in nitrox_cmdq_init()
     64  if (!cmdq)  in nitrox_cmdq_cleanup()
     74  cmdq->unalign_base, cmdq->unalign_dma);  in nitrox_cmdq_cleanup()
     82  cmdq->dma = 0;  in nitrox_cmdq_cleanup()
     83  cmdq->qsize = 0;  in nitrox_cmdq_cleanup()
    106  cmdq = kzalloc_node(sizeof(*cmdq), GFP_KERNEL, ndev->node);  in nitrox_alloc_aqm_queues()
    107  if (!cmdq) {  in nitrox_alloc_aqm_queues()
    [all …]
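
nitrox_cmdq_init() over-allocates the coherent buffer by the alignment, rounds the returned DMA address up, and offsets the CPU pointer by the same delta so both views stay in sync. A kernel-style sketch of that arithmetic — dma_alloc_coherent() and ALIGN() are the real primitives, while the struct and function names are illustrative:

    #include <linux/dma-mapping.h>

    struct cmdq_buf {
        void       *unalign_base;  /* as returned by dma_alloc_coherent() */
        dma_addr_t  unalign_dma;
        void       *base;          /* aligned CPU address */
        dma_addr_t  dma;           /* aligned DMA address */
        size_t      qsize;
    };

    static int cmdq_buf_alloc(struct device *dev, struct cmdq_buf *q,
                              size_t qlen, size_t instr_size, size_t align)
    {
        /* Over-allocate so the aligned region still holds qlen entries. */
        q->qsize = qlen * instr_size + align;
        q->unalign_base = dma_alloc_coherent(dev, q->qsize,
                                             &q->unalign_dma, GFP_KERNEL);
        if (!q->unalign_base)
            return -ENOMEM;

        q->dma  = ALIGN(q->unalign_dma, align);  /* round DMA address up */
        q->base = q->unalign_base + (q->dma - q->unalign_dma);
        return 0;
        /* Cleanup mirrors this, as the line-74 hit shows:
         * dma_free_coherent(dev, q->qsize, q->unalign_base,
         *                   q->unalign_dma); */
    }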

nitrox_reqmgr.c
    296  idx = cmdq->write_idx;  in post_se_instr()
    298  ent = cmdq->base + (idx * cmdq->instr_size);  in post_se_instr()
    302  response_list_add(sr, cmdq);  in post_se_instr()
    342  post_se_instr(sr, cmdq);  in post_backlog_cmds()
    351  struct nitrox_cmdq *cmdq = sr->cmdq;  in nitrox_enqueue_request() local
    355  post_backlog_cmds(cmdq);  in nitrox_enqueue_request()
    364  backlog_list_add(sr, cmdq);  in nitrox_enqueue_request()
    367  post_se_instr(sr, cmdq);  in nitrox_enqueue_request()
    507  struct nitrox_cmdq *cmdq;  in backlog_qflush_work() local
    510  post_backlog_cmds(cmdq);  in backlog_qflush_work()
    [all …]

nitrox_isr.c
     32  struct nitrox_cmdq *cmdq = qvec->cmdq;  in nps_pkt_slc_isr() local
     34  slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);  in nps_pkt_slc_isr()
    337  qvec->cmdq = &ndev->pkt_inq[qvec->ring];  in nitrox_register_interrupts()

/openbmc/linux/drivers/net/ethernet/brocade/bna/

bfa_msgq.c
     56  cmdq->flags = 0;  in cmdq_sm_stopped_entry()
     57  cmdq->token = 0;  in cmdq_sm_stopped_entry()
     58  cmdq->offset = 0;  in cmdq_sm_stopped_entry()
    195  if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb,  in bfa_msgq_cmdq_dbell()
    218  BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth);  in __cmd_copy()
    268  cmdq->token = 0;  in bfa_msgq_cmdq_copy_req()
    290  cmdq->token++;  in bfa_msgq_cmdq_copy_rsp()
    294  if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb,  in bfa_msgq_cmdq_copy_rsp()
    305  cmdq->msgq = msgq;  in bfa_msgq_cmdq_attach()
    515  bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa);  in bfa_msgq_init()
    [all …]
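
The __cmd_copy() hit advances the producer index modulo the queue depth via BFA_MSGQ_INDX_ADD. The generic shape of that macro, with an illustrative caller (not the bna definitions):

    #include <stdint.h>

    /* Advance a ring index by 'val' entries, wrapping at 'depth'. */
    #define MSGQ_INDX_ADD(idx, val, depth) \
        ((idx) = ((idx) + (val)) % (depth))

    struct cmd_ring {
        uint32_t producer_index;
        uint32_t depth;
    };

    static void cmd_copy_one(struct cmd_ring *q)
    {
        /* ... write one entry at q->producer_index ... */
        MSGQ_INDX_ADD(q->producer_index, 1, q->depth);
    }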

/openbmc/linux/drivers/net/ethernet/huawei/hinic/

hinic_hw_cmdq.c
     78  #define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \  argument
    364  cmdq->wrapped = !cmdq->wrapped;  in cmdq_sync_cmd_direct_resp()
    443  cmdq->wrapped = !cmdq->wrapped;  in cmdq_set_arm_bit()
    510  struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC];  in hinic_set_arm_bit() local
    643  struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type];  in cmdq_ceq_handler() local
    743  cmdq->wq = wq;  in init_cmdq()
    745  cmdq->wrapped = 1;  in init_cmdq()
    749  cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth));  in init_cmdq()
    750  if (!cmdq->done)  in init_cmdq()
    753  cmdq->errcode = vzalloc(array_size(sizeof(*cmdq->errcode),  in init_cmdq()
    [all …]
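
The cmdq->wrapped flips above implement a phase bit: init_cmdq() starts with wrapped = 1, each WQE is stamped with the current phase, and the producer flips the flag every time its index wraps, so the consumer can tell freshly written entries from stale ones left over from the previous lap. A generic sketch of the idea (not HiNIC's exact WQE layout):

    #include <stdint.h>
    #include <stdbool.h>

    struct wqe { uint32_t ctrl; };   /* bit 0 carries the phase */
    struct q {
        struct wqe *ring;
        uint32_t    pi, depth;
        bool        wrapped;         /* producer's current phase */
    };

    static struct wqe *produce(struct q *q)
    {
        struct wqe *w = &q->ring[q->pi];

        w->ctrl = q->wrapped ? 1 : 0;    /* stamp current phase */
        if (++q->pi == q->depth) {
            q->pi = 0;
            q->wrapped = !q->wrapped;    /* flip phase on wrap */
        }
        return w;
    }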

hinic_hw_io.c
    533  enum hinic_cmdq_type cmdq, type;  in hinic_io_init() local
    565  for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) {  in hinic_io_init()
    573  func_to_io->cmdq_db_area[cmdq] = db_area;  in hinic_io_init()
    600  for (type = HINIC_CMDQ_SYNC; type < cmdq; type++)  in hinic_io_init()
    619  enum hinic_cmdq_type cmdq;  in hinic_io_free() local
    628  for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++)  in hinic_io_free()
    629  return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]);  in hinic_io_free()

/openbmc/linux/drivers/accel/ivpu/

ivpu_job.c
     42  cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);  in ivpu_cmdq_alloc()
     43  if (!cmdq)  in ivpu_cmdq_alloc()
     54  cmdq->jobq = (struct vpu_job_queue *)cmdq->mem->kvaddr;  in ivpu_cmdq_alloc()
     70  if (!cmdq)  in ivpu_cmdq_free()
     80  struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];  in ivpu_cmdq_acquire() local
     87  if (!cmdq)  in ivpu_cmdq_acquire()
     89  file_priv->cmdq[engine] = cmdq;  in ivpu_cmdq_acquire()
     96  cmdq->mem->vpu_addr, cmdq->mem->base.size);  in ivpu_cmdq_acquire()
    107  struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];  in ivpu_cmdq_release_locked() local
    111  if (cmdq) {  in ivpu_cmdq_release_locked()
    [all …]

ivpu_mmu.c
    315  struct ivpu_mmu_queue *q = &mmu->cmdq;  in ivpu_mmu_cmdq_alloc()
    408  struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;  in ivpu_mmu_cmdq_wait_for_cons() local
    410  return REGV_POLL(VPU_37XX_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),  in ivpu_mmu_cmdq_wait_for_cons()
    416  struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;  in ivpu_mmu_cmdq_cmd_write()
    436  struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;  in ivpu_mmu_cmdq_sync()
    488  memset(mmu->cmdq.base, 0, IVPU_MMU_CMDQ_SIZE);  in ivpu_mmu_reset()
    489  clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE);  in ivpu_mmu_reset()
    490  mmu->cmdq.prod = 0;  in ivpu_mmu_reset()
    491  mmu->cmdq.cons = 0;  in ivpu_mmu_reset()
    512  REGV_WR64(VPU_37XX_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q);  in ivpu_mmu_reset()
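
ivpu_mmu_cmdq_wait_for_cons() polls the MMU's CMDQ consumer register until it catches the driver's producer index. A self-contained sketch of that wait loop — read_cons() and udelay() stand in for the register accessor and delay primitive that the driver's REGV_POLL() wraps:

    #include <stdint.h>
    #include <errno.h>

    /* Poll the device's consumer index until it equals 'prod',
     * giving up after roughly timeout_us microseconds. */
    static int cmdq_wait_for_cons(uint32_t prod,
                                  uint32_t (*read_cons)(void),
                                  void (*udelay)(unsigned int),
                                  unsigned int timeout_us)
    {
        while (timeout_us--) {
            if (read_cons() == prod)
                return 0;          /* device consumed everything */
            udelay(1);
        }
        return -ETIMEDOUT;
    }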

/openbmc/linux/drivers/infiniband/hw/bnxt_re/

qplib_rcfw.c
    118  cmdq = &rcfw->cmdq;  in bnxt_re_is_fw_stalled()
    151  cmdq = &rcfw->cmdq;  in __wait_for_resp()
    194  struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;  in __block_for_resp() local
    238  struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;  in __send_message_no_waiter() local
    297  cmdq = &rcfw->cmdq;  in __send_message()
    390  struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;  in __poll_for_resp() local
    424  cmdq = &rcfw->cmdq;  in __send_message_basic_sanity()
    939  cmdq = &rcfw->cmdq;  in bnxt_qplib_alloc_rcfw_channel()
   1020  cmdq = &rcfw->cmdq;  in bnxt_qplib_disable_rcfw_channel()
   1147  cmdq = &rcfw->cmdq;  in bnxt_qplib_start_rcfw()
    [all …]

/openbmc/linux/drivers/net/ethernet/hisilicon/hns3/hns3_common/

hclge_comm_cmd.c
    538  spin_lock_bh(&cmdq->csq.lock);  in hclge_comm_cmd_uninit()
    539  spin_lock(&cmdq->crq.lock);  in hclge_comm_cmd_uninit()
    541  spin_unlock(&cmdq->crq.lock);  in hclge_comm_cmd_uninit()
    557  cmdq->csq.pdev = pdev;  in hclge_comm_cmd_queue_init()
    558  cmdq->crq.pdev = pdev;  in hclge_comm_cmd_queue_init()
    594  spin_lock(&cmdq->crq.lock);  in hclge_comm_cmd_init()
    596  cmdq->csq.next_to_clean = 0;  in hclge_comm_cmd_init()
    597  cmdq->csq.next_to_use = 0;  in hclge_comm_cmd_init()
    598  cmdq->crq.next_to_clean = 0;  in hclge_comm_cmd_init()
    599  cmdq->crq.next_to_use = 0;  in hclge_comm_cmd_init()
    [all …]

/openbmc/linux/drivers/gpu/drm/nouveau/nvkm/engine/sec2/

base.c
     45  struct nvkm_falcon_cmdq *cmdq = sec2->cmdq;  in nvkm_sec2_fini() local
     56  ret = nvkm_falcon_cmdq_send(cmdq, &cmd, nvkm_sec2_finimsg, sec2,  in nvkm_sec2_fini()
     68  nvkm_falcon_cmdq_fini(cmdq);  in nvkm_sec2_fini()
    119  nvkm_falcon_cmdq_del(&sec2->cmdq);  in nvkm_sec2_dtor()
    159  (ret = nvkm_falcon_cmdq_new(sec2->qmgr, "cmdq", &sec2->cmdq)) ||  in nvkm_sec2_new_()

ga102.c
     49  nvkm_falcon_cmdq_init(sec2->cmdq, msg.queue_info[i].index,  in ga102_sec2_initmsg()
    103  return nvkm_falcon_cmdq_send(sec2->cmdq, &cmd.cmd.hdr,  in ga102_sec2_acr_bootstrap_falcon()
    136  .cmdq = { 0xc00, 0xc04, 8 },

gp102.c
     71  return nvkm_falcon_cmdq_send(sec2->cmdq, &cmd.cmd.hdr,  in gp102_sec2_acr_bootstrap_falcon()
    142  nvkm_falcon_cmdq_init(sec2->cmdq,  in gp102_sec2_initmsg()
    214  .cmdq = { 0xa00, 0xa04, 8 },

/openbmc/linux/drivers/iommu/arm/arm-smmu-v3/

arm-smmu-v3.c
    350  return &smmu->cmdq;  in arm_smmu_get_cmdq()
    469  if (atomic_read(&cmdq->lock) == 1)  in arm_smmu_cmdq_shared_tryunlock()
    472  arm_smmu_cmdq_shared_unlock(cmdq);  in arm_smmu_cmdq_shared_tryunlock()
    552  ptr = &cmdq->valid_map[swidx];  in __arm_smmu_cmdq_poll_set_valid_map()
    606  WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg));  in arm_smmu_cmdq_poll_until_not_full()
    800  arm_smmu_cmdq_shared_lock(cmdq);  in arm_smmu_cmdq_issue_cmdlist()
    814  &cmdq->q.llq.atomic.prod);  in arm_smmu_cmdq_issue_cmdlist()
   2935  struct arm_smmu_cmdq *cmdq = &smmu->cmdq;  in arm_smmu_cmdq_init() local
   2938  atomic_set(&cmdq->owner_prod, 0);  in arm_smmu_cmdq_init()
   2939  atomic_set(&cmdq->lock, 0);  in arm_smmu_cmdq_init()
    [all …]
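
The shared_lock/shared_tryunlock hits belong to the SMMUv3 driver's lock-free command queue: many CPUs insert commands concurrently under a shared hold on cmdq->lock, while draining requires exclusive ownership. A generic C11-atomics illustration of that shared/exclusive counter idea — a sketch of the concept only, not the driver's code, which adds relaxed memory orderings and a tryunlock fast path:

    #include <limits.h>
    #include <stdatomic.h>

    /* lock > 0: that many shared holders; lock < 0: exclusive owner. */
    static atomic_int qlock;

    static void shared_lock(void)
    {
        int v;
        for (;;) {
            v = atomic_load(&qlock);
            if (v >= 0 && atomic_compare_exchange_weak(&qlock, &v, v + 1))
                return;              /* joined as a shared holder */
        }
    }

    static void shared_unlock(void)
    {
        atomic_fetch_sub(&qlock, 1);
    }

    static void exclusive_lock(void)
    {
        int expected = 0;
        /* Wait until there are no holders, then swing negative. */
        while (!atomic_compare_exchange_weak(&qlock, &expected, INT_MIN))
            expected = 0;
    }

    static void exclusive_unlock(void)
    {
        atomic_store(&qlock, 0);
    }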

/openbmc/qemu/hw/arm/

smmuv3.c
    300  s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);  in smmuv3_init_regs()
    301  s->cmdq.prod = 0;  in smmuv3_init_regs()
    302  s->cmdq.cons = 0;  in smmuv3_init_regs()
   1298  SMMUQueue *q = &s->cmdq;  in smmuv3_cmdq_consume()
   1536  s->cmdq.base = data;  in smmu_writell()
   1537  s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);  in smmu_writell()
   1623  s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);  in smmu_writel()
   1624  s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);  in smmu_writel()
   1630  s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);  in smmu_writel()
   1633  s->cmdq.prod = data;  in smmu_writel()
    [all …]
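
The smmu_writel()/smmu_writell() hits show QEMU rebuilding SMMU_CMDQ_BASE from 32- or 64-bit guest writes and re-deriving log2size from bits [4:0] each time. deposit64() and extract64() are real helpers from QEMU's qemu/bitops.h; simplified versions are inlined below so the sketch stands alone:

    #include <stdint.h>

    static uint64_t extract64(uint64_t value, int start, int length)
    {
        return (value >> start) & (~0ULL >> (64 - length));
    }

    static uint64_t deposit64(uint64_t value, int start, int length,
                              uint64_t field)
    {
        uint64_t mask = (~0ULL >> (64 - length)) << start;

        return (value & ~mask) | ((field << start) & mask);
    }

    struct cmdq_reg { uint64_t base; unsigned log2size; };

    /* A 32-bit guest write to the low half of CMDQ_BASE must both
     * update the register and refresh log2size (bits [4:0]), exactly
     * as the smmu_writel() hits above do. */
    static void cmdq_base_writel_lo(struct cmdq_reg *q, uint32_t data)
    {
        q->base = deposit64(q->base, 0, 32, data);
        q->log2size = extract64(q->base, 0, 5);
    }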

/openbmc/linux/drivers/atm/

fore200e.c
    557  struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];  in fore200e_pca_prom_read()
    562  FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);  in fore200e_pca_prom_read()
   1226  struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];  in fore200e_activate_vcin()
   1670  struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];  in fore200e_getstats()
   1716  struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
   1756  struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];  in fore200e_set_oc3()
   2240  &cmdq->status,  in fore200e_init_cmd_queue()
   2253  cmdq->host_entry[ i ].status =  in fore200e_init_cmd_queue()
   2255  cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];  in fore200e_init_cmd_queue()
   2257  *cmdq->host_entry[ i ].status = STATUS_FREE;  in fore200e_init_cmd_queue()
    [all …]

/openbmc/linux/drivers/media/platform/mediatek/mdp3/

Makefile
      4  mtk-mdp3-y += mtk-mdp3-comp.o mtk-mdp3-cmdq.o

/openbmc/linux/drivers/soc/mediatek/

Makefile
      2  obj-$(CONFIG_MTK_CMDQ) += mtk-cmdq-helper.o

/openbmc/linux/Documentation/devicetree/bindings/iommu/

arm,smmu-v3.yaml
     45  - cmdq-sync # CMD_SYNC complete
     91  interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";

/openbmc/linux/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/

gp102.c
     38  .cmdq = { 0x4a0, 0x4b0, 4 },

/openbmc/linux/drivers/gpu/drm/nouveau/include/nvkm/engine/

sec2.h
     17  struct nvkm_falcon_cmdq *cmdq;  member