// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause

/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/*          Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */

#include "erdma.h"

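/* Re-arm the cmdq CQ: update the CQ doorbell record and ring the CQ doorbell
 * register with the ARM bit set; used when the cmdq runs in event mode.
 */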
static void arm_cmdq_cq(struct erdma_cmdq *cmdq)
{
	struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq);
	u64 db_data = FIELD_PREP(ERDMA_CQDB_CI_MASK, cmdq->cq.ci) |
		      FIELD_PREP(ERDMA_CQDB_ARM_MASK, 1) |
		      FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cmdq->cq.cmdsn) |
		      FIELD_PREP(ERDMA_CQDB_IDX_MASK, cmdq->cq.cmdsn);

	*cmdq->cq.db_record = db_data;
	writeq(db_data, dev->func_bar + ERDMA_CMDQ_CQDB_REG);

	atomic64_inc(&cmdq->cq.armed_num);
}

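/* Publish the current SQ producer index via the doorbell record and the
 * SQ doorbell register.
 */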
static void kick_cmdq_db(struct erdma_cmdq *cmdq)
{
	struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq);
	u64 db_data = FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi);

	*cmdq->sq.db_record = db_data;
	writeq(db_data, dev->func_bar + ERDMA_CMDQ_SQDB_REG);
}

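/* Reserve a free completion-wait context from the pool, or return
 * ERR_PTR(-ENOMEM) if all contexts are in use.
 */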
static struct erdma_comp_wait *get_comp_wait(struct erdma_cmdq *cmdq)
{
	int comp_idx;

	spin_lock(&cmdq->lock);
	comp_idx = find_first_zero_bit(cmdq->comp_wait_bitmap,
				       cmdq->max_outstandings);
	if (comp_idx == cmdq->max_outstandings) {
		spin_unlock(&cmdq->lock);
		return ERR_PTR(-ENOMEM);
	}

	__set_bit(comp_idx, cmdq->comp_wait_bitmap);
	spin_unlock(&cmdq->lock);

	return &cmdq->wait_pool[comp_idx];
}

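/* Return a completion-wait context to the pool and reset its status. */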
static void put_comp_wait(struct erdma_cmdq *cmdq,
			  struct erdma_comp_wait *comp_wait)
{
	int used;

	cmdq->wait_pool[comp_wait->ctx_id].cmd_status = ERDMA_CMD_STATUS_INIT;
	spin_lock(&cmdq->lock);
	used = __test_and_clear_bit(comp_wait->ctx_id, cmdq->comp_wait_bitmap);
	spin_unlock(&cmdq->lock);

	WARN_ON(!used);
}

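/* Allocate the completion-wait context pool and its allocation bitmap. */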
static int erdma_cmdq_wait_res_init(struct erdma_dev *dev,
				    struct erdma_cmdq *cmdq)
{
	int i;

	cmdq->wait_pool =
		devm_kcalloc(&dev->pdev->dev, cmdq->max_outstandings,
			     sizeof(struct erdma_comp_wait), GFP_KERNEL);
	if (!cmdq->wait_pool)
		return -ENOMEM;

	spin_lock_init(&cmdq->lock);
	cmdq->comp_wait_bitmap = devm_bitmap_zalloc(
		&dev->pdev->dev, cmdq->max_outstandings, GFP_KERNEL);
	if (!cmdq->comp_wait_bitmap)
		return -ENOMEM;

	for (i = 0; i < cmdq->max_outstandings; i++) {
		init_completion(&cmdq->wait_pool[i].wait_event);
		cmdq->wait_pool[i].ctx_id = i;
	}

	return 0;
}

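/* Allocate the cmdq SQ buffer (plus doorbell record space) and program its
 * base address, depth and doorbell record address into the device.
 */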
static int erdma_cmdq_sq_init(struct erdma_dev *dev)
{
	struct erdma_cmdq *cmdq = &dev->cmdq;
	struct erdma_cmdq_sq *sq = &cmdq->sq;
	u32 buf_size;

	sq->wqebb_cnt = SQEBB_COUNT(ERDMA_CMDQ_SQE_SIZE);
	sq->depth = cmdq->max_outstandings * sq->wqebb_cnt;

	buf_size = sq->depth << SQEBB_SHIFT;

	sq->qbuf =
		dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
				   &sq->qbuf_dma_addr, GFP_KERNEL);
	if (!sq->qbuf)
		return -ENOMEM;

	sq->db_record = (u64 *)(sq->qbuf + buf_size);

	spin_lock_init(&sq->lock);

	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_SQ_ADDR_H_REG,
			  upper_32_bits(sq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_SQ_ADDR_L_REG,
			  lower_32_bits(sq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_DEPTH_REG, sq->depth);
	erdma_reg_write64(dev, ERDMA_CMDQ_SQ_DB_HOST_ADDR_REG,
			  sq->qbuf_dma_addr + buf_size);

	return 0;
}

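/* Allocate the cmdq CQ buffer and program its base address and doorbell
 * record address into the device.
 */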
static int erdma_cmdq_cq_init(struct erdma_dev *dev)
{
	struct erdma_cmdq *cmdq = &dev->cmdq;
	struct erdma_cmdq_cq *cq = &cmdq->cq;
	u32 buf_size;

	cq->depth = cmdq->sq.depth;
	buf_size = cq->depth << CQE_SHIFT;

	cq->qbuf =
		dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
				   &cq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
	if (!cq->qbuf)
		return -ENOMEM;

	spin_lock_init(&cq->lock);

	cq->db_record = (u64 *)(cq->qbuf + buf_size);

	atomic64_set(&cq->armed_num, 0);

	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_CQ_ADDR_H_REG,
			  upper_32_bits(cq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_CQ_ADDR_L_REG,
			  lower_32_bits(cq->qbuf_dma_addr));
	erdma_reg_write64(dev, ERDMA_CMDQ_CQ_DB_HOST_ADDR_REG,
			  cq->qbuf_dma_addr + buf_size);

	return 0;
}

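/* Allocate the cmdq EQ buffer and program its base address, depth and
 * doorbell record address into the device.
 */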
static int erdma_cmdq_eq_init(struct erdma_dev *dev)
{
	struct erdma_cmdq *cmdq = &dev->cmdq;
	struct erdma_eq *eq = &cmdq->eq;
	u32 buf_size;

	eq->depth = cmdq->max_outstandings;
	buf_size = eq->depth << EQE_SHIFT;

	eq->qbuf =
		dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
				   &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
	if (!eq->qbuf)
		return -ENOMEM;

	spin_lock_init(&eq->lock);
	atomic64_set(&eq->event_num, 0);

	eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG;
	eq->db_record = (u64 *)(eq->qbuf + buf_size);

	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_H_REG,
			  upper_32_bits(eq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_L_REG,
			  lower_32_bits(eq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_DEPTH_REG, eq->depth);
	erdma_reg_write64(dev, ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG,
			  eq->qbuf_dma_addr + buf_size);

	return 0;
}

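/* Initialize the command queue: wait contexts, SQ, CQ and EQ. The cmdq
 * starts in polling mode and is switched to event mode later by
 * erdma_finish_cmdq_init().
 */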
int erdma_cmdq_init(struct erdma_dev *dev)
{
	struct erdma_cmdq *cmdq = &dev->cmdq;
	int err;

	cmdq->max_outstandings = ERDMA_CMDQ_MAX_OUTSTANDING;
	cmdq->use_event = false;

	sema_init(&cmdq->credits, cmdq->max_outstandings);

	err = erdma_cmdq_wait_res_init(dev, cmdq);
	if (err)
		return err;

	err = erdma_cmdq_sq_init(dev);
	if (err)
		return err;

	err = erdma_cmdq_cq_init(dev);
	if (err)
		goto err_destroy_sq;

	err = erdma_cmdq_eq_init(dev);
	if (err)
		goto err_destroy_cq;

	set_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);

	return 0;

err_destroy_cq:
	dma_free_coherent(&dev->pdev->dev,
			  (cmdq->cq.depth << CQE_SHIFT) +
				  ERDMA_EXTRA_BUFFER_SIZE,
			  cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);

err_destroy_sq:
	dma_free_coherent(&dev->pdev->dev,
			  (cmdq->sq.depth << SQEBB_SHIFT) +
				  ERDMA_EXTRA_BUFFER_SIZE,
			  cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);

	return err;
}

void erdma_finish_cmdq_init(struct erdma_dev *dev)
{
	/* After device initialization succeeds, switch the cmdq to event mode. */
	dev->cmdq.use_event = true;
	arm_cmdq_cq(&dev->cmdq);
}

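/* Tear down the command queue and free the EQ, SQ and CQ buffers. */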
void erdma_cmdq_destroy(struct erdma_dev *dev)
{
	struct erdma_cmdq *cmdq = &dev->cmdq;

	clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);

	dma_free_coherent(&dev->pdev->dev,
			  (cmdq->eq.depth << EQE_SHIFT) +
				  ERDMA_EXTRA_BUFFER_SIZE,
			  cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr);
	dma_free_coherent(&dev->pdev->dev,
			  (cmdq->sq.depth << SQEBB_SHIFT) +
				  ERDMA_EXTRA_BUFFER_SIZE,
			  cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
	dma_free_coherent(&dev->pdev->dev,
			  (cmdq->cq.depth << CQE_SHIFT) +
				  ERDMA_EXTRA_BUFFER_SIZE,
			  cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);
}

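/* Return the CQE at the current consumer index if its owner bit matches the
 * expected phase, or NULL if no new CQE is available.
 */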
static void *get_next_valid_cmdq_cqe(struct erdma_cmdq *cmdq)
{
	__be32 *cqe = get_queue_entry(cmdq->cq.qbuf, cmdq->cq.ci,
				      cmdq->cq.depth, CQE_SHIFT);
	u32 owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
			      be32_to_cpu(READ_ONCE(*cqe)));

	return owner ^ !!(cmdq->cq.ci & cmdq->cq.depth) ? cqe : NULL;
}

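/* Copy a request into the SQ, stamp the header with the new producer index,
 * the wait-context cookie and the WQEBB count, then ring the SQ doorbell.
 */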
static void push_cmdq_sqe(struct erdma_cmdq *cmdq, u64 *req, size_t req_len,
			  struct erdma_comp_wait *comp_wait)
{
	__le64 *wqe;
	u64 hdr = *req;

	comp_wait->cmd_status = ERDMA_CMD_STATUS_ISSUED;
	reinit_completion(&comp_wait->wait_event);
	comp_wait->sq_pi = cmdq->sq.pi;

	wqe = get_queue_entry(cmdq->sq.qbuf, cmdq->sq.pi, cmdq->sq.depth,
			      SQEBB_SHIFT);
	memcpy(wqe, req, req_len);

	cmdq->sq.pi += cmdq->sq.wqebb_cnt;
	hdr |= FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi) |
	       FIELD_PREP(ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK,
			  comp_wait->ctx_id) |
	       FIELD_PREP(ERDMA_CMD_HDR_WQEBB_CNT_MASK, cmdq->sq.wqebb_cnt - 1);
	*wqe = cpu_to_le64(hdr);

	kick_cmdq_db(cmdq);
}

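/* Consume one CQE: look up the originating wait context via the cookie in
 * the SQE, record the completion status and data, and wake the waiter in
 * event mode. Returns -EAGAIN if no CQE is ready.
 */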
static int erdma_poll_single_cmd_completion(struct erdma_cmdq *cmdq)
{
	struct erdma_comp_wait *comp_wait;
	u32 hdr0, sqe_idx;
	__be32 *cqe;
	u16 ctx_id;
	u64 *sqe;

	cqe = get_next_valid_cmdq_cqe(cmdq);
	if (!cqe)
		return -EAGAIN;

	cmdq->cq.ci++;

	dma_rmb();
	hdr0 = be32_to_cpu(*cqe);
	sqe_idx = be32_to_cpu(*(cqe + 1));

	sqe = get_queue_entry(cmdq->sq.qbuf, sqe_idx, cmdq->sq.depth,
			      SQEBB_SHIFT);
	ctx_id = FIELD_GET(ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK, *sqe);
	comp_wait = &cmdq->wait_pool[ctx_id];
	if (comp_wait->cmd_status != ERDMA_CMD_STATUS_ISSUED)
		return -EIO;

	comp_wait->cmd_status = ERDMA_CMD_STATUS_FINISHED;
	comp_wait->comp_status = FIELD_GET(ERDMA_CQE_HDR_SYNDROME_MASK, hdr0);
	cmdq->sq.ci += cmdq->sq.wqebb_cnt;
	/* Copy the 16-byte completion data following the CQE header to the waiter. */
	be32_to_cpu_array(comp_wait->comp_data, cqe + 2, 4);

	if (cmdq->use_event)
		complete(&comp_wait->wait_event);

	return 0;
}

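/* Poll and process pending cmdq completions under the CQ lock, re-arming
 * the CQ afterwards when running in event mode.
 */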
static void erdma_polling_cmd_completions(struct erdma_cmdq *cmdq)
{
	unsigned long flags;
	u16 comp_num;

	spin_lock_irqsave(&cmdq->cq.lock, flags);

	/* At most max_outstandings completions can be pending at one time,
	 * so bound the polling loop accordingly.
	 */
	for (comp_num = 0; comp_num < cmdq->max_outstandings; comp_num++)
		if (erdma_poll_single_cmd_completion(cmdq))
			break;

	if (comp_num && cmdq->use_event)
		arm_cmdq_cq(cmdq);

	spin_unlock_irqrestore(&cmdq->cq.lock, flags);
}

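/* EQ handler for the cmdq in event mode: consume pending EQEs, advance the
 * command sequence number, process completions and notify the EQ.
 */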
void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq)
{
	int got_event = 0;

	if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state) ||
	    !cmdq->use_event)
		return;

	while (get_next_valid_eqe(&cmdq->eq)) {
		cmdq->eq.ci++;
		got_event++;
	}

	if (got_event) {
		cmdq->cq.cmdsn++;
		erdma_polling_cmd_completions(cmdq);
	}

	notify_eq(&cmdq->eq);
}

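/* Busy-wait (with 20ms sleeps) until the command leaves the ISSUED state or
 * the timeout expires; used when the cmdq runs in polling mode.
 */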
static int erdma_poll_cmd_completion(struct erdma_comp_wait *comp_ctx,
				     struct erdma_cmdq *cmdq, u32 timeout)
{
	unsigned long comp_timeout = jiffies + msecs_to_jiffies(timeout);

	while (1) {
		erdma_polling_cmd_completions(cmdq);
		if (comp_ctx->cmd_status != ERDMA_CMD_STATUS_ISSUED)
			break;

		if (time_is_before_jiffies(comp_timeout))
			return -ETIME;

		msleep(20);
	}

	return 0;
}

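/* Wait for the completion event of a command; mark the context as timed out
 * and return -ETIME if it does not finish in time. Used in event mode.
 */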
static int erdma_wait_cmd_completion(struct erdma_comp_wait *comp_ctx,
				     struct erdma_cmdq *cmdq, u32 timeout)
{
	unsigned long flags = 0;

	wait_for_completion_timeout(&comp_ctx->wait_event,
				    msecs_to_jiffies(timeout));

	if (unlikely(comp_ctx->cmd_status != ERDMA_CMD_STATUS_FINISHED)) {
		spin_lock_irqsave(&cmdq->cq.lock, flags);
		comp_ctx->cmd_status = ERDMA_CMD_STATUS_TIMEOUT;
		spin_unlock_irqrestore(&cmdq->cq.lock, flags);
		return -ETIME;
	}

	return 0;
}

void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op)
{
	*hdr = FIELD_PREP(ERDMA_CMD_HDR_SUB_MOD_MASK, mod) |
	       FIELD_PREP(ERDMA_CMD_HDR_OPCODE_MASK, op);
}

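/* Post a command to the cmdq and wait for its completion, in either event
 * or polling mode. On success, optionally return the first 16 bytes of
 * completion data through resp0/resp1.
 */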
int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
			u64 *resp0, u64 *resp1)
{
	struct erdma_comp_wait *comp_wait;
	int ret;

	if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state))
		return -ENODEV;

	down(&cmdq->credits);

	comp_wait = get_comp_wait(cmdq);
	if (IS_ERR(comp_wait)) {
		clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
		set_bit(ERDMA_CMDQ_STATE_CTX_ERR_BIT, &cmdq->state);
		up(&cmdq->credits);
		return PTR_ERR(comp_wait);
	}

	spin_lock(&cmdq->sq.lock);
	push_cmdq_sqe(cmdq, req, req_size, comp_wait);
	spin_unlock(&cmdq->sq.lock);

	if (cmdq->use_event)
		ret = erdma_wait_cmd_completion(comp_wait, cmdq,
						ERDMA_CMDQ_TIMEOUT_MS);
	else
		ret = erdma_poll_cmd_completion(comp_wait, cmdq,
						ERDMA_CMDQ_TIMEOUT_MS);

	if (ret) {
		set_bit(ERDMA_CMDQ_STATE_TIMEOUT_BIT, &cmdq->state);
		clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
		goto out;
	}

	if (comp_wait->comp_status)
		ret = -EIO;

	if (resp0 && resp1) {
		*resp0 = *((u64 *)&comp_wait->comp_data[0]);
		*resp1 = *((u64 *)&comp_wait->comp_data[2]);
	}
	put_comp_wait(cmdq, comp_wait);

out:
	up(&cmdq->credits);

	return ret;
}