Lines matching (full-word): "sub", "-", "mailboxes"
1 // SPDX-License-Identifier: GPL-2.0
6 #include <linux/dma-mapping.h>
49 #define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1)
59 #define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1)
61 #define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1)
67 #define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
70 #define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
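The three *_PHASE macros above extract the phase bit that hardware toggles on every lap around its ring, so software can tell fresh entries from stale ones without a separate producer index. A minimal, self-contained sketch of the consumption pattern they enable (hypothetical types, not the driver's own structures):

struct ring_entry {
	unsigned int phase;	/* flipped by hardware on each lap */
	/* ... payload ... */
};

/* Consume entries while their phase matches the expected phase; toggle the
 * expectation when the head wraps, so last lap's entries are never mistaken
 * for new completions. */
void ring_consume(struct ring_entry *ring, unsigned int depth,
		  unsigned int *head, unsigned int *expect_phase)
{
	while (ring[*head].phase == *expect_phase) {
		/* handle ring[*head] here */
		if (*head == depth - 1) {
			*expect_phase = !*expect_phase;
			*head = 0;
		} else {
			(*head)++;
		}
	}
}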
240 ((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
254 ((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
257 (qc)->head = 0; \
258 (qc)->tail = 0; \
259 (qc)->base_l = cpu_to_le32(lower_32_bits(base)); \
260 (qc)->base_h = cpu_to_le32(upper_32_bits(base)); \
261 (qc)->dw3 = 0; \
262 (qc)->w8 = 0; \
263 (qc)->rsvd0 = 0; \
264 (qc)->pasid = cpu_to_le16(pasid); \
265 (qc)->w11 = 0; \
266 (qc)->rsvd1 = 0; \
370 * struct qm_hw_err - Structure describing the device errors
456 enum qm_state curr = atomic_read(&qm->status.flags); in qm_avail_state()
476 dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n", in qm_avail_state()
480 dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n", in qm_avail_state()
489 enum qm_state qm_curr = atomic_read(&qm->status.flags); in qm_qp_avail_state()
494 qp_curr = atomic_read(&qp->qp_status.flags); in qm_qp_avail_state()
522 dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n", in qm_qp_avail_state()
526 dev_warn(&qm->pdev->dev, in qm_qp_avail_state()
535 return readl(qm->io_base + QM_ABNORMAL_INT_STATUS); in qm_get_hw_error_status()
540 return qm->err_ini->get_dev_hw_err_status(qm); in qm_get_dev_err_status()
548 if (qm->fun_type == QM_HW_VF) in qm_check_dev_error()
551 val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask; in qm_check_dev_error()
552 dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask; in qm_check_dev_error()
562 while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { in qm_wait_reset_finish()
565 return -EBUSY; in qm_wait_reset_finish()
573 struct pci_dev *pdev = qm->pdev; in qm_reset_prepare_ready()
580 if (qm->ver < QM_HW_V3) in qm_reset_prepare_ready()
588 struct pci_dev *pdev = qm->pdev; in qm_reset_bit_clear()
591 if (qm->ver < QM_HW_V3) in qm_reset_bit_clear()
592 clear_bit(QM_RESETTING, &pf_qm->misc_ctl); in qm_reset_bit_clear()
594 clear_bit(QM_RESETTING, &qm->misc_ctl); in qm_reset_bit_clear()
600 mailbox->w0 = cpu_to_le16((cmd) | in qm_mb_pre_init()
603 mailbox->queue_num = cpu_to_le16(queue); in qm_mb_pre_init()
604 mailbox->base_l = cpu_to_le32(lower_32_bits(base)); in qm_mb_pre_init()
605 mailbox->base_h = cpu_to_le32(upper_32_bits(base)); in qm_mb_pre_init()
606 mailbox->rsvd = 0; in qm_mb_pre_init()
609 /* return 0 mailbox ready, -ETIMEDOUT hardware timeout */
614 return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE, in hisi_qm_wait_mb_ready()
623 void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE; in qm_mb_write()
653 dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n"); in qm_mb_nolock()
654 ret = -EBUSY; in qm_mb_nolock()
661 dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n"); in qm_mb_nolock()
662 ret = -ETIMEDOUT; in qm_mb_nolock()
666 val = readl(qm->io_base + QM_MB_CMD_SEND_BASE); in qm_mb_nolock()
668 dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n"); in qm_mb_nolock()
669 ret = -EIO; in qm_mb_nolock()
676 atomic64_inc(&qm->debug.dfx.mb_err_cnt); in qm_mb_nolock()
686 dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n", in hisi_qm_mb()
691 mutex_lock(&qm->mailbox_lock); in hisi_qm_mb()
693 mutex_unlock(&qm->mailbox_lock); in hisi_qm_mb()
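Taken together, the mailbox fragments above (qm_mb_pre_init(), hisi_qm_wait_mb_ready(), qm_mb_write(), qm_mb_nolock(), hisi_qm_mb()) implement a small request/acknowledge handshake over one 16-byte MMIO window, serialized by mailbox_lock. A condensed userspace model of that handshake, with the busy-bit position and field layout assumed rather than taken from the driver header:

#include <stdint.h>
#include <string.h>

#define MB_BUSY		(1u << 13)	/* busy flag position assumed */

struct mailbox {			/* 16-byte command frame */
	uint16_t w0;			/* cmd | op | busy */
	uint16_t queue_num;
	uint32_t base_l, base_h;	/* DMA address of the data buffer */
	uint32_t rsvd;
};

int mb_send(volatile struct mailbox *win, uint16_t cmd,
	    uint64_t dma, uint16_t queue)
{
	struct mailbox mb = {
		.w0 = cmd | MB_BUSY,
		.queue_num = queue,
		.base_l = (uint32_t)dma,
		.base_h = (uint32_t)(dma >> 32),
	};

	while (win->w0 & MB_BUSY)		/* wait for the window to go idle */
		;
	memcpy((void *)win, &mb, sizeof(mb));	/* the driver issues this as one atomic 128-bit store */
	while (win->w0 & MB_BUSY)		/* wait for hardware to acknowledge */
		;
	return 0;				/* the driver additionally checks a status field and returns -EIO on error */
}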
707 writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1); in qm_db_v1()
712 void __iomem *io_base = qm->io_base; in qm_db_v2()
717 io_base = qm->db_io_base + (u64)qn * qm->db_interval + in qm_db_v2()
732 dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n", in qm_db()
735 qm->ops->qm_db(qm, qn, cmd, index, priority); in qm_db()
743 if (qm->ver < QM_HW_V3) in qm_disable_clock_gate()
746 val = readl(qm->io_base + QM_PM_CTRL); in qm_disable_clock_gate()
748 writel(val, qm->io_base + QM_PM_CTRL); in qm_disable_clock_gate()
755 writel(0x1, qm->io_base + QM_MEM_START_INIT); in qm_dev_mem_reset()
756 return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val, in qm_dev_mem_reset()
762 * hisi_qm_get_hw_info() - Get device information.
776 switch (qm->ver) { in hisi_qm_get_hw_info()
785 val = readl(qm->io_base + info_table[index].offset); in hisi_qm_get_hw_info()
796 depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver); in qm_get_xqc_depth()
804 struct device *dev = &qm->pdev->dev; in hisi_qm_set_algs()
808 if (!qm->uacce) in hisi_qm_set_algs()
814 return -EINVAL; in hisi_qm_set_algs()
819 return -ENOMEM; in hisi_qm_set_algs()
828 qm->uacce->algs = algs; in hisi_qm_set_algs()
837 if (qm->fun_type == QM_HW_PF) in qm_get_irq_num()
838 return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver); in qm_get_irq_num()
840 return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver); in qm_get_irq_num()
845 struct device *dev = &qm->pdev->dev; in qm_pm_get_sync()
848 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) in qm_pm_get_sync()
862 struct device *dev = &qm->pdev->dev; in qm_pm_put_sync()
864 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) in qm_pm_put_sync()
873 if (qp->qp_status.cq_head == qp->cq_depth - 1) { in qm_cq_head_update()
874 qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase; in qm_cq_head_update()
875 qp->qp_status.cq_head = 0; in qm_cq_head_update()
877 qp->qp_status.cq_head++; in qm_cq_head_update()
883 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; in qm_poll_req_cb()
884 struct hisi_qm *qm = qp->qm; in qm_poll_req_cb()
886 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { in qm_poll_req_cb()
888 qp->req_cb(qp, qp->sqe + qm->sqe_size * in qm_poll_req_cb()
889 le16_to_cpu(cqe->sq_head)); in qm_poll_req_cb()
891 cqe = qp->cqe + qp->qp_status.cq_head; in qm_poll_req_cb()
892 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, in qm_poll_req_cb()
893 qp->qp_status.cq_head, 0); in qm_poll_req_cb()
894 atomic_dec(&qp->qp_status.used); in qm_poll_req_cb()
900 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1); in qm_poll_req_cb()
907 struct hisi_qm *qm = poll_data->qm; in qm_work_process()
908 u16 eqe_num = poll_data->eqe_num; in qm_work_process()
912 for (i = eqe_num - 1; i >= 0; i--) { in qm_work_process()
913 qp = &qm->qp_array[poll_data->qp_finish_id[i]]; in qm_work_process()
914 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP)) in qm_work_process()
917 if (qp->event_cb) { in qm_work_process()
918 qp->event_cb(qp); in qm_work_process()
922 if (likely(qp->req_cb)) in qm_work_process()
929 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; in qm_get_complete_eqe_num()
931 u16 eq_depth = qm->eq_depth; in qm_get_complete_eqe_num()
934 if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) { in qm_get_complete_eqe_num()
935 atomic64_inc(&qm->debug.dfx.err_irq_cnt); in qm_get_complete_eqe_num()
936 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_get_complete_eqe_num()
940 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; in qm_get_complete_eqe_num()
941 if (unlikely(cqn >= qm->qp_num)) in qm_get_complete_eqe_num()
943 poll_data = &qm->poll_data[cqn]; in qm_get_complete_eqe_num()
945 while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { in qm_get_complete_eqe_num()
946 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; in qm_get_complete_eqe_num()
947 poll_data->qp_finish_id[eqe_num] = cqn; in qm_get_complete_eqe_num()
950 if (qm->status.eq_head == eq_depth - 1) { in qm_get_complete_eqe_num()
951 qm->status.eqc_phase = !qm->status.eqc_phase; in qm_get_complete_eqe_num()
952 eqe = qm->eqe; in qm_get_complete_eqe_num()
953 qm->status.eq_head = 0; in qm_get_complete_eqe_num()
956 qm->status.eq_head++; in qm_get_complete_eqe_num()
959 if (eqe_num == (eq_depth >> 1) - 1) in qm_get_complete_eqe_num()
963 poll_data->eqe_num = eqe_num; in qm_get_complete_eqe_num()
964 queue_work(qm->wq, &poll_data->work); in qm_get_complete_eqe_num()
965 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_get_complete_eqe_num()
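/* Reading the fragments above together: qm_get_complete_eqe_num() walks the
 * event queue by phase bit, records the completed queue number (cqn) from each
 * EQE into poll_data->qp_finish_id[], stops after at most eq_depth/2 - 1
 * entries, hands the batch to the per-CQ work item via queue_work(), and then
 * writes the new EQ head back through the doorbell. */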
972 /* Get qp id of completed tasks and re-enable the interrupt */ in qm_eq_irq()
983 val = readl(qm->io_base + QM_IFC_INT_STATUS); in qm_mb_cmd_irq()
988 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) { in qm_mb_cmd_irq()
989 dev_warn(&qm->pdev->dev, "Driver is down, message cannot be processed!\n"); in qm_mb_cmd_irq()
993 schedule_work(&qm->cmd_process); in qm_mb_cmd_irq()
1002 if (qp->is_in_kernel) in qm_set_qp_disable()
1005 addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset; in qm_set_qp_disable()
1014 struct hisi_qp *qp = &qm->qp_array[qp_id]; in qm_disable_qp()
1023 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); in qm_reset_function()
1024 struct device *dev = &qm->pdev->dev; in qm_reset_function()
1053 struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head; in qm_aeq_thread()
1054 u16 aeq_depth = qm->aeq_depth; in qm_aeq_thread()
1057 atomic64_inc(&qm->debug.dfx.aeq_irq_cnt); in qm_aeq_thread()
1059 while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) { in qm_aeq_thread()
1060 type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT; in qm_aeq_thread()
1061 qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK; in qm_aeq_thread()
1065 dev_err(&qm->pdev->dev, "eq overflow, reset function\n"); in qm_aeq_thread()
1069 dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n", in qm_aeq_thread()
1076 dev_err(&qm->pdev->dev, "unknown error type %u\n", in qm_aeq_thread()
1081 if (qm->status.aeq_head == aeq_depth - 1) { in qm_aeq_thread()
1082 qm->status.aeqc_phase = !qm->status.aeqc_phase; in qm_aeq_thread()
1083 aeqe = qm->aeqe; in qm_aeq_thread()
1084 qm->status.aeq_head = 0; in qm_aeq_thread()
1087 qm->status.aeq_head++; in qm_aeq_thread()
1091 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); in qm_aeq_thread()
1098 struct hisi_qp_status *qp_status = &qp->qp_status; in qm_init_qp_status()
1100 qp_status->sq_tail = 0; in qm_init_qp_status()
1101 qp_status->cq_head = 0; in qm_init_qp_status()
1102 qp_status->cqc_phase = true; in qm_init_qp_status()
1103 atomic_set(&qp_status->used, 0); in qm_init_qp_status()
1108 struct device *dev = &qm->pdev->dev; in qm_init_prefetch()
1111 if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) in qm_init_prefetch()
1129 writel(page_type, qm->io_base + QM_PAGE_SIZE); in qm_init_prefetch()
1139 * IR(Mbps) = -------------------------
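The matched line 1139 is only the fraction bar of a multi-line comment; assuming the standard QM shaper formula that comment documents, the full expression is IR(Mbps) = IR_b * 2^IR_u * 8000 / (Tick * 2^IR_s). A one-function sketch of that calculation (constant value and parameter names assumed):

/* Shaper rate in Mbps from the token-bucket parameters; 8000 is the
 * divisor clock assumed from the formula above. */
unsigned int shaper_ir_mbps(unsigned int ir_b, unsigned int ir_u,
			    unsigned int ir_s, unsigned int tick)
{
	return (ir_b * 8000u * (1u << ir_u)) / (tick * (1u << ir_s));
}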
1179 factor->cbs_s = acc_shaper_calc_cbs_s(ir); in qm_get_shaper_para()
1186 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir; in qm_get_shaper_para()
1188 factor->cir_b = cir_b; in qm_get_shaper_para()
1189 factor->cir_u = cir_u; in qm_get_shaper_para()
1190 factor->cir_s = cir_s; in qm_get_shaper_para()
1196 return -EINVAL; in qm_get_shaper_para()
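On the error_rate computation above (line 1186): assuming QM_QOS_EXPAND_RATE is 1000, it expresses the relative deviation between the requested rate ir and the rate ir_calc the hardware parameters can actually produce, in tenths of a percent:

/* ir = 100000, ir_calc = 99500:
 * error_rate = 1000 * |99500 - 100000| / 100000 = 5, i.e. a 0.5% deviation. */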
1207 if (qm->ver == QM_HW_V1) { in qm_vft_data_cfg()
1216 (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT; in qm_vft_data_cfg()
1220 if (qm->ver == QM_HW_V1) { in qm_vft_data_cfg()
1231 tmp = factor->cir_b | in qm_vft_data_cfg()
1232 (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) | in qm_vft_data_cfg()
1233 (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) | in qm_vft_data_cfg()
1235 (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT); in qm_vft_data_cfg()
1241 writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L); in qm_vft_data_cfg()
1242 writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H); in qm_vft_data_cfg()
1252 if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) in qm_set_vft_common()
1253 factor = &qm->factor[fun_num]; in qm_set_vft_common()
1255 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_set_vft_common()
1261 writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR); in qm_set_vft_common()
1262 writel(type, qm->io_base + QM_VFT_CFG_TYPE); in qm_set_vft_common()
1266 writel(fun_num, qm->io_base + QM_VFT_CFG); in qm_set_vft_common()
1270 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); in qm_set_vft_common()
1271 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); in qm_set_vft_common()
1273 return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_set_vft_common()
1280 u32 qos = qm->factor[fun_num].func_qos; in qm_shaper_init_vft()
1283 ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]); in qm_shaper_init_vft()
1285 dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n"); in qm_shaper_init_vft()
1288 writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG); in qm_shaper_init_vft()
1312 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { in qm_set_sqc_cqc_vft()
1335 sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | in qm_get_vft_v2()
1336 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); in qm_get_vft_v2()
1347 struct device *dev = &qm->pdev->dev; in hisi_qm_ctx_alloc()
1352 return ERR_PTR(-ENOMEM); in hisi_qm_ctx_alloc()
1358 return ERR_PTR(-ENOMEM); in hisi_qm_ctx_alloc()
1367 struct device *dev = &qm->pdev->dev; in hisi_qm_ctx_free()
1385 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v1()
1390 struct hisi_qm_err_info *err_info = &qm->err_info; in qm_hw_error_cfg()
1392 qm->error_mask = err_info->nfe | err_info->ce | err_info->fe; in qm_hw_error_cfg()
1394 writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE); in qm_hw_error_cfg()
1397 writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE); in qm_hw_error_cfg()
1398 writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD); in qm_hw_error_cfg()
1399 writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE); in qm_hw_error_cfg()
1400 writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE); in qm_hw_error_cfg()
1409 irq_unmask = ~qm->error_mask; in qm_hw_error_init_v2()
1410 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v2()
1411 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v2()
1416 u32 irq_mask = qm->error_mask; in qm_hw_error_uninit_v2()
1418 irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v2()
1419 writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v2()
1429 writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL); in qm_hw_error_init_v3()
1431 irq_unmask = ~qm->error_mask; in qm_hw_error_init_v3()
1432 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v3()
1433 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v3()
1438 u32 irq_mask = qm->error_mask; in qm_hw_error_uninit_v3()
1440 irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v3()
1441 writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v3()
1444 writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL); in qm_hw_error_uninit_v3()
1450 struct device *dev = &qm->pdev->dev; in qm_log_hw_error()
1456 if (!(err->int_msk & error_status)) in qm_log_hw_error()
1460 err->msg, err->int_msk); in qm_log_hw_error()
1462 if (err->int_msk & QM_DB_TIMEOUT) { in qm_log_hw_error()
1463 reg_val = readl(qm->io_base + QM_ABNORMAL_INF01); in qm_log_hw_error()
1469 } else if (err->int_msk & QM_OF_FIFO_OF) { in qm_log_hw_error()
1470 reg_val = readl(qm->io_base + QM_ABNORMAL_INF00); in qm_log_hw_error()
1489 if (error_status & qm->error_mask) { in qm_hw_error_handle_v2()
1491 qm->err_status.is_qm_ecc_mbit = true; in qm_hw_error_handle_v2()
1494 if (error_status & qm->err_info.qm_reset_mask) { in qm_hw_error_handle_v2()
1496 writel(qm->err_info.nfe & (~error_status), in qm_hw_error_handle_v2()
1497 qm->io_base + QM_RAS_NFE_ENABLE); in qm_hw_error_handle_v2()
1502 writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); in qm_hw_error_handle_v2()
1503 writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE); in qm_hw_error_handle_v2()
1504 writel(qm->err_info.ce, qm->io_base + QM_RAS_CE_ENABLE); in qm_hw_error_handle_v2()
1516 mutex_lock(&qm->mailbox_lock); in qm_get_mb_cmd()
1521 *msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | in qm_get_mb_cmd()
1522 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); in qm_get_mb_cmd()
1525 mutex_unlock(&qm->mailbox_lock); in qm_get_mb_cmd()
1533 if (qm->fun_type == QM_HW_PF) in qm_clear_cmd_interrupt()
1534 writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P); in qm_clear_cmd_interrupt()
1536 val = readl(qm->io_base + QM_IFC_INT_SOURCE_V); in qm_clear_cmd_interrupt()
1538 writel(val, qm->io_base + QM_IFC_INT_SOURCE_V); in qm_clear_cmd_interrupt()
1543 struct device *dev = &qm->pdev->dev; in qm_handle_vf_msg()
1573 struct device *dev = &qm->pdev->dev; in qm_wait_vf_prepare_finish()
1574 u32 vfs_num = qm->vfs_num; in qm_wait_vf_prepare_finish()
1580 if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in qm_wait_vf_prepare_finish()
1584 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); in qm_wait_vf_prepare_finish()
1590 ret = -EBUSY; in qm_wait_vf_prepare_finish()
1615 val = readl(qm->io_base + QM_IFC_INT_CFG); in qm_trigger_vf_interrupt()
1618 writel(val, qm->io_base + QM_IFC_INT_CFG); in qm_trigger_vf_interrupt()
1620 val = readl(qm->io_base + QM_IFC_INT_SET_P); in qm_trigger_vf_interrupt()
1622 writel(val, qm->io_base + QM_IFC_INT_SET_P); in qm_trigger_vf_interrupt()
1629 val = readl(qm->io_base + QM_IFC_INT_SET_V); in qm_trigger_pf_interrupt()
1631 writel(val, qm->io_base + QM_IFC_INT_SET_V); in qm_trigger_pf_interrupt()
1636 struct device *dev = &qm->pdev->dev; in qm_ping_single_vf()
1643 mutex_lock(&qm->mailbox_lock); in qm_ping_single_vf()
1653 val = readq(qm->io_base + QM_IFC_READY_STATUS); in qm_ping_single_vf()
1660 ret = -ETIMEDOUT; in qm_ping_single_vf()
1666 mutex_unlock(&qm->mailbox_lock); in qm_ping_single_vf()
1672 struct device *dev = &qm->pdev->dev; in qm_ping_all_vfs()
1673 u32 vfs_num = qm->vfs_num; in qm_ping_all_vfs()
1681 mutex_lock(&qm->mailbox_lock); in qm_ping_all_vfs()
1686 mutex_unlock(&qm->mailbox_lock); in qm_ping_all_vfs()
1693 val = readq(qm->io_base + QM_IFC_READY_STATUS); in qm_ping_all_vfs()
1696 mutex_unlock(&qm->mailbox_lock); in qm_ping_all_vfs()
1704 mutex_unlock(&qm->mailbox_lock); in qm_ping_all_vfs()
1712 return -ETIMEDOUT; in qm_ping_all_vfs()
1723 mutex_lock(&qm->mailbox_lock); in qm_ping_pf()
1726 dev_err(&qm->pdev->dev, "failed to send command to PF!\n"); in qm_ping_pf()
1734 val = readl(qm->io_base + QM_IFC_INT_SET_V); in qm_ping_pf()
1739 ret = -ETIMEDOUT; in qm_ping_pf()
1745 mutex_unlock(&qm->mailbox_lock); in qm_ping_pf()
1751 return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0); in qm_stop_qp()
1756 struct pci_dev *pdev = qm->pdev; in qm_set_msi()
1759 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, in qm_set_msi()
1762 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, in qm_set_msi()
1764 if (qm->err_status.is_qm_ecc_mbit || in qm_set_msi()
1765 qm->err_status.is_dev_ecc_mbit) in qm_set_msi()
1769 if (readl(qm->io_base + QM_PEH_DFX_INFO0)) in qm_set_msi()
1770 return -EFAULT; in qm_set_msi()
1778 struct pci_dev *pdev = qm->pdev; in qm_wait_msi_finish()
1785 pci_read_config_dword(pdev, pdev->msi_cap + in qm_wait_msi_finish()
1798 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0, in qm_wait_msi_finish()
1804 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1, in qm_wait_msi_finish()
1813 struct pci_dev *pdev = qm->pdev; in qm_set_msi_v3()
1814 int ret = -ETIMEDOUT; in qm_set_msi_v3()
1817 pci_read_config_dword(pdev, pdev->msi_cap, &cmd); in qm_set_msi_v3()
1823 pci_write_config_dword(pdev, pdev->msi_cap, cmd); in qm_set_msi_v3()
1826 pci_read_config_dword(pdev, pdev->msi_cap, &cmd); in qm_set_msi_v3()
1867 struct hisi_qp_status *qp_status = &qp->qp_status; in qm_get_avail_sqe()
1868 u16 sq_tail = qp_status->sq_tail; in qm_get_avail_sqe()
1870 if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1)) in qm_get_avail_sqe()
1873 return qp->sqe + sq_tail * qp->qm->sqe_size; in qm_get_avail_sqe()
1881 addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET; in hisi_qm_unset_hw_reset()
1887 struct device *dev = &qm->pdev->dev; in qm_create_qp_nolock()
1892 return ERR_PTR(-EPERM); in qm_create_qp_nolock()
1894 if (qm->qp_in_used == qm->qp_num) { in qm_create_qp_nolock()
1896 qm->qp_num); in qm_create_qp_nolock()
1897 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); in qm_create_qp_nolock()
1898 return ERR_PTR(-EBUSY); in qm_create_qp_nolock()
1901 qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC); in qm_create_qp_nolock()
1904 qm->qp_num); in qm_create_qp_nolock()
1905 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); in qm_create_qp_nolock()
1906 return ERR_PTR(-EBUSY); in qm_create_qp_nolock()
1909 qp = &qm->qp_array[qp_id]; in qm_create_qp_nolock()
1911 memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth); in qm_create_qp_nolock()
1913 qp->event_cb = NULL; in qm_create_qp_nolock()
1914 qp->req_cb = NULL; in qm_create_qp_nolock()
1915 qp->qp_id = qp_id; in qm_create_qp_nolock()
1916 qp->alg_type = alg_type; in qm_create_qp_nolock()
1917 qp->is_in_kernel = true; in qm_create_qp_nolock()
1918 qm->qp_in_used++; in qm_create_qp_nolock()
1919 atomic_set(&qp->qp_status.flags, QP_INIT); in qm_create_qp_nolock()
1925 * hisi_qm_create_qp() - Create a queue pair from qm.
1940 down_write(&qm->qps_lock); in hisi_qm_create_qp()
1942 up_write(&qm->qps_lock); in hisi_qm_create_qp()
1951 * hisi_qm_release_qp() - Release a qp back to its qm.
1958 struct hisi_qm *qm = qp->qm; in hisi_qm_release_qp()
1960 down_write(&qm->qps_lock); in hisi_qm_release_qp()
1963 up_write(&qm->qps_lock); in hisi_qm_release_qp()
1967 qm->qp_in_used--; in hisi_qm_release_qp()
1968 idr_remove(&qm->qp_idr, qp->qp_id); in hisi_qm_release_qp()
1970 up_write(&qm->qps_lock); in hisi_qm_release_qp()
1977 struct hisi_qm *qm = qp->qm; in qm_sq_ctx_cfg()
1978 struct device *dev = &qm->pdev->dev; in qm_sq_ctx_cfg()
1979 enum qm_hw_ver ver = qm->ver; in qm_sq_ctx_cfg()
1986 return -ENOMEM; in qm_sq_ctx_cfg()
1988 INIT_QC_COMMON(sqc, qp->sqe_dma, pasid); in qm_sq_ctx_cfg()
1990 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size)); in qm_sq_ctx_cfg()
1991 sqc->w8 = cpu_to_le16(qp->sq_depth - 1); in qm_sq_ctx_cfg()
1993 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth)); in qm_sq_ctx_cfg()
1994 sqc->w8 = 0; /* rand_qc */ in qm_sq_ctx_cfg()
1996 sqc->cq_num = cpu_to_le16(qp_id); in qm_sq_ctx_cfg()
1997 sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type)); in qm_sq_ctx_cfg()
1999 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) in qm_sq_ctx_cfg()
2000 sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE << in qm_sq_ctx_cfg()
2007 return -ENOMEM; in qm_sq_ctx_cfg()
2019 struct hisi_qm *qm = qp->qm; in qm_cq_ctx_cfg()
2020 struct device *dev = &qm->pdev->dev; in qm_cq_ctx_cfg()
2021 enum qm_hw_ver ver = qm->ver; in qm_cq_ctx_cfg()
2028 return -ENOMEM; in qm_cq_ctx_cfg()
2030 INIT_QC_COMMON(cqc, qp->cqe_dma, pasid); in qm_cq_ctx_cfg()
2032 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, in qm_cq_ctx_cfg()
2034 cqc->w8 = cpu_to_le16(qp->cq_depth - 1); in qm_cq_ctx_cfg()
2036 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth)); in qm_cq_ctx_cfg()
2037 cqc->w8 = 0; /* rand_qc */ in qm_cq_ctx_cfg()
2039 cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT); in qm_cq_ctx_cfg()
2041 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) in qm_cq_ctx_cfg()
2042 cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE); in qm_cq_ctx_cfg()
2048 return -ENOMEM; in qm_cq_ctx_cfg()
2073 struct hisi_qm *qm = qp->qm; in qm_start_qp_nolock()
2074 struct device *dev = &qm->pdev->dev; in qm_start_qp_nolock()
2075 int qp_id = qp->qp_id; in qm_start_qp_nolock()
2080 return -EPERM; in qm_start_qp_nolock()
2086 atomic_set(&qp->qp_status.flags, QP_START); in qm_start_qp_nolock()
2093 * hisi_qm_start_qp() - Start a qp into running.
2102 struct hisi_qm *qm = qp->qm; in hisi_qm_start_qp()
2105 down_write(&qm->qps_lock); in hisi_qm_start_qp()
2107 up_write(&qm->qps_lock); in hisi_qm_start_qp()
2114 * qp_stop_fail_cb() - call request cb.
2121 int qp_used = atomic_read(&qp->qp_status.used); in qp_stop_fail_cb()
2122 u16 cur_tail = qp->qp_status.sq_tail; in qp_stop_fail_cb()
2123 u16 sq_depth = qp->sq_depth; in qp_stop_fail_cb()
2124 u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth; in qp_stop_fail_cb()
2125 struct hisi_qm *qm = qp->qm; in qp_stop_fail_cb()
2131 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos)); in qp_stop_fail_cb()
2132 atomic_dec(&qp->qp_status.used); in qp_stop_fail_cb()
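The head recovery in qp_stop_fail_cb() (line 2124) walks back from the software tail over the requests still marked used; purely as arithmetic:

/* sq_depth = 1024, cur_tail = 5, qp_used = 8:
 * cur_head = (5 + 1024 - 8) % 1024 = 1021,
 * so the 8 un-acked SQEs occupy slots 1021..1023 and 0..4. */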
2137 * qm_drain_qp() - Drain a qp.
2146 struct hisi_qm *qm = qp->qm; in qm_drain_qp()
2147 struct device *dev = &qm->pdev->dev; in qm_drain_qp()
2159 if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) { in qm_drain_qp()
2162 dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id); in qm_drain_qp()
2169 return -ENOMEM; in qm_drain_qp()
2173 ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id); in qm_drain_qp()
2181 qp->qp_id); in qm_drain_qp()
2188 if ((sqc->tail == cqc->tail) && in qm_drain_qp()
2193 dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id); in qm_drain_qp()
2194 ret = -EBUSY; in qm_drain_qp()
2208 struct device *dev = &qp->qm->pdev->dev; in qm_stop_qp_nolock()
2217 if (atomic_read(&qp->qp_status.flags) == QP_STOP) { in qm_stop_qp_nolock()
2218 qp->is_resetting = false; in qm_stop_qp_nolock()
2222 if (!qm_qp_avail_state(qp->qm, qp, QP_STOP)) in qm_stop_qp_nolock()
2223 return -EPERM; in qm_stop_qp_nolock()
2225 atomic_set(&qp->qp_status.flags, QP_STOP); in qm_stop_qp_nolock()
2232 flush_workqueue(qp->qm->wq); in qm_stop_qp_nolock()
2233 if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used))) in qm_stop_qp_nolock()
2236 dev_dbg(dev, "stop queue %u!", qp->qp_id); in qm_stop_qp_nolock()
2242 * hisi_qm_stop_qp() - Stop a qp in qm.
2251 down_write(&qp->qm->qps_lock); in hisi_qm_stop_qp()
2253 up_write(&qp->qm->qps_lock); in hisi_qm_stop_qp()
2260 * hisi_qp_send() - Queue up a task in the hardware queue.
2264 * This function will return -EBUSY if qp is currently full, and -EAGAIN
2276 struct hisi_qp_status *qp_status = &qp->qp_status; in hisi_qp_send()
2277 u16 sq_tail = qp_status->sq_tail; in hisi_qp_send()
2278 u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth; in hisi_qp_send()
2281 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP || in hisi_qp_send()
2282 atomic_read(&qp->qm->status.flags) == QM_STOP || in hisi_qp_send()
2283 qp->is_resetting)) { in hisi_qp_send()
2284 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n"); in hisi_qp_send()
2285 return -EAGAIN; in hisi_qp_send()
2289 return -EBUSY; in hisi_qp_send()
2291 memcpy(sqe, msg, qp->qm->sqe_size); in hisi_qp_send()
2293 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0); in hisi_qp_send()
2294 atomic_inc(&qp->qp_status.used); in hisi_qp_send()
2295 qp_status->sq_tail = sq_tail_next; in hisi_qp_send()
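The hisi_qp_send() fragments above are the producer half of the rings sketched earlier: refuse to queue once sq_depth - 1 entries are outstanding (presumably so a completely full ring never looks empty to the hardware), copy the SQE into the tail slot, ring the SQ doorbell with the next tail, then advance the software tail. A minimal model with hypothetical types:

#include <string.h>

struct sq_model {
	char *sqe_base;			/* sq_depth slots of sqe_size bytes */
	unsigned int sqe_size, sq_depth;
	unsigned int tail, used;
};

int sq_send(struct sq_model *q, const void *msg,
	    void (*ring_doorbell)(unsigned int new_tail))
{
	unsigned int next = (q->tail + 1) % q->sq_depth;

	if (q->used == q->sq_depth - 1)	/* treated as full, mirroring qm_get_avail_sqe() */
		return -1;

	memcpy(q->sqe_base + q->tail * q->sqe_size, msg, q->sqe_size);
	ring_doorbell(next);		/* tell hardware where the new tail is */
	q->used++;
	q->tail = next;
	return 0;
}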
2305 if (qm->ver == QM_HW_V1) in hisi_qm_cache_wb()
2308 writel(0x1, qm->io_base + QM_CACHE_WB_START); in hisi_qm_cache_wb()
2309 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, in hisi_qm_cache_wb()
2312 dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n"); in hisi_qm_cache_wb()
2317 wake_up_interruptible(&qp->uacce_q->wait); in qm_qp_event_notifier()
2323 struct hisi_qm *qm = uacce->priv; in hisi_qm_get_available_instances()
2326 down_read(&qm->qps_lock); in hisi_qm_get_available_instances()
2327 ret = qm->qp_num - qm->qp_in_used; in hisi_qm_get_available_instances()
2328 up_read(&qm->qps_lock); in hisi_qm_get_available_instances()
2337 for (i = 0; i < qm->qp_num; i++) in hisi_qm_set_hw_reset()
2338 qm_set_qp_disable(&qm->qp_array[i], offset); in hisi_qm_set_hw_reset()
2345 struct hisi_qm *qm = uacce->priv; in hisi_qm_uacce_get_queue()
2353 q->priv = qp; in hisi_qm_uacce_get_queue()
2354 q->uacce = uacce; in hisi_qm_uacce_get_queue()
2355 qp->uacce_q = q; in hisi_qm_uacce_get_queue()
2356 qp->event_cb = qm_qp_event_notifier; in hisi_qm_uacce_get_queue()
2357 qp->pasid = arg; in hisi_qm_uacce_get_queue()
2358 qp->is_in_kernel = false; in hisi_qm_uacce_get_queue()
2365 struct hisi_qp *qp = q->priv; in hisi_qm_uacce_put_queue()
2375 struct hisi_qp *qp = q->priv; in hisi_qm_uacce_mmap()
2376 struct hisi_qm *qm = qp->qm; in hisi_qm_uacce_mmap()
2377 resource_size_t phys_base = qm->db_phys_base + in hisi_qm_uacce_mmap()
2378 qp->qp_id * qm->db_interval; in hisi_qm_uacce_mmap()
2379 size_t sz = vma->vm_end - vma->vm_start; in hisi_qm_uacce_mmap()
2380 struct pci_dev *pdev = qm->pdev; in hisi_qm_uacce_mmap()
2381 struct device *dev = &pdev->dev; in hisi_qm_uacce_mmap()
2385 switch (qfr->type) { in hisi_qm_uacce_mmap()
2387 if (qm->ver == QM_HW_V1) { in hisi_qm_uacce_mmap()
2389 return -EINVAL; in hisi_qm_uacce_mmap()
2390 } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { in hisi_qm_uacce_mmap()
2393 return -EINVAL; in hisi_qm_uacce_mmap()
2395 if (sz > qm->db_interval) in hisi_qm_uacce_mmap()
2396 return -EINVAL; in hisi_qm_uacce_mmap()
2401 return remap_pfn_range(vma, vma->vm_start, in hisi_qm_uacce_mmap()
2403 sz, pgprot_noncached(vma->vm_page_prot)); in hisi_qm_uacce_mmap()
2405 if (sz != qp->qdma.size) in hisi_qm_uacce_mmap()
2406 return -EINVAL; in hisi_qm_uacce_mmap()
2412 vm_pgoff = vma->vm_pgoff; in hisi_qm_uacce_mmap()
2413 vma->vm_pgoff = 0; in hisi_qm_uacce_mmap()
2414 ret = dma_mmap_coherent(dev, vma, qp->qdma.va, in hisi_qm_uacce_mmap()
2415 qp->qdma.dma, sz); in hisi_qm_uacce_mmap()
2416 vma->vm_pgoff = vm_pgoff; in hisi_qm_uacce_mmap()
2420 return -EINVAL; in hisi_qm_uacce_mmap()
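/* Note on the DUS branch above (lines 2412-2416): dma_mmap_coherent()
 * interprets vma->vm_pgoff as an offset into the coherent buffer, while uacce
 * uses vm_pgoff to select the queue file region, so the driver temporarily
 * clears it to map qp->qdma from its start and restores it afterwards. */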
2426 struct hisi_qp *qp = q->priv; in hisi_qm_uacce_start_queue()
2428 return hisi_qm_start_qp(qp, qp->pasid); in hisi_qm_uacce_start_queue()
2433 hisi_qm_stop_qp(q->priv); in hisi_qm_uacce_stop_queue()
2438 struct hisi_qp *qp = q->priv; in hisi_qm_is_q_updated()
2439 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; in hisi_qm_is_q_updated()
2442 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { in hisi_qm_is_q_updated()
2446 cqe = qp->cqe + qp->qp_status.cq_head; in hisi_qm_is_q_updated()
2455 struct hisi_qm *qm = q->uacce->priv; in qm_set_sqctype()
2456 struct hisi_qp *qp = q->priv; in qm_set_sqctype()
2458 down_write(&qm->qps_lock); in qm_set_sqctype()
2459 qp->alg_type = type; in qm_set_sqctype()
2460 up_write(&qm->qps_lock); in qm_set_sqctype()
2466 struct hisi_qp *qp = q->priv; in hisi_qm_uacce_ioctl()
2473 return -EFAULT; in hisi_qm_uacce_ioctl()
2476 return -EINVAL; in hisi_qm_uacce_ioctl()
2479 qp_ctx.id = qp->qp_id; in hisi_qm_uacce_ioctl()
2483 return -EFAULT; in hisi_qm_uacce_ioctl()
2489 return -EFAULT; in hisi_qm_uacce_ioctl()
2491 qp_info.sqe_size = qp->qm->sqe_size; in hisi_qm_uacce_ioctl()
2492 qp_info.sq_depth = qp->sq_depth; in hisi_qm_uacce_ioctl()
2493 qp_info.cq_depth = qp->cq_depth; in hisi_qm_uacce_ioctl()
2497 return -EFAULT; in hisi_qm_uacce_ioctl()
2502 return -EINVAL; in hisi_qm_uacce_ioctl()
2506 * qm_hw_err_isolate() - Try to set the isolation status of the uacce device
2516 isolate = &qm->isolate_data; in qm_hw_err_isolate()
2521 if (qm->uacce->is_vf || isolate->is_isolate || !isolate->err_threshold) in qm_hw_err_isolate()
2526 return -ENOMEM; in qm_hw_err_isolate()
2529 * Time-stamp every slot AER error. Then check the AER error log when the in qm_hw_err_isolate()
2534 mutex_lock(&isolate->isolate_lock); in qm_hw_err_isolate()
2535 hw_err->timestamp = jiffies; in qm_hw_err_isolate()
2536 list_for_each_entry_safe(err, tmp, &isolate->qm_hw_errs, list) { in qm_hw_err_isolate()
2537 if ((hw_err->timestamp - err->timestamp) / HZ > in qm_hw_err_isolate()
2539 list_del(&err->list); in qm_hw_err_isolate()
2545 list_add(&hw_err->list, &isolate->qm_hw_errs); in qm_hw_err_isolate()
2546 mutex_unlock(&isolate->isolate_lock); in qm_hw_err_isolate()
2548 if (count >= isolate->err_threshold) in qm_hw_err_isolate()
2549 isolate->is_isolate = true; in qm_hw_err_isolate()
2558 mutex_lock(&qm->isolate_data.isolate_lock); in qm_hw_err_destroy()
2559 list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) { in qm_hw_err_destroy()
2560 list_del(&err->list); in qm_hw_err_destroy()
2563 mutex_unlock(&qm->isolate_data.isolate_lock); in qm_hw_err_destroy()
2568 struct hisi_qm *qm = uacce->priv; in hisi_qm_get_isolate_state()
2571 if (uacce->is_vf) in hisi_qm_get_isolate_state()
2572 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); in hisi_qm_get_isolate_state()
2576 return pf_qm->isolate_data.is_isolate ? in hisi_qm_get_isolate_state()
2582 struct hisi_qm *qm = uacce->priv; in hisi_qm_isolate_threshold_write()
2585 if (uacce->is_vf) in hisi_qm_isolate_threshold_write()
2586 return -EPERM; in hisi_qm_isolate_threshold_write()
2588 if (qm->isolate_data.is_isolate) in hisi_qm_isolate_threshold_write()
2589 return -EPERM; in hisi_qm_isolate_threshold_write()
2591 qm->isolate_data.err_threshold = num; in hisi_qm_isolate_threshold_write()
2601 struct hisi_qm *qm = uacce->priv; in hisi_qm_isolate_threshold_read()
2604 if (uacce->is_vf) { in hisi_qm_isolate_threshold_read()
2605 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); in hisi_qm_isolate_threshold_read()
2606 return pf_qm->isolate_data.err_threshold; in hisi_qm_isolate_threshold_read()
2609 return qm->isolate_data.err_threshold; in hisi_qm_isolate_threshold_read()
2628 struct uacce_device *uacce = qm->uacce; in qm_remove_uacce()
2630 if (qm->use_sva) { in qm_remove_uacce()
2633 qm->uacce = NULL; in qm_remove_uacce()
2639 struct pci_dev *pdev = qm->pdev; in qm_alloc_uacce()
2650 ret = strscpy(interface.name, dev_driver_string(&pdev->dev), in qm_alloc_uacce()
2653 return -ENAMETOOLONG; in qm_alloc_uacce()
2655 uacce = uacce_alloc(&pdev->dev, &interface); in qm_alloc_uacce()
2659 if (uacce->flags & UACCE_DEV_SVA) { in qm_alloc_uacce()
2660 qm->use_sva = true; in qm_alloc_uacce()
2664 return -EINVAL; in qm_alloc_uacce()
2667 uacce->is_vf = pdev->is_virtfn; in qm_alloc_uacce()
2668 uacce->priv = qm; in qm_alloc_uacce()
2670 if (qm->ver == QM_HW_V1) in qm_alloc_uacce()
2671 uacce->api_ver = HISI_QM_API_VER_BASE; in qm_alloc_uacce()
2672 else if (qm->ver == QM_HW_V2) in qm_alloc_uacce()
2673 uacce->api_ver = HISI_QM_API_VER2_BASE; in qm_alloc_uacce()
2675 uacce->api_ver = HISI_QM_API_VER3_BASE; in qm_alloc_uacce()
2677 if (qm->ver == QM_HW_V1) in qm_alloc_uacce()
2679 else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) in qm_alloc_uacce()
2683 mmio_page_nr = qm->db_interval / PAGE_SIZE; in qm_alloc_uacce()
2688 dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth + in qm_alloc_uacce()
2692 uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr; in qm_alloc_uacce()
2693 uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr; in qm_alloc_uacce()
2695 qm->uacce = uacce; in qm_alloc_uacce()
2696 INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs); in qm_alloc_uacce()
2697 mutex_init(&qm->isolate_data.isolate_lock); in qm_alloc_uacce()
2703 * qm_frozen() - Try to froze QM to cut continuous queue request. If
2711 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) in qm_frozen()
2714 down_write(&qm->qps_lock); in qm_frozen()
2716 if (!qm->qp_in_used) { in qm_frozen()
2717 qm->qp_in_used = qm->qp_num; in qm_frozen()
2718 up_write(&qm->qps_lock); in qm_frozen()
2719 set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl); in qm_frozen()
2723 up_write(&qm->qps_lock); in qm_frozen()
2725 return -EBUSY; in qm_frozen()
2736 return -EINVAL; in qm_try_frozen_vfs()
2739 mutex_lock(&qm_list->lock); in qm_try_frozen_vfs()
2740 list_for_each_entry(qm, &qm_list->list, list) { in qm_try_frozen_vfs()
2741 dev = qm->pdev; in qm_try_frozen_vfs()
2753 mutex_unlock(&qm_list->lock); in qm_try_frozen_vfs()
2759 * hisi_qm_wait_task_finish() - Wait until the task is finished
2767 ((qm->fun_type == QM_HW_PF) && in hisi_qm_wait_task_finish()
2768 qm_try_frozen_vfs(qm->pdev, qm_list))) { in hisi_qm_wait_task_finish()
2772 while (test_bit(QM_RST_SCHED, &qm->misc_ctl) || in hisi_qm_wait_task_finish()
2773 test_bit(QM_RESETTING, &qm->misc_ctl)) in hisi_qm_wait_task_finish()
2776 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in hisi_qm_wait_task_finish()
2777 flush_work(&qm->cmd_process); in hisi_qm_wait_task_finish()
2785 struct device *dev = &qm->pdev->dev; in hisi_qp_memory_uninit()
2789 for (i = num - 1; i >= 0; i--) { in hisi_qp_memory_uninit()
2790 qdma = &qm->qp_array[i].qdma; in hisi_qp_memory_uninit()
2791 dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); in hisi_qp_memory_uninit()
2792 kfree(qm->poll_data[i].qp_finish_id); in hisi_qp_memory_uninit()
2795 kfree(qm->poll_data); in hisi_qp_memory_uninit()
2796 kfree(qm->qp_array); in hisi_qp_memory_uninit()
2802 struct device *dev = &qm->pdev->dev; in hisi_qp_memory_init()
2803 size_t off = qm->sqe_size * sq_depth; in hisi_qp_memory_init()
2805 int ret = -ENOMEM; in hisi_qp_memory_init()
2807 qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16), in hisi_qp_memory_init()
2809 if (!qm->poll_data[id].qp_finish_id) in hisi_qp_memory_init()
2810 return -ENOMEM; in hisi_qp_memory_init()
2812 qp = &qm->qp_array[id]; in hisi_qp_memory_init()
2813 qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma, in hisi_qp_memory_init()
2815 if (!qp->qdma.va) in hisi_qp_memory_init()
2818 qp->sqe = qp->qdma.va; in hisi_qp_memory_init()
2819 qp->sqe_dma = qp->qdma.dma; in hisi_qp_memory_init()
2820 qp->cqe = qp->qdma.va + off; in hisi_qp_memory_init()
2821 qp->cqe_dma = qp->qdma.dma + off; in hisi_qp_memory_init()
2822 qp->qdma.size = dma_size; in hisi_qp_memory_init()
2823 qp->sq_depth = sq_depth; in hisi_qp_memory_init()
2824 qp->cq_depth = cq_depth; in hisi_qp_memory_init()
2825 qp->qm = qm; in hisi_qp_memory_init()
2826 qp->qp_id = id; in hisi_qp_memory_init()
2831 kfree(qm->poll_data[id].qp_finish_id); in hisi_qp_memory_init()
2837 struct pci_dev *pdev = qm->pdev; in hisi_qm_pre_init()
2839 if (qm->ver == QM_HW_V1) in hisi_qm_pre_init()
2840 qm->ops = &qm_hw_ops_v1; in hisi_qm_pre_init()
2841 else if (qm->ver == QM_HW_V2) in hisi_qm_pre_init()
2842 qm->ops = &qm_hw_ops_v2; in hisi_qm_pre_init()
2844 qm->ops = &qm_hw_ops_v3; in hisi_qm_pre_init()
2847 mutex_init(&qm->mailbox_lock); in hisi_qm_pre_init()
2848 init_rwsem(&qm->qps_lock); in hisi_qm_pre_init()
2849 qm->qp_in_used = 0; in hisi_qm_pre_init()
2850 if (test_bit(QM_SUPPORT_RPM, &qm->caps)) { in hisi_qm_pre_init()
2851 if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev))) in hisi_qm_pre_init()
2852 dev_info(&pdev->dev, "_PS0 and _PR0 are not defined"); in hisi_qm_pre_init()
2860 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in qm_cmd_uninit()
2863 val = readl(qm->io_base + QM_IFC_INT_MASK); in qm_cmd_uninit()
2865 writel(val, qm->io_base + QM_IFC_INT_MASK); in qm_cmd_uninit()
2872 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in qm_cmd_init()
2879 val = readl(qm->io_base + QM_IFC_INT_MASK); in qm_cmd_init()
2881 writel(val, qm->io_base + QM_IFC_INT_MASK); in qm_cmd_init()
2886 struct pci_dev *pdev = qm->pdev; in qm_put_pci_res()
2888 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) in qm_put_pci_res()
2889 iounmap(qm->db_io_base); in qm_put_pci_res()
2891 iounmap(qm->io_base); in qm_put_pci_res()
2897 struct pci_dev *pdev = qm->pdev; in hisi_qm_pci_uninit()
2906 if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF) in hisi_qm_set_state()
2907 writel(state, qm->io_base + QM_VF_STATE); in hisi_qm_set_state()
2912 destroy_workqueue(qm->wq); in hisi_qm_unint_work()
2917 struct device *dev = &qm->pdev->dev; in hisi_qm_memory_uninit()
2919 hisi_qp_memory_uninit(qm, qm->qp_num); in hisi_qm_memory_uninit()
2920 if (qm->qdma.va) { in hisi_qm_memory_uninit()
2922 dma_free_coherent(dev, qm->qdma.size, in hisi_qm_memory_uninit()
2923 qm->qdma.va, qm->qdma.dma); in hisi_qm_memory_uninit()
2926 idr_destroy(&qm->qp_idr); in hisi_qm_memory_uninit()
2928 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) in hisi_qm_memory_uninit()
2929 kfree(qm->factor); in hisi_qm_memory_uninit()
2933 * hisi_qm_uninit() - Uninitialize qm.
2942 down_write(&qm->qps_lock); in hisi_qm_uninit()
2945 up_write(&qm->qps_lock); in hisi_qm_uninit()
2951 up_write(&qm->qps_lock); in hisi_qm_uninit()
2960 * hisi_qm_get_vft() - Get vft from a qm.
2974 return -EINVAL; in hisi_qm_get_vft()
2976 if (!qm->ops->get_vft) { in hisi_qm_get_vft()
2977 dev_err(&qm->pdev->dev, "Don't support vft read!\n"); in hisi_qm_get_vft()
2978 return -EINVAL; in hisi_qm_get_vft()
2981 return qm->ops->get_vft(qm, base, number); in hisi_qm_get_vft()
2985 * hisi_qm_set_vft() - Set vft to a qm.
2994 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
2995 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
3001 u32 max_q_num = qm->ctrl_qp_num; in hisi_qm_set_vft()
3005 return -EINVAL; in hisi_qm_set_vft()
3012 struct hisi_qm_status *status = &qm->status; in qm_init_eq_aeq_status()
3014 status->eq_head = 0; in qm_init_eq_aeq_status()
3015 status->aeq_head = 0; in qm_init_eq_aeq_status()
3016 status->eqc_phase = true; in qm_init_eq_aeq_status()
3017 status->aeqc_phase = true; in qm_init_eq_aeq_status()
3023 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); in qm_enable_eq_aeq_interrupts()
3024 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_enable_eq_aeq_interrupts()
3026 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK); in qm_enable_eq_aeq_interrupts()
3027 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK); in qm_enable_eq_aeq_interrupts()
3032 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); in qm_disable_eq_aeq_interrupts()
3033 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); in qm_disable_eq_aeq_interrupts()
3038 struct device *dev = &qm->pdev->dev; in qm_eq_ctx_cfg()
3045 return -ENOMEM; in qm_eq_ctx_cfg()
3047 eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma)); in qm_eq_ctx_cfg()
3048 eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); in qm_eq_ctx_cfg()
3049 if (qm->ver == QM_HW_V1) in qm_eq_ctx_cfg()
3050 eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE); in qm_eq_ctx_cfg()
3051 eqc->dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); in qm_eq_ctx_cfg()
3057 return -ENOMEM; in qm_eq_ctx_cfg()
3069 struct device *dev = &qm->pdev->dev; in qm_aeq_ctx_cfg()
3076 return -ENOMEM; in qm_aeq_ctx_cfg()
3078 aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); in qm_aeq_ctx_cfg()
3079 aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); in qm_aeq_ctx_cfg()
3080 aeqc->dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); in qm_aeq_ctx_cfg()
3086 return -ENOMEM; in qm_aeq_ctx_cfg()
3098 struct device *dev = &qm->pdev->dev; in qm_eq_aeq_ctx_cfg()
3116 WARN_ON(!qm->qdma.va); in __hisi_qm_start()
3118 if (qm->fun_type == QM_HW_PF) { in __hisi_qm_start()
3119 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); in __hisi_qm_start()
3128 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); in __hisi_qm_start()
3132 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); in __hisi_qm_start()
3143 * hisi_qm_start() - start qm
3150 struct device *dev = &qm->pdev->dev; in hisi_qm_start()
3153 down_write(&qm->qps_lock); in hisi_qm_start()
3156 up_write(&qm->qps_lock); in hisi_qm_start()
3157 return -EPERM; in hisi_qm_start()
3160 dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num); in hisi_qm_start()
3162 if (!qm->qp_num) { in hisi_qm_start()
3164 ret = -EINVAL; in hisi_qm_start()
3170 atomic_set(&qm->status.flags, QM_START); in hisi_qm_start()
3174 up_write(&qm->qps_lock); in hisi_qm_start()
3181 struct device *dev = &qm->pdev->dev; in qm_restart()
3189 down_write(&qm->qps_lock); in qm_restart()
3190 for (i = 0; i < qm->qp_num; i++) { in qm_restart()
3191 qp = &qm->qp_array[i]; in qm_restart()
3192 if (atomic_read(&qp->qp_status.flags) == QP_STOP && in qm_restart()
3193 qp->is_resetting == true) { in qm_restart()
3198 up_write(&qm->qps_lock); in qm_restart()
3201 qp->is_resetting = false; in qm_restart()
3204 up_write(&qm->qps_lock); in qm_restart()
3212 struct device *dev = &qm->pdev->dev; in qm_stop_started_qp()
3216 for (i = 0; i < qm->qp_num; i++) { in qm_stop_started_qp()
3217 qp = &qm->qp_array[i]; in qm_stop_started_qp()
3218 if (qp && atomic_read(&qp->qp_status.flags) == QP_START) { in qm_stop_started_qp()
3219 qp->is_resetting = true; in qm_stop_started_qp()
3232 * qm_clear_queues() - Clear all queues memory in a qm.
3243 for (i = 0; i < qm->qp_num; i++) { in qm_clear_queues()
3244 qp = &qm->qp_array[i]; in qm_clear_queues()
3245 if (qp->is_in_kernel && qp->is_resetting) in qm_clear_queues()
3246 memset(qp->qdma.va, 0, qp->qdma.size); in qm_clear_queues()
3249 memset(qm->qdma.va, 0, qm->qdma.size); in qm_clear_queues()
3253 * hisi_qm_stop() - Stop a qm.
3263 struct device *dev = &qm->pdev->dev; in hisi_qm_stop()
3266 down_write(&qm->qps_lock); in hisi_qm_stop()
3268 qm->status.stop_reason = r; in hisi_qm_stop()
3270 ret = -EPERM; in hisi_qm_stop()
3274 if (qm->status.stop_reason == QM_SOFT_RESET || in hisi_qm_stop()
3275 qm->status.stop_reason == QM_DOWN) { in hisi_qm_stop()
3286 if (qm->fun_type == QM_HW_PF) { in hisi_qm_stop()
3290 ret = -EBUSY; in hisi_qm_stop()
3296 atomic_set(&qm->status.flags, QM_STOP); in hisi_qm_stop()
3299 up_write(&qm->qps_lock); in hisi_qm_stop()
3306 if (!qm->ops->hw_error_init) { in qm_hw_error_init()
3307 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n"); in qm_hw_error_init()
3311 qm->ops->hw_error_init(qm); in qm_hw_error_init()
3316 if (!qm->ops->hw_error_uninit) { in qm_hw_error_uninit()
3317 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n"); in qm_hw_error_uninit()
3321 qm->ops->hw_error_uninit(qm); in qm_hw_error_uninit()
3326 if (!qm->ops->hw_error_handle) { in qm_hw_error_handle()
3327 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n"); in qm_hw_error_handle()
3331 return qm->ops->hw_error_handle(qm); in qm_hw_error_handle()
3335 * hisi_qm_dev_err_init() - Initialize device error configuration.
3342 if (qm->fun_type == QM_HW_VF) in hisi_qm_dev_err_init()
3347 if (!qm->err_ini->hw_err_enable) { in hisi_qm_dev_err_init()
3348 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n"); in hisi_qm_dev_err_init()
3351 qm->err_ini->hw_err_enable(qm); in hisi_qm_dev_err_init()
3356 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
3363 if (qm->fun_type == QM_HW_VF) in hisi_qm_dev_err_uninit()
3368 if (!qm->err_ini->hw_err_disable) { in hisi_qm_dev_err_uninit()
3369 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n"); in hisi_qm_dev_err_uninit()
3372 qm->err_ini->hw_err_disable(qm); in hisi_qm_dev_err_uninit()
3377 * hisi_qm_free_qps() - free multiple queue pairs.
3388 for (i = qp_num - 1; i >= 0; i--) in hisi_qm_free_qps()
3398 list_del(&res->list); in free_list()
3412 list_for_each_entry(qm, &qm_list->list, list) { in hisi_qm_sort_devices()
3413 dev = &qm->pdev->dev; in hisi_qm_sort_devices()
3421 return -ENOMEM; in hisi_qm_sort_devices()
3423 res->qm = qm; in hisi_qm_sort_devices()
3424 res->distance = node_distance(dev_node, node); in hisi_qm_sort_devices()
3427 if (res->distance < tmp->distance) { in hisi_qm_sort_devices()
3428 n = &tmp->list; in hisi_qm_sort_devices()
3432 list_add_tail(&res->list, n); in hisi_qm_sort_devices()
3439 * hisi_qm_alloc_qps_node() - Create multiple queue pairs.
3454 int ret = -ENODEV; in hisi_qm_alloc_qps_node()
3459 return -EINVAL; in hisi_qm_alloc_qps_node()
3461 mutex_lock(&qm_list->lock); in hisi_qm_alloc_qps_node()
3463 mutex_unlock(&qm_list->lock); in hisi_qm_alloc_qps_node()
3469 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type); in hisi_qm_alloc_qps_node()
3482 mutex_unlock(&qm_list->lock); in hisi_qm_alloc_qps_node()
3496 u32 max_qp_num = qm->max_qp_num; in qm_vf_q_assign()
3497 u32 q_base = qm->qp_num; in qm_vf_q_assign()
3501 return -EINVAL; in qm_vf_q_assign()
3503 vfs_q_num = qm->ctrl_qp_num - qm->qp_num; in qm_vf_q_assign()
3507 return -EINVAL; in qm_vf_q_assign()
3512 for (i = num_vfs; i > 0; i--) { in qm_vf_q_assign()
3522 remain_q_num--; in qm_vf_q_assign()
3530 for (j = num_vfs; j > i; j--) in qm_vf_q_assign()
3545 for (i = 1; i <= qm->vfs_num; i++) { in qm_clear_vft_config()
3550 qm->vfs_num = 0; in qm_clear_vft_config()
3557 struct device *dev = &qm->pdev->dev; in qm_func_shaper_enable()
3561 total_vfs = pci_sriov_get_totalvfs(qm->pdev); in qm_func_shaper_enable()
3563 return -EINVAL; in qm_func_shaper_enable()
3565 qm->factor[fun_index].func_qos = qos; in qm_func_shaper_enable()
3567 ret = qm_get_shaper_para(ir, &qm->factor[fun_index]); in qm_func_shaper_enable()
3570 return -EINVAL; in qm_func_shaper_enable()
3578 return -EINVAL; in qm_func_shaper_enable()
3593 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_get_shaper_vft_qos()
3599 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR); in qm_get_shaper_vft_qos()
3600 writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE); in qm_get_shaper_vft_qos()
3601 writel(fun_index, qm->io_base + QM_VFT_CFG); in qm_get_shaper_vft_qos()
3603 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); in qm_get_shaper_vft_qos()
3604 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); in qm_get_shaper_vft_qos()
3606 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_get_shaper_vft_qos()
3612 shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) | in qm_get_shaper_vft_qos()
3613 ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32); in qm_get_shaper_vft_qos()
3624 ir = qm->factor[fun_index].func_qos * QM_QOS_RATE; in qm_get_shaper_vft_qos()
3626 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir; in qm_get_shaper_vft_qos()
3628 pci_err(qm->pdev, "error_rate: %u, get function qos is error!\n", error_rate); in qm_get_shaper_vft_qos()
3637 struct device *dev = &qm->pdev->dev; in qm_vf_get_qos()
3657 int ret = -EINVAL; in qm_vf_read_qos()
3660 qm->mb_qos = 0; in qm_vf_read_qos()
3665 pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n"); in qm_vf_read_qos()
3671 if (qm->mb_qos) in qm_vf_read_qos()
3675 pci_err(qm->pdev, "PF ping VF timeout!\n"); in qm_vf_read_qos()
3676 return -ETIMEDOUT; in qm_vf_read_qos()
3686 struct hisi_qm *qm = filp->private_data; in qm_algqos_read()
3696 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { in qm_algqos_read()
3697 pci_err(qm->pdev, "dev resetting, read alg qos failed!\n"); in qm_algqos_read()
3698 ret = -EAGAIN; in qm_algqos_read()
3702 if (qm->fun_type == QM_HW_PF) { in qm_algqos_read()
3708 ir = qm->mb_qos; in qm_algqos_read()
3717 clear_bit(QM_RESETTING, &qm->misc_ctl); in qm_algqos_read()
3727 const struct bus_type *bus_type = qm->pdev->dev.bus; in qm_get_qos_value()
3736 return -EINVAL; in qm_get_qos_value()
3740 pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n"); in qm_get_qos_value()
3741 return -EINVAL; in qm_get_qos_value()
3746 pci_err(qm->pdev, "input pci bdf number is error!\n"); in qm_get_qos_value()
3747 return -ENODEV; in qm_get_qos_value()
3752 *fun_index = pdev->devfn; in qm_get_qos_value()
3760 struct hisi_qm *qm = filp->private_data; in qm_algqos_write()
3770 return -ENOSPC; in qm_algqos_write()
3772 len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count); in qm_algqos_write()
3782 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { in qm_algqos_write()
3783 pci_err(qm->pdev, "dev resetting, write alg qos failed!\n"); in qm_algqos_write()
3784 return -EAGAIN; in qm_algqos_write()
3789 ret = -EINVAL; in qm_algqos_write()
3795 pci_err(qm->pdev, "failed to enable function shaper!\n"); in qm_algqos_write()
3796 ret = -EINVAL; in qm_algqos_write()
3800 pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n", in qm_algqos_write()
3807 clear_bit(QM_RESETTING, &qm->misc_ctl); in qm_algqos_write()
3819 * hisi_qm_set_algqos_init() - Initialize function qos debugfs files.
3826 if (qm->fun_type == QM_HW_PF) in hisi_qm_set_algqos_init()
3827 debugfs_create_file("alg_qos", 0644, qm->debug.debug_root, in hisi_qm_set_algqos_init()
3829 else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in hisi_qm_set_algqos_init()
3830 debugfs_create_file("alg_qos", 0444, qm->debug.debug_root, in hisi_qm_set_algqos_init()
3839 qm->factor[i].func_qos = QM_QOS_MAX_VAL; in hisi_qm_init_vf_qos()
3843 * hisi_qm_sriov_enable() - enable virtual functions
3863 pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n", in hisi_qm_sriov_enable()
3870 ret = -ERANGE; in hisi_qm_sriov_enable()
3876 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) in hisi_qm_sriov_enable()
3885 qm->vfs_num = num_vfs; in hisi_qm_sriov_enable()
3905 * hisi_qm_sriov_disable - disable virtual functions
3918 return -EPERM; in hisi_qm_sriov_disable()
3922 if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) { in hisi_qm_sriov_disable()
3924 return -EBUSY; in hisi_qm_sriov_disable()
3940 * hisi_qm_sriov_configure - configure the number of VFs
3944 * Enable SR-IOV according to num_vfs, 0 means disable.
3957 if (!qm->err_ini->get_err_result) { in qm_dev_err_handle()
3958 dev_err(&qm->pdev->dev, "Device doesn't support reset!\n"); in qm_dev_err_handle()
3962 return qm->err_ini->get_err_result(qm); in qm_dev_err_handle()
3981 * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
3994 if (pdev->is_virtfn) in hisi_qm_dev_err_detected()
4011 struct pci_dev *pdev = qm->pdev; in qm_check_req_recv()
4015 if (qm->ver >= QM_HW_V3) in qm_check_req_recv()
4018 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID); in qm_check_req_recv()
4019 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, in qm_check_req_recv()
4023 dev_err(&pdev->dev, "Fails to read QM reg!\n"); in qm_check_req_recv()
4027 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID); in qm_check_req_recv()
4028 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, in qm_check_req_recv()
4032 dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n"); in qm_check_req_recv()
4039 struct pci_dev *pdev = qm->pdev; in qm_set_pf_mse()
4058 return -ETIMEDOUT; in qm_set_pf_mse()
4063 struct pci_dev *pdev = qm->pdev; in qm_set_vf_mse()
4085 return -ETIMEDOUT; in qm_set_vf_mse()
4093 if (qm->ver >= QM_HW_V3) in qm_dev_ecc_mbit_handle()
4096 if (!qm->err_status.is_dev_ecc_mbit && in qm_dev_ecc_mbit_handle()
4097 qm->err_status.is_qm_ecc_mbit && in qm_dev_ecc_mbit_handle()
4098 qm->err_ini->close_axi_master_ooo) { in qm_dev_ecc_mbit_handle()
4099 qm->err_ini->close_axi_master_ooo(qm); in qm_dev_ecc_mbit_handle()
4100 } else if (qm->err_status.is_dev_ecc_mbit && in qm_dev_ecc_mbit_handle()
4101 !qm->err_status.is_qm_ecc_mbit && in qm_dev_ecc_mbit_handle()
4102 !qm->err_ini->close_axi_master_ooo) { in qm_dev_ecc_mbit_handle()
4103 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); in qm_dev_ecc_mbit_handle()
4105 qm->io_base + QM_RAS_NFE_ENABLE); in qm_dev_ecc_mbit_handle()
4106 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); in qm_dev_ecc_mbit_handle()
4113 struct hisi_qm_list *qm_list = qm->qm_list; in qm_vf_reset_prepare()
4114 struct pci_dev *pdev = qm->pdev; in qm_vf_reset_prepare()
4119 mutex_lock(&qm_list->lock); in qm_vf_reset_prepare()
4120 list_for_each_entry(vf_qm, &qm_list->list, list) { in qm_vf_reset_prepare()
4121 virtfn = vf_qm->pdev; in qm_vf_reset_prepare()
4136 mutex_unlock(&qm_list->lock); in qm_vf_reset_prepare()
4143 struct pci_dev *pdev = qm->pdev; in qm_try_stop_vfs()
4146 if (!qm->vfs_num) in qm_try_stop_vfs()
4150 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { in qm_try_stop_vfs()
4165 struct pci_dev *pdev = qm->pdev; in qm_controller_reset_prepare()
4191 if (qm->use_sva) { in qm_controller_reset_prepare()
4201 clear_bit(QM_RST_SCHED, &qm->misc_ctl); in qm_controller_reset_prepare()
4212 writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + ACC_MASTER_GLOBAL_CTRL); in qm_master_ooo_check()
4213 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, in qm_master_ooo_check()
4217 pci_warn(qm->pdev, "Bus lock! Please reset system.\n"); in qm_master_ooo_check()
4224 struct pci_dev *pdev = qm->pdev; in qm_soft_reset_prepare()
4227 /* Ensure all doorbells and mailboxes received by QM */ in qm_soft_reset_prepare()
4232 if (qm->vfs_num) { in qm_soft_reset_prepare()
4240 ret = qm->ops->set_msi(qm, false); in qm_soft_reset_prepare()
4250 if (qm->err_ini->close_sva_prefetch) in qm_soft_reset_prepare()
4251 qm->err_ini->close_sva_prefetch(qm); in qm_soft_reset_prepare()
4262 struct pci_dev *pdev = qm->pdev; in qm_reset_device()
4264 /* The reset related sub-control registers are not in PCI BAR */ in qm_reset_device()
4265 if (ACPI_HANDLE(&pdev->dev)) { in qm_reset_device()
4269 s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), in qm_reset_device()
4270 qm->err_info.acpi_rst, in qm_reset_device()
4274 return -EIO; in qm_reset_device()
4279 return -EIO; in qm_reset_device()
4286 return -EINVAL; in qm_reset_device()
4302 struct hisi_qm_list *qm_list = qm->qm_list; in qm_vf_reset_done()
4303 struct pci_dev *pdev = qm->pdev; in qm_vf_reset_done()
4308 mutex_lock(&qm_list->lock); in qm_vf_reset_done()
4309 list_for_each_entry(vf_qm, &qm_list->list, list) { in qm_vf_reset_done()
4310 virtfn = vf_qm->pdev; in qm_vf_reset_done()
4325 mutex_unlock(&qm_list->lock); in qm_vf_reset_done()
4331 struct pci_dev *pdev = qm->pdev; in qm_try_start_vfs()
4334 if (!qm->vfs_num) in qm_try_start_vfs()
4337 ret = qm_vf_q_assign(qm, qm->vfs_num); in qm_try_start_vfs()
4344 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { in qm_try_start_vfs()
4359 return qm->err_ini->hw_init(qm); in qm_dev_hw_init()
4366 if (qm->err_ini->open_sva_prefetch) in qm_restart_prepare()
4367 qm->err_ini->open_sva_prefetch(qm); in qm_restart_prepare()
4369 if (qm->ver >= QM_HW_V3) in qm_restart_prepare()
4372 if (!qm->err_status.is_qm_ecc_mbit && in qm_restart_prepare()
4373 !qm->err_status.is_dev_ecc_mbit) in qm_restart_prepare()
4377 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_prepare()
4378 writel(value & ~qm->err_info.msi_wr_port, in qm_restart_prepare()
4379 qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_prepare()
4382 value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask; in qm_restart_prepare()
4383 if (value && qm->err_ini->clear_dev_hw_err_status) in qm_restart_prepare()
4384 qm->err_ini->clear_dev_hw_err_status(qm, value); in qm_restart_prepare()
4387 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE); in qm_restart_prepare()
4390 writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS); in qm_restart_prepare()
4397 if (qm->ver >= QM_HW_V3) in qm_restart_done()
4400 if (!qm->err_status.is_qm_ecc_mbit && in qm_restart_done()
4401 !qm->err_status.is_dev_ecc_mbit) in qm_restart_done()
4405 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_done()
4406 value |= qm->err_info.msi_wr_port; in qm_restart_done()
4407 writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_done()
4410 qm->err_status.is_qm_ecc_mbit = false; in qm_restart_done()
4411 qm->err_status.is_dev_ecc_mbit = false; in qm_restart_done()
4416 struct pci_dev *pdev = qm->pdev; in qm_controller_reset_done()
4419 ret = qm->ops->set_msi(qm, true); in qm_controller_reset_done()
4431 if (qm->vfs_num) { in qm_controller_reset_done()
4447 if (qm->err_ini->open_axi_master_ooo) in qm_controller_reset_done()
4448 qm->err_ini->open_axi_master_ooo(qm); in qm_controller_reset_done()
4480 struct pci_dev *pdev = qm->pdev; in qm_controller_reset()
4489 clear_bit(QM_RST_SCHED, &qm->misc_ctl); in qm_controller_reset()
4494 if (qm->err_ini->show_last_dfx_regs) in qm_controller_reset()
4495 qm->err_ini->show_last_dfx_regs(qm); in qm_controller_reset()
4514 if (qm->use_sva) in qm_controller_reset()
4515 qm->isolate_data.is_isolate = true; in qm_controller_reset()
4520 * hisi_qm_dev_slot_reset() - slot reset
4531 if (pdev->is_virtfn) in hisi_qm_dev_slot_reset()
4571 if (qm->fun_type == QM_HW_PF) in hisi_qm_reset_prepare()
4600 pci_read_config_dword(qm->pdev, PCI_COMMAND, &id); in qm_flr_reset_complete()
4615 if (qm->fun_type == QM_HW_PF) { in hisi_qm_reset_done()
4640 if (qm->fun_type == QM_HW_PF) in hisi_qm_reset_done()
4655 atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt); in qm_abnormal_irq()
4658 !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) && in qm_abnormal_irq()
4659 !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl)) in qm_abnormal_irq()
4660 schedule_work(&qm->rst_work); in qm_abnormal_irq()
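The abnormal-interrupt handler above queues at most one reset work item at a time and skips scheduling while the driver is being removed. A minimal sketch of that guard, with illustrative flag and work names:

#include <linux/bitops.h>
#include <linux/workqueue.h>

#define DEMO_RST_SCHED		0
#define DEMO_DRIVER_REMOVING	1

static void demo_maybe_schedule_reset(unsigned long *misc_ctl,
				      struct work_struct *rst_work)
{
	/* test_and_set_bit() makes the scheduling decision atomic. */
	if (!test_bit(DEMO_DRIVER_REMOVING, misc_ctl) &&
	    !test_and_set_bit(DEMO_RST_SCHED, misc_ctl))
		schedule_work(rst_work);
}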
4666 * hisi_qm_dev_shutdown() - Shutdown device.
4678 dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n"); in hisi_qm_dev_shutdown()
4691 clear_bit(QM_RST_SCHED, &qm->misc_ctl); in hisi_qm_controller_reset()
4698 dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret); in hisi_qm_controller_reset()
4707 struct pci_dev *pdev = qm->pdev; in qm_pf_reset_vf_prepare()
4712 dev_err(&pdev->dev, "reset prepare not ready!\n"); in qm_pf_reset_vf_prepare()
4713 atomic_set(&qm->status.flags, QM_STOP); in qm_pf_reset_vf_prepare()
4720 dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret); in qm_pf_reset_vf_prepare()
4721 atomic_set(&qm->status.flags, QM_STOP); in qm_pf_reset_vf_prepare()
4735 dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n"); in qm_pf_reset_vf_prepare()
4741 struct pci_dev *pdev = qm->pdev; in qm_pf_reset_vf_done()
4747 dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret); in qm_pf_reset_vf_done()
4754 dev_warn(&pdev->dev, "PF responds timeout in reset done!\n"); in qm_pf_reset_vf_done()
4761 struct device *dev = &qm->pdev->dev; in qm_wait_pf_reset_finish()
4767 ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val, in qm_wait_pf_reset_finish()
4773 return -ETIMEDOUT; in qm_wait_pf_reset_finish()
4790 ret = -EINVAL; in qm_wait_pf_reset_finish()
4799 struct device *dev = &qm->pdev->dev; in qm_pf_reset_vf_process()
4825 struct device *dev = &qm->pdev->dev; in qm_handle_cmd_msg()
4853 qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT; in qm_handle_cmd_msg()
4865 u32 vfs_num = qm->vfs_num; in qm_cmd_process()
4869 if (qm->fun_type == QM_HW_PF) { in qm_cmd_process()
4870 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); in qm_cmd_process()
4886 * hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list.
4895 struct device *dev = &qm->pdev->dev; in hisi_qm_alg_register()
4899 mutex_lock(&qm_list->lock); in hisi_qm_alg_register()
4900 if (list_empty(&qm_list->list)) in hisi_qm_alg_register()
4902 list_add_tail(&qm->list, &qm_list->list); in hisi_qm_alg_register()
4903 mutex_unlock(&qm_list->lock); in hisi_qm_alg_register()
4905 if (qm->ver <= QM_HW_V2 && qm->use_sva) { in hisi_qm_alg_register()
4911 ret = qm_list->register_to_crypto(qm); in hisi_qm_alg_register()
4913 mutex_lock(&qm_list->lock); in hisi_qm_alg_register()
4914 list_del(&qm->list); in hisi_qm_alg_register()
4915 mutex_unlock(&qm_list->lock); in hisi_qm_alg_register()
4924 * hisi_qm_alg_unregister() - Unregister alg from crypto and delete qm from
4934 mutex_lock(&qm_list->lock); in hisi_qm_alg_unregister()
4935 list_del(&qm->list); in hisi_qm_alg_unregister()
4936 mutex_unlock(&qm_list->lock); in hisi_qm_alg_unregister()
4938 if (qm->ver <= QM_HW_V2 && qm->use_sva) in hisi_qm_alg_unregister()
4941 if (list_empty(&qm_list->list)) in hisi_qm_alg_unregister()
4942 qm_list->unregister_from_crypto(qm); in hisi_qm_alg_unregister()
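hisi_qm_alg_register() and hisi_qm_alg_unregister() above publish the device on a mutex-protected list before touching the crypto layer and roll the insertion back on failure (the real helpers additionally register/unregister the algorithms only for the first/last device on the list, a detail omitted here). A minimal sketch of that add-then-rollback pattern, with hypothetical types and callbacks:

#include <linux/list.h>
#include <linux/mutex.h>

struct demo_inst {
	struct list_head list;
};

struct demo_inst_list {
	struct list_head list;
	struct mutex lock;
	int (*register_to_crypto)(struct demo_inst *inst);
	void (*unregister_from_crypto)(struct demo_inst *inst);
};

static int demo_alg_register(struct demo_inst *inst, struct demo_inst_list *il)
{
	int ret;

	/* Publish the instance first so the algorithms can find it. */
	mutex_lock(&il->lock);
	list_add_tail(&inst->list, &il->list);
	mutex_unlock(&il->lock);

	ret = il->register_to_crypto(inst);
	if (ret) {
		/* Roll back the insertion if crypto registration fails. */
		mutex_lock(&il->lock);
		list_del(&inst->list);
		mutex_unlock(&il->lock);
	}

	return ret;
}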
4948 struct pci_dev *pdev = qm->pdev; in qm_unregister_abnormal_irq()
4951 if (qm->fun_type == QM_HW_VF) in qm_unregister_abnormal_irq()
4954 val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val; in qm_unregister_abnormal_irq()
4964 struct pci_dev *pdev = qm->pdev; in qm_register_abnormal_irq()
4968 if (qm->fun_type == QM_HW_VF) in qm_register_abnormal_irq()
4971 val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val; in qm_register_abnormal_irq()
4976 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm); in qm_register_abnormal_irq()
4978 dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret); in qm_register_abnormal_irq()
4985 struct pci_dev *pdev = qm->pdev; in qm_unregister_mb_cmd_irq()
4988 val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val; in qm_unregister_mb_cmd_irq()
4998 struct pci_dev *pdev = qm->pdev; in qm_register_mb_cmd_irq()
5002 val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val; in qm_register_mb_cmd_irq()
5007 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm); in qm_register_mb_cmd_irq()
5009 dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret); in qm_register_mb_cmd_irq()
5016 struct pci_dev *pdev = qm->pdev; in qm_unregister_aeq_irq()
5019 val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val; in qm_unregister_aeq_irq()
5029 struct pci_dev *pdev = qm->pdev; in qm_register_aeq_irq()
5033 val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val; in qm_register_aeq_irq()
5039 qm_aeq_thread, IRQF_ONESHOT, qm->dev_name, qm); in qm_register_aeq_irq()
5041 dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret); in qm_register_aeq_irq()
5048 struct pci_dev *pdev = qm->pdev; in qm_unregister_eq_irq()
5051 val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val; in qm_unregister_eq_irq()
5061 struct pci_dev *pdev = qm->pdev; in qm_register_eq_irq()
5065 val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val; in qm_register_eq_irq()
5070 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm); in qm_register_eq_irq()
5072 dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret); in qm_register_eq_irq()
5118 struct device *dev = &qm->pdev->dev; in qm_get_qp_num()
5122 if (qm->fun_type == QM_HW_VF) { in qm_get_qp_num()
5123 if (qm->ver != QM_HW_V1) in qm_get_qp_num()
5125 return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); in qm_get_qp_num()
5130 is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); in qm_get_qp_num()
5131 qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true); in qm_get_qp_num()
5132 qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, in qm_get_qp_num()
5135 if (qm->qp_num <= qm->max_qp_num) in qm_get_qp_num()
5138 if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl)) { in qm_get_qp_num()
5141 qm->qp_num, qm->max_qp_num); in qm_get_qp_num()
5142 return -EINVAL; in qm_get_qp_num()
5146 qm->qp_num, qm->max_qp_num); in qm_get_qp_num()
5147 qm->qp_num = qm->max_qp_num; in qm_get_qp_num()
5148 qm->debug.curr_qm_qp_num = qm->qp_num; in qm_get_qp_num()
5156 struct pci_dev *pdev = qm->pdev; in qm_pre_store_irq_type_caps()
5160 qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL); in qm_pre_store_irq_type_caps()
5162 return -ENOMEM; in qm_pre_store_irq_type_caps()
5167 qm_pre_store_caps[i], qm->cap_ver); in qm_pre_store_irq_type_caps()
5170 qm->cap_tables.qm_cap_table = qm_cap; in qm_pre_store_irq_type_caps()
5177 const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ? in qm_get_hw_caps()
5179 u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) : in qm_get_hw_caps()
5186 set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); in qm_get_hw_caps()
5188 if (qm->ver >= QM_HW_V3) { in qm_get_hw_caps()
5189 val = readl(qm->io_base + QM_FUNC_CAPS_REG); in qm_get_hw_caps()
5190 qm->cap_ver = val & QM_CAPBILITY_VERSION; in qm_get_hw_caps()
5195 val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver); in qm_get_hw_caps()
5197 set_bit(qm_cap_info_comm[i].type, &qm->caps); in qm_get_hw_caps()
5202 val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver); in qm_get_hw_caps()
5204 set_bit(cap_info[i].type, &qm->caps); in qm_get_hw_caps()
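qm_get_hw_caps() above walks a capability table, queries each entry for the current hardware/capability version, and records the supported features in the qm->caps bitmap. A minimal sketch of that loop with a hypothetical table layout:

#include <linux/bitops.h>
#include <linux/types.h>

struct demo_cap_info {
	u32 type;	/* bit number to set in the caps bitmap */
	u32 mask;	/* bits to test in the capability register */
};

static void demo_decode_caps(u32 cap_reg, const struct demo_cap_info *table,
			     size_t n, unsigned long *caps)
{
	size_t i;

	/* Record every capability the hardware advertises. */
	for (i = 0; i < n; i++) {
		if (cap_reg & table[i].mask)
			set_bit(table[i].type, caps);
	}
}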
5213 struct pci_dev *pdev = qm->pdev; in qm_get_pci_res()
5214 struct device *dev = &pdev->dev; in qm_get_pci_res()
5217 ret = pci_request_mem_regions(pdev, qm->dev_name); in qm_get_pci_res()
5223 qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); in qm_get_pci_res()
5224 qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2)); in qm_get_pci_res()
5225 if (!qm->io_base) { in qm_get_pci_res()
5226 ret = -EIO; in qm_get_pci_res()
5234 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { in qm_get_pci_res()
5235 qm->db_interval = QM_QP_DB_INTERVAL; in qm_get_pci_res()
5236 qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4); in qm_get_pci_res()
5237 qm->db_io_base = ioremap(qm->db_phys_base, in qm_get_pci_res()
5239 if (!qm->db_io_base) { in qm_get_pci_res()
5240 ret = -EIO; in qm_get_pci_res()
5244 qm->db_phys_base = qm->phys_base; in qm_get_pci_res()
5245 qm->db_io_base = qm->io_base; in qm_get_pci_res()
5246 qm->db_interval = 0; in qm_get_pci_res()
5256 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) in qm_get_pci_res()
5257 iounmap(qm->db_io_base); in qm_get_pci_res()
5259 iounmap(qm->io_base); in qm_get_pci_res()
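qm_get_pci_res() above claims the device's memory regions and maps BAR 2 (plus a separate doorbell BAR when doorbell isolation is supported). A minimal sketch of the claim-and-map step, with an illustrative region name:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/pci.h>

static void __iomem *demo_map_bar(struct pci_dev *pdev, int bar)
{
	void __iomem *base;
	int ret;

	ret = pci_request_mem_regions(pdev, "demo-qm");
	if (ret)
		return ERR_PTR(ret);

	/* Map the whole BAR; callers use readl()/writel() on the result. */
	base = ioremap(pci_resource_start(pdev, bar),
		       pci_resource_len(pdev, bar));
	if (!base) {
		pci_release_mem_regions(pdev);
		return ERR_PTR(-EIO);
	}

	return base;
}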
5267 acpi_handle handle = ACPI_HANDLE(&qm->pdev->dev); in qm_clear_device()
5270 if (qm->fun_type == QM_HW_VF) in qm_clear_device()
5274 if (!qm->err_ini->err_info_init) in qm_clear_device()
5276 qm->err_ini->err_info_init(qm); in qm_clear_device()
5282 if (!acpi_has_method(handle, qm->err_info.acpi_rst)) in qm_clear_device()
5287 writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL); in qm_clear_device()
5296 struct pci_dev *pdev = qm->pdev; in hisi_qm_pci_init()
5297 struct device *dev = &pdev->dev; in hisi_qm_pci_init()
5342 for (i = 0; i < qm->qp_num; i++) in hisi_qm_init_work()
5343 INIT_WORK(&qm->poll_data[i].work, qm_work_process); in hisi_qm_init_work()
5345 if (qm->fun_type == QM_HW_PF) in hisi_qm_init_work()
5346 INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); in hisi_qm_init_work()
5348 if (qm->ver > QM_HW_V2) in hisi_qm_init_work()
5349 INIT_WORK(&qm->cmd_process, qm_cmd_process); in hisi_qm_init_work()
5351 qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | in hisi_qm_init_work()
5353 pci_name(qm->pdev)); in hisi_qm_init_work()
5354 if (!qm->wq) { in hisi_qm_init_work()
5355 pci_err(qm->pdev, "failed to alloc workqueue!\n"); in hisi_qm_init_work()
5356 return -ENOMEM; in hisi_qm_init_work()
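hisi_qm_init_work() above initializes per-queue poll work, the controller-reset work, and a dedicated high-priority workqueue for completions. A minimal sketch of that setup, with an illustrative work function and a max_active of 1 chosen only for the example:

#include <linux/workqueue.h>

static void demo_poll_work(struct work_struct *work)
{
	/* Completion handling for one queue pair would run here. */
}

static struct workqueue_struct *demo_init_work(struct work_struct *work,
					       const char *name)
{
	INIT_WORK(work, demo_poll_work);

	/* Reclaim-safe, high-priority queue dedicated to this device. */
	return alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM, 1, name);
}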
5364 struct device *dev = &qm->pdev->dev; in hisi_qp_alloc_memory()
5369 qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL); in hisi_qp_alloc_memory()
5370 if (!qm->qp_array) in hisi_qp_alloc_memory()
5371 return -ENOMEM; in hisi_qp_alloc_memory()
5373 qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL); in hisi_qp_alloc_memory()
5374 if (!qm->poll_data) { in hisi_qp_alloc_memory()
5375 kfree(qm->qp_array); in hisi_qp_alloc_memory()
5376 return -ENOMEM; in hisi_qp_alloc_memory()
5382 qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth; in hisi_qp_alloc_memory()
5384 for (i = 0; i < qm->qp_num; i++) { in hisi_qp_alloc_memory()
5385 qm->poll_data[i].qm = qm; in hisi_qp_alloc_memory()
5402 struct device *dev = &qm->pdev->dev; in hisi_qm_memory_init()
5406 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { in hisi_qm_memory_init()
5407 total_func = pci_sriov_get_totalvfs(qm->pdev) + 1; in hisi_qm_memory_init()
5408 qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL); in hisi_qm_memory_init()
5409 if (!qm->factor) in hisi_qm_memory_init()
5410 return -ENOMEM; in hisi_qm_memory_init()
5413 qm->factor[0].func_qos = QM_QOS_MAX_VAL; in hisi_qm_memory_init()
5417 (qm)->type = ((qm)->qdma.va + (off)); \ in hisi_qm_memory_init()
5418 (qm)->type##_dma = (qm)->qdma.dma + (off); \ in hisi_qm_memory_init()
5422 idr_init(&qm->qp_idr); in hisi_qm_memory_init()
5423 qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP); in hisi_qm_memory_init()
5424 qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) + in hisi_qm_memory_init()
5425 QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) + in hisi_qm_memory_init()
5426 QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + in hisi_qm_memory_init()
5427 QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); in hisi_qm_memory_init()
5428 qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma, in hisi_qm_memory_init()
5430 dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size); in hisi_qm_memory_init()
5431 if (!qm->qdma.va) { in hisi_qm_memory_init()
5432 ret = -ENOMEM; in hisi_qm_memory_init()
5436 QM_INIT_BUF(qm, eqe, qm->eq_depth); in hisi_qm_memory_init()
5437 QM_INIT_BUF(qm, aeqe, qm->aeq_depth); in hisi_qm_memory_init()
5438 QM_INIT_BUF(qm, sqc, qm->qp_num); in hisi_qm_memory_init()
5439 QM_INIT_BUF(qm, cqc, qm->qp_num); in hisi_qm_memory_init()
5448 dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); in hisi_qm_memory_init()
5450 idr_destroy(&qm->qp_idr); in hisi_qm_memory_init()
5451 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) in hisi_qm_memory_init()
5452 kfree(qm->factor); in hisi_qm_memory_init()
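hisi_qm_memory_init() above sizes a single coherent DMA buffer for the EQE, AEQE, SQC and CQC areas and then hands out aligned offsets through QM_INIT_BUF(). A minimal sketch of the same carve-up for two areas, using plain ALIGN() in place of QMC_ALIGN() and illustrative element sizes:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct demo_qdma {
	void *va;
	dma_addr_t dma;
	size_t size;
};

static int demo_alloc_rings(struct device *dev, struct demo_qdma *qdma,
			    size_t eqe_sz, u32 eq_depth,
			    size_t sqc_sz, u32 qp_num,
			    void **eqe, void **sqc)
{
	size_t eqe_bytes = ALIGN(eqe_sz * eq_depth, PAGE_SIZE);
	size_t sqc_bytes = ALIGN(sqc_sz * qp_num, PAGE_SIZE);

	/* One allocation; each area gets a fixed, aligned offset inside it. */
	qdma->size = eqe_bytes + sqc_bytes;
	qdma->va = dma_alloc_coherent(dev, qdma->size, &qdma->dma, GFP_KERNEL);
	if (!qdma->va)
		return -ENOMEM;

	*eqe = qdma->va;
	*sqc = qdma->va + eqe_bytes;

	return 0;
}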
5458 * hisi_qm_init() - Initialize the configuration of the qm. in hisi_qm_init()
5465 struct pci_dev *pdev = qm->pdev; in hisi_qm_init()
5466 struct device *dev = &pdev->dev; in hisi_qm_init()
5479 if (qm->fun_type == QM_HW_PF) { in hisi_qm_init()
5481 writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG); in hisi_qm_init()
5490 if (qm->mode == UACCE_MODE_SVA) { in hisi_qm_init()
5505 atomic_set(&qm->status.flags, QM_INIT); in hisi_qm_init()
5522 * hisi_qm_get_dfx_access() - Try to get dfx access.
5532 struct device *dev = &qm->pdev->dev; in hisi_qm_get_dfx_access()
5535 dev_info(dev, "can not read/write - device in suspended.\n"); in hisi_qm_get_dfx_access()
5536 return -EAGAIN; in hisi_qm_get_dfx_access()
5544 * hisi_qm_put_dfx_access() - Put dfx access.
5556 * hisi_qm_pm_init() - Initialize qm runtime PM.
5563 struct device *dev = &qm->pdev->dev; in hisi_qm_pm_init()
5565 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) in hisi_qm_pm_init()
5575 * hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
5582 struct device *dev = &qm->pdev->dev; in hisi_qm_pm_uninit()
5584 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) in hisi_qm_pm_uninit()
5594 struct pci_dev *pdev = qm->pdev; in qm_prepare_for_suspend()
5597 ret = qm->ops->set_msi(qm, false); in qm_prepare_for_suspend()
5616 struct pci_dev *pdev = qm->pdev; in qm_rebuild_for_resume()
5625 ret = qm->ops->set_msi(qm, true); in qm_rebuild_for_resume()
5640 writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG); in qm_rebuild_for_resume()
5650 * hisi_qm_suspend() - Runtime suspend of given device.
5678 * hisi_qm_resume() - Runtime resume of given device.