// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include "zip.h"

#define PCI_DEVICE_ID_ZIP_PF		0xa250
#define PCI_DEVICE_ID_ZIP_VF		0xa251

#define HZIP_VF_NUM			63
#define HZIP_QUEUE_NUM_V1		4096
#define HZIP_QUEUE_NUM_V2		1024

#define HZIP_CLOCK_GATE_CTRL		0x301004
#define COMP0_ENABLE			BIT(0)
#define COMP1_ENABLE			BIT(1)
#define DECOMP0_ENABLE			BIT(2)
#define DECOMP1_ENABLE			BIT(3)
#define DECOMP2_ENABLE			BIT(4)
#define DECOMP3_ENABLE			BIT(5)
#define DECOMP4_ENABLE			BIT(6)
#define DECOMP5_ENABLE			BIT(7)
#define ALL_COMP_DECOMP_EN		(COMP0_ENABLE | COMP1_ENABLE |	\
					 DECOMP0_ENABLE | DECOMP1_ENABLE | \
					 DECOMP2_ENABLE | DECOMP3_ENABLE | \
					 DECOMP4_ENABLE | DECOMP5_ENABLE)
#define DECOMP_CHECK_ENABLE		BIT(16)

#define HZIP_PORT_ARCA_CHE_0		0x301040
#define HZIP_PORT_ARCA_CHE_1		0x301044
#define HZIP_PORT_AWCA_CHE_0		0x301060
#define HZIP_PORT_AWCA_CHE_1		0x301064
#define CACHE_ALL_EN			0xffffffff

#define HZIP_BD_RUSER_32_63		0x301110
#define HZIP_SGL_RUSER_32_63		0x30111c
#define HZIP_DATA_RUSER_32_63		0x301128
#define HZIP_DATA_WUSER_32_63		0x301134
#define HZIP_BD_WUSER_32_63		0x301140

#define HZIP_CORE_INT_SOURCE		0x3010A0
#define HZIP_CORE_INT_MASK		0x3010A4
#define HZIP_CORE_INT_STATUS		0x3010AC
#define HZIP_CORE_INT_STATUS_M_ECC	BIT(1)
#define HZIP_CORE_SRAM_ECC_ERR_INFO	0x301148
#define SRAM_ECC_ERR_NUM_SHIFT		16
#define SRAM_ECC_ERR_ADDR_SHIFT		24
#define HZIP_CORE_INT_DISABLE		0x000007FF
#define HZIP_SQE_SIZE			128
#define HZIP_PF_DEF_Q_NUM		64
#define HZIP_PF_DEF_Q_BASE		0

#define HZIP_NUMA_DISTANCE		100

static const char hisi_zip_name[] = "hisi_zip";
LIST_HEAD(hisi_zip_list);
DEFINE_MUTEX(hisi_zip_list_lock);

#ifdef CONFIG_NUMA
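/*
 * Pick the ZIP device whose NUMA node is closest to the requested node, so
 * that queues are allocated from a nearby device where possible.
 */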
static struct hisi_zip *find_zip_device_numa(int node)
{
	struct hisi_zip *zip = NULL;
	struct hisi_zip *hisi_zip;
	int min_distance = HZIP_NUMA_DISTANCE;
	struct device *dev;

	list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
		dev = &hisi_zip->qm.pdev->dev;
		if (node_distance(dev->numa_node, node) < min_distance) {
			zip = hisi_zip;
			min_distance = node_distance(dev->numa_node, node);
		}
	}

	return zip;
}
#endif

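/*
 * Return a ZIP device for the given NUMA node. With CONFIG_NUMA the nearest
 * device is chosen; otherwise the first device in the list is used.
 */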
struct hisi_zip *find_zip_device(int node)
{
	struct hisi_zip *zip = NULL;

	mutex_lock(&hisi_zip_list_lock);
#ifdef CONFIG_NUMA
	zip = find_zip_device_numa(node);
#else
	zip = list_first_entry(&hisi_zip_list, struct hisi_zip, list);
#endif
	mutex_unlock(&hisi_zip_list_lock);

	return zip;
}

struct hisi_zip_hw_error {
	u32 int_msk;
	const char *msg;
};

static const struct hisi_zip_hw_error zip_hw_error[] = {
	{ .int_msk = BIT(0), .msg = "zip_ecc_1bit_err" },
	{ .int_msk = BIT(1), .msg = "zip_ecc_2bit_err" },
	{ .int_msk = BIT(2), .msg = "zip_axi_rresp_err" },
	{ .int_msk = BIT(3), .msg = "zip_axi_bresp_err" },
	{ .int_msk = BIT(4), .msg = "zip_src_addr_parse_err" },
	{ .int_msk = BIT(5), .msg = "zip_dst_addr_parse_err" },
	{ .int_msk = BIT(6), .msg = "zip_pre_in_addr_err" },
	{ .int_msk = BIT(7), .msg = "zip_pre_in_data_err" },
	{ .int_msk = BIT(8), .msg = "zip_com_inf_err" },
	{ .int_msk = BIT(9), .msg = "zip_enc_inf_err" },
	{ .int_msk = BIT(10), .msg = "zip_pre_out_err" },
	{ /* sentinel */ }
};

/*
 * One ZIP controller has one PF and multiple VFs. Some global configurations
 * that only the PF owns are kept in this structure.
 *
 * It is only relevant for the PF.
 */
struct hisi_zip_ctrl {
	u32 num_vfs;
	struct hisi_zip *hisi_zip;
};

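/*
 * Validate the pf_q_num module parameter against the queue limit of the
 * detected hardware revision before storing it.
 */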
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
					      PCI_DEVICE_ID_ZIP_PF, NULL);
	u32 n, q_num;
	u8 rev_id;
	int ret;

	if (!val)
		return -EINVAL;

	if (!pdev) {
		q_num = min_t(u32, HZIP_QUEUE_NUM_V1, HZIP_QUEUE_NUM_V2);
		pr_info("No device found currently, suppose queue number is %d\n",
			q_num);
	} else {
		rev_id = pdev->revision;
		switch (rev_id) {
		case QM_HW_V1:
			q_num = HZIP_QUEUE_NUM_V1;
			break;
		case QM_HW_V2:
			q_num = HZIP_QUEUE_NUM_V2;
			break;
		default:
			return -EINVAL;
		}
	}

	ret = kstrtou32(val, 10, &n);
	if (ret != 0 || n > q_num || n == 0)
		return -EINVAL;

	return param_set_int(val, kp);
}

static const struct kernel_param_ops pf_q_num_ops = {
	.set = pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");

static int uacce_mode;
module_param(uacce_mode, int, 0);

static const struct pci_device_id hisi_zip_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_VF) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids);

static inline void hisi_zip_add_to_list(struct hisi_zip *hisi_zip)
{
	mutex_lock(&hisi_zip_list_lock);
	list_add_tail(&hisi_zip->list, &hisi_zip_list);
	mutex_unlock(&hisi_zip_list_lock);
}

static inline void hisi_zip_remove_from_list(struct hisi_zip *hisi_zip)
{
	mutex_lock(&hisi_zip_list_lock);
	list_del(&hisi_zip->list);
	mutex_unlock(&hisi_zip_list_lock);
}

static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
{
	void __iomem *base = hisi_zip->qm.io_base;

	/* qm user domain */
	writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
	writel(ARUSER_M_CFG_ENABLE, base + QM_ARUSER_M_CFG_ENABLE);
	writel(AXUSER_BASE, base + QM_AWUSER_M_CFG_1);
	writel(AWUSER_M_CFG_ENABLE, base + QM_AWUSER_M_CFG_ENABLE);
	writel(WUSER_M_CFG_ENABLE, base + QM_WUSER_M_CFG_ENABLE);

	/* qm cache */
	writel(AXI_M_CFG, base + QM_AXI_M_CFG);
	writel(AXI_M_CFG_ENABLE, base + QM_AXI_M_CFG_ENABLE);
	/* disable FLR triggered by BME (bus master enable) */
	writel(PEH_AXUSER_CFG, base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, base + QM_PEH_AXUSER_CFG_ENABLE);

	/* cache */
	writel(CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_0);
	writel(CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_1);
	writel(CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_0);
	writel(CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_1);

	/* user domain configurations */
	writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63);
	writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
	writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);
	writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63);
	writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);

	/* enable all compression/decompression cores */
	writel(DECOMP_CHECK_ENABLE | ALL_COMP_DECOMP_EN,
	       base + HZIP_CLOCK_GATE_CTRL);

	/* enable sqc writeback */
	writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
}

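/*
 * Enable or disable reporting of ZIP core hardware errors. V1 hardware does
 * not support hardware error handling, so its error interrupts stay masked.
 */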
static void hisi_zip_hw_error_set_state(struct hisi_zip *hisi_zip, bool state)
{
	struct hisi_qm *qm = &hisi_zip->qm;

	if (qm->ver == QM_HW_V1) {
		writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_MASK);
		dev_info(&qm->pdev->dev, "ZIP v%d does not support hw error handle\n",
			 qm->ver);
		return;
	}

	if (state) {
		/* clear ZIP hw error source if any */
		writel(HZIP_CORE_INT_DISABLE, hisi_zip->qm.io_base +
					      HZIP_CORE_INT_SOURCE);
		/* enable ZIP hw error interrupts */
		writel(0, hisi_zip->qm.io_base + HZIP_CORE_INT_MASK);
	} else {
		/* disable ZIP hw error interrupts */
		writel(HZIP_CORE_INT_DISABLE,
		       hisi_zip->qm.io_base + HZIP_CORE_INT_MASK);
	}
}

static void hisi_zip_hw_error_init(struct hisi_zip *hisi_zip)
{
	hisi_qm_hw_error_init(&hisi_zip->qm, QM_BASE_CE,
			      QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT, 0,
			      QM_DB_RANDOM_INVALID);
	hisi_zip_hw_error_set_state(hisi_zip, true);
}

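/*
 * PF-only probe setup: allocate the control structure, pick the maximum queue
 * number for the hardware revision, then configure the user domain, caches
 * and hardware error reporting.
 */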
static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
{
	struct hisi_qm *qm = &hisi_zip->qm;
	struct hisi_zip_ctrl *ctrl;

	ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	hisi_zip->ctrl = ctrl;
	ctrl->hisi_zip = hisi_zip;

	switch (qm->ver) {
	case QM_HW_V1:
		qm->ctrl_qp_num = HZIP_QUEUE_NUM_V1;
		break;

	case QM_HW_V2:
		qm->ctrl_qp_num = HZIP_QUEUE_NUM_V2;
		break;

	default:
		return -EINVAL;
	}

	hisi_zip_set_user_domain_and_cache(hisi_zip);
	hisi_zip_hw_error_init(hisi_zip);

	return 0;
}

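/*
 * Probe flow: read the hardware revision, initialise the QM, do PF- or
 * VF-specific queue setup (a v2 VF reads its queue range via mailbox), start
 * the QM and add the device to the global list.
 */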
static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_zip *hisi_zip;
	enum qm_hw_ver rev_id;
	struct hisi_qm *qm;
	int ret;

	rev_id = hisi_qm_get_hw_version(pdev);
	if (rev_id == QM_HW_UNKNOWN)
		return -EINVAL;

	hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
	if (!hisi_zip)
		return -ENOMEM;
	pci_set_drvdata(pdev, hisi_zip);

	qm = &hisi_zip->qm;
	qm->pdev = pdev;
	qm->ver = rev_id;

	qm->sqe_size = HZIP_SQE_SIZE;
	qm->dev_name = hisi_zip_name;
	qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF :
								QM_HW_VF;
	switch (uacce_mode) {
	case 0:
		qm->use_dma_api = true;
		break;
	case 1:
		qm->use_dma_api = false;
		break;
	case 2:
		qm->use_dma_api = true;
		break;
	default:
		return -EINVAL;
	}

	ret = hisi_qm_init(qm);
	if (ret) {
		dev_err(&pdev->dev, "Failed to init qm!\n");
		return ret;
	}

	if (qm->fun_type == QM_HW_PF) {
		ret = hisi_zip_pf_probe_init(hisi_zip);
		if (ret)
			return ret;

		qm->qp_base = HZIP_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
	} else if (qm->fun_type == QM_HW_VF) {
		/*
		 * A VF has no way to fetch the QM configuration on v1
		 * hardware, so the PF is forced to use HZIP_PF_DEF_Q_NUM and
		 * only one VF can be enabled on v1 hardware.
		 *
		 * v2 hardware has no such limitation.
		 */
		if (qm->ver == QM_HW_V1) {
			qm->qp_base = HZIP_PF_DEF_Q_NUM;
			qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
		} else if (qm->ver == QM_HW_V2)
			/* from v2 on, the vft can be read via mailbox */
			hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
	}

	ret = hisi_qm_start(qm);
	if (ret)
		goto err_qm_uninit;

	hisi_zip_add_to_list(hisi_zip);

	return 0;

err_qm_uninit:
	hisi_qm_uninit(qm);
	return ret;
}

/* Currently we only support equal assignment */
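/*
 * Queues remaining after the PF's allocation are split evenly among the VFs;
 * any remainder goes to the last VF.
 */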
static int hisi_zip_vf_q_assign(struct hisi_zip *hisi_zip, int num_vfs)
{
	struct hisi_qm *qm = &hisi_zip->qm;
	u32 qp_num = qm->qp_num;
	u32 q_base = qp_num;
	u32 q_num, remain_q_num, i;
	int ret;

	if (!num_vfs)
		return -EINVAL;

	remain_q_num = qm->ctrl_qp_num - qp_num;
	if (remain_q_num < num_vfs)
		return -EINVAL;

	q_num = remain_q_num / num_vfs;
	for (i = 1; i <= num_vfs; i++) {
		if (i == num_vfs)
			q_num += remain_q_num % num_vfs;
		ret = hisi_qm_set_vft(qm, i, q_base, q_num);
		if (ret)
			return ret;
		q_base += q_num;
	}

	return 0;
}

static int hisi_zip_clear_vft_config(struct hisi_zip *hisi_zip)
{
	struct hisi_zip_ctrl *ctrl = hisi_zip->ctrl;
	struct hisi_qm *qm = &hisi_zip->qm;
	u32 i, num_vfs = ctrl->num_vfs;
	int ret;

	for (i = 1; i <= num_vfs; i++) {
		ret = hisi_qm_set_vft(qm, i, 0, 0);
		if (ret)
			return ret;
	}

	ctrl->num_vfs = 0;

	return 0;
}

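/*
 * Enable up to HZIP_VF_NUM VFs: assign queues to each VF in the VFT first,
 * then enable SR-IOV. Returns the number of VFs enabled on success.
 */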
static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
	int pre_existing_vfs, num_vfs, ret;

	pre_existing_vfs = pci_num_vf(pdev);

	if (pre_existing_vfs) {
		dev_err(&pdev->dev,
			"Can't enable VF. Please disable pre-enabled VFs!\n");
		return 0;
	}

	num_vfs = min_t(int, max_vfs, HZIP_VF_NUM);

	ret = hisi_zip_vf_q_assign(hisi_zip, num_vfs);
	if (ret) {
		dev_err(&pdev->dev, "Can't assign queues for VF!\n");
		return ret;
	}

	hisi_zip->ctrl->num_vfs = num_vfs;

	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret) {
		dev_err(&pdev->dev, "Can't enable VF!\n");
		hisi_zip_clear_vft_config(hisi_zip);
		return ret;
	}

	return num_vfs;
#else
	return 0;
#endif
}

static int hisi_zip_sriov_disable(struct pci_dev *pdev)
{
	struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);

	if (pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Can't disable VFs while VFs are assigned!\n");
		return -EPERM;
	}

	/* remove() in hisi_zip_pci_driver will be called to free VF resources */
	pci_disable_sriov(pdev);

	return hisi_zip_clear_vft_config(hisi_zip);
}

static int hisi_zip_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs == 0)
		return hisi_zip_sriov_disable(pdev);
	else
		return hisi_zip_sriov_enable(pdev, num_vfs);
}

static void hisi_zip_remove(struct pci_dev *pdev)
{
	struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
	struct hisi_qm *qm = &hisi_zip->qm;

	if (qm->fun_type == QM_HW_PF && hisi_zip->ctrl->num_vfs != 0)
		hisi_zip_sriov_disable(pdev);

	hisi_qm_stop(qm);

	if (qm->fun_type == QM_HW_PF)
		hisi_zip_hw_error_set_state(hisi_zip, false);

	hisi_qm_uninit(qm);
	hisi_zip_remove_from_list(hisi_zip);
}

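/*
 * Log every error set in err_sts using the zip_hw_error table; for a
 * multi-bit ECC error also report the SRAM ECC error count and address.
 */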
static void hisi_zip_log_hw_error(struct hisi_zip *hisi_zip, u32 err_sts)
{
	const struct hisi_zip_hw_error *err = zip_hw_error;
	struct device *dev = &hisi_zip->qm.pdev->dev;
	u32 err_val;

	while (err->msg) {
		if (err->int_msk & err_sts) {
			dev_warn(dev, "%s [error status=0x%x] found\n",
				 err->msg, err->int_msk);

			if (HZIP_CORE_INT_STATUS_M_ECC & err->int_msk) {
				err_val = readl(hisi_zip->qm.io_base +
						HZIP_CORE_SRAM_ECC_ERR_INFO);
				dev_warn(dev, "hisi-zip multi ecc sram num=0x%x\n",
					 ((err_val >> SRAM_ECC_ERR_NUM_SHIFT) &
					  0xFF));
				dev_warn(dev, "hisi-zip multi ecc sram addr=0x%x\n",
					 (err_val >> SRAM_ECC_ERR_ADDR_SHIFT));
			}
		}
		err++;
	}
}

static pci_ers_result_t hisi_zip_hw_error_handle(struct hisi_zip *hisi_zip)
{
	u32 err_sts;

	/* read err sts */
	err_sts = readl(hisi_zip->qm.io_base + HZIP_CORE_INT_STATUS);

	if (err_sts) {
		hisi_zip_log_hw_error(hisi_zip, err_sts);
		/* clear error interrupts */
		writel(err_sts, hisi_zip->qm.io_base + HZIP_CORE_INT_SOURCE);

		return PCI_ERS_RESULT_NEED_RESET;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

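/*
 * Handle both QM and ZIP hardware errors; request a reset if either handler
 * asks for one, otherwise report the device as recovered.
 */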
static pci_ers_result_t hisi_zip_process_hw_error(struct pci_dev *pdev)
{
	struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	pci_ers_result_t qm_ret, zip_ret;

	if (!hisi_zip) {
		dev_err(dev,
			"Can't recover ZIP-error occurred during device init\n");
		return PCI_ERS_RESULT_NONE;
	}

	qm_ret = hisi_qm_hw_error_handle(&hisi_zip->qm);

	zip_ret = hisi_zip_hw_error_handle(hisi_zip);

	return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
		zip_ret == PCI_ERS_RESULT_NEED_RESET) ?
	       PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
}

static pci_ers_result_t hisi_zip_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	if (pdev->is_virtfn)
		return PCI_ERS_RESULT_NONE;

	dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	return hisi_zip_process_hw_error(pdev);
}

static const struct pci_error_handlers hisi_zip_err_handler = {
	.error_detected	= hisi_zip_error_detected,
};

static struct pci_driver hisi_zip_pci_driver = {
	.name			= "hisi_zip",
	.id_table		= hisi_zip_dev_ids,
	.probe			= hisi_zip_probe,
	.remove			= hisi_zip_remove,
	.sriov_configure	= hisi_zip_sriov_configure,
	.err_handler		= &hisi_zip_err_handler,
};

static int __init hisi_zip_init(void)
{
	int ret;

	ret = pci_register_driver(&hisi_zip_pci_driver);
	if (ret < 0) {
		pr_err("Failed to register pci driver.\n");
		return ret;
	}

	if (uacce_mode == 0 || uacce_mode == 2) {
		ret = hisi_zip_register_to_crypto();
		if (ret < 0) {
			pr_err("Failed to register driver to crypto.\n");
			goto err_crypto;
		}
	}

	return 0;

err_crypto:
	pci_unregister_driver(&hisi_zip_pci_driver);
	return ret;
}

static void __exit hisi_zip_exit(void)
{
	if (uacce_mode == 0 || uacce_mode == 2)
		hisi_zip_unregister_from_crypto();
	pci_unregister_driver(&hisi_zip_pci_driver);
}

module_init(hisi_zip_init);
module_exit(hisi_zip_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_DESCRIPTION("Driver for HiSilicon ZIP accelerator");