1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 HiSilicon Limited. */
3 #include <linux/acpi.h>
4 #include <linux/aer.h>
5 #include <linux/bitops.h>
6 #include <linux/debugfs.h>
7 #include <linux/init.h>
8 #include <linux/io.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/seq_file.h>
13 #include <linux/topology.h>
14 #include <linux/uacce.h>
15 #include "zip.h"
16 
/* PCI device IDs for the ZIP physical and virtual functions */
#define PCI_DEVICE_ID_ZIP_PF		0xa250
#define PCI_DEVICE_ID_ZIP_VF		0xa251

/* SR-IOV limit and per-hardware-revision queue counts */
#define HZIP_VF_NUM			63
#define HZIP_QUEUE_NUM_V1		4096
#define HZIP_QUEUE_NUM_V2		1024

/* Clock-gate control: per-core enable bits for 2 comp + 6 decomp engines */
#define HZIP_CLOCK_GATE_CTRL		0x301004
#define COMP0_ENABLE			BIT(0)
#define COMP1_ENABLE			BIT(1)
#define DECOMP0_ENABLE			BIT(2)
#define DECOMP1_ENABLE			BIT(3)
#define DECOMP2_ENABLE			BIT(4)
#define DECOMP3_ENABLE			BIT(5)
#define DECOMP4_ENABLE			BIT(6)
#define DECOMP5_ENABLE			BIT(7)
#define ALL_COMP_DECOMP_EN		(COMP0_ENABLE | COMP1_ENABLE |	\
					 DECOMP0_ENABLE | DECOMP1_ENABLE | \
					 DECOMP2_ENABLE | DECOMP3_ENABLE | \
					 DECOMP4_ENABLE | DECOMP5_ENABLE)
#define DECOMP_CHECK_ENABLE		BIT(16)
#define HZIP_FSM_MAX_CNT		0x301008

/* AXI read/write channel cache attribute registers */
#define HZIP_PORT_ARCA_CHE_0		0x301040
#define HZIP_PORT_ARCA_CHE_1		0x301044
#define HZIP_PORT_AWCA_CHE_0		0x301060
#define HZIP_PORT_AWCA_CHE_1		0x301064
#define CACHE_ALL_EN			0xffffffff

/* AXI user-field (upper 32 bits) config for BD/SGL/data transactions */
#define HZIP_BD_RUSER_32_63		0x301110
#define HZIP_SGL_RUSER_32_63		0x30111c
#define HZIP_DATA_RUSER_32_63		0x301128
#define HZIP_DATA_WUSER_32_63		0x301134
#define HZIP_BD_WUSER_32_63		0x301140

#define HZIP_QM_IDEL_STATUS		0x3040e4

/* Per-core debug register windows (one 0x1000 window per engine) */
#define HZIP_CORE_DEBUG_COMP_0		0x302000
#define HZIP_CORE_DEBUG_COMP_1		0x303000
#define HZIP_CORE_DEBUG_DECOMP_0	0x304000
#define HZIP_CORE_DEBUG_DECOMP_1	0x305000
#define HZIP_CORE_DEBUG_DECOMP_2	0x306000
#define HZIP_CORE_DEBUG_DECOMP_3	0x307000
#define HZIP_CORE_DEBUG_DECOMP_4	0x308000
#define HZIP_CORE_DEBUG_DECOMP_5	0x309000

/* Hardware-error interrupt source/mask/inject/status and RAS enables */
#define HZIP_CORE_INT_SOURCE		0x3010A0
#define HZIP_CORE_INT_MASK_REG		0x3010A4
#define HZIP_CORE_INT_SET		0x3010A8
#define HZIP_CORE_INT_STATUS		0x3010AC
#define HZIP_CORE_INT_STATUS_M_ECC	BIT(1)
#define HZIP_CORE_SRAM_ECC_ERR_INFO	0x301148
#define HZIP_CORE_INT_RAS_CE_ENB	0x301160
#define HZIP_CORE_INT_RAS_NFE_ENB	0x301164
#define HZIP_CORE_INT_RAS_FE_ENB        0x301168
#define HZIP_CORE_INT_RAS_NFE_ENABLE	0x7FE
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT	16
#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT	24
#define HZIP_CORE_INT_MASK_ALL		GENMASK(10, 0)
#define HZIP_COMP_CORE_NUM		2
#define HZIP_DECOMP_CORE_NUM		6
#define HZIP_CORE_NUM			(HZIP_COMP_CORE_NUM + \
					 HZIP_DECOMP_CORE_NUM)
#define HZIP_SQE_SIZE			128
#define HZIP_SQ_SIZE			(HZIP_SQE_SIZE * QM_Q_DEPTH)
#define HZIP_PF_DEF_Q_NUM		64
#define HZIP_PF_DEF_Q_BASE		0

/* Debug counter clear-enable and OOO (out-of-order) shutdown control */
#define HZIP_SOFT_CTRL_CNT_CLR_CE	0x301000
#define SOFT_CTRL_CNT_CLR_CE_BIT	BIT(0)
#define HZIP_SOFT_CTRL_ZIP_CONTROL	0x30100C
#define HZIP_AXI_SHUTDOWN_ENABLE	BIT(14)
#define HZIP_WR_PORT			BIT(11)

/* debugfs scratch-buffer size and SQE mask layout for QM debug dumps */
#define HZIP_BUF_SIZE			22
#define HZIP_SQE_MASK_OFFSET		64
#define HZIP_SQE_MASK_LEN		48
94 
/* Driver name used for the QM device and PCI driver registration */
static const char hisi_zip_name[] = "hisi_zip";
/* Module-wide debugfs root ("hisi_zip"); per-device dirs live under it */
static struct dentry *hzip_debugfs_root;
/* List of all probed ZIP QM devices, used for queue allocation */
static struct hisi_qm_list zip_devices;

/* One hardware-error interrupt bit and its human-readable description */
struct hisi_zip_hw_error {
	u32 int_msk;
	const char *msg;
};

/* Maps a debugfs file name to an atomic64 counter offset in hisi_zip_dfx */
struct zip_dfx_item {
	const char *name;
	u32 offset;
};

/* Software DFX counters exported via debugfs (see hisi_zip_dfx_debug_init) */
static struct zip_dfx_item zip_dfx_files[] = {
	{"send_cnt", offsetof(struct hisi_zip_dfx, send_cnt)},
	{"recv_cnt", offsetof(struct hisi_zip_dfx, recv_cnt)},
	{"send_busy_cnt", offsetof(struct hisi_zip_dfx, send_busy_cnt)},
	{"err_bd_cnt", offsetof(struct hisi_zip_dfx, err_bd_cnt)},
};
115 
116 static const struct hisi_zip_hw_error zip_hw_error[] = {
117 	{ .int_msk = BIT(0), .msg = "zip_ecc_1bitt_err" },
118 	{ .int_msk = BIT(1), .msg = "zip_ecc_2bit_err" },
119 	{ .int_msk = BIT(2), .msg = "zip_axi_rresp_err" },
120 	{ .int_msk = BIT(3), .msg = "zip_axi_bresp_err" },
121 	{ .int_msk = BIT(4), .msg = "zip_src_addr_parse_err" },
122 	{ .int_msk = BIT(5), .msg = "zip_dst_addr_parse_err" },
123 	{ .int_msk = BIT(6), .msg = "zip_pre_in_addr_err" },
124 	{ .int_msk = BIT(7), .msg = "zip_pre_in_data_err" },
125 	{ .int_msk = BIT(8), .msg = "zip_com_inf_err" },
126 	{ .int_msk = BIT(9), .msg = "zip_enc_inf_err" },
127 	{ .int_msk = BIT(10), .msg = "zip_pre_out_err" },
128 	{ /* sentinel */ }
129 };
130 
/* Indices of the PF control debugfs files; FILE_NUM is the array size */
enum ctrl_debug_file_index {
	HZIP_CURRENT_QM,
	HZIP_CLEAR_ENABLE,
	HZIP_DEBUG_FILE_NUM,
};

/* debugfs file names, indexed by ctrl_debug_file_index */
static const char * const ctrl_debug_file_name[] = {
	[HZIP_CURRENT_QM]   = "current_qm",
	[HZIP_CLEAR_ENABLE] = "clear_enable",
};

/* Per-debugfs-file state; lock serializes read/write handlers */
struct ctrl_debug_file {
	enum ctrl_debug_file_index index;
	spinlock_t lock;
	struct hisi_zip_ctrl *ctrl;
};
147 
148 /*
149  * One ZIP controller has one PF and multiple VFs, some global configurations
150  * which PF has need this structure.
151  *
152  * Just relevant for PF.
153  */
154 struct hisi_zip_ctrl {
155 	struct hisi_zip *hisi_zip;
156 	struct dentry *debug_root;
157 	struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM];
158 };
159 
/* Engine indices: 2 compression cores followed by 6 decompression cores */
enum {
	HZIP_COMP_CORE0,
	HZIP_COMP_CORE1,
	HZIP_DECOMP_CORE0,
	HZIP_DECOMP_CORE1,
	HZIP_DECOMP_CORE2,
	HZIP_DECOMP_CORE3,
	HZIP_DECOMP_CORE4,
	HZIP_DECOMP_CORE5,
};

/* MMIO base offset of each engine's debug register window */
static const u64 core_offsets[] = {
	[HZIP_COMP_CORE0]   = 0x302000,
	[HZIP_COMP_CORE1]   = 0x303000,
	[HZIP_DECOMP_CORE0] = 0x304000,
	[HZIP_DECOMP_CORE1] = 0x305000,
	[HZIP_DECOMP_CORE2] = 0x306000,
	[HZIP_DECOMP_CORE3] = 0x307000,
	[HZIP_DECOMP_CORE4] = 0x308000,
	[HZIP_DECOMP_CORE5] = 0x309000,
};

/*
 * Per-core debug registers dumped via debugfs_create_regset32().
 * Names are padded so the regset output columns line up.
 */
static const struct debugfs_reg32 hzip_dfx_regs[] = {
	{"HZIP_GET_BD_NUM                ",  0x00ull},
	{"HZIP_GET_RIGHT_BD              ",  0x04ull},
	{"HZIP_GET_ERROR_BD              ",  0x08ull},
	{"HZIP_DONE_BD_NUM               ",  0x0cull},
	{"HZIP_WORK_CYCLE                ",  0x10ull},
	{"HZIP_IDLE_CYCLE                ",  0x18ull},
	{"HZIP_MAX_DELAY                 ",  0x20ull},
	{"HZIP_MIN_DELAY                 ",  0x24ull},
	{"HZIP_AVG_DELAY                 ",  0x28ull},
	{"HZIP_MEM_VISIBLE_DATA          ",  0x30ull},
	{"HZIP_MEM_VISIBLE_ADDR          ",  0x34ull},
	{"HZIP_COMSUMED_BYTE             ",  0x38ull},
	{"HZIP_PRODUCED_BYTE             ",  0x40ull},
	{"HZIP_COMP_INF                  ",  0x70ull},
	{"HZIP_PRE_OUT                   ",  0x78ull},
	{"HZIP_BD_RD                     ",  0x7cull},
	{"HZIP_BD_WR                     ",  0x80ull},
	{"HZIP_GET_BD_AXI_ERR_NUM        ",  0x84ull},
	{"HZIP_GET_BD_PARSE_ERR_NUM      ",  0x88ull},
	{"HZIP_ADD_BD_AXI_ERR_NUM        ",  0x8cull},
	{"HZIP_DECOMP_STF_RELOAD_CURR_ST ",  0x94ull},
	{"HZIP_DECOMP_LZ77_CURR_ST       ",  0x9cull},
};
206 
/* Validate the pf_q_num module parameter against the ZIP PF's limits */
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	return q_num_set(val, kp, PCI_DEVICE_ID_ZIP_PF);
}

static const struct kernel_param_ops pf_q_num_ops = {
	.set = pf_q_num_set,
	.get = param_get_int,
};

/* Number of queue pairs reserved for the PF (rest go to VFs) */
static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");

static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,		/* shared validator from the QM layer */
	.get = param_get_int,
};

/* Number of VFs to enable at probe time; 0 leaves SR-IOV disabled */
static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
229 
/* PCI IDs this driver binds to: both the PF and the VF function */
static const struct pci_device_id hisi_zip_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_VF) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids);
236 
237 int zip_create_qps(struct hisi_qp **qps, int qp_num, int node)
238 {
239 	if (node == NUMA_NO_NODE)
240 		node = cpu_to_node(smp_processor_id());
241 
242 	return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
243 }
244 
/*
 * Program the QM user domain, AXI cache attributes and ZIP engine enables.
 * MMIO write order follows the hardware init sequence; do not reorder.
 * Always returns 0 (signature matches the err_ini hw_init hook).
 */
static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
	void __iomem *base = qm->io_base;

	/* qm user domain */
	writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
	writel(ARUSER_M_CFG_ENABLE, base + QM_ARUSER_M_CFG_ENABLE);
	writel(AXUSER_BASE, base + QM_AWUSER_M_CFG_1);
	writel(AWUSER_M_CFG_ENABLE, base + QM_AWUSER_M_CFG_ENABLE);
	writel(WUSER_M_CFG_ENABLE, base + QM_WUSER_M_CFG_ENABLE);

	/* qm cache */
	writel(AXI_M_CFG, base + QM_AXI_M_CFG);
	writel(AXI_M_CFG_ENABLE, base + QM_AXI_M_CFG_ENABLE);
	/* disable FLR triggered by BME(bus master enable) */
	writel(PEH_AXUSER_CFG, base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, base + QM_PEH_AXUSER_CFG_ENABLE);

	/* cache */
	writel(CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_0);
	writel(CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_1);
	writel(CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_0);
	writel(CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_1);

	/* user domain configurations */
	writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63);
	writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
	writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);

	/* data channels additionally carry the SVA bit when SVA is in use */
	if (qm->use_sva) {
		writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63);
		writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63);
	} else {
		writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63);
		writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
	}

	/* let's open all compression/decompression cores */
	writel(DECOMP_CHECK_ENABLE | ALL_COMP_DECOMP_EN,
	       base + HZIP_CLOCK_GATE_CTRL);

	/* enable sqc writeback */
	writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);

	return 0;
}
293 
/*
 * Enable ZIP hardware-error reporting: clear stale sources, route errors
 * to the CE/NFE/FE RAS classes, unmask the interrupts and arm AXI OOO
 * shutdown on multi-bit ECC errors. V1 hardware has no error handling,
 * so everything is simply masked there.
 */
static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver == QM_HW_V1) {
		writel(HZIP_CORE_INT_MASK_ALL,
		       qm->io_base + HZIP_CORE_INT_MASK_REG);
		dev_info(&qm->pdev->dev, "Does not support hw error handle\n");
		return;
	}

	/* clear ZIP hw error source if having */
	writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_SOURCE);

	/* configure error type */
	writel(0x1, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
	writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
	writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
		qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);

	/* enable ZIP hw error interrupts */
	writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);

	/* enable ZIP block master OOO when m-bit error occur */
	val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
	val = val | HZIP_AXI_SHUTDOWN_ENABLE;
	writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}
322 
/* Mask all ZIP error interrupts and disarm the AXI OOO shutdown bit. */
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
{
	u32 val;

	/* disable ZIP hw error interrupts */
	writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG);

	/* disable ZIP block master OOO when m-bit error occur */
	val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
	val = val & ~HZIP_AXI_SHUTDOWN_ENABLE;
	writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}
335 
336 static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
337 {
338 	struct hisi_zip *hisi_zip = file->ctrl->hisi_zip;
339 
340 	return &hisi_zip->qm;
341 }
342 
/* Read back the currently selected function (VF id) from the DFX counter. */
static u32 current_qm_read(struct ctrl_debug_file *file)
{
	struct hisi_qm *qm = file_to_qm(file);

	return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}
349 
/*
 * Select which function's counters the DFX registers report.
 * @val: 0 selects the PF, 1..vfs_num selects a VF.
 * Also caches that function's queue count in debug.curr_qm_qp_num.
 * Returns 0 on success, -EINVAL if @val exceeds the enabled VF count.
 */
static int current_qm_write(struct ctrl_debug_file *file, u32 val)
{
	struct hisi_qm *qm = file_to_qm(file);
	u32 vfq_num;
	u32 tmp;

	if (val > qm->vfs_num)
		return -EINVAL;

	/* Calculate curr_qm_qp_num and store */
	if (val == 0) {
		qm->debug.curr_qm_qp_num = qm->qp_num;
	} else {
		/* queues left after the PF are split evenly among the VFs */
		vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;
		/* the last VF absorbs any remainder of the division */
		if (val == qm->vfs_num)
			qm->debug.curr_qm_qp_num = qm->ctrl_qp_num -
				qm->qp_num - (qm->vfs_num - 1) * vfq_num;
		else
			qm->debug.curr_qm_qp_num = vfq_num;
	}

	writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(val, qm->io_base + QM_DFX_DB_CNT_VF);

	/* update the function field, preserving the queue-select bits */
	tmp = val |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return  0;
}
384 
/* Return the state (0/1) of the read-clear bit for the debug counters. */
static u32 clear_enable_read(struct ctrl_debug_file *file)
{
	struct hisi_qm *qm = file_to_qm(file);

	return readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
	       SOFT_CTRL_CNT_CLR_CE_BIT;
}
392 
/*
 * Set (@val == 1) or clear (@val == 0) the read-clear bit for the debug
 * counters; other register bits are preserved. Returns -EINVAL otherwise.
 */
static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
{
	struct hisi_qm *qm = file_to_qm(file);
	u32 tmp;

	if (val != 1 && val != 0)
		return -EINVAL;

	tmp = (readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
	       ~SOFT_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);

	return  0;
}
407 
408 static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
409 			       size_t count, loff_t *pos)
410 {
411 	struct ctrl_debug_file *file = filp->private_data;
412 	char tbuf[HZIP_BUF_SIZE];
413 	u32 val;
414 	int ret;
415 
416 	spin_lock_irq(&file->lock);
417 	switch (file->index) {
418 	case HZIP_CURRENT_QM:
419 		val = current_qm_read(file);
420 		break;
421 	case HZIP_CLEAR_ENABLE:
422 		val = clear_enable_read(file);
423 		break;
424 	default:
425 		spin_unlock_irq(&file->lock);
426 		return -EINVAL;
427 	}
428 	spin_unlock_irq(&file->lock);
429 	ret = sprintf(tbuf, "%u\n", val);
430 	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
431 }
432 
433 static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
434 				size_t count, loff_t *pos)
435 {
436 	struct ctrl_debug_file *file = filp->private_data;
437 	char tbuf[HZIP_BUF_SIZE];
438 	unsigned long val;
439 	int len, ret;
440 
441 	if (*pos != 0)
442 		return 0;
443 
444 	if (count >= HZIP_BUF_SIZE)
445 		return -ENOSPC;
446 
447 	len = simple_write_to_buffer(tbuf, HZIP_BUF_SIZE - 1, pos, buf, count);
448 	if (len < 0)
449 		return len;
450 
451 	tbuf[len] = '\0';
452 	if (kstrtoul(tbuf, 0, &val))
453 		return -EFAULT;
454 
455 	spin_lock_irq(&file->lock);
456 	switch (file->index) {
457 	case HZIP_CURRENT_QM:
458 		ret = current_qm_write(file, val);
459 		if (ret)
460 			goto err_input;
461 		break;
462 	case HZIP_CLEAR_ENABLE:
463 		ret = clear_enable_write(file, val);
464 		if (ret)
465 			goto err_input;
466 		break;
467 	default:
468 		ret = -EINVAL;
469 		goto err_input;
470 	}
471 	spin_unlock_irq(&file->lock);
472 
473 	return count;
474 
475 err_input:
476 	spin_unlock_irq(&file->lock);
477 	return ret;
478 }
479 
/* file_operations for the current_qm / clear_enable debugfs files */
static const struct file_operations ctrl_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ctrl_debug_read,
	.write = ctrl_debug_write,
};
486 
487 
488 static int zip_debugfs_atomic64_set(void *data, u64 val)
489 {
490 	if (val)
491 		return -EINVAL;
492 
493 	atomic64_set((atomic64_t *)data, 0);
494 
495 	return 0;
496 }
497 
/* debugfs getter for a DFX counter: report its current value. */
static int zip_debugfs_atomic64_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);

	return 0;
}

/* Expose the counters as "%llu" debugfs attributes (read value / write 0) */
DEFINE_DEBUGFS_ATTRIBUTE(zip_atomic64_ops, zip_debugfs_atomic64_get,
			 zip_debugfs_atomic64_set, "%llu\n");
507 
508 static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
509 {
510 	struct hisi_zip *hisi_zip = ctrl->hisi_zip;
511 	struct hisi_qm *qm = &hisi_zip->qm;
512 	struct device *dev = &qm->pdev->dev;
513 	struct debugfs_regset32 *regset;
514 	struct dentry *tmp_d;
515 	char buf[HZIP_BUF_SIZE];
516 	int i;
517 
518 	for (i = 0; i < HZIP_CORE_NUM; i++) {
519 		if (i < HZIP_COMP_CORE_NUM)
520 			sprintf(buf, "comp_core%d", i);
521 		else
522 			sprintf(buf, "decomp_core%d", i - HZIP_COMP_CORE_NUM);
523 
524 		regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
525 		if (!regset)
526 			return -ENOENT;
527 
528 		regset->regs = hzip_dfx_regs;
529 		regset->nregs = ARRAY_SIZE(hzip_dfx_regs);
530 		regset->base = qm->io_base + core_offsets[i];
531 
532 		tmp_d = debugfs_create_dir(buf, ctrl->debug_root);
533 		debugfs_create_regset32("regs", 0444, tmp_d, regset);
534 	}
535 
536 	return 0;
537 }
538 
539 static void hisi_zip_dfx_debug_init(struct hisi_qm *qm)
540 {
541 	struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
542 	struct hisi_zip_dfx *dfx = &zip->dfx;
543 	struct dentry *tmp_dir;
544 	void *data;
545 	int i;
546 
547 	tmp_dir = debugfs_create_dir("zip_dfx", qm->debug.debug_root);
548 	for (i = 0; i < ARRAY_SIZE(zip_dfx_files); i++) {
549 		data = (atomic64_t *)((uintptr_t)dfx + zip_dfx_files[i].offset);
550 		debugfs_create_file(zip_dfx_files[i].name,
551 			0644,
552 			tmp_dir,
553 			data,
554 			&zip_atomic64_ops);
555 	}
556 }
557 
558 static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl)
559 {
560 	int i;
561 
562 	for (i = HZIP_CURRENT_QM; i < HZIP_DEBUG_FILE_NUM; i++) {
563 		spin_lock_init(&ctrl->files[i].lock);
564 		ctrl->files[i].ctrl = ctrl;
565 		ctrl->files[i].index = i;
566 
567 		debugfs_create_file(ctrl_debug_file_name[i], 0600,
568 				    ctrl->debug_root, ctrl->files + i,
569 				    &ctrl_debug_fops);
570 	}
571 
572 	return hisi_zip_core_debug_init(ctrl);
573 }
574 
575 static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
576 {
577 	struct hisi_qm *qm = &hisi_zip->qm;
578 	struct device *dev = &qm->pdev->dev;
579 	struct dentry *dev_d;
580 	int ret;
581 
582 	dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root);
583 
584 	qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET;
585 	qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN;
586 	qm->debug.debug_root = dev_d;
587 	ret = hisi_qm_debug_init(qm);
588 	if (ret)
589 		goto failed_to_create;
590 
591 	if (qm->fun_type == QM_HW_PF) {
592 		hisi_zip->ctrl->debug_root = dev_d;
593 		ret = hisi_zip_ctrl_debug_init(hisi_zip->ctrl);
594 		if (ret)
595 			goto failed_to_create;
596 	}
597 
598 	hisi_zip_dfx_debug_init(qm);
599 
600 	return 0;
601 
602 failed_to_create:
603 	debugfs_remove_recursive(hzip_debugfs_root);
604 	return ret;
605 }
606 
/* Reset the DFX function-select and clear-enable registers, then the QM's. */
static void hisi_zip_debug_regs_clear(struct hisi_zip *hisi_zip)
{
	struct hisi_qm *qm = &hisi_zip->qm;

	writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
	writel(0x0, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}
617 
/*
 * Tear down this device's debugfs tree; on the PF also reset the debug
 * registers so the next probe starts from a clean state.
 */
static void hisi_zip_debugfs_exit(struct hisi_zip *hisi_zip)
{
	struct hisi_qm *qm = &hisi_zip->qm;

	debugfs_remove_recursive(qm->debug.debug_root);

	if (qm->fun_type == QM_HW_PF)
		hisi_zip_debug_regs_clear(hisi_zip);
}
627 
628 static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
629 {
630 	const struct hisi_zip_hw_error *err = zip_hw_error;
631 	struct device *dev = &qm->pdev->dev;
632 	u32 err_val;
633 
634 	while (err->msg) {
635 		if (err->int_msk & err_sts) {
636 			dev_err(dev, "%s [error status=0x%x] found\n",
637 				 err->msg, err->int_msk);
638 
639 			if (err->int_msk & HZIP_CORE_INT_STATUS_M_ECC) {
640 				err_val = readl(qm->io_base +
641 						HZIP_CORE_SRAM_ECC_ERR_INFO);
642 				dev_err(dev, "hisi-zip multi ecc sram num=0x%x\n",
643 					((err_val >>
644 					HZIP_SRAM_ECC_ERR_NUM_SHIFT) & 0xFF));
645 				dev_err(dev, "hisi-zip multi ecc sram addr=0x%x\n",
646 					(err_val >>
647 					HZIP_SRAM_ECC_ERR_ADDR_SHIFT));
648 			}
649 		}
650 		err++;
651 	}
652 }
653 
/* err_ini hook: read the ZIP core error interrupt status. */
static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + HZIP_CORE_INT_STATUS);
}
658 
/* err_ini hook: acknowledge handled error bits by writing them back. */
static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
}
663 
/*
 * err_ini hook: toggle the AXI OOO shutdown bit off then on to release
 * the blocked AXI master after error recovery. The clear-then-set order
 * is the hardware-required sequence; do not collapse into one write.
 */
static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);

	writel(val & ~HZIP_AXI_SHUTDOWN_ENABLE,
	       qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);

	writel(val | HZIP_AXI_SHUTDOWN_ENABLE,
	       qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}
676 
/*
 * err_ini hook: block the AXI master by injecting an ECC multi-bit error,
 * first disabling its NFE report so the injection is not logged as a
 * fresh hardware error.
 */
static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
{
	u32 nfe_enb;

	/* Disable ECC Mbit error report. */
	nfe_enb = readl(qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
	writel(nfe_enb & ~HZIP_CORE_INT_STATUS_M_ECC,
	       qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);

	/* Inject zip ECC Mbit error to block master ooo. */
	writel(HZIP_CORE_INT_STATUS_M_ECC,
	       qm->io_base + HZIP_CORE_INT_SET);
}
690 
/* Error-handling callbacks and RAS configuration handed to the QM layer */
static const struct hisi_qm_err_ini hisi_zip_err_ini = {
	.hw_init		= hisi_zip_set_user_domain_and_cache,
	.hw_err_enable		= hisi_zip_hw_error_enable,
	.hw_err_disable		= hisi_zip_hw_error_disable,
	.get_dev_hw_err_status	= hisi_zip_get_hw_err_status,
	.clear_dev_hw_err_status = hisi_zip_clear_hw_err_status,
	.log_dev_hw_err		= hisi_zip_log_hw_error,
	.open_axi_master_ooo	= hisi_zip_open_axi_master_ooo,
	.close_axi_master_ooo	= hisi_zip_close_axi_master_ooo,
	.err_info		= {
		.ce			= QM_BASE_CE,
		.nfe			= QM_BASE_NFE |
					  QM_ACC_WB_NOT_READY_TIMEOUT,
		.fe			= 0,
		.ecc_2bits_mask		= HZIP_CORE_INT_STATUS_M_ECC,
		.msi_wr_port		= HZIP_WR_PORT,
		.acpi_rst		= "ZRST",	/* ACPI reset method name */
	}
};
710 
711 static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
712 {
713 	struct hisi_qm *qm = &hisi_zip->qm;
714 	struct hisi_zip_ctrl *ctrl;
715 
716 	ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
717 	if (!ctrl)
718 		return -ENOMEM;
719 
720 	hisi_zip->ctrl = ctrl;
721 	ctrl->hisi_zip = hisi_zip;
722 
723 	if (qm->ver == QM_HW_V1)
724 		qm->ctrl_qp_num = HZIP_QUEUE_NUM_V1;
725 	else
726 		qm->ctrl_qp_num = HZIP_QUEUE_NUM_V2;
727 
728 	qm->err_ini = &hisi_zip_err_ini;
729 
730 	hisi_zip_set_user_domain_and_cache(qm);
731 	hisi_qm_dev_err_init(qm);
732 	hisi_zip_debug_regs_clear(hisi_zip);
733 
734 	return 0;
735 }
736 
737 static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
738 {
739 	qm->pdev = pdev;
740 	qm->ver = pdev->revision;
741 	qm->algs = "zlib\ngzip";
742 	qm->sqe_size = HZIP_SQE_SIZE;
743 	qm->dev_name = hisi_zip_name;
744 
745 	qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ?
746 			QM_HW_PF : QM_HW_VF;
747 	if (qm->fun_type == QM_HW_PF) {
748 		qm->qp_base = HZIP_PF_DEF_Q_BASE;
749 		qm->qp_num = pf_q_num;
750 		qm->qm_list = &zip_devices;
751 	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
752 		/*
753 		 * have no way to get qm configure in VM in v1 hardware,
754 		 * so currently force PF to uses HZIP_PF_DEF_Q_NUM, and force
755 		 * to trigger only one VF in v1 hardware.
756 		 *
757 		 * v2 hardware has no such problem.
758 		 */
759 		qm->qp_base = HZIP_PF_DEF_Q_NUM;
760 		qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
761 	}
762 
763 	return hisi_qm_init(qm);
764 }
765 
766 static int hisi_zip_probe_init(struct hisi_zip *hisi_zip)
767 {
768 	struct hisi_qm *qm = &hisi_zip->qm;
769 	int ret;
770 
771 	if (qm->fun_type == QM_HW_PF) {
772 		ret = hisi_zip_pf_probe_init(hisi_zip);
773 		if (ret)
774 			return ret;
775 	}
776 
777 	return 0;
778 }
779 
780 static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
781 {
782 	struct hisi_zip *hisi_zip;
783 	struct hisi_qm *qm;
784 	int ret;
785 
786 	hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
787 	if (!hisi_zip)
788 		return -ENOMEM;
789 
790 	qm = &hisi_zip->qm;
791 
792 	ret = hisi_zip_qm_init(qm, pdev);
793 	if (ret) {
794 		pci_err(pdev, "Failed to init ZIP QM (%d)!\n", ret);
795 		return ret;
796 	}
797 
798 	ret = hisi_zip_probe_init(hisi_zip);
799 	if (ret) {
800 		pci_err(pdev, "Failed to probe (%d)!\n", ret);
801 		goto err_qm_uninit;
802 	}
803 
804 	ret = hisi_qm_start(qm);
805 	if (ret)
806 		goto err_qm_uninit;
807 
808 	ret = hisi_zip_debugfs_init(hisi_zip);
809 	if (ret)
810 		dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret);
811 
812 	hisi_qm_add_to_list(qm, &zip_devices);
813 
814 	if (qm->uacce) {
815 		ret = uacce_register(qm->uacce);
816 		if (ret)
817 			goto err_qm_uninit;
818 	}
819 
820 	if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
821 		ret = hisi_qm_sriov_enable(pdev, vfs_num);
822 		if (ret < 0)
823 			goto err_remove_from_list;
824 	}
825 
826 	return 0;
827 
828 err_remove_from_list:
829 	hisi_qm_del_from_list(qm, &zip_devices);
830 	hisi_zip_debugfs_exit(hisi_zip);
831 	hisi_qm_stop(qm);
832 err_qm_uninit:
833 	hisi_qm_uninit(qm);
834 
835 	return ret;
836 }
837 
/*
 * PCI remove: disable SR-IOV first (VFs depend on the PF), then tear
 * down debugfs, stop and uninit the QM, and drop it from the list.
 */
static void hisi_zip_remove(struct pci_dev *pdev)
{
	struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
	struct hisi_qm *qm = &hisi_zip->qm;

	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev);

	hisi_zip_debugfs_exit(hisi_zip);
	hisi_qm_stop(qm);

	hisi_qm_dev_err_uninit(qm);
	hisi_qm_uninit(qm);
	hisi_qm_del_from_list(qm, &zip_devices);
}
853 
/* AER / FLR callbacks, all delegated to the shared QM implementation */
static const struct pci_error_handlers hisi_zip_err_handler = {
	.error_detected	= hisi_qm_dev_err_detected,
	.slot_reset	= hisi_qm_dev_slot_reset,
	.reset_prepare	= hisi_qm_reset_prepare,
	.reset_done	= hisi_qm_reset_done,
};
860 
/* PCI driver descriptor; sriov_configure only when PCI_IOV is built in */
static struct pci_driver hisi_zip_pci_driver = {
	.name			= "hisi_zip",
	.id_table		= hisi_zip_dev_ids,
	.probe			= hisi_zip_probe,
	.remove			= hisi_zip_remove,
	.sriov_configure	= IS_ENABLED(CONFIG_PCI_IOV) ?
					hisi_qm_sriov_configure : NULL,
	.err_handler		= &hisi_zip_err_handler,
};
870 
871 static void hisi_zip_register_debugfs(void)
872 {
873 	if (!debugfs_initialized())
874 		return;
875 
876 	hzip_debugfs_root = debugfs_create_dir("hisi_zip", NULL);
877 }
878 
/* Remove the module-wide debugfs root (no-op if it was never created). */
static void hisi_zip_unregister_debugfs(void)
{
	debugfs_remove_recursive(hzip_debugfs_root);
}
883 
/*
 * Module init: set up the device list and debugfs root, register the
 * PCI driver, then register the crypto algorithms. The goto labels
 * unwind in strict reverse order of the setup steps.
 */
static int __init hisi_zip_init(void)
{
	int ret;

	hisi_qm_init_list(&zip_devices);
	hisi_zip_register_debugfs();

	ret = pci_register_driver(&hisi_zip_pci_driver);
	if (ret < 0) {
		pr_err("Failed to register pci driver.\n");
		goto err_pci;
	}

	ret = hisi_zip_register_to_crypto();
	if (ret < 0) {
		pr_err("Failed to register driver to crypto.\n");
		goto err_crypto;
	}

	return 0;

err_crypto:
	pci_unregister_driver(&hisi_zip_pci_driver);
err_pci:
	hisi_zip_unregister_debugfs();

	return ret;
}
912 
/* Module exit: mirror of hisi_zip_init(), torn down in reverse order. */
static void __exit hisi_zip_exit(void)
{
	hisi_zip_unregister_from_crypto();
	pci_unregister_driver(&hisi_zip_pci_driver);
	hisi_zip_unregister_debugfs();
}
919 
module_init(hisi_zip_init);
module_exit(hisi_zip_exit);

/* Module metadata */
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_DESCRIPTION("Driver for HiSilicon ZIP accelerator");
926