xref: /openbmc/linux/drivers/crypto/hisilicon/qm.c (revision 8ffdff6a)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 HiSilicon Limited. */
3 #include <asm/page.h>
4 #include <linux/acpi.h>
5 #include <linux/aer.h>
6 #include <linux/bitmap.h>
7 #include <linux/debugfs.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/idr.h>
10 #include <linux/io.h>
11 #include <linux/irqreturn.h>
12 #include <linux/log2.h>
13 #include <linux/seq_file.h>
14 #include <linux/slab.h>
15 #include <linux/uacce.h>
16 #include <linux/uaccess.h>
17 #include <uapi/misc/uacce/hisi_qm.h>
18 #include "qm.h"
19 
20 /* eq/aeq irq enable */
21 #define QM_VF_AEQ_INT_SOURCE		0x0
22 #define QM_VF_AEQ_INT_MASK		0x4
23 #define QM_VF_EQ_INT_SOURCE		0x8
24 #define QM_VF_EQ_INT_MASK		0xc
25 #define QM_IRQ_NUM_V1			1
26 #define QM_IRQ_NUM_PF_V2		4
27 #define QM_IRQ_NUM_VF_V2		2
28 
29 #define QM_EQ_EVENT_IRQ_VECTOR		0
30 #define QM_AEQ_EVENT_IRQ_VECTOR		1
31 #define QM_ABNORMAL_EVENT_IRQ_VECTOR	3
32 
33 /* mailbox */
34 #define QM_MB_CMD_SQC			0x0
35 #define QM_MB_CMD_CQC			0x1
36 #define QM_MB_CMD_EQC			0x2
37 #define QM_MB_CMD_AEQC			0x3
38 #define QM_MB_CMD_SQC_BT		0x4
39 #define QM_MB_CMD_CQC_BT		0x5
40 #define QM_MB_CMD_SQC_VFT_V2		0x6
41 
42 #define QM_MB_CMD_SEND_BASE		0x300
43 #define QM_MB_EVENT_SHIFT		8
44 #define QM_MB_BUSY_SHIFT		13
45 #define QM_MB_OP_SHIFT			14
46 #define QM_MB_CMD_DATA_ADDR_L		0x304
47 #define QM_MB_CMD_DATA_ADDR_H		0x308
48 
49 /* sqc shift */
50 #define QM_SQ_HOP_NUM_SHIFT		0
51 #define QM_SQ_PAGE_SIZE_SHIFT		4
52 #define QM_SQ_BUF_SIZE_SHIFT		8
53 #define QM_SQ_SQE_SIZE_SHIFT		12
54 #define QM_SQ_PRIORITY_SHIFT		0
55 #define QM_SQ_ORDERS_SHIFT		4
56 #define QM_SQ_TYPE_SHIFT		8
57 #define QM_QC_PASID_ENABLE		0x1
58 #define QM_QC_PASID_ENABLE_SHIFT	7
59 
60 #define QM_SQ_TYPE_MASK			GENMASK(3, 0)
61 #define QM_SQ_TAIL_IDX(sqc)		((le16_to_cpu((sqc)->w11) >> 6) & 0x1)
62 
63 /* cqc shift */
64 #define QM_CQ_HOP_NUM_SHIFT		0
65 #define QM_CQ_PAGE_SIZE_SHIFT		4
66 #define QM_CQ_BUF_SIZE_SHIFT		8
67 #define QM_CQ_CQE_SIZE_SHIFT		12
68 #define QM_CQ_PHASE_SHIFT		0
69 #define QM_CQ_FLAG_SHIFT		1
70 
71 #define QM_CQE_PHASE(cqe)		(le16_to_cpu((cqe)->w7) & 0x1)
72 #define QM_QC_CQE_SIZE			4
73 #define QM_CQ_TAIL_IDX(cqc)		((le16_to_cpu((cqc)->w11) >> 6) & 0x1)
74 
75 /* eqc shift */
76 #define QM_EQE_AEQE_SIZE		(2UL << 12)
77 #define QM_EQC_PHASE_SHIFT		16
78 
79 #define QM_EQE_PHASE(eqe)		((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
80 #define QM_EQE_CQN_MASK			GENMASK(15, 0)
81 
82 #define QM_AEQE_PHASE(aeqe)		((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
83 #define QM_AEQE_TYPE_SHIFT		17
84 
85 #define QM_DOORBELL_CMD_SQ		0
86 #define QM_DOORBELL_CMD_CQ		1
87 #define QM_DOORBELL_CMD_EQ		2
88 #define QM_DOORBELL_CMD_AEQ		3
89 
90 #define QM_DOORBELL_BASE_V1		0x340
91 #define QM_DB_CMD_SHIFT_V1		16
92 #define QM_DB_INDEX_SHIFT_V1		32
93 #define QM_DB_PRIORITY_SHIFT_V1		48
94 #define QM_DOORBELL_SQ_CQ_BASE_V2	0x1000
95 #define QM_DOORBELL_EQ_AEQ_BASE_V2	0x2000
96 #define QM_DB_CMD_SHIFT_V2		12
97 #define QM_DB_RAND_SHIFT_V2		16
98 #define QM_DB_INDEX_SHIFT_V2		32
99 #define QM_DB_PRIORITY_SHIFT_V2		48
100 
101 #define QM_MEM_START_INIT		0x100040
102 #define QM_MEM_INIT_DONE		0x100044
103 #define QM_VFT_CFG_RDY			0x10006c
104 #define QM_VFT_CFG_OP_WR		0x100058
105 #define QM_VFT_CFG_TYPE			0x10005c
106 #define QM_SQC_VFT			0x0
107 #define QM_CQC_VFT			0x1
108 #define QM_VFT_CFG			0x100060
109 #define QM_VFT_CFG_OP_ENABLE		0x100054
110 
111 #define QM_VFT_CFG_DATA_L		0x100064
112 #define QM_VFT_CFG_DATA_H		0x100068
113 #define QM_SQC_VFT_BUF_SIZE		(7ULL << 8)
114 #define QM_SQC_VFT_SQC_SIZE		(5ULL << 12)
115 #define QM_SQC_VFT_INDEX_NUMBER		(1ULL << 16)
116 #define QM_SQC_VFT_START_SQN_SHIFT	28
117 #define QM_SQC_VFT_VALID		(1ULL << 44)
118 #define QM_SQC_VFT_SQN_SHIFT		45
119 #define QM_CQC_VFT_BUF_SIZE		(7ULL << 8)
120 #define QM_CQC_VFT_SQC_SIZE		(5ULL << 12)
121 #define QM_CQC_VFT_INDEX_NUMBER		(1ULL << 16)
122 #define QM_CQC_VFT_VALID		(1ULL << 28)
123 
124 #define QM_SQC_VFT_BASE_SHIFT_V2	28
125 #define QM_SQC_VFT_BASE_MASK_V2		GENMASK(15, 0)
126 #define QM_SQC_VFT_NUM_SHIFT_V2		45
127 #define QM_SQC_VFT_NUM_MASK_v2		GENMASK(9, 0)
128 
129 #define QM_DFX_CNT_CLR_CE		0x100118
130 
131 #define QM_ABNORMAL_INT_SOURCE		0x100000
132 #define QM_ABNORMAL_INT_SOURCE_CLR	GENMASK(12, 0)
133 #define QM_ABNORMAL_INT_MASK		0x100004
134 #define QM_ABNORMAL_INT_MASK_VALUE	0x1fff
135 #define QM_ABNORMAL_INT_STATUS		0x100008
136 #define QM_ABNORMAL_INT_SET		0x10000c
137 #define QM_ABNORMAL_INF00		0x100010
138 #define QM_FIFO_OVERFLOW_TYPE		0xc0
139 #define QM_FIFO_OVERFLOW_TYPE_SHIFT	6
140 #define QM_FIFO_OVERFLOW_VF		0x3f
141 #define QM_ABNORMAL_INF01		0x100014
142 #define QM_DB_TIMEOUT_TYPE		0xc0
143 #define QM_DB_TIMEOUT_TYPE_SHIFT	6
144 #define QM_DB_TIMEOUT_VF		0x3f
145 #define QM_RAS_CE_ENABLE		0x1000ec
146 #define QM_RAS_FE_ENABLE		0x1000f0
147 #define QM_RAS_NFE_ENABLE		0x1000f4
148 #define QM_RAS_CE_THRESHOLD		0x1000f8
149 #define QM_RAS_CE_TIMES_PER_IRQ		1
150 #define QM_RAS_MSI_INT_SEL		0x1040f4
151 
152 #define QM_RESET_WAIT_TIMEOUT		400
153 #define QM_PEH_VENDOR_ID		0x1000d8
154 #define ACC_VENDOR_ID_VALUE		0x5a5a
155 #define QM_PEH_DFX_INFO0		0x1000fc
156 #define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT	3
157 #define ACC_PEH_MSI_DISABLE		GENMASK(31, 0)
158 #define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN	0x1
159 #define ACC_MASTER_TRANS_RETURN_RW	3
160 #define ACC_MASTER_TRANS_RETURN		0x300150
161 #define ACC_MASTER_GLOBAL_CTRL		0x300000
162 #define ACC_AM_CFG_PORT_WR_EN		0x30001c
163 #define QM_RAS_NFE_MBIT_DISABLE		~QM_ECC_MBIT
164 #define ACC_AM_ROB_ECC_INT_STS		0x300104
165 #define ACC_ROB_ECC_ERR_MULTPL		BIT(1)
166 
167 #define POLL_PERIOD			10
168 #define POLL_TIMEOUT			1000
169 #define WAIT_PERIOD_US_MAX		200
170 #define WAIT_PERIOD_US_MIN		100
171 #define MAX_WAIT_COUNTS			1000
172 #define QM_CACHE_WB_START		0x204
173 #define QM_CACHE_WB_DONE		0x208
174 
175 #define PCI_BAR_2			2
176 #define QM_SQE_DATA_ALIGN_MASK		GENMASK(6, 0)
177 #define QMC_ALIGN(sz)			ALIGN(sz, 32)
178 
179 #define QM_DBG_READ_LEN		256
180 #define QM_DBG_WRITE_LEN		1024
181 #define QM_DBG_TMP_BUF_LEN		22
182 #define QM_PCI_COMMAND_INVALID		~0
183 
184 #define WAIT_PERIOD			20
185 #define REMOVE_WAIT_DELAY		10
186 #define QM_SQE_ADDR_MASK		GENMASK(7, 0)
187 #define QM_EQ_DEPTH			(1024 * 2)
188 
189 #define QM_DRIVER_REMOVING		0
190 #define QM_RST_SCHED			1
191 #define QM_RESETTING			2
192 
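/* Helpers to compose the dw3 and w13 fields of the SQC/CQC for QM v1 and v2. */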
193 #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
194 	(((hop_num) << QM_CQ_HOP_NUM_SHIFT)	| \
195 	((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT)	| \
196 	((buf_sz) << QM_CQ_BUF_SIZE_SHIFT)	| \
197 	((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
198 
199 #define QM_MK_CQC_DW3_V2(cqe_sz) \
200 	((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
201 
202 #define QM_MK_SQC_W13(priority, orders, alg_type) \
203 	(((priority) << QM_SQ_PRIORITY_SHIFT)	| \
204 	((orders) << QM_SQ_ORDERS_SHIFT)	| \
205 	(((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))
206 
207 #define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
208 	(((hop_num) << QM_SQ_HOP_NUM_SHIFT)	| \
209 	((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT)	| \
210 	((buf_sz) << QM_SQ_BUF_SIZE_SHIFT)	| \
211 	((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
212 
213 #define QM_MK_SQC_DW3_V2(sqe_sz) \
214 	((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
215 
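/*
 * Initialize the fields common to SQC and CQC: zeroed head/tail, the queue
 * base address and the pasid.
 */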
216 #define INIT_QC_COMMON(qc, base, pasid) do {			\
217 	(qc)->head = 0;						\
218 	(qc)->tail = 0;						\
219 	(qc)->base_l = cpu_to_le32(lower_32_bits(base));	\
220 	(qc)->base_h = cpu_to_le32(upper_32_bits(base));	\
221 	(qc)->dw3 = 0;						\
222 	(qc)->w8 = 0;						\
223 	(qc)->rsvd0 = 0;					\
224 	(qc)->pasid = cpu_to_le16(pasid);			\
225 	(qc)->w11 = 0;						\
226 	(qc)->rsvd1 = 0;					\
227 } while (0)
228 
229 enum vft_type {
230 	SQC_VFT = 0,
231 	CQC_VFT,
232 };
233 
234 enum acc_err_result {
235 	ACC_ERR_NONE,
236 	ACC_ERR_NEED_RESET,
237 	ACC_ERR_RECOVERED,
238 };
239 
240 struct qm_cqe {
241 	__le32 rsvd0;
242 	__le16 cmd_id;
243 	__le16 rsvd1;
244 	__le16 sq_head;
245 	__le16 sq_num;
246 	__le16 rsvd2;
247 	__le16 w7;
248 };
249 
250 struct qm_eqe {
251 	__le32 dw0;
252 };
253 
254 struct qm_aeqe {
255 	__le32 dw0;
256 };
257 
258 struct qm_sqc {
259 	__le16 head;
260 	__le16 tail;
261 	__le32 base_l;
262 	__le32 base_h;
263 	__le32 dw3;
264 	__le16 w8;
265 	__le16 rsvd0;
266 	__le16 pasid;
267 	__le16 w11;
268 	__le16 cq_num;
269 	__le16 w13;
270 	__le32 rsvd1;
271 };
272 
273 struct qm_cqc {
274 	__le16 head;
275 	__le16 tail;
276 	__le32 base_l;
277 	__le32 base_h;
278 	__le32 dw3;
279 	__le16 w8;
280 	__le16 rsvd0;
281 	__le16 pasid;
282 	__le16 w11;
283 	__le32 dw6;
284 	__le32 rsvd1;
285 };
286 
287 struct qm_eqc {
288 	__le16 head;
289 	__le16 tail;
290 	__le32 base_l;
291 	__le32 base_h;
292 	__le32 dw3;
293 	__le32 rsvd[2];
294 	__le32 dw6;
295 };
296 
297 struct qm_aeqc {
298 	__le16 head;
299 	__le16 tail;
300 	__le32 base_l;
301 	__le32 base_h;
302 	__le32 dw3;
303 	__le32 rsvd[2];
304 	__le32 dw6;
305 };
306 
307 struct qm_mailbox {
308 	__le16 w0;
309 	__le16 queue_num;
310 	__le32 base_l;
311 	__le32 base_h;
312 	__le32 rsvd;
313 };
314 
315 struct qm_doorbell {
316 	__le16 queue_num;
317 	__le16 cmd;
318 	__le16 index;
319 	__le16 priority;
320 };
321 
322 struct hisi_qm_resource {
323 	struct hisi_qm *qm;
324 	int distance;
325 	struct list_head list;
326 };
327 
328 struct hisi_qm_hw_ops {
329 	int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
330 	void (*qm_db)(struct hisi_qm *qm, u16 qn,
331 		      u8 cmd, u16 index, u8 priority);
332 	u32 (*get_irq_num)(struct hisi_qm *qm);
333 	int (*debug_init)(struct hisi_qm *qm);
334 	void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
335 	void (*hw_error_uninit)(struct hisi_qm *qm);
336 	enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
337 };
338 
339 struct qm_dfx_item {
340 	const char *name;
341 	u32 offset;
342 };
343 
344 static struct qm_dfx_item qm_dfx_files[] = {
345 	{"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
346 	{"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
347 	{"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
348 	{"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
349 	{"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
350 };
351 
352 static const char * const qm_debug_file_name[] = {
353 	[CURRENT_Q]    = "current_q",
354 	[CLEAR_ENABLE] = "clear_enable",
355 };
356 
357 struct hisi_qm_hw_error {
358 	u32 int_msk;
359 	const char *msg;
360 };
361 
362 static const struct hisi_qm_hw_error qm_hw_error[] = {
363 	{ .int_msk = BIT(0), .msg = "qm_axi_rresp" },
364 	{ .int_msk = BIT(1), .msg = "qm_axi_bresp" },
365 	{ .int_msk = BIT(2), .msg = "qm_ecc_mbit" },
366 	{ .int_msk = BIT(3), .msg = "qm_ecc_1bit" },
367 	{ .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" },
368 	{ .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" },
369 	{ .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" },
370 	{ .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" },
371 	{ .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" },
372 	{ .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" },
373 	{ .int_msk = BIT(10), .msg = "qm_db_timeout" },
374 	{ .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
375 	{ .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
376 	{ /* sentinel */ }
377 };
378 
379 static const char * const qm_db_timeout[] = {
380 	"sq", "cq", "eq", "aeq",
381 };
382 
383 static const char * const qm_fifo_overflow[] = {
384 	"cq", "eq", "aeq",
385 };
386 
387 static const char * const qm_s[] = {
388 	"init", "start", "close", "stop",
389 };
390 
391 static const char * const qp_s[] = {
392 	"none", "init", "start", "stop", "close",
393 };
394 
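/* Check whether the QM is allowed to move from its current state to @new. */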
395 static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
396 {
397 	enum qm_state curr = atomic_read(&qm->status.flags);
398 	bool avail = false;
399 
400 	switch (curr) {
401 	case QM_INIT:
402 		if (new == QM_START || new == QM_CLOSE)
403 			avail = true;
404 		break;
405 	case QM_START:
406 		if (new == QM_STOP)
407 			avail = true;
408 		break;
409 	case QM_STOP:
410 		if (new == QM_CLOSE || new == QM_START)
411 			avail = true;
412 		break;
413 	default:
414 		break;
415 	}
416 
417 	dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
418 		qm_s[curr], qm_s[new]);
419 
420 	if (!avail)
421 		dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n",
422 			 qm_s[curr], qm_s[new]);
423 
424 	return avail;
425 }
426 
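/* Check whether a qp may move to @new given the current QM and qp states. */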
427 static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
428 			      enum qp_state new)
429 {
430 	enum qm_state qm_curr = atomic_read(&qm->status.flags);
431 	enum qp_state qp_curr = 0;
432 	bool avail = false;
433 
434 	if (qp)
435 		qp_curr = atomic_read(&qp->qp_status.flags);
436 
437 	switch (new) {
438 	case QP_INIT:
439 		if (qm_curr == QM_START || qm_curr == QM_INIT)
440 			avail = true;
441 		break;
442 	case QP_START:
443 		if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
444 		    (qm_curr == QM_START && qp_curr == QP_STOP))
445 			avail = true;
446 		break;
447 	case QP_STOP:
448 		if ((qm_curr == QM_START && qp_curr == QP_START) ||
449 		    (qp_curr == QP_INIT))
450 			avail = true;
451 		break;
452 	case QP_CLOSE:
453 		if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
454 		    (qm_curr == QM_START && qp_curr == QP_STOP) ||
455 		    (qm_curr == QM_STOP && qp_curr == QP_STOP)  ||
456 		    (qm_curr == QM_STOP && qp_curr == QP_INIT))
457 			avail = true;
458 		break;
459 	default:
460 		break;
461 	}
462 
463 	dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
464 		qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
465 
466 	if (!avail)
467 		dev_warn(&qm->pdev->dev,
468 			 "Can not change qp state from %s to %s in QM %s\n",
469 			 qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
470 
471 	return avail;
472 }
473 
474 /* Return 0 if the mailbox is ready, or -ETIMEDOUT on hardware timeout */
475 static int qm_wait_mb_ready(struct hisi_qm *qm)
476 {
477 	u32 val;
478 
479 	return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
480 					  val, !((val >> QM_MB_BUSY_SHIFT) &
481 					  0x1), POLL_PERIOD, POLL_TIMEOUT);
482 }
483 
484 /* 128 bits should be written to the hardware at one time to trigger a mailbox */
485 static void qm_mb_write(struct hisi_qm *qm, const void *src)
486 {
487 	void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
488 	unsigned long tmp0 = 0, tmp1 = 0;
489 
490 	if (!IS_ENABLED(CONFIG_ARM64)) {
491 		memcpy_toio(fun_base, src, 16);
492 		wmb();
493 		return;
494 	}
495 
496 	asm volatile("ldp %0, %1, %3\n"
497 		     "stp %0, %1, %2\n"
498 		     "dsb sy\n"
499 		     : "=&r" (tmp0),
500 		       "=&r" (tmp1),
501 		       "+Q" (*((char __iomem *)fun_base))
502 		     : "Q" (*((char *)src))
503 		     : "memory");
504 }
505 
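/*
 * Build a 128-bit mailbox descriptor and issue it: wait until the mailbox is
 * ready, write the whole descriptor at once, then wait for completion.
 */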
506 static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
507 		 bool op)
508 {
509 	struct qm_mailbox mailbox;
510 	int ret = 0;
511 
512 	dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
513 		queue, cmd, (unsigned long long)dma_addr);
514 
515 	mailbox.w0 = cpu_to_le16(cmd |
516 		     (op ? 0x1 << QM_MB_OP_SHIFT : 0) |
517 		     (0x1 << QM_MB_BUSY_SHIFT));
518 	mailbox.queue_num = cpu_to_le16(queue);
519 	mailbox.base_l = cpu_to_le32(lower_32_bits(dma_addr));
520 	mailbox.base_h = cpu_to_le32(upper_32_bits(dma_addr));
521 	mailbox.rsvd = 0;
522 
523 	mutex_lock(&qm->mailbox_lock);
524 
525 	if (unlikely(qm_wait_mb_ready(qm))) {
526 		ret = -EBUSY;
527 		dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
528 		goto busy_unlock;
529 	}
530 
531 	qm_mb_write(qm, &mailbox);
532 
533 	if (unlikely(qm_wait_mb_ready(qm))) {
534 		ret = -EBUSY;
535 		dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
536 		goto busy_unlock;
537 	}
538 
539 busy_unlock:
540 	mutex_unlock(&qm->mailbox_lock);
541 
542 	if (ret)
543 		atomic64_inc(&qm->debug.dfx.mb_err_cnt);
544 	return ret;
545 }
546 
547 static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
548 {
549 	u64 doorbell;
550 
551 	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
552 		   ((u64)index << QM_DB_INDEX_SHIFT_V1)  |
553 		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V1);
554 
555 	writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
556 }
557 
558 static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
559 {
560 	u64 doorbell;
561 	u64 dbase;
562 	u16 randata = 0;
563 
564 	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
565 		dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
566 	else
567 		dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;
568 
569 	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
570 		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
571 		   ((u64)index << QM_DB_INDEX_SHIFT_V2)	 |
572 		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
573 
574 	writeq(doorbell, qm->io_base + dbase);
575 }
576 
577 static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
578 {
579 	dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
580 		qn, cmd, index);
581 
582 	qm->ops->qm_db(qm, qn, cmd, index, priority);
583 }
584 
585 static int qm_dev_mem_reset(struct hisi_qm *qm)
586 {
587 	u32 val;
588 
589 	writel(0x1, qm->io_base + QM_MEM_START_INIT);
590 	return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
591 					  val & BIT(0), POLL_PERIOD,
592 					  POLL_TIMEOUT);
593 }
594 
595 static u32 qm_get_irq_num_v1(struct hisi_qm *qm)
596 {
597 	return QM_IRQ_NUM_V1;
598 }
599 
600 static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
601 {
602 	if (qm->fun_type == QM_HW_PF)
603 		return QM_IRQ_NUM_PF_V2;
604 	else
605 		return QM_IRQ_NUM_VF_V2;
606 }
607 
608 static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
609 {
610 	u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
611 
612 	return &qm->qp_array[cqn];
613 }
614 
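/* Advance the CQ head, toggling the expected phase bit when the ring wraps. */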
615 static void qm_cq_head_update(struct hisi_qp *qp)
616 {
617 	if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
618 		qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
619 		qp->qp_status.cq_head = 0;
620 	} else {
621 		qp->qp_status.cq_head++;
622 	}
623 }
624 
625 static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
626 {
627 	if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
628 		return;
629 
630 	if (qp->event_cb) {
631 		qp->event_cb(qp);
632 		return;
633 	}
634 
635 	if (qp->req_cb) {
636 		struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
637 
638 		while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
639 			dma_rmb();
640 			qp->req_cb(qp, qp->sqe + qm->sqe_size *
641 				   le16_to_cpu(cqe->sq_head));
642 			qm_cq_head_update(qp);
643 			cqe = qp->cqe + qp->qp_status.cq_head;
644 			qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
645 			      qp->qp_status.cq_head, 0);
646 			atomic_dec(&qp->qp_status.used);
647 		}
648 
649 		/* set c_flag */
650 		qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
651 		      qp->qp_status.cq_head, 1);
652 	}
653 }
654 
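/*
 * Consume EQEs while their phase bit matches, dispatch each completion to its
 * qp, and ring the EQ doorbell periodically and once all entries are handled.
 */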
655 static void qm_work_process(struct work_struct *work)
656 {
657 	struct hisi_qm *qm = container_of(work, struct hisi_qm, work);
658 	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
659 	struct hisi_qp *qp;
660 	int eqe_num = 0;
661 
662 	while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
663 		eqe_num++;
664 		qp = qm_to_hisi_qp(qm, eqe);
665 		qm_poll_qp(qp, qm);
666 
667 		if (qm->status.eq_head == QM_EQ_DEPTH - 1) {
668 			qm->status.eqc_phase = !qm->status.eqc_phase;
669 			eqe = qm->eqe;
670 			qm->status.eq_head = 0;
671 		} else {
672 			eqe++;
673 			qm->status.eq_head++;
674 		}
675 
676 		if (eqe_num == QM_EQ_DEPTH / 2 - 1) {
677 			eqe_num = 0;
678 			qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
679 		}
680 	}
681 
682 	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
683 }
684 
685 static irqreturn_t do_qm_irq(int irq, void *data)
686 {
687 	struct hisi_qm *qm = (struct hisi_qm *)data;
688 
689 	/* use the workqueue created by the QM's device driver, if any */
690 	if (qm->wq)
691 		queue_work(qm->wq, &qm->work);
692 	else
693 		schedule_work(&qm->work);
694 
695 	return IRQ_HANDLED;
696 }
697 
698 static irqreturn_t qm_irq(int irq, void *data)
699 {
700 	struct hisi_qm *qm = data;
701 
702 	if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
703 		return do_qm_irq(irq, data);
704 
705 	atomic64_inc(&qm->debug.dfx.err_irq_cnt);
706 	dev_err(&qm->pdev->dev, "invalid int source\n");
707 	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
708 
709 	return IRQ_NONE;
710 }
711 
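/*
 * AEQ interrupt: consume AEQEs while their phase bit matches, report the
 * overflow type carried in each entry and ring the AEQ doorbell.
 */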
712 static irqreturn_t qm_aeq_irq(int irq, void *data)
713 {
714 	struct hisi_qm *qm = data;
715 	struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
716 	u32 type;
717 
718 	atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
719 	if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
720 		return IRQ_NONE;
721 
722 	while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
723 		type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
724 		if (type < ARRAY_SIZE(qm_fifo_overflow))
725 			dev_err(&qm->pdev->dev, "%s overflow\n",
726 				qm_fifo_overflow[type]);
727 		else
728 			dev_err(&qm->pdev->dev, "unknown error type %u\n",
729 				type);
730 
731 		if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
732 			qm->status.aeqc_phase = !qm->status.aeqc_phase;
733 			aeqe = qm->aeqe;
734 			qm->status.aeq_head = 0;
735 		} else {
736 			aeqe++;
737 			qm->status.aeq_head++;
738 		}
739 
740 		qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
741 	}
742 
743 	return IRQ_HANDLED;
744 }
745 
746 static void qm_irq_unregister(struct hisi_qm *qm)
747 {
748 	struct pci_dev *pdev = qm->pdev;
749 
750 	free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
751 
752 	if (qm->ver == QM_HW_V1)
753 		return;
754 
755 	free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
756 
757 	if (qm->fun_type == QM_HW_PF)
758 		free_irq(pci_irq_vector(pdev,
759 			 QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
760 }
761 
762 static void qm_init_qp_status(struct hisi_qp *qp)
763 {
764 	struct hisi_qp_status *qp_status = &qp->qp_status;
765 
766 	qp_status->sq_tail = 0;
767 	qp_status->cq_head = 0;
768 	qp_status->cqc_phase = true;
769 	atomic_set(&qp_status->used, 0);
770 }
771 
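/*
 * Compose the SQC or CQC VFT entry for a function and write it to the VFT
 * config data registers.
 */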
772 static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
773 			    u32 number)
774 {
775 	u64 tmp = 0;
776 
777 	if (number > 0) {
778 		switch (type) {
779 		case SQC_VFT:
780 			if (qm->ver == QM_HW_V1) {
781 				tmp = QM_SQC_VFT_BUF_SIZE	|
782 				      QM_SQC_VFT_SQC_SIZE	|
783 				      QM_SQC_VFT_INDEX_NUMBER	|
784 				      QM_SQC_VFT_VALID		|
785 				      (u64)base << QM_SQC_VFT_START_SQN_SHIFT;
786 			} else {
787 				tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
788 				      QM_SQC_VFT_VALID |
789 				      (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
790 			}
791 			break;
792 		case CQC_VFT:
793 			if (qm->ver == QM_HW_V1) {
794 				tmp = QM_CQC_VFT_BUF_SIZE	|
795 				      QM_CQC_VFT_SQC_SIZE	|
796 				      QM_CQC_VFT_INDEX_NUMBER	|
797 				      QM_CQC_VFT_VALID;
798 			} else {
799 				tmp = QM_CQC_VFT_VALID;
800 			}
801 			break;
802 		}
803 	}
804 
805 	writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
806 	writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
807 }
808 
809 static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
810 			     u32 fun_num, u32 base, u32 number)
811 {
812 	unsigned int val;
813 	int ret;
814 
815 	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
816 					 val & BIT(0), POLL_PERIOD,
817 					 POLL_TIMEOUT);
818 	if (ret)
819 		return ret;
820 
821 	writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
822 	writel(type, qm->io_base + QM_VFT_CFG_TYPE);
823 	writel(fun_num, qm->io_base + QM_VFT_CFG);
824 
825 	qm_vft_data_cfg(qm, type, base, number);
826 
827 	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
828 	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
829 
830 	return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
831 					  val & BIT(0), POLL_PERIOD,
832 					  POLL_TIMEOUT);
833 }
834 
835 /* This configuration should be done after qm_dev_mem_reset() */
836 static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
837 			      u32 number)
838 {
839 	int ret, i;
840 
841 	for (i = SQC_VFT; i <= CQC_VFT; i++) {
842 		ret = qm_set_vft_common(qm, i, fun_num, base, number);
843 		if (ret)
844 			return ret;
845 	}
846 
847 	return 0;
848 }
849 
850 static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
851 {
852 	u64 sqc_vft;
853 	int ret;
854 
855 	ret = qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
856 	if (ret)
857 		return ret;
858 
859 	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
860 		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
861 	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
862 	*number = (QM_SQC_VFT_NUM_MASK_v2 &
863 		   (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
864 
865 	return 0;
866 }
867 
868 static struct hisi_qm *file_to_qm(struct debugfs_file *file)
869 {
870 	struct qm_debug *debug = file->debug;
871 
872 	return container_of(debug, struct hisi_qm, debug);
873 }
874 
875 static u32 current_q_read(struct debugfs_file *file)
876 {
877 	struct hisi_qm *qm = file_to_qm(file);
878 
879 	return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
880 }
881 
882 static int current_q_write(struct debugfs_file *file, u32 val)
883 {
884 	struct hisi_qm *qm = file_to_qm(file);
885 	u32 tmp;
886 
887 	if (val >= qm->debug.curr_qm_qp_num)
888 		return -EINVAL;
889 
890 	tmp = val << QM_DFX_QN_SHIFT |
891 	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
892 	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
893 
894 	tmp = val << QM_DFX_QN_SHIFT |
895 	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
896 	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
897 
898 	return 0;
899 }
900 
901 static u32 clear_enable_read(struct debugfs_file *file)
902 {
903 	struct hisi_qm *qm = file_to_qm(file);
904 
905 	return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
906 }
907 
908 /* rd_clr_ctrl: 1 enables read-clear, 0 disables it */
909 static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl)
910 {
911 	struct hisi_qm *qm = file_to_qm(file);
912 
913 	if (rd_clr_ctrl > 1)
914 		return -EINVAL;
915 
916 	writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);
917 
918 	return 0;
919 }
920 
921 static ssize_t qm_debug_read(struct file *filp, char __user *buf,
922 			     size_t count, loff_t *pos)
923 {
924 	struct debugfs_file *file = filp->private_data;
925 	enum qm_debug_file index = file->index;
926 	char tbuf[QM_DBG_TMP_BUF_LEN];
927 	u32 val;
928 	int ret;
929 
930 	mutex_lock(&file->lock);
931 	switch (index) {
932 	case CURRENT_Q:
933 		val = current_q_read(file);
934 		break;
935 	case CLEAR_ENABLE:
936 		val = clear_enable_read(file);
937 		break;
938 	default:
939 		mutex_unlock(&file->lock);
940 		return -EINVAL;
941 	}
942 	mutex_unlock(&file->lock);
943 
944 	ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val);
945 	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
946 }
947 
948 static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
949 			      size_t count, loff_t *pos)
950 {
951 	struct debugfs_file *file = filp->private_data;
952 	enum qm_debug_file index = file->index;
953 	unsigned long val;
954 	char tbuf[QM_DBG_TMP_BUF_LEN];
955 	int len, ret;
956 
957 	if (*pos != 0)
958 		return 0;
959 
960 	if (count >= QM_DBG_TMP_BUF_LEN)
961 		return -ENOSPC;
962 
963 	len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf,
964 				     count);
965 	if (len < 0)
966 		return len;
967 
968 	tbuf[len] = '\0';
969 	if (kstrtoul(tbuf, 0, &val))
970 		return -EFAULT;
971 
972 	mutex_lock(&file->lock);
973 	switch (index) {
974 	case CURRENT_Q:
975 		ret = current_q_write(file, val);
976 		if (ret)
977 			goto err_input;
978 		break;
979 	case CLEAR_ENABLE:
980 		ret = clear_enable_write(file, val);
981 		if (ret)
982 			goto err_input;
983 		break;
984 	default:
985 		ret = -EINVAL;
986 		goto err_input;
987 	}
988 	mutex_unlock(&file->lock);
989 
990 	return count;
991 
992 err_input:
993 	mutex_unlock(&file->lock);
994 	return ret;
995 }
996 
997 static const struct file_operations qm_debug_fops = {
998 	.owner = THIS_MODULE,
999 	.open = simple_open,
1000 	.read = qm_debug_read,
1001 	.write = qm_debug_write,
1002 };
1003 
1004 struct qm_dfx_registers {
1005 	char  *reg_name;
1006 	u64   reg_offset;
1007 };
1008 
1009 #define CNT_CYC_REGS_NUM		10
1010 static struct qm_dfx_registers qm_dfx_regs[] = {
1011 	/* XXX_CNT are read-clear registers */
1012 	{"QM_ECC_1BIT_CNT               ",  0x104000ull},
1013 	{"QM_ECC_MBIT_CNT               ",  0x104008ull},
1014 	{"QM_DFX_MB_CNT                 ",  0x104018ull},
1015 	{"QM_DFX_DB_CNT                 ",  0x104028ull},
1016 	{"QM_DFX_SQE_CNT                ",  0x104038ull},
1017 	{"QM_DFX_CQE_CNT                ",  0x104048ull},
1018 	{"QM_DFX_SEND_SQE_TO_ACC_CNT    ",  0x104050ull},
1019 	{"QM_DFX_WB_SQE_FROM_ACC_CNT    ",  0x104058ull},
1020 	{"QM_DFX_ACC_FINISH_CNT         ",  0x104060ull},
1021 	{"QM_DFX_CQE_ERR_CNT            ",  0x1040b4ull},
1022 	{"QM_DFX_FUNS_ACTIVE_ST         ",  0x200ull},
1023 	{"QM_ECC_1BIT_INF               ",  0x104004ull},
1024 	{"QM_ECC_MBIT_INF               ",  0x10400cull},
1025 	{"QM_DFX_ACC_RDY_VLD0           ",  0x1040a0ull},
1026 	{"QM_DFX_ACC_RDY_VLD1           ",  0x1040a4ull},
1027 	{"QM_DFX_AXI_RDY_VLD            ",  0x1040a8ull},
1028 	{"QM_DFX_FF_ST0                 ",  0x1040c8ull},
1029 	{"QM_DFX_FF_ST1                 ",  0x1040ccull},
1030 	{"QM_DFX_FF_ST2                 ",  0x1040d0ull},
1031 	{"QM_DFX_FF_ST3                 ",  0x1040d4ull},
1032 	{"QM_DFX_FF_ST4                 ",  0x1040d8ull},
1033 	{"QM_DFX_FF_ST5                 ",  0x1040dcull},
1034 	{"QM_DFX_FF_ST6                 ",  0x1040e0ull},
1035 	{"QM_IN_IDLE_ST                 ",  0x1040e4ull},
1036 	{ NULL, 0}
1037 };
1038 
1039 static struct qm_dfx_registers qm_vf_dfx_regs[] = {
1040 	{"QM_DFX_FUNS_ACTIVE_ST         ",  0x200ull},
1041 	{ NULL, 0}
1042 };
1043 
1044 static int qm_regs_show(struct seq_file *s, void *unused)
1045 {
1046 	struct hisi_qm *qm = s->private;
1047 	struct qm_dfx_registers *regs;
1048 	u32 val;
1049 
1050 	if (qm->fun_type == QM_HW_PF)
1051 		regs = qm_dfx_regs;
1052 	else
1053 		regs = qm_vf_dfx_regs;
1054 
1055 	while (regs->reg_name) {
1056 		val = readl(qm->io_base + regs->reg_offset);
1057 		seq_printf(s, "%s= 0x%08x\n", regs->reg_name, val);
1058 		regs++;
1059 	}
1060 
1061 	return 0;
1062 }
1063 
1064 DEFINE_SHOW_ATTRIBUTE(qm_regs);
1065 
1066 static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
1067 			   size_t count, loff_t *pos)
1068 {
1069 	char buf[QM_DBG_READ_LEN];
1070 	int len;
1071 
1072 	len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n",
1073 			"Please echo help to cmd to get help information");
1074 
1075 	return simple_read_from_buffer(buffer, count, pos, buf, len);
1076 }
1077 
1078 static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
1079 			  dma_addr_t *dma_addr)
1080 {
1081 	struct device *dev = &qm->pdev->dev;
1082 	void *ctx_addr;
1083 
1084 	ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
1085 	if (!ctx_addr)
1086 		return ERR_PTR(-ENOMEM);
1087 
1088 	*dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
1089 	if (dma_mapping_error(dev, *dma_addr)) {
1090 		dev_err(dev, "DMA mapping error!\n");
1091 		kfree(ctx_addr);
1092 		return ERR_PTR(-ENOMEM);
1093 	}
1094 
1095 	return ctx_addr;
1096 }
1097 
1098 static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
1099 			const void *ctx_addr, dma_addr_t *dma_addr)
1100 {
1101 	struct device *dev = &qm->pdev->dev;
1102 
1103 	dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
1104 	kfree(ctx_addr);
1105 }
1106 
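/*
 * Dump a context structure dword by dword; the bytes within each dword are
 * printed most-significant first.
 */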
1107 static int dump_show(struct hisi_qm *qm, void *info,
1108 		     unsigned int info_size, char *info_name)
1109 {
1110 	struct device *dev = &qm->pdev->dev;
1111 	u8 *info_buf, *info_curr = info;
1112 	u32 i;
1113 #define BYTE_PER_DW	4
1114 
1115 	info_buf = kzalloc(info_size, GFP_KERNEL);
1116 	if (!info_buf)
1117 		return -ENOMEM;
1118 
1119 	for (i = 0; i < info_size; i++, info_curr++) {
1120 		if (i % BYTE_PER_DW == 0)
1121 			info_buf[i + 3UL] = *info_curr;
1122 		else if (i % BYTE_PER_DW == 1)
1123 			info_buf[i + 1UL] = *info_curr;
1124 		else if (i % BYTE_PER_DW == 2)
1125 			info_buf[i - 1] = *info_curr;
1126 		else if (i % BYTE_PER_DW == 3)
1127 			info_buf[i - 3] = *info_curr;
1128 	}
1129 
1130 	dev_info(dev, "%s DUMP\n", info_name);
1131 	for (i = 0; i < info_size; i += BYTE_PER_DW) {
1132 		pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
1133 			info_buf[i], info_buf[i + 1UL],
1134 			info_buf[i + 2UL], info_buf[i + 3UL]);
1135 	}
1136 
1137 	kfree(info_buf);
1138 
1139 	return 0;
1140 }
1141 
1142 static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
1143 {
1144 	return qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
1145 }
1146 
1147 static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
1148 {
1149 	return qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
1150 }
1151 
1152 static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
1153 {
1154 	struct device *dev = &qm->pdev->dev;
1155 	struct qm_sqc *sqc, *sqc_curr;
1156 	dma_addr_t sqc_dma;
1157 	u32 qp_id;
1158 	int ret;
1159 
1160 	if (!s)
1161 		return -EINVAL;
1162 
1163 	ret = kstrtou32(s, 0, &qp_id);
1164 	if (ret || qp_id >= qm->qp_num) {
1165 		dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
1166 		return -EINVAL;
1167 	}
1168 
1169 	sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
1170 	if (IS_ERR(sqc))
1171 		return PTR_ERR(sqc);
1172 
1173 	ret = qm_dump_sqc_raw(qm, sqc_dma, qp_id);
1174 	if (ret) {
1175 		down_read(&qm->qps_lock);
1176 		if (qm->sqc) {
1177 			sqc_curr = qm->sqc + qp_id;
1178 
1179 			ret = dump_show(qm, sqc_curr, sizeof(*sqc),
1180 					"SOFT SQC");
1181 			if (ret)
1182 				dev_info(dev, "Show soft sqc failed!\n");
1183 		}
1184 		up_read(&qm->qps_lock);
1185 
1186 		goto err_free_ctx;
1187 	}
1188 
1189 	ret = dump_show(qm, sqc, sizeof(*sqc), "SQC");
1190 	if (ret)
1191 		dev_info(dev, "Show hw sqc failed!\n");
1192 
1193 err_free_ctx:
1194 	qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
1195 	return ret;
1196 }
1197 
1198 static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
1199 {
1200 	struct device *dev = &qm->pdev->dev;
1201 	struct qm_cqc *cqc, *cqc_curr;
1202 	dma_addr_t cqc_dma;
1203 	u32 qp_id;
1204 	int ret;
1205 
1206 	if (!s)
1207 		return -EINVAL;
1208 
1209 	ret = kstrtou32(s, 0, &qp_id);
1210 	if (ret || qp_id >= qm->qp_num) {
1211 		dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
1212 		return -EINVAL;
1213 	}
1214 
1215 	cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
1216 	if (IS_ERR(cqc))
1217 		return PTR_ERR(cqc);
1218 
1219 	ret = qm_dump_cqc_raw(qm, cqc_dma, qp_id);
1220 	if (ret) {
1221 		down_read(&qm->qps_lock);
1222 		if (qm->cqc) {
1223 			cqc_curr = qm->cqc + qp_id;
1224 
1225 			ret = dump_show(qm, cqc_curr, sizeof(*cqc),
1226 					"SOFT CQC");
1227 			if (ret)
1228 				dev_info(dev, "Show soft cqc failed!\n");
1229 		}
1230 		up_read(&qm->qps_lock);
1231 
1232 		goto err_free_ctx;
1233 	}
1234 
1235 	ret = dump_show(qm, cqc, sizeof(*cqc), "CQC");
1236 	if (ret)
1237 		dev_info(dev, "Show hw cqc failed!\n");
1238 
1239 err_free_ctx:
1240 	qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
1241 	return ret;
1242 }
1243 
1244 static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
1245 			    int cmd, char *name)
1246 {
1247 	struct device *dev = &qm->pdev->dev;
1248 	dma_addr_t xeqc_dma;
1249 	void *xeqc;
1250 	int ret;
1251 
1252 	if (strsep(&s, " ")) {
1253 		dev_err(dev, "Please do not input extra characters!\n");
1254 		return -EINVAL;
1255 	}
1256 
1257 	xeqc = qm_ctx_alloc(qm, size, &xeqc_dma);
1258 	if (IS_ERR(xeqc))
1259 		return PTR_ERR(xeqc);
1260 
1261 	ret = qm_mb(qm, cmd, xeqc_dma, 0, 1);
1262 	if (ret)
1263 		goto err_free_ctx;
1264 
1265 	ret = dump_show(qm, xeqc, size, name);
1266 	if (ret)
1267 		dev_info(dev, "Show hw %s failed!\n", name);
1268 
1269 err_free_ctx:
1270 	qm_ctx_free(qm, size, xeqc, &xeqc_dma);
1271 	return ret;
1272 }
1273 
1274 static int q_dump_param_parse(struct hisi_qm *qm, char *s,
1275 			      u32 *e_id, u32 *q_id)
1276 {
1277 	struct device *dev = &qm->pdev->dev;
1278 	unsigned int qp_num = qm->qp_num;
1279 	char *presult;
1280 	int ret;
1281 
1282 	presult = strsep(&s, " ");
1283 	if (!presult) {
1284 		dev_err(dev, "Please input qp number!\n");
1285 		return -EINVAL;
1286 	}
1287 
1288 	ret = kstrtou32(presult, 0, q_id);
1289 	if (ret || *q_id >= qp_num) {
1290 		dev_err(dev, "Please input qp num (0-%u)", qp_num - 1);
1291 		return -EINVAL;
1292 	}
1293 
1294 	presult = strsep(&s, " ");
1295 	if (!presult) {
1296 		dev_err(dev, "Please input sqe number!\n");
1297 		return -EINVAL;
1298 	}
1299 
1300 	ret = kstrtou32(presult, 0, e_id);
1301 	if (ret || *e_id >= QM_Q_DEPTH) {
1302 		dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1);
1303 		return -EINVAL;
1304 	}
1305 
1306 	if (strsep(&s, " ")) {
1307 		dev_err(dev, "Please do not input extra characters!\n");
1308 		return -EINVAL;
1309 	}
1310 
1311 	return 0;
1312 }
1313 
1314 static int qm_sq_dump(struct hisi_qm *qm, char *s)
1315 {
1316 	struct device *dev = &qm->pdev->dev;
1317 	void *sqe, *sqe_curr;
1318 	struct hisi_qp *qp;
1319 	u32 qp_id, sqe_id;
1320 	int ret;
1321 
1322 	ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id);
1323 	if (ret)
1324 		return ret;
1325 
1326 	sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL);
1327 	if (!sqe)
1328 		return -ENOMEM;
1329 
1330 	qp = &qm->qp_array[qp_id];
1331 	memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH);
1332 	sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
1333 	memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
1334 	       qm->debug.sqe_mask_len);
1335 
1336 	ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
1337 	if (ret)
1338 		dev_info(dev, "Show sqe failed!\n");
1339 
1340 	kfree(sqe);
1341 
1342 	return ret;
1343 }
1344 
1345 static int qm_cq_dump(struct hisi_qm *qm, char *s)
1346 {
1347 	struct device *dev = &qm->pdev->dev;
1348 	struct qm_cqe *cqe_curr;
1349 	struct hisi_qp *qp;
1350 	u32 qp_id, cqe_id;
1351 	int ret;
1352 
1353 	ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id);
1354 	if (ret)
1355 		return ret;
1356 
1357 	qp = &qm->qp_array[qp_id];
1358 	cqe_curr = qp->cqe + cqe_id;
1359 	ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
1360 	if (ret)
1361 		dev_info(dev, "Show cqe failed!\n");
1362 
1363 	return ret;
1364 }
1365 
1366 static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
1367 			  size_t size, char *name)
1368 {
1369 	struct device *dev = &qm->pdev->dev;
1370 	void *xeqe;
1371 	u32 xeqe_id;
1372 	int ret;
1373 
1374 	if (!s)
1375 		return -EINVAL;
1376 
1377 	ret = kstrtou32(s, 0, &xeqe_id);
1378 	if (ret)
1379 		return -EINVAL;
1380 
1381 	if (!strcmp(name, "EQE") && xeqe_id >= QM_EQ_DEPTH) {
1382 		dev_err(dev, "Please input eqe num (0-%d)", QM_EQ_DEPTH - 1);
1383 		return -EINVAL;
1384 	} else if (!strcmp(name, "AEQE") && xeqe_id >= QM_Q_DEPTH) {
1385 		dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1);
1386 		return -EINVAL;
1387 	}
1388 
1389 	down_read(&qm->qps_lock);
1390 
1391 	if (qm->eqe && !strcmp(name, "EQE")) {
1392 		xeqe = qm->eqe + xeqe_id;
1393 	} else if (qm->aeqe && !strcmp(name, "AEQE")) {
1394 		xeqe = qm->aeqe + xeqe_id;
1395 	} else {
1396 		ret = -EINVAL;
1397 		goto err_unlock;
1398 	}
1399 
1400 	ret = dump_show(qm, xeqe, size, name);
1401 	if (ret)
1402 		dev_info(dev, "Show %s failed!\n", name);
1403 
1404 err_unlock:
1405 	up_read(&qm->qps_lock);
1406 	return ret;
1407 }
1408 
1409 static int qm_dbg_help(struct hisi_qm *qm, char *s)
1410 {
1411 	struct device *dev = &qm->pdev->dev;
1412 
1413 	if (strsep(&s, " ")) {
1414 		dev_err(dev, "Please do not input extra characters!\n");
1415 		return -EINVAL;
1416 	}
1417 
1418 	dev_info(dev, "available commands:\n");
1419 	dev_info(dev, "sqc <num>\n");
1420 	dev_info(dev, "cqc <num>\n");
1421 	dev_info(dev, "eqc\n");
1422 	dev_info(dev, "aeqc\n");
1423 	dev_info(dev, "sq <num> <e>\n");
1424 	dev_info(dev, "cq <num> <e>\n");
1425 	dev_info(dev, "eq <e>\n");
1426 	dev_info(dev, "aeq <e>\n");
1427 
1428 	return 0;
1429 }
1430 
1431 static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
1432 {
1433 	struct device *dev = &qm->pdev->dev;
1434 	char *presult, *s, *s_tmp;
1435 	int ret;
1436 
1437 	s = kstrdup(cmd_buf, GFP_KERNEL);
1438 	if (!s)
1439 		return -ENOMEM;
1440 
1441 	s_tmp = s;
1442 	presult = strsep(&s, " ");
1443 	if (!presult) {
1444 		ret = -EINVAL;
1445 		goto err_buffer_free;
1446 	}
1447 
1448 	if (!strcmp(presult, "sqc"))
1449 		ret = qm_sqc_dump(qm, s);
1450 	else if (!strcmp(presult, "cqc"))
1451 		ret = qm_cqc_dump(qm, s);
1452 	else if (!strcmp(presult, "eqc"))
1453 		ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc),
1454 				       QM_MB_CMD_EQC, "EQC");
1455 	else if (!strcmp(presult, "aeqc"))
1456 		ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc),
1457 				       QM_MB_CMD_AEQC, "AEQC");
1458 	else if (!strcmp(presult, "sq"))
1459 		ret = qm_sq_dump(qm, s);
1460 	else if (!strcmp(presult, "cq"))
1461 		ret = qm_cq_dump(qm, s);
1462 	else if (!strcmp(presult, "eq"))
1463 		ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE");
1464 	else if (!strcmp(presult, "aeq"))
1465 		ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE");
1466 	else if (!strcmp(presult, "help"))
1467 		ret = qm_dbg_help(qm, s);
1468 	else
1469 		ret = -EINVAL;
1470 
1471 	if (ret)
1472 		dev_info(dev, "Please echo help\n");
1473 
1474 err_buffer_free:
1475 	kfree(s_tmp);
1476 
1477 	return ret;
1478 }
1479 
1480 static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
1481 			    size_t count, loff_t *pos)
1482 {
1483 	struct hisi_qm *qm = filp->private_data;
1484 	char *cmd_buf, *cmd_buf_tmp;
1485 	int ret;
1486 
1487 	if (*pos)
1488 		return 0;
1489 
1490 	/* Check whether the instance is being reset. */
1491 	if (unlikely(atomic_read(&qm->status.flags) == QM_STOP))
1492 		return 0;
1493 
1494 	if (count > QM_DBG_WRITE_LEN)
1495 		return -ENOSPC;
1496 
1497 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1498 	if (!cmd_buf)
1499 		return -ENOMEM;
1500 
1501 	if (copy_from_user(cmd_buf, buffer, count)) {
1502 		kfree(cmd_buf);
1503 		return -EFAULT;
1504 	}
1505 
1506 	cmd_buf[count] = '\0';
1507 
1508 	cmd_buf_tmp = strchr(cmd_buf, '\n');
1509 	if (cmd_buf_tmp) {
1510 		*cmd_buf_tmp = '\0';
1511 		count = cmd_buf_tmp - cmd_buf + 1;
1512 	}
1513 
1514 	ret = qm_cmd_write_dump(qm, cmd_buf);
1515 	if (ret) {
1516 		kfree(cmd_buf);
1517 		return ret;
1518 	}
1519 
1520 	kfree(cmd_buf);
1521 
1522 	return count;
1523 }
1524 
1525 static const struct file_operations qm_cmd_fops = {
1526 	.owner = THIS_MODULE,
1527 	.open = simple_open,
1528 	.read = qm_cmd_read,
1529 	.write = qm_cmd_write,
1530 };
1531 
1532 static void qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
1533 {
1534 	struct dentry *qm_d = qm->debug.qm_d;
1535 	struct debugfs_file *file = qm->debug.files + index;
1536 
1537 	debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file,
1538 			    &qm_debug_fops);
1539 
1540 	file->index = index;
1541 	mutex_init(&file->lock);
1542 	file->debug = &qm->debug;
1543 }
1544 
1545 static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
1546 {
1547 	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
1548 }
1549 
1550 static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
1551 {
1552 	u32 irq_enable = ce | nfe | fe;
1553 	u32 irq_unmask = ~irq_enable;
1554 
1555 	qm->error_mask = ce | nfe | fe;
1556 
1557 	/* clear QM hw residual error source */
1558 	writel(QM_ABNORMAL_INT_SOURCE_CLR,
1559 	       qm->io_base + QM_ABNORMAL_INT_SOURCE);
1560 
1561 	/* configure error type */
1562 	writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
1563 	writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
1564 	writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
1565 	writel(fe, qm->io_base + QM_RAS_FE_ENABLE);
1566 
1567 	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
1568 	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
1569 }
1570 
1571 static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
1572 {
1573 	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
1574 }
1575 
1576 static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
1577 {
1578 	const struct hisi_qm_hw_error *err;
1579 	struct device *dev = &qm->pdev->dev;
1580 	u32 reg_val, type, vf_num;
1581 	int i;
1582 
1583 	for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
1584 		err = &qm_hw_error[i];
1585 		if (!(err->int_msk & error_status))
1586 			continue;
1587 
1588 		dev_err(dev, "%s [error status=0x%x] found\n",
1589 			err->msg, err->int_msk);
1590 
1591 		if (err->int_msk & QM_DB_TIMEOUT) {
1592 			reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
1593 			type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
1594 			       QM_DB_TIMEOUT_TYPE_SHIFT;
1595 			vf_num = reg_val & QM_DB_TIMEOUT_VF;
1596 			dev_err(dev, "qm %s doorbell timeout in function %u\n",
1597 				qm_db_timeout[type], vf_num);
1598 		} else if (err->int_msk & QM_OF_FIFO_OF) {
1599 			reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
1600 			type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
1601 			       QM_FIFO_OVERFLOW_TYPE_SHIFT;
1602 			vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
1603 
1604 			if (type < ARRAY_SIZE(qm_fifo_overflow))
1605 				dev_err(dev, "qm %s fifo overflow in function %u\n",
1606 					qm_fifo_overflow[type], vf_num);
1607 			else
1608 				dev_err(dev, "unknown error type\n");
1609 		}
1610 	}
1611 }
1612 
1613 static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
1614 {
1615 	u32 error_status, tmp, val;
1616 
1617 	/* read the error status */
1618 	tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
1619 	error_status = qm->error_mask & tmp;
1620 
1621 	if (error_status) {
1622 		if (error_status & QM_ECC_MBIT)
1623 			qm->err_status.is_qm_ecc_mbit = true;
1624 
1625 		qm_log_hw_error(qm, error_status);
1626 		val = error_status | QM_DB_RANDOM_INVALID | QM_BASE_CE;
1627 		/* a CE error does not require a reset */
1628 		if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) {
1629 			writel(error_status, qm->io_base +
1630 			       QM_ABNORMAL_INT_SOURCE);
1631 			writel(qm->err_ini->err_info.nfe,
1632 			       qm->io_base + QM_RAS_NFE_ENABLE);
1633 			return ACC_ERR_RECOVERED;
1634 		}
1635 
1636 		return ACC_ERR_NEED_RESET;
1637 	}
1638 
1639 	return ACC_ERR_RECOVERED;
1640 }
1641 
1642 static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
1643 	.qm_db = qm_db_v1,
1644 	.get_irq_num = qm_get_irq_num_v1,
1645 	.hw_error_init = qm_hw_error_init_v1,
1646 };
1647 
1648 static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
1649 	.get_vft = qm_get_vft_v2,
1650 	.qm_db = qm_db_v2,
1651 	.get_irq_num = qm_get_irq_num_v2,
1652 	.hw_error_init = qm_hw_error_init_v2,
1653 	.hw_error_uninit = qm_hw_error_uninit_v2,
1654 	.hw_error_handle = qm_hw_error_handle_v2,
1655 };
1656 
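/*
 * Return the next free SQE slot, or NULL when QM_Q_DEPTH - 1 entries are
 * already in flight.
 */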
1657 static void *qm_get_avail_sqe(struct hisi_qp *qp)
1658 {
1659 	struct hisi_qp_status *qp_status = &qp->qp_status;
1660 	u16 sq_tail = qp_status->sq_tail;
1661 
1662 	if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1))
1663 		return NULL;
1664 
1665 	return qp->sqe + sq_tail * qp->qm->sqe_size;
1666 }
1667 
1668 static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
1669 {
1670 	struct device *dev = &qm->pdev->dev;
1671 	struct hisi_qp *qp;
1672 	int qp_id;
1673 
1674 	if (!qm_qp_avail_state(qm, NULL, QP_INIT))
1675 		return ERR_PTR(-EPERM);
1676 
1677 	if (qm->qp_in_used == qm->qp_num) {
1678 		dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
1679 				     qm->qp_num);
1680 		atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
1681 		return ERR_PTR(-EBUSY);
1682 	}
1683 
1684 	qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
1685 	if (qp_id < 0) {
1686 		dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
1687 				    qm->qp_num);
1688 		atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
1689 		return ERR_PTR(-EBUSY);
1690 	}
1691 
1692 	qp = &qm->qp_array[qp_id];
1693 
1694 	memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);
1695 
1696 	qp->event_cb = NULL;
1697 	qp->req_cb = NULL;
1698 	qp->qp_id = qp_id;
1699 	qp->alg_type = alg_type;
1700 	qp->is_in_kernel = true;
1701 	qm->qp_in_used++;
1702 	atomic_set(&qp->qp_status.flags, QP_INIT);
1703 
1704 	return qp;
1705 }
1706 
1707 /**
1708  * hisi_qm_create_qp() - Create a queue pair from qm.
1709  * @qm: The qm we create a qp from.
1710  * @alg_type: Accelerator specific algorithm type in sqc.
1711  *
1712  * Return the created qp on success, or an ERR_PTR-encoded error otherwise
1713  * (e.g. -EBUSY if all qps in the qm are already allocated).
1714  */
1715 struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
1716 {
1717 	struct hisi_qp *qp;
1718 
1719 	down_write(&qm->qps_lock);
1720 	qp = qm_create_qp_nolock(qm, alg_type);
1721 	up_write(&qm->qps_lock);
1722 
1723 	return qp;
1724 }
1725 EXPORT_SYMBOL_GPL(hisi_qm_create_qp);
1726 
1727 /**
1728  * hisi_qm_release_qp() - Release a qp back to its qm.
1729  * @qp: The qp we want to release.
1730  *
1731  * This function releases the resources of a qp.
1732  */
1733 void hisi_qm_release_qp(struct hisi_qp *qp)
1734 {
1735 	struct hisi_qm *qm = qp->qm;
1736 
1737 	down_write(&qm->qps_lock);
1738 
1739 	if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
1740 		up_write(&qm->qps_lock);
1741 		return;
1742 	}
1743 
1744 	qm->qp_in_used--;
1745 	idr_remove(&qm->qp_idr, qp->qp_id);
1746 
1747 	up_write(&qm->qps_lock);
1748 }
1749 EXPORT_SYMBOL_GPL(hisi_qm_release_qp);
1750 
1751 static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
1752 {
1753 	struct hisi_qm *qm = qp->qm;
1754 	struct device *dev = &qm->pdev->dev;
1755 	enum qm_hw_ver ver = qm->ver;
1756 	struct qm_sqc *sqc;
1757 	dma_addr_t sqc_dma;
1758 	int ret;
1759 
1760 	sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
1761 	if (!sqc)
1762 		return -ENOMEM;
1763 
1764 	INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
1765 	if (ver == QM_HW_V1) {
1766 		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
1767 		sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
1768 	} else {
1769 		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
1770 		sqc->w8 = 0; /* rand_qc */
1771 	}
1772 	sqc->cq_num = cpu_to_le16(qp_id);
1773 	sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
1774 
1775 	if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
1776 		sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
1777 				       QM_QC_PASID_ENABLE_SHIFT);
1778 
1779 	sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
1780 				 DMA_TO_DEVICE);
1781 	if (dma_mapping_error(dev, sqc_dma)) {
1782 		kfree(sqc);
1783 		return -ENOMEM;
1784 	}
1785 
1786 	ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
1787 	dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
1788 	kfree(sqc);
1789 
1790 	return ret;
1791 }
1792 
1793 static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
1794 {
1795 	struct hisi_qm *qm = qp->qm;
1796 	struct device *dev = &qm->pdev->dev;
1797 	enum qm_hw_ver ver = qm->ver;
1798 	struct qm_cqc *cqc;
1799 	dma_addr_t cqc_dma;
1800 	int ret;
1801 
1802 	cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
1803 	if (!cqc)
1804 		return -ENOMEM;
1805 
1806 	INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
1807 	if (ver == QM_HW_V1) {
1808 		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0,
1809 							QM_QC_CQE_SIZE));
1810 		cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
1811 	} else {
1812 		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE));
1813 		cqc->w8 = 0; /* rand_qc */
1814 	}
1815 	cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
1816 
1817 	if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
1818 		cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE);
1819 
1820 	cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
1821 				 DMA_TO_DEVICE);
1822 	if (dma_mapping_error(dev, cqc_dma)) {
1823 		kfree(cqc);
1824 		return -ENOMEM;
1825 	}
1826 
1827 	ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
1828 	dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
1829 	kfree(cqc);
1830 
1831 	return ret;
1832 }
1833 
1834 static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
1835 {
1836 	int ret;
1837 
1838 	qm_init_qp_status(qp);
1839 
1840 	ret = qm_sq_ctx_cfg(qp, qp_id, pasid);
1841 	if (ret)
1842 		return ret;
1843 
1844 	return qm_cq_ctx_cfg(qp, qp_id, pasid);
1845 }
1846 
1847 static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
1848 {
1849 	struct hisi_qm *qm = qp->qm;
1850 	struct device *dev = &qm->pdev->dev;
1851 	int qp_id = qp->qp_id;
1852 	u32 pasid = arg;
1853 	int ret;
1854 
1855 	if (!qm_qp_avail_state(qm, qp, QP_START))
1856 		return -EPERM;
1857 
1858 	ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
1859 	if (ret)
1860 		return ret;
1861 
1862 	atomic_set(&qp->qp_status.flags, QP_START);
1863 	dev_dbg(dev, "queue %d started\n", qp_id);
1864 
1865 	return 0;
1866 }
1867 
1868 /**
1869  * hisi_qm_start_qp() - Start a qp into running.
1870  * @qp: The qp we want to start to run.
1871  * @arg: Accelerator specific argument.
1872  *
1873  * After this function, the qp can receive requests from the user. Return 0 if
1874  * successful, or a negative error code on failure.
1875  */
1876 int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
1877 {
1878 	struct hisi_qm *qm = qp->qm;
1879 	int ret;
1880 
1881 	down_write(&qm->qps_lock);
1882 	ret = qm_start_qp_nolock(qp, arg);
1883 	up_write(&qm->qps_lock);
1884 
1885 	return ret;
1886 }
1887 EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
1888 
1889 /**
1890  * qp_stop_fail_cb() - Call the request callback for outstanding requests.
1891  * @qp: The qp that failed to stop.
1892  *
1893  * The callback function should be called whether the task completed or not.
1894  */
1895 static void qp_stop_fail_cb(struct hisi_qp *qp)
1896 {
1897 	int qp_used = atomic_read(&qp->qp_status.used);
1898 	u16 cur_tail = qp->qp_status.sq_tail;
1899 	u16 cur_head = (cur_tail + QM_Q_DEPTH - qp_used) % QM_Q_DEPTH;
1900 	struct hisi_qm *qm = qp->qm;
1901 	u16 pos;
1902 	int i;
1903 
1904 	for (i = 0; i < qp_used; i++) {
1905 		pos = (i + cur_head) % QM_Q_DEPTH;
1906 		qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
1907 		atomic_dec(&qp->qp_status.used);
1908 	}
1909 }
1910 
1911 /**
1912  * qm_drain_qp() - Drain a qp.
1913  * @qp: The qp we want to drain.
1914  *
1915  * Determine whether the queue has been drained by comparing the tail pointers
1916  * of the sq and cq.
1917  */
1918 static int qm_drain_qp(struct hisi_qp *qp)
1919 {
1920 	size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc);
1921 	struct hisi_qm *qm = qp->qm;
1922 	struct device *dev = &qm->pdev->dev;
1923 	struct qm_sqc *sqc;
1924 	struct qm_cqc *cqc;
1925 	dma_addr_t dma_addr;
1926 	int ret = 0, i = 0;
1927 	void *addr;
1928 
1929 	/*
1930 	 * No need to check whether an ECC multi-bit error has occurred, because
1931 	 * the master OOO will be blocked.
1932 	 */
1933 	if (qm->err_status.is_qm_ecc_mbit || qm->err_status.is_dev_ecc_mbit)
1934 		return 0;
1935 
1936 	addr = qm_ctx_alloc(qm, size, &dma_addr);
1937 	if (IS_ERR(addr)) {
1938 		dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
1939 		return -ENOMEM;
1940 	}
1941 
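	/*
	 * Poll the hardware SQC and CQC until their tail pointers match,
	 * giving up after MAX_WAIT_COUNTS iterations.
	 */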
1942 	while (++i) {
1943 		ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id);
1944 		if (ret) {
1945 			dev_err_ratelimited(dev, "Failed to dump sqc!\n");
1946 			break;
1947 		}
1948 		sqc = addr;
1949 
1950 		ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)),
1951 				      qp->qp_id);
1952 		if (ret) {
1953 			dev_err_ratelimited(dev, "Failed to dump cqc!\n");
1954 			break;
1955 		}
1956 		cqc = addr + sizeof(struct qm_sqc);
1957 
1958 		if ((sqc->tail == cqc->tail) &&
1959 		    (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc)))
1960 			break;
1961 
1962 		if (i == MAX_WAIT_COUNTS) {
1963 			dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id);
1964 			ret = -EBUSY;
1965 			break;
1966 		}
1967 
1968 		usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
1969 	}
1970 
1971 	qm_ctx_free(qm, size, addr, &dma_addr);
1972 
1973 	return ret;
1974 }
1975 
1976 static int qm_stop_qp_nolock(struct hisi_qp *qp)
1977 {
1978 	struct device *dev = &qp->qm->pdev->dev;
1979 	int ret;
1980 
1981 	/*
1982 	 * It is allowed to stop and release a qp during reset. If the qp is
1983 	 * stopped during reset but still needs to be released afterwards, the
1984 	 * is_resetting flag should be cleared so that this qp will not be
1985 	 * restarted after the reset.
1986 	 */
1987 	if (atomic_read(&qp->qp_status.flags) == QP_STOP) {
1988 		qp->is_resetting = false;
1989 		return 0;
1990 	}
1991 
1992 	if (!qm_qp_avail_state(qp->qm, qp, QP_STOP))
1993 		return -EPERM;
1994 
1995 	atomic_set(&qp->qp_status.flags, QP_STOP);
1996 
1997 	ret = qm_drain_qp(qp);
1998 	if (ret)
1999 		dev_err(dev, "Failed to drain out data for stopping!\n");
2000 
2001 	if (qp->qm->wq)
2002 		flush_workqueue(qp->qm->wq);
2003 	else
2004 		flush_work(&qp->qm->work);
2005 
2006 	if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
2007 		qp_stop_fail_cb(qp);
2008 
2009 	dev_dbg(dev, "stop queue %u!", qp->qp_id);
2010 
2011 	return 0;
2012 }
2013 
2014 /**
2015  * hisi_qm_stop_qp() - Stop a qp in qm.
2016  * @qp: The qp we want to stop.
2017  *
2018  * This function is reverse of hisi_qm_start_qp. Return 0 if successful.
2019  */
2020 int hisi_qm_stop_qp(struct hisi_qp *qp)
2021 {
2022 	int ret;
2023 
2024 	down_write(&qp->qm->qps_lock);
2025 	ret = qm_stop_qp_nolock(qp);
2026 	up_write(&qp->qm->qps_lock);
2027 
2028 	return ret;
2029 }
2030 EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
2031 
2032 /**
2033  * hisi_qp_send() - Queue up a task in the hardware queue.
2034  * @qp: The qp in which to put the message.
2035  * @msg: The message.
2036  *
2037  * This function returns -EBUSY if the qp is currently full, and -EAGAIN
2038  * if the qp or its qm is stopped or resetting.
2039  *
2040  * Note: This function may run concurrently with qm_irq_thread and ACC reset.
2041  *       It does not race with qm_irq_thread. However, an ACC reset may happen
2042  *       during hisi_qp_send; for performance reasons no lock is taken here, so
2043  *       the current qm_db may fail to send or the sent sqe may never be
2044  *       received. The QM sync/async receive functions should handle the error
2045  *       sqe, and the ACC reset-done handler should clear the used sqes to 0.
2046  */
2047 int hisi_qp_send(struct hisi_qp *qp, const void *msg)
2048 {
2049 	struct hisi_qp_status *qp_status = &qp->qp_status;
2050 	u16 sq_tail = qp_status->sq_tail;
2051 	u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
2052 	void *sqe = qm_get_avail_sqe(qp);
2053 
2054 	if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
2055 		     atomic_read(&qp->qm->status.flags) == QM_STOP ||
2056 		     qp->is_resetting)) {
2057 		dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
2058 		return -EAGAIN;
2059 	}
2060 
2061 	if (!sqe)
2062 		return -EBUSY;
2063 
2064 	memcpy(sqe, msg, qp->qm->sqe_size);
2065 
2066 	qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
2067 	atomic_inc(&qp->qp_status.used);
2068 	qp_status->sq_tail = sq_tail_next;
2069 
2070 	return 0;
2071 }
2072 EXPORT_SYMBOL_GPL(hisi_qp_send);
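
/*
 * Illustrative sketch only (not part of this driver): a typical caller of
 * hisi_qp_send() retries on -EBUSY (hardware queue full) and treats -EAGAIN
 * (qp or qm stopped/resetting) as a reason to fail or back off. The retry
 * count is an arbitrary example value, and process context is assumed
 * because of the sleep.
 */
static __maybe_unused int example_send_sqe(struct hisi_qp *qp, const void *sqe)
{
	int retries = 100;
	int ret;

	do {
		ret = hisi_qp_send(qp, sqe);
		if (ret != -EBUSY)
			break;

		/* hardware queue is full, give the device time to drain */
		usleep_range(10, 20);
	} while (--retries);

	return ret;
}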
2073 
2074 static void hisi_qm_cache_wb(struct hisi_qm *qm)
2075 {
2076 	unsigned int val;
2077 
2078 	if (qm->ver == QM_HW_V1)
2079 		return;
2080 
2081 	writel(0x1, qm->io_base + QM_CACHE_WB_START);
2082 	if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
2083 				       val, val & BIT(0), POLL_PERIOD,
2084 				       POLL_TIMEOUT))
2085 		dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
2086 }
2087 
2088 static void qm_qp_event_notifier(struct hisi_qp *qp)
2089 {
2090 	wake_up_interruptible(&qp->uacce_q->wait);
2091 }
2092 
2093 static int hisi_qm_get_available_instances(struct uacce_device *uacce)
2094 {
2095 	return hisi_qm_get_free_qp_num(uacce->priv);
2096 }
2097 
2098 static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
2099 				   unsigned long arg,
2100 				   struct uacce_queue *q)
2101 {
2102 	struct hisi_qm *qm = uacce->priv;
2103 	struct hisi_qp *qp;
2104 	u8 alg_type = 0;
2105 
2106 	qp = hisi_qm_create_qp(qm, alg_type);
2107 	if (IS_ERR(qp))
2108 		return PTR_ERR(qp);
2109 
2110 	q->priv = qp;
2111 	q->uacce = uacce;
2112 	qp->uacce_q = q;
2113 	qp->event_cb = qm_qp_event_notifier;
2114 	qp->pasid = arg;
2115 	qp->is_in_kernel = false;
2116 
2117 	return 0;
2118 }
2119 
2120 static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
2121 {
2122 	struct hisi_qp *qp = q->priv;
2123 
2124 	hisi_qm_cache_wb(qp->qm);
2125 	hisi_qm_release_qp(qp);
2126 }
2127 
2128 /* map sq/cq/doorbell to user space */
2129 static int hisi_qm_uacce_mmap(struct uacce_queue *q,
2130 			      struct vm_area_struct *vma,
2131 			      struct uacce_qfile_region *qfr)
2132 {
2133 	struct hisi_qp *qp = q->priv;
2134 	struct hisi_qm *qm = qp->qm;
2135 	size_t sz = vma->vm_end - vma->vm_start;
2136 	struct pci_dev *pdev = qm->pdev;
2137 	struct device *dev = &pdev->dev;
2138 	unsigned long vm_pgoff;
2139 	int ret;
2140 
2141 	switch (qfr->type) {
2142 	case UACCE_QFRT_MMIO:
2143 		if (qm->ver == QM_HW_V1) {
2144 			if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
2145 				return -EINVAL;
2146 		} else {
2147 			if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
2148 			    QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
2149 				return -EINVAL;
2150 		}
2151 
2152 		vma->vm_flags |= VM_IO;
2153 
2154 		return remap_pfn_range(vma, vma->vm_start,
2155 				       qm->phys_base >> PAGE_SHIFT,
2156 				       sz, pgprot_noncached(vma->vm_page_prot));
2157 	case UACCE_QFRT_DUS:
2158 		if (sz != qp->qdma.size)
2159 			return -EINVAL;
2160 
2161 		/*
2162 		 * dma_mmap_coherent() requires vm_pgoff to be 0;
2163 		 * restore vm_pgoff to its initial value for mmap()
2164 		 */
2165 		vm_pgoff = vma->vm_pgoff;
2166 		vma->vm_pgoff = 0;
2167 		ret = dma_mmap_coherent(dev, vma, qp->qdma.va,
2168 					qp->qdma.dma, sz);
2169 		vma->vm_pgoff = vm_pgoff;
2170 		return ret;
2171 
2172 	default:
2173 		return -EINVAL;
2174 	}
2175 }
2176 
2177 static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
2178 {
2179 	struct hisi_qp *qp = q->priv;
2180 
2181 	return hisi_qm_start_qp(qp, qp->pasid);
2182 }
2183 
2184 static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
2185 {
2186 	hisi_qm_stop_qp(q->priv);
2187 }
2188 
2189 static void qm_set_sqctype(struct uacce_queue *q, u16 type)
2190 {
2191 	struct hisi_qm *qm = q->uacce->priv;
2192 	struct hisi_qp *qp = q->priv;
2193 
2194 	down_write(&qm->qps_lock);
2195 	qp->alg_type = type;
2196 	up_write(&qm->qps_lock);
2197 }
2198 
2199 static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
2200 				unsigned long arg)
2201 {
2202 	struct hisi_qp *qp = q->priv;
2203 	struct hisi_qp_ctx qp_ctx;
2204 
2205 	if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
2206 		if (copy_from_user(&qp_ctx, (void __user *)arg,
2207 				   sizeof(struct hisi_qp_ctx)))
2208 			return -EFAULT;
2209 
2210 		if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
2211 			return -EINVAL;
2212 
2213 		qm_set_sqctype(q, qp_ctx.qc_type);
2214 		qp_ctx.id = qp->qp_id;
2215 
2216 		if (copy_to_user((void __user *)arg, &qp_ctx,
2217 				 sizeof(struct hisi_qp_ctx)))
2218 			return -EFAULT;
2219 	} else {
2220 		return -EINVAL;
2221 	}
2222 
2223 	return 0;
2224 }
2225 
2226 static const struct uacce_ops uacce_qm_ops = {
2227 	.get_available_instances = hisi_qm_get_available_instances,
2228 	.get_queue = hisi_qm_uacce_get_queue,
2229 	.put_queue = hisi_qm_uacce_put_queue,
2230 	.start_queue = hisi_qm_uacce_start_queue,
2231 	.stop_queue = hisi_qm_uacce_stop_queue,
2232 	.mmap = hisi_qm_uacce_mmap,
2233 	.ioctl = hisi_qm_uacce_ioctl,
2234 };
2235 
2236 static int qm_alloc_uacce(struct hisi_qm *qm)
2237 {
2238 	struct pci_dev *pdev = qm->pdev;
2239 	struct uacce_device *uacce;
2240 	unsigned long mmio_page_nr;
2241 	unsigned long dus_page_nr;
2242 	struct uacce_interface interface = {
2243 		.flags = UACCE_DEV_SVA,
2244 		.ops = &uacce_qm_ops,
2245 	};
2246 	int ret;
2247 
2248 	ret = strscpy(interface.name, pdev->driver->name,
2249 		      sizeof(interface.name));
2250 	if (ret < 0)
2251 		return -ENAMETOOLONG;
2252 
2253 	uacce = uacce_alloc(&pdev->dev, &interface);
2254 	if (IS_ERR(uacce))
2255 		return PTR_ERR(uacce);
2256 
2257 	if (uacce->flags & UACCE_DEV_SVA && qm->mode == UACCE_MODE_SVA) {
2258 		qm->use_sva = true;
2259 	} else {
2260 		/* only consider sva case */
2261 		uacce_remove(uacce);
2262 		qm->uacce = NULL;
2263 		return -EINVAL;
2264 	}
2265 
2266 	uacce->is_vf = pdev->is_virtfn;
2267 	uacce->priv = qm;
2268 	uacce->algs = qm->algs;
2269 
2270 	if (qm->ver == QM_HW_V1) {
2271 		mmio_page_nr = QM_DOORBELL_PAGE_NR;
2272 		uacce->api_ver = HISI_QM_API_VER_BASE;
2273 	} else {
2274 		mmio_page_nr = QM_DOORBELL_PAGE_NR +
2275 			QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
2276 		uacce->api_ver = HISI_QM_API_VER2_BASE;
2277 	}
2278 
2279 	dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
2280 		       sizeof(struct qm_cqe) * QM_Q_DEPTH) >> PAGE_SHIFT;
2281 
2282 	uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
2283 	uacce->qf_pg_num[UACCE_QFRT_DUS]  = dus_page_nr;
2284 
2285 	qm->uacce = uacce;
2286 
2287 	return 0;
2288 }
2289 
2290 /**
2291  * qm_frozen() - Try to freeze the QM to cut off continuous queue requests. If
2292  * there are users on the QM, return failure without doing anything.
2293  * @qm: The qm to be frozen.
2294  *
2295  * This function freezes the QM so that SR-IOV can then be disabled.
2296  */
2297 static int qm_frozen(struct hisi_qm *qm)
2298 {
2299 	if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl))
2300 		return 0;
2301 
2302 	down_write(&qm->qps_lock);
2303 
2304 	if (!qm->qp_in_used) {
2305 		qm->qp_in_used = qm->qp_num;
2306 		up_write(&qm->qps_lock);
2307 		set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl);
2308 		return 0;
2309 	}
2310 
2311 	up_write(&qm->qps_lock);
2312 
2313 	return -EBUSY;
2314 }
2315 
2316 static int qm_try_frozen_vfs(struct pci_dev *pdev,
2317 			     struct hisi_qm_list *qm_list)
2318 {
2319 	struct hisi_qm *qm, *vf_qm;
2320 	struct pci_dev *dev;
2321 	int ret = 0;
2322 
2323 	if (!qm_list || !pdev)
2324 		return -EINVAL;
2325 
2326 	/* Try to freeze all the VFs before disabling SR-IOV */
2327 	mutex_lock(&qm_list->lock);
2328 	list_for_each_entry(qm, &qm_list->list, list) {
2329 		dev = qm->pdev;
2330 		if (dev == pdev)
2331 			continue;
2332 		if (pci_physfn(dev) == pdev) {
2333 			vf_qm = pci_get_drvdata(dev);
2334 			ret = qm_frozen(vf_qm);
2335 			if (ret)
2336 				goto frozen_fail;
2337 		}
2338 	}
2339 
2340 frozen_fail:
2341 	mutex_unlock(&qm_list->lock);
2342 
2343 	return ret;
2344 }
2345 
2346 /**
2347  * hisi_qm_wait_task_finish() - Wait until the task is finished
2348  * when removing the driver.
2349  * @qm: The qm that needs to wait for its tasks to finish.
2350  * @qm_list: The list of all available devices.
2351  */
2352 void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
2353 {
2354 	while (qm_frozen(qm) ||
2355 	       ((qm->fun_type == QM_HW_PF) &&
2356 	       qm_try_frozen_vfs(qm->pdev, qm_list))) {
2357 		msleep(WAIT_PERIOD);
2358 	}
2359 
2360 	while (test_bit(QM_RST_SCHED, &qm->misc_ctl) ||
2361 	       test_bit(QM_RESETTING, &qm->misc_ctl))
2362 		msleep(WAIT_PERIOD);
2363 
2364 	udelay(REMOVE_WAIT_DELAY);
2365 }
2366 EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
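
/*
 * Illustrative sketch only (not part of this driver): a remove() path usually
 * waits for outstanding tasks before tearing the qm down. The function name
 * is hypothetical, qm_list is the driver's own device list, and VF handling
 * is omitted for brevity.
 */
static __maybe_unused void example_acc_remove(struct hisi_qm *qm,
					      struct hisi_qm_list *qm_list)
{
	hisi_qm_wait_task_finish(qm, qm_list);
	hisi_qm_alg_unregister(qm, qm_list);
	hisi_qm_stop(qm, QM_NORMAL);
	hisi_qm_uninit(qm);
}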
2367 
2368 /**
2369  * hisi_qm_get_free_qp_num() - Get the number of free qps in a qm.
2370  * @qm: The qm to query.
2371  *
2372  * This function returns the number of free qps in the qm.
2373  */
2374 int hisi_qm_get_free_qp_num(struct hisi_qm *qm)
2375 {
2376 	int ret;
2377 
2378 	down_read(&qm->qps_lock);
2379 	ret = qm->qp_num - qm->qp_in_used;
2380 	up_read(&qm->qps_lock);
2381 
2382 	return ret;
2383 }
2384 EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num);
2385 
2386 static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
2387 {
2388 	struct device *dev = &qm->pdev->dev;
2389 	struct qm_dma *qdma;
2390 	int i;
2391 
2392 	for (i = num - 1; i >= 0; i--) {
2393 		qdma = &qm->qp_array[i].qdma;
2394 		dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
2395 	}
2396 
2397 	kfree(qm->qp_array);
2398 }
2399 
2400 static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
2401 {
2402 	struct device *dev = &qm->pdev->dev;
2403 	size_t off = qm->sqe_size * QM_Q_DEPTH;
2404 	struct hisi_qp *qp;
2405 
2406 	qp = &qm->qp_array[id];
2407 	qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
2408 					 GFP_KERNEL);
2409 	if (!qp->qdma.va)
2410 		return -ENOMEM;
2411 
2412 	qp->sqe = qp->qdma.va;
2413 	qp->sqe_dma = qp->qdma.dma;
2414 	qp->cqe = qp->qdma.va + off;
2415 	qp->cqe_dma = qp->qdma.dma + off;
2416 	qp->qdma.size = dma_size;
2417 	qp->qm = qm;
2418 	qp->qp_id = id;
2419 
2420 	return 0;
2421 }
2422 
2423 static int hisi_qm_memory_init(struct hisi_qm *qm)
2424 {
2425 	struct device *dev = &qm->pdev->dev;
2426 	size_t qp_dma_size, off = 0;
2427 	int i, ret = 0;
2428 
2429 #define QM_INIT_BUF(qm, type, num) do { \
2430 	(qm)->type = ((qm)->qdma.va + (off)); \
2431 	(qm)->type##_dma = (qm)->qdma.dma + (off); \
2432 	off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
2433 } while (0)
2434 
2435 	idr_init(&qm->qp_idr);
2436 	qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
2437 			QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
2438 			QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
2439 			QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
2440 	qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
2441 					 GFP_ATOMIC);
2442 	dev_dbg(dev, "allocate qm dma buf size=%zx\n", qm->qdma.size);
2443 	if (!qm->qdma.va)
2444 		return -ENOMEM;
2445 
2446 	QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH);
2447 	QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
2448 	QM_INIT_BUF(qm, sqc, qm->qp_num);
2449 	QM_INIT_BUF(qm, cqc, qm->qp_num);
2450 
2451 	qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
2452 	if (!qm->qp_array) {
2453 		ret = -ENOMEM;
2454 		goto err_alloc_qp_array;
2455 	}
2456 
2457 	/* one more page for device or qp statuses */
2458 	qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
2459 		      sizeof(struct qm_cqe) * QM_Q_DEPTH;
2460 	qp_dma_size = PAGE_ALIGN(qp_dma_size);
2461 	for (i = 0; i < qm->qp_num; i++) {
2462 		ret = hisi_qp_memory_init(qm, qp_dma_size, i);
2463 		if (ret)
2464 			goto err_init_qp_mem;
2465 
2466 		dev_dbg(dev, "allocate qp dma buf size=%zx\n", qp_dma_size);
2467 	}
2468 
2469 	return ret;
2470 
2471 err_init_qp_mem:
2472 	hisi_qp_memory_uninit(qm, i);
2473 err_alloc_qp_array:
2474 	dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
2475 
2476 	return ret;
2477 }
2478 
2479 static void hisi_qm_pre_init(struct hisi_qm *qm)
2480 {
2481 	struct pci_dev *pdev = qm->pdev;
2482 
2483 	if (qm->ver == QM_HW_V1)
2484 		qm->ops = &qm_hw_ops_v1;
2485 	else
2486 		qm->ops = &qm_hw_ops_v2;
2487 
2488 	pci_set_drvdata(pdev, qm);
2489 	mutex_init(&qm->mailbox_lock);
2490 	init_rwsem(&qm->qps_lock);
2491 	qm->qp_in_used = 0;
2492 	qm->misc_ctl = false;
2493 }
2494 
2495 static void hisi_qm_pci_uninit(struct hisi_qm *qm)
2496 {
2497 	struct pci_dev *pdev = qm->pdev;
2498 
2499 	pci_free_irq_vectors(pdev);
2500 	iounmap(qm->io_base);
2501 	pci_release_mem_regions(pdev);
2502 	pci_disable_device(pdev);
2503 }
2504 
2505 /**
2506  * hisi_qm_uninit() - Uninitialize qm.
2507  * @qm: The qm to be uninitialized.
2508  *
2509  * This function releases the qm-related device resources.
2510  */
2511 void hisi_qm_uninit(struct hisi_qm *qm)
2512 {
2513 	struct pci_dev *pdev = qm->pdev;
2514 	struct device *dev = &pdev->dev;
2515 
2516 	down_write(&qm->qps_lock);
2517 
2518 	if (!qm_avail_state(qm, QM_CLOSE)) {
2519 		up_write(&qm->qps_lock);
2520 		return;
2521 	}
2522 
2523 	hisi_qp_memory_uninit(qm, qm->qp_num);
2524 	idr_destroy(&qm->qp_idr);
2525 
2526 	if (qm->qdma.va) {
2527 		hisi_qm_cache_wb(qm);
2528 		dma_free_coherent(dev, qm->qdma.size,
2529 				  qm->qdma.va, qm->qdma.dma);
2530 		memset(&qm->qdma, 0, sizeof(qm->qdma));
2531 	}
2532 
2533 	qm_irq_unregister(qm);
2534 	hisi_qm_pci_uninit(qm);
2535 	uacce_remove(qm->uacce);
2536 	qm->uacce = NULL;
2537 
2538 	up_write(&qm->qps_lock);
2539 }
2540 EXPORT_SYMBOL_GPL(hisi_qm_uninit);
2541 
2542 /**
2543  * hisi_qm_get_vft() - Get vft from a qm.
2544  * @qm: The qm we want to get its vft.
2545  * @base: The base number of queue in vft.
2546  * @number: The number of queues in vft.
2547  *
2548  * Multiple queues can be allocated to a qm by configuring the virtual
2549  * function table. This function reads that configuration. Normally it is
2550  * called in the VF driver to get the queue information.
2551  *
2552  * qm hw v1 does not support this interface.
2553  */
2554 int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
2555 {
2556 	if (!base || !number)
2557 		return -EINVAL;
2558 
2559 	if (!qm->ops->get_vft) {
2560 		dev_err(&qm->pdev->dev, "Don't support vft read!\n");
2561 		return -EINVAL;
2562 	}
2563 
2564 	return qm->ops->get_vft(qm, base, number);
2565 }
2566 EXPORT_SYMBOL_GPL(hisi_qm_get_vft);
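
/*
 * Illustrative sketch only (not part of this driver): a VF driver would
 * typically read its queue allocation from the VFT during probe and store it
 * in the qm before starting it. The function name is hypothetical and error
 * handling is reduced to the minimum.
 */
static __maybe_unused int example_vf_read_queue_info(struct hisi_qm *qm)
{
	u32 qp_base, qp_num;
	int ret;

	ret = hisi_qm_get_vft(qm, &qp_base, &qp_num);
	if (ret)
		return ret;

	qm->qp_base = qp_base;
	qm->qp_num = qp_num;

	return 0;
}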
2567 
2568 /**
2569  * hisi_qm_set_vft() - Set vft to a qm.
2570  * @qm: The qm we want to set its vft.
2571  * @fun_num: The function number.
2572  * @base: The base number of queue in vft.
2573  * @number: The number of queues in vft.
2574  *
2575  * This function is always called in the PF driver; it is used to assign
2576  * queues among the PF and VFs.
2577  *
2578  * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
2579  * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
2580  * (VF function number 0x2)
2581  */
2582 static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
2583 		    u32 number)
2584 {
2585 	u32 max_q_num = qm->ctrl_qp_num;
2586 
2587 	if (base >= max_q_num || number > max_q_num ||
2588 	    (base + number) > max_q_num)
2589 		return -EINVAL;
2590 
2591 	return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
2592 }
2593 
2594 static void qm_init_eq_aeq_status(struct hisi_qm *qm)
2595 {
2596 	struct hisi_qm_status *status = &qm->status;
2597 
2598 	status->eq_head = 0;
2599 	status->aeq_head = 0;
2600 	status->eqc_phase = true;
2601 	status->aeqc_phase = true;
2602 }
2603 
2604 static int qm_eq_ctx_cfg(struct hisi_qm *qm)
2605 {
2606 	struct device *dev = &qm->pdev->dev;
2607 	struct qm_eqc *eqc;
2608 	dma_addr_t eqc_dma;
2609 	int ret;
2610 
2611 	eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL);
2612 	if (!eqc)
2613 		return -ENOMEM;
2614 
2615 	eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
2616 	eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
2617 	if (qm->ver == QM_HW_V1)
2618 		eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
2619 	eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
2620 
2621 	eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
2622 				 DMA_TO_DEVICE);
2623 	if (dma_mapping_error(dev, eqc_dma)) {
2624 		kfree(eqc);
2625 		return -ENOMEM;
2626 	}
2627 
2628 	ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
2629 	dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
2630 	kfree(eqc);
2631 
2632 	return ret;
2633 }
2634 
2635 static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
2636 {
2637 	struct device *dev = &qm->pdev->dev;
2638 	struct qm_aeqc *aeqc;
2639 	dma_addr_t aeqc_dma;
2640 	int ret;
2641 
2642 	aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL);
2643 	if (!aeqc)
2644 		return -ENOMEM;
2645 
2646 	aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
2647 	aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
2648 	aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
2649 
2650 	aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
2651 				  DMA_TO_DEVICE);
2652 	if (dma_mapping_error(dev, aeqc_dma)) {
2653 		kfree(aeqc);
2654 		return -ENOMEM;
2655 	}
2656 
2657 	ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
2658 	dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
2659 	kfree(aeqc);
2660 
2661 	return ret;
2662 }
2663 
2664 static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
2665 {
2666 	struct device *dev = &qm->pdev->dev;
2667 	int ret;
2668 
2669 	qm_init_eq_aeq_status(qm);
2670 
2671 	ret = qm_eq_ctx_cfg(qm);
2672 	if (ret) {
2673 		dev_err(dev, "Set eqc failed!\n");
2674 		return ret;
2675 	}
2676 
2677 	return qm_aeq_ctx_cfg(qm);
2678 }
2679 
2680 static int __hisi_qm_start(struct hisi_qm *qm)
2681 {
2682 	int ret;
2683 
2684 	WARN_ON(!qm->qdma.dma);
2685 
2686 	if (qm->fun_type == QM_HW_PF) {
2687 		ret = qm_dev_mem_reset(qm);
2688 		if (ret)
2689 			return ret;
2690 
2691 		ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
2692 		if (ret)
2693 			return ret;
2694 	}
2695 
2696 	ret = qm_eq_aeq_ctx_cfg(qm);
2697 	if (ret)
2698 		return ret;
2699 
2700 	ret = qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
2701 	if (ret)
2702 		return ret;
2703 
2704 	ret = qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
2705 	if (ret)
2706 		return ret;
2707 
2708 	writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
2709 	writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
2710 
2711 	return 0;
2712 }
2713 
2714 /**
2715  * hisi_qm_start() - start qm
2716  * @qm: The qm to be started.
2717  *
2718  * This function starts a qm, then we can allocate qp from this qm.
2719  */
2720 int hisi_qm_start(struct hisi_qm *qm)
2721 {
2722 	struct device *dev = &qm->pdev->dev;
2723 	int ret = 0;
2724 
2725 	down_write(&qm->qps_lock);
2726 
2727 	if (!qm_avail_state(qm, QM_START)) {
2728 		up_write(&qm->qps_lock);
2729 		return -EPERM;
2730 	}
2731 
2732 	dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num);
2733 
2734 	if (!qm->qp_num) {
2735 		dev_err(dev, "qp_num should not be 0\n");
2736 		ret = -EINVAL;
2737 		goto err_unlock;
2738 	}
2739 
2740 	ret = __hisi_qm_start(qm);
2741 	if (!ret)
2742 		atomic_set(&qm->status.flags, QM_START);
2743 
2744 err_unlock:
2745 	up_write(&qm->qps_lock);
2746 	return ret;
2747 }
2748 EXPORT_SYMBOL_GPL(hisi_qm_start);
2749 
2750 static int qm_restart(struct hisi_qm *qm)
2751 {
2752 	struct device *dev = &qm->pdev->dev;
2753 	struct hisi_qp *qp;
2754 	int ret, i;
2755 
2756 	ret = hisi_qm_start(qm);
2757 	if (ret < 0)
2758 		return ret;
2759 
2760 	down_write(&qm->qps_lock);
2761 	for (i = 0; i < qm->qp_num; i++) {
2762 		qp = &qm->qp_array[i];
2763 		if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
2764 		    qp->is_resetting == true) {
2765 			ret = qm_start_qp_nolock(qp, 0);
2766 			if (ret < 0) {
2767 				dev_err(dev, "Failed to start qp%d!\n", i);
2768 
2769 				up_write(&qm->qps_lock);
2770 				return ret;
2771 			}
2772 			qp->is_resetting = false;
2773 		}
2774 	}
2775 	up_write(&qm->qps_lock);
2776 
2777 	return 0;
2778 }
2779 
2780 /* Stop started qps in reset flow */
2781 static int qm_stop_started_qp(struct hisi_qm *qm)
2782 {
2783 	struct device *dev = &qm->pdev->dev;
2784 	struct hisi_qp *qp;
2785 	int i, ret;
2786 
2787 	for (i = 0; i < qm->qp_num; i++) {
2788 		qp = &qm->qp_array[i];
2789 		if (qp && atomic_read(&qp->qp_status.flags) == QP_START) {
2790 			qp->is_resetting = true;
2791 			ret = qm_stop_qp_nolock(qp);
2792 			if (ret < 0) {
2793 				dev_err(dev, "Failed to stop qp%d!\n", i);
2794 				return ret;
2795 			}
2796 		}
2797 	}
2798 
2799 	return 0;
2800 }
2801 
2802 
2804  * qm_clear_queues() - Clear all queues memory in a qm.
2805  * @qm: The qm in which the queues will be cleared.
2806  *
2807  * This function clears the memory of all queues in a qm. An accelerator
2808  * reset can use this to clear the queues.
2809  */
2810 static void qm_clear_queues(struct hisi_qm *qm)
2811 {
2812 	struct hisi_qp *qp;
2813 	int i;
2814 
2815 	for (i = 0; i < qm->qp_num; i++) {
2816 		qp = &qm->qp_array[i];
2817 		if (qp->is_resetting)
2818 			memset(qp->qdma.va, 0, qp->qdma.size);
2819 	}
2820 
2821 	memset(qm->qdma.va, 0, qm->qdma.size);
2822 }
2823 
2824 /**
2825  * hisi_qm_stop() - Stop a qm.
2826  * @qm: The qm which will be stopped.
2827  * @r: The reason to stop qm.
2828  *
2829  * This function stops the qm and its qps; the qm can then no longer accept
2830  * requests. Related resources are not released in this state; hisi_qm_start
2831  * can be used to start the qm again.
2832  */
2833 int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
2834 {
2835 	struct device *dev = &qm->pdev->dev;
2836 	int ret = 0;
2837 
2838 	down_write(&qm->qps_lock);
2839 
2840 	qm->status.stop_reason = r;
2841 	if (!qm_avail_state(qm, QM_STOP)) {
2842 		ret = -EPERM;
2843 		goto err_unlock;
2844 	}
2845 
2846 	if (qm->status.stop_reason == QM_SOFT_RESET ||
2847 	    qm->status.stop_reason == QM_FLR) {
2848 		ret = qm_stop_started_qp(qm);
2849 		if (ret < 0) {
2850 			dev_err(dev, "Failed to stop started qp!\n");
2851 			goto err_unlock;
2852 		}
2853 	}
2854 
2855 	/* Mask eq and aeq irq */
2856 	writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
2857 	writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
2858 
2859 	if (qm->fun_type == QM_HW_PF) {
2860 		ret = hisi_qm_set_vft(qm, 0, 0, 0);
2861 		if (ret < 0) {
2862 			dev_err(dev, "Failed to set vft!\n");
2863 			ret = -EBUSY;
2864 			goto err_unlock;
2865 		}
2866 	}
2867 
2868 	qm_clear_queues(qm);
2869 	atomic_set(&qm->status.flags, QM_STOP);
2870 
2871 err_unlock:
2872 	up_write(&qm->qps_lock);
2873 	return ret;
2874 }
2875 EXPORT_SYMBOL_GPL(hisi_qm_stop);
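
/*
 * Illustrative sketch only (not part of this driver): hisi_qm_stop() and
 * hisi_qm_start() pair up to quiesce and resume a device without releasing
 * its resources. QM_NORMAL is used as the stop reason for a plain stop; the
 * function name is hypothetical.
 */
static __maybe_unused int example_quiesce_and_resume(struct hisi_qm *qm)
{
	int ret;

	ret = hisi_qm_stop(qm, QM_NORMAL);
	if (ret)
		return ret;

	/* ... the device is idle here, e.g. reconfigure it ... */

	return hisi_qm_start(qm);
}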
2876 
2877 static ssize_t qm_status_read(struct file *filp, char __user *buffer,
2878 			      size_t count, loff_t *pos)
2879 {
2880 	struct hisi_qm *qm = filp->private_data;
2881 	char buf[QM_DBG_READ_LEN];
2882 	int val, len;
2883 
2884 	val = atomic_read(&qm->status.flags);
2885 	len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
2886 
2887 	return simple_read_from_buffer(buffer, count, pos, buf, len);
2888 }
2889 
2890 static const struct file_operations qm_status_fops = {
2891 	.owner = THIS_MODULE,
2892 	.open = simple_open,
2893 	.read = qm_status_read,
2894 };
2895 
2896 static int qm_debugfs_atomic64_set(void *data, u64 val)
2897 {
2898 	if (val)
2899 		return -EINVAL;
2900 
2901 	atomic64_set((atomic64_t *)data, 0);
2902 
2903 	return 0;
2904 }
2905 
2906 static int qm_debugfs_atomic64_get(void *data, u64 *val)
2907 {
2908 	*val = atomic64_read((atomic64_t *)data);
2909 
2910 	return 0;
2911 }
2912 
2913 DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
2914 			 qm_debugfs_atomic64_set, "%llu\n");
2915 
2916 /**
2917  * hisi_qm_debug_init() - Initialize qm related debugfs files.
2918  * @qm: The qm for which we want to add debugfs files.
2919  *
2920  * Create qm related debugfs files.
2921  */
2922 void hisi_qm_debug_init(struct hisi_qm *qm)
2923 {
2924 	struct qm_dfx *dfx = &qm->debug.dfx;
2925 	struct dentry *qm_d;
2926 	void *data;
2927 	int i;
2928 
2929 	qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
2930 	qm->debug.qm_d = qm_d;
2931 
2932 	/* only show this in PF */
2933 	if (qm->fun_type == QM_HW_PF)
2934 		for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
2935 			qm_create_debugfs_file(qm, i);
2936 
2937 	debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
2938 
2939 	debugfs_create_file("cmd", 0444, qm->debug.qm_d, qm, &qm_cmd_fops);
2940 
2941 	debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
2942 			&qm_status_fops);
2943 	for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
2944 		data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
2945 		debugfs_create_file(qm_dfx_files[i].name,
2946 			0644,
2947 			qm_d,
2948 			data,
2949 			&qm_atomic64_ops);
2950 	}
2951 }
2952 EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
2953 
2954 /**
2955  * hisi_qm_debug_regs_clear() - clear qm debug related registers.
2956  * @qm: The qm for which we want to clear its debug registers.
2957  */
2958 void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
2959 {
2960 	struct qm_dfx_registers *regs;
2961 	int i;
2962 
2963 	/* clear current_q */
2964 	writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
2965 	writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
2966 
2967 	/*
2968 	 * These registers are cleared on read. Enable clear-on-read, then
2969 	 * read them to clear them.
2970 	 */
2971 	writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
2972 
2973 	regs = qm_dfx_regs;
2974 	for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
2975 		readl(qm->io_base + regs->reg_offset);
2976 		regs++;
2977 	}
2978 
2979 	writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
2980 }
2981 EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
2982 
2983 static void qm_hw_error_init(struct hisi_qm *qm)
2984 {
2985 	const struct hisi_qm_err_info *err_info = &qm->err_ini->err_info;
2986 
2987 	if (!qm->ops->hw_error_init) {
2988 		dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
2989 		return;
2990 	}
2991 
2992 	qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe);
2993 }
2994 
2995 static void qm_hw_error_uninit(struct hisi_qm *qm)
2996 {
2997 	if (!qm->ops->hw_error_uninit) {
2998 		dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n");
2999 		return;
3000 	}
3001 
3002 	qm->ops->hw_error_uninit(qm);
3003 }
3004 
3005 static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm)
3006 {
3007 	if (!qm->ops->hw_error_handle) {
3008 		dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
3009 		return ACC_ERR_NONE;
3010 	}
3011 
3012 	return qm->ops->hw_error_handle(qm);
3013 }
3014 
3015 /**
3016  * hisi_qm_dev_err_init() - Initialize device error configuration.
3017  * @qm: The qm for which we want to do error initialization.
3018  *
3019  * Initialize QM and device error related configuration.
3020  */
3021 void hisi_qm_dev_err_init(struct hisi_qm *qm)
3022 {
3023 	if (qm->fun_type == QM_HW_VF)
3024 		return;
3025 
3026 	qm_hw_error_init(qm);
3027 
3028 	if (!qm->err_ini->hw_err_enable) {
3029 		dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n");
3030 		return;
3031 	}
3032 	qm->err_ini->hw_err_enable(qm);
3033 }
3034 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init);
3035 
3036 /**
3037  * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
3038  * @qm: The qm for which we want to do error uninitialization.
3039  *
3040  * Uninitialize QM and device error related configuration.
3041  */
3042 void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
3043 {
3044 	if (qm->fun_type == QM_HW_VF)
3045 		return;
3046 
3047 	qm_hw_error_uninit(qm);
3048 
3049 	if (!qm->err_ini->hw_err_disable) {
3050 		dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n");
3051 		return;
3052 	}
3053 	qm->err_ini->hw_err_disable(qm);
3054 }
3055 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);
3056 
3057 /**
3058  * hisi_qm_free_qps() - free multiple queue pairs.
3059  * @qps: The queue pairs to be freed.
3060  * @qp_num: The number of queue pairs.
3061  */
3062 void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
3063 {
3064 	int i;
3065 
3066 	if (!qps || qp_num <= 0)
3067 		return;
3068 
3069 	for (i = qp_num - 1; i >= 0; i--)
3070 		hisi_qm_release_qp(qps[i]);
3071 }
3072 EXPORT_SYMBOL_GPL(hisi_qm_free_qps);
3073 
3074 static void free_list(struct list_head *head)
3075 {
3076 	struct hisi_qm_resource *res, *tmp;
3077 
3078 	list_for_each_entry_safe(res, tmp, head, list) {
3079 		list_del(&res->list);
3080 		kfree(res);
3081 	}
3082 }
3083 
3084 static int hisi_qm_sort_devices(int node, struct list_head *head,
3085 				struct hisi_qm_list *qm_list)
3086 {
3087 	struct hisi_qm_resource *res, *tmp;
3088 	struct hisi_qm *qm;
3089 	struct list_head *n;
3090 	struct device *dev;
3091 	int dev_node = 0;
3092 
3093 	list_for_each_entry(qm, &qm_list->list, list) {
3094 		dev = &qm->pdev->dev;
3095 
3096 		if (IS_ENABLED(CONFIG_NUMA)) {
3097 			dev_node = dev_to_node(dev);
3098 			if (dev_node < 0)
3099 				dev_node = 0;
3100 		}
3101 
3102 		res = kzalloc(sizeof(*res), GFP_KERNEL);
3103 		if (!res)
3104 			return -ENOMEM;
3105 
3106 		res->qm = qm;
3107 		res->distance = node_distance(dev_node, node);
3108 		n = head;
3109 		list_for_each_entry(tmp, head, list) {
3110 			if (res->distance < tmp->distance) {
3111 				n = &tmp->list;
3112 				break;
3113 			}
3114 		}
3115 		list_add_tail(&res->list, n);
3116 	}
3117 
3118 	return 0;
3119 }
3120 
3121 /**
3122  * hisi_qm_alloc_qps_node() - Create multiple queue pairs.
3123  * @qm_list: The list of all available devices.
3124  * @qp_num: The number of queue pairs to be created.
3125  * @alg_type: The algorithm type.
3126  * @node: The numa node.
3127  * @qps: The created queue pairs.
3128  *
3129  * This function sorts all available devices according to NUMA distance, then
3130  * tries to create all queue pairs from one device. If no device meets the
3131  * requirements, an error is returned.
3132  */
3133 int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
3134 			   u8 alg_type, int node, struct hisi_qp **qps)
3135 {
3136 	struct hisi_qm_resource *tmp;
3137 	int ret = -ENODEV;
3138 	LIST_HEAD(head);
3139 	int i;
3140 
3141 	if (!qps || !qm_list || qp_num <= 0)
3142 		return -EINVAL;
3143 
3144 	mutex_lock(&qm_list->lock);
3145 	if (hisi_qm_sort_devices(node, &head, qm_list)) {
3146 		mutex_unlock(&qm_list->lock);
3147 		goto err;
3148 	}
3149 
3150 	list_for_each_entry(tmp, &head, list) {
3151 		for (i = 0; i < qp_num; i++) {
3152 			qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
3153 			if (IS_ERR(qps[i])) {
3154 				hisi_qm_free_qps(qps, i);
3155 				break;
3156 			}
3157 		}
3158 
3159 		if (i == qp_num) {
3160 			ret = 0;
3161 			break;
3162 		}
3163 	}
3164 
3165 	mutex_unlock(&qm_list->lock);
3166 	if (ret)
3167 		pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n",
3168 			node, alg_type, qp_num);
3169 
3170 err:
3171 	free_list(&head);
3172 	return ret;
3173 }
3174 EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
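
/*
 * Illustrative sketch only (not part of this driver): an algorithm layer
 * usually asks for queue pairs close to a given NUMA node and releases them
 * again with hisi_qm_free_qps(). The function name, the alg_type value 0 and
 * the qp count of 2 are placeholders supplied by the calling driver.
 */
static __maybe_unused int example_create_ctx_qps(struct hisi_qm_list *qm_list,
						 int node,
						 struct hisi_qp *qps[2])
{
	int ret;

	/* try to get both qps from the device closest to @node */
	ret = hisi_qm_alloc_qps_node(qm_list, 2, 0, node, qps);
	if (ret)
		return ret;

	/* ... use qps[0] and qps[1] ... */

	hisi_qm_free_qps(qps, 2);

	return 0;
}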
3175 
3176 static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
3177 {
3178 	u32 remain_q_num, q_num, i, j;
3179 	u32 q_base = qm->qp_num;
3180 	int ret;
3181 
3182 	if (!num_vfs)
3183 		return -EINVAL;
3184 
3185 	remain_q_num = qm->ctrl_qp_num - qm->qp_num;
3186 
3187 	/* Return an error if the remaining queues are not enough. */
3188 	if (qm->ctrl_qp_num < qm->qp_num || remain_q_num < num_vfs)
3189 		return -EINVAL;
3190 
3191 	q_num = remain_q_num / num_vfs;
3192 	for (i = 1; i <= num_vfs; i++) {
3193 		if (i == num_vfs)
3194 			q_num += remain_q_num % num_vfs;
3195 		ret = hisi_qm_set_vft(qm, i, q_base, q_num);
3196 		if (ret) {
3197 			for (j = i; j > 0; j--)
3198 				hisi_qm_set_vft(qm, j, 0, 0);
3199 			return ret;
3200 		}
3201 		q_base += q_num;
3202 	}
3203 
3204 	return 0;
3205 }
3206 
3207 static int qm_clear_vft_config(struct hisi_qm *qm)
3208 {
3209 	int ret;
3210 	u32 i;
3211 
3212 	for (i = 1; i <= qm->vfs_num; i++) {
3213 		ret = hisi_qm_set_vft(qm, i, 0, 0);
3214 		if (ret)
3215 			return ret;
3216 	}
3217 	qm->vfs_num = 0;
3218 
3219 	return 0;
3220 }
3221 
3222 /**
3223  * hisi_qm_sriov_enable() - enable virtual functions
3224  * @pdev: the PCIe device
3225  * @max_vfs: the number of virtual functions to enable
3226  *
3227  * Returns the number of enabled VFs. If VFs are already enabled, or max_vfs
3228  * is more than the total number of VFs the device can enable, failure is
3229  * returned.
3230  */
3231 int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
3232 {
3233 	struct hisi_qm *qm = pci_get_drvdata(pdev);
3234 	int pre_existing_vfs, num_vfs, total_vfs, ret;
3235 
3236 	total_vfs = pci_sriov_get_totalvfs(pdev);
3237 	pre_existing_vfs = pci_num_vf(pdev);
3238 	if (pre_existing_vfs) {
3239 		pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n",
3240 			pre_existing_vfs);
3241 		return 0;
3242 	}
3243 
3244 	num_vfs = min_t(int, max_vfs, total_vfs);
3245 	ret = qm_vf_q_assign(qm, num_vfs);
3246 	if (ret) {
3247 		pci_err(pdev, "Can't assign queues for VF!\n");
3248 		return ret;
3249 	}
3250 
3251 	qm->vfs_num = num_vfs;
3252 
3253 	ret = pci_enable_sriov(pdev, num_vfs);
3254 	if (ret) {
3255 		pci_err(pdev, "Can't enable VF!\n");
3256 		qm_clear_vft_config(qm);
3257 		return ret;
3258 	}
3259 
3260 	pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
3261 
3262 	return num_vfs;
3263 }
3264 EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
3265 
3266 /**
3267  * hisi_qm_sriov_disable - disable virtual functions
3268  * @pdev: the PCI device.
3269  * @is_frozen: true when all the VFs are frozen.
3270  *
3271  * Return failure if there are VFs assigned already or a VF is in use.
3272  */
3273 int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
3274 {
3275 	struct hisi_qm *qm = pci_get_drvdata(pdev);
3276 
3277 	if (pci_vfs_assigned(pdev)) {
3278 		pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
3279 		return -EPERM;
3280 	}
3281 
3282 	/* While a VF is in use, SR-IOV cannot be disabled. */
3283 	if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) {
3284 		pci_err(pdev, "Task is using its VF!\n");
3285 		return -EBUSY;
3286 	}
3287 
3288 	pci_disable_sriov(pdev);
3289 	return qm_clear_vft_config(qm);
3290 }
3291 EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
3292 
3293 /**
3294  * hisi_qm_sriov_configure - configure the number of VFs
3295  * @pdev: The PCI device
3296  * @num_vfs: The number of VFs to enable
3297  *
3298  * Enable SR-IOV according to num_vfs, 0 means disable.
3299  */
3300 int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
3301 {
3302 	if (num_vfs == 0)
3303 		return hisi_qm_sriov_disable(pdev, false);
3304 	else
3305 		return hisi_qm_sriov_enable(pdev, num_vfs);
3306 }
3307 EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
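
/*
 * Illustrative sketch only (not part of this driver): an accelerator PF
 * driver normally plugs hisi_qm_sriov_configure() straight into its
 * pci_driver so that writing sriov_numvfs in sysfs enables or disables VFs,
 * and hisi_qm_dev_shutdown() into .shutdown. The driver name is hypothetical
 * and probe/remove/id_table are omitted.
 */
static struct pci_driver example_acc_pci_driver __maybe_unused = {
	.name		 = "example_acc",
	.sriov_configure = hisi_qm_sriov_configure,
	.shutdown	 = hisi_qm_dev_shutdown,
};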
3308 
3309 static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
3310 {
3311 	u32 err_sts;
3312 
3313 	if (!qm->err_ini->get_dev_hw_err_status) {
3314 		dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n");
3315 		return ACC_ERR_NONE;
3316 	}
3317 
3318 	/* get device hardware error status */
3319 	err_sts = qm->err_ini->get_dev_hw_err_status(qm);
3320 	if (err_sts) {
3321 		if (err_sts & qm->err_ini->err_info.ecc_2bits_mask)
3322 			qm->err_status.is_dev_ecc_mbit = true;
3323 
3324 		if (qm->err_ini->log_dev_hw_err)
3325 			qm->err_ini->log_dev_hw_err(qm, err_sts);
3326 
3327 		/* ce error does not need to be reset */
3328 		if ((err_sts | qm->err_ini->err_info.dev_ce_mask) ==
3329 		     qm->err_ini->err_info.dev_ce_mask) {
3330 			if (qm->err_ini->clear_dev_hw_err_status)
3331 				qm->err_ini->clear_dev_hw_err_status(qm,
3332 								err_sts);
3333 
3334 			return ACC_ERR_RECOVERED;
3335 		}
3336 
3337 		return ACC_ERR_NEED_RESET;
3338 	}
3339 
3340 	return ACC_ERR_RECOVERED;
3341 }
3342 
3343 static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
3344 {
3345 	enum acc_err_result qm_ret, dev_ret;
3346 
3347 	/* log qm error */
3348 	qm_ret = qm_hw_error_handle(qm);
3349 
3350 	/* log device error */
3351 	dev_ret = qm_dev_err_handle(qm);
3352 
3353 	return (qm_ret == ACC_ERR_NEED_RESET ||
3354 		dev_ret == ACC_ERR_NEED_RESET) ?
3355 		ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
3356 }
3357 
3358 /**
3359  * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
3360  * @pdev: The PCI device which needs to report the error.
3361  * @state: The connectivity between CPU and device.
3362  *
3363  * We register this function in the PCIe AER handlers; it reports the device
3364  * or qm hardware error status when an error occurs.
3365  */
3366 pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
3367 					  pci_channel_state_t state)
3368 {
3369 	struct hisi_qm *qm = pci_get_drvdata(pdev);
3370 	enum acc_err_result ret;
3371 
3372 	if (pdev->is_virtfn)
3373 		return PCI_ERS_RESULT_NONE;
3374 
3375 	pci_info(pdev, "PCI error detected, state(=%u)!!\n", state);
3376 	if (state == pci_channel_io_perm_failure)
3377 		return PCI_ERS_RESULT_DISCONNECT;
3378 
3379 	ret = qm_process_dev_error(qm);
3380 	if (ret == ACC_ERR_NEED_RESET)
3381 		return PCI_ERS_RESULT_NEED_RESET;
3382 
3383 	return PCI_ERS_RESULT_RECOVERED;
3384 }
3385 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);
3386 
3387 static u32 qm_get_hw_error_status(struct hisi_qm *qm)
3388 {
3389 	return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
3390 }
3391 
3392 static int qm_check_req_recv(struct hisi_qm *qm)
3393 {
3394 	struct pci_dev *pdev = qm->pdev;
3395 	int ret;
3396 	u32 val;
3397 
3398 	writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID);
3399 	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
3400 					 (val == ACC_VENDOR_ID_VALUE),
3401 					 POLL_PERIOD, POLL_TIMEOUT);
3402 	if (ret) {
3403 		dev_err(&pdev->dev, "Fails to read QM reg!\n");
3404 		return ret;
3405 	}
3406 
3407 	writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID);
3408 	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
3409 					 (val == PCI_VENDOR_ID_HUAWEI),
3410 					 POLL_PERIOD, POLL_TIMEOUT);
3411 	if (ret)
3412 		dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n");
3413 
3414 	return ret;
3415 }
3416 
3417 static int qm_set_pf_mse(struct hisi_qm *qm, bool set)
3418 {
3419 	struct pci_dev *pdev = qm->pdev;
3420 	u16 cmd;
3421 	int i;
3422 
3423 	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
3424 	if (set)
3425 		cmd |= PCI_COMMAND_MEMORY;
3426 	else
3427 		cmd &= ~PCI_COMMAND_MEMORY;
3428 
3429 	pci_write_config_word(pdev, PCI_COMMAND, cmd);
3430 	for (i = 0; i < MAX_WAIT_COUNTS; i++) {
3431 		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
3432 		if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1))
3433 			return 0;
3434 
3435 		udelay(1);
3436 	}
3437 
3438 	return -ETIMEDOUT;
3439 }
3440 
3441 static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
3442 {
3443 	struct pci_dev *pdev = qm->pdev;
3444 	u16 sriov_ctrl;
3445 	int pos;
3446 	int i;
3447 
3448 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
3449 	pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
3450 	if (set)
3451 		sriov_ctrl |= PCI_SRIOV_CTRL_MSE;
3452 	else
3453 		sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE;
3454 	pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl);
3455 
3456 	for (i = 0; i < MAX_WAIT_COUNTS; i++) {
3457 		pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
3458 		if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
3459 		    ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT)
3460 			return 0;
3461 
3462 		udelay(1);
3463 	}
3464 
3465 	return -ETIMEDOUT;
3466 }
3467 
3468 static int qm_set_msi(struct hisi_qm *qm, bool set)
3469 {
3470 	struct pci_dev *pdev = qm->pdev;
3471 
3472 	if (set) {
3473 		pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
3474 				       0);
3475 	} else {
3476 		pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
3477 				       ACC_PEH_MSI_DISABLE);
3478 		if (qm->err_status.is_qm_ecc_mbit ||
3479 		    qm->err_status.is_dev_ecc_mbit)
3480 			return 0;
3481 
3482 		mdelay(1);
3483 		if (readl(qm->io_base + QM_PEH_DFX_INFO0))
3484 			return -EFAULT;
3485 	}
3486 
3487 	return 0;
3488 }
3489 
3490 static int qm_vf_reset_prepare(struct hisi_qm *qm,
3491 			       enum qm_stop_reason stop_reason)
3492 {
3493 	struct hisi_qm_list *qm_list = qm->qm_list;
3494 	struct pci_dev *pdev = qm->pdev;
3495 	struct pci_dev *virtfn;
3496 	struct hisi_qm *vf_qm;
3497 	int ret = 0;
3498 
3499 	mutex_lock(&qm_list->lock);
3500 	list_for_each_entry(vf_qm, &qm_list->list, list) {
3501 		virtfn = vf_qm->pdev;
3502 		if (virtfn == pdev)
3503 			continue;
3504 
3505 		if (pci_physfn(virtfn) == pdev) {
3506 			/* save VFs PCIe BAR configuration */
3507 			pci_save_state(virtfn);
3508 
3509 			ret = hisi_qm_stop(vf_qm, stop_reason);
3510 			if (ret)
3511 				goto stop_fail;
3512 		}
3513 	}
3514 
3515 stop_fail:
3516 	mutex_unlock(&qm_list->lock);
3517 	return ret;
3518 }
3519 
3520 static int qm_reset_prepare_ready(struct hisi_qm *qm)
3521 {
3522 	struct pci_dev *pdev = qm->pdev;
3523 	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
3524 	int delay = 0;
3525 
3526 	/* All reset requests need to be queued for processing */
3527 	while (test_and_set_bit(QM_RESETTING, &pf_qm->misc_ctl)) {
3528 		msleep(++delay);
3529 		if (delay > QM_RESET_WAIT_TIMEOUT)
3530 			return -EBUSY;
3531 	}
3532 
3533 	return 0;
3534 }
3535 
3536 static int qm_controller_reset_prepare(struct hisi_qm *qm)
3537 {
3538 	struct pci_dev *pdev = qm->pdev;
3539 	int ret;
3540 
3541 	ret = qm_reset_prepare_ready(qm);
3542 	if (ret) {
3543 		pci_err(pdev, "Controller reset not ready!\n");
3544 		return ret;
3545 	}
3546 
3547 	if (qm->vfs_num) {
3548 		ret = qm_vf_reset_prepare(qm, QM_SOFT_RESET);
3549 		if (ret) {
3550 			pci_err(pdev, "Fails to stop VFs!\n");
3551 			clear_bit(QM_RESETTING, &qm->misc_ctl);
3552 			return ret;
3553 		}
3554 	}
3555 
3556 	ret = hisi_qm_stop(qm, QM_SOFT_RESET);
3557 	if (ret) {
3558 		pci_err(pdev, "Fails to stop QM!\n");
3559 		clear_bit(QM_RESETTING, &qm->misc_ctl);
3560 		return ret;
3561 	}
3562 
3563 	clear_bit(QM_RST_SCHED, &qm->misc_ctl);
3564 
3565 	return 0;
3566 }
3567 
3568 static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
3569 {
3570 	u32 nfe_enb = 0;
3571 
3572 	if (!qm->err_status.is_dev_ecc_mbit &&
3573 	    qm->err_status.is_qm_ecc_mbit &&
3574 	    qm->err_ini->close_axi_master_ooo) {
3575 
3576 		qm->err_ini->close_axi_master_ooo(qm);
3577 
3578 	} else if (qm->err_status.is_dev_ecc_mbit &&
3579 		   !qm->err_status.is_qm_ecc_mbit &&
3580 		   !qm->err_ini->close_axi_master_ooo) {
3581 
3582 		nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
3583 		writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
3584 		       qm->io_base + QM_RAS_NFE_ENABLE);
3585 		writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
3586 	}
3587 }
3588 
3589 static int qm_soft_reset(struct hisi_qm *qm)
3590 {
3591 	struct pci_dev *pdev = qm->pdev;
3592 	int ret;
3593 	u32 val;
3594 
3595 	/* Ensure all doorbells and mailboxes received by QM */
3596 	ret = qm_check_req_recv(qm);
3597 	if (ret)
3598 		return ret;
3599 
3600 	if (qm->vfs_num) {
3601 		ret = qm_set_vf_mse(qm, false);
3602 		if (ret) {
3603 			pci_err(pdev, "Fails to disable vf MSE bit.\n");
3604 			return ret;
3605 		}
3606 	}
3607 
3608 	ret = qm_set_msi(qm, false);
3609 	if (ret) {
3610 		pci_err(pdev, "Fails to disable PEH MSI bit.\n");
3611 		return ret;
3612 	}
3613 
3614 	qm_dev_ecc_mbit_handle(qm);
3615 
3616 	/* OOO register set and check */
3617 	writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
3618 	       qm->io_base + ACC_MASTER_GLOBAL_CTRL);
3619 
3620 	/* If bus lock, reset chip */
3621 	ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
3622 					 val,
3623 					 (val == ACC_MASTER_TRANS_RETURN_RW),
3624 					 POLL_PERIOD, POLL_TIMEOUT);
3625 	if (ret) {
3626 		pci_emerg(pdev, "Bus lock! Please reset system.\n");
3627 		return ret;
3628 	}
3629 
3630 	ret = qm_set_pf_mse(qm, false);
3631 	if (ret) {
3632 		pci_err(pdev, "Fails to disable pf MSE bit.\n");
3633 		return ret;
3634 	}
3635 
3636 	/* The reset related sub-control registers are not in PCI BAR */
3637 	if (ACPI_HANDLE(&pdev->dev)) {
3638 		unsigned long long value = 0;
3639 		acpi_status s;
3640 
3641 		s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
3642 					  qm->err_ini->err_info.acpi_rst,
3643 					  NULL, &value);
3644 		if (ACPI_FAILURE(s)) {
3645 			pci_err(pdev, "NO controller reset method!\n");
3646 			return -EIO;
3647 		}
3648 
3649 		if (value) {
3650 			pci_err(pdev, "Reset step %llu failed!\n", value);
3651 			return -EIO;
3652 		}
3653 	} else {
3654 		pci_err(pdev, "No reset method!\n");
3655 		return -EINVAL;
3656 	}
3657 
3658 	return 0;
3659 }
3660 
3661 static int qm_vf_reset_done(struct hisi_qm *qm)
3662 {
3663 	struct hisi_qm_list *qm_list = qm->qm_list;
3664 	struct pci_dev *pdev = qm->pdev;
3665 	struct pci_dev *virtfn;
3666 	struct hisi_qm *vf_qm;
3667 	int ret = 0;
3668 
3669 	mutex_lock(&qm_list->lock);
3670 	list_for_each_entry(vf_qm, &qm_list->list, list) {
3671 		virtfn = vf_qm->pdev;
3672 		if (virtfn == pdev)
3673 			continue;
3674 
3675 		if (pci_physfn(virtfn) == pdev) {
3676 			/* restore VFs PCIe BAR configuration */
3677 			pci_restore_state(virtfn);
3678 
3679 			ret = qm_restart(vf_qm);
3680 			if (ret)
3681 				goto restart_fail;
3682 		}
3683 	}
3684 
3685 restart_fail:
3686 	mutex_unlock(&qm_list->lock);
3687 	return ret;
3688 }
3689 
3690 static u32 qm_get_dev_err_status(struct hisi_qm *qm)
3691 {
3692 	return qm->err_ini->get_dev_hw_err_status(qm);
3693 }
3694 
3695 static int qm_dev_hw_init(struct hisi_qm *qm)
3696 {
3697 	return qm->err_ini->hw_init(qm);
3698 }
3699 
3700 static void qm_restart_prepare(struct hisi_qm *qm)
3701 {
3702 	u32 value;
3703 
3704 	if (!qm->err_status.is_qm_ecc_mbit &&
3705 	    !qm->err_status.is_dev_ecc_mbit)
3706 		return;
3707 
3708 	/* temporarily close the OOO port used for PEH to write out MSI */
3709 	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
3710 	writel(value & ~qm->err_ini->err_info.msi_wr_port,
3711 	       qm->io_base + ACC_AM_CFG_PORT_WR_EN);
3712 
3713 	/* clear dev ecc 2bit error source if having */
3714 	value = qm_get_dev_err_status(qm) &
3715 		qm->err_ini->err_info.ecc_2bits_mask;
3716 	if (value && qm->err_ini->clear_dev_hw_err_status)
3717 		qm->err_ini->clear_dev_hw_err_status(qm, value);
3718 
3719 	/* clear QM ecc mbit error source */
3720 	writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);
3721 
3722 	/* clear AM Reorder Buffer ecc mbit source */
3723 	writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
3724 
3725 	if (qm->err_ini->open_axi_master_ooo)
3726 		qm->err_ini->open_axi_master_ooo(qm);
3727 }
3728 
3729 static void qm_restart_done(struct hisi_qm *qm)
3730 {
3731 	u32 value;
3732 
3733 	if (!qm->err_status.is_qm_ecc_mbit &&
3734 	    !qm->err_status.is_dev_ecc_mbit)
3735 		return;
3736 
3737 	/* open the OOO port for PEH to write out MSI */
3738 	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
3739 	value |= qm->err_ini->err_info.msi_wr_port;
3740 	writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);
3741 
3742 	qm->err_status.is_qm_ecc_mbit = false;
3743 	qm->err_status.is_dev_ecc_mbit = false;
3744 }
3745 
3746 static int qm_controller_reset_done(struct hisi_qm *qm)
3747 {
3748 	struct pci_dev *pdev = qm->pdev;
3749 	int ret;
3750 
3751 	ret = qm_set_msi(qm, true);
3752 	if (ret) {
3753 		pci_err(pdev, "Fails to enable PEH MSI bit!\n");
3754 		return ret;
3755 	}
3756 
3757 	ret = qm_set_pf_mse(qm, true);
3758 	if (ret) {
3759 		pci_err(pdev, "Fails to enable pf MSE bit!\n");
3760 		return ret;
3761 	}
3762 
3763 	if (qm->vfs_num) {
3764 		ret = qm_set_vf_mse(qm, true);
3765 		if (ret) {
3766 			pci_err(pdev, "Fails to enable vf MSE bit!\n");
3767 			return ret;
3768 		}
3769 	}
3770 
3771 	ret = qm_dev_hw_init(qm);
3772 	if (ret) {
3773 		pci_err(pdev, "Failed to init device\n");
3774 		return ret;
3775 	}
3776 
3777 	qm_restart_prepare(qm);
3778 
3779 	ret = qm_restart(qm);
3780 	if (ret) {
3781 		pci_err(pdev, "Failed to start QM!\n");
3782 		return ret;
3783 	}
3784 
3785 	if (qm->vfs_num) {
3786 		ret = qm_vf_q_assign(qm, qm->vfs_num);
3787 		if (ret) {
3788 			pci_err(pdev, "Failed to assign queue!\n");
3789 			return ret;
3790 		}
3791 	}
3792 
3793 	ret = qm_vf_reset_done(qm);
3794 	if (ret) {
3795 		pci_err(pdev, "Failed to start VFs!\n");
3796 		return -EPERM;
3797 	}
3798 
3799 	hisi_qm_dev_err_init(qm);
3800 	qm_restart_done(qm);
3801 
3802 	clear_bit(QM_RESETTING, &qm->misc_ctl);
3803 
3804 	return 0;
3805 }
3806 
3807 static int qm_controller_reset(struct hisi_qm *qm)
3808 {
3809 	struct pci_dev *pdev = qm->pdev;
3810 	int ret;
3811 
3812 	pci_info(pdev, "Controller resetting...\n");
3813 
3814 	ret = qm_controller_reset_prepare(qm);
3815 	if (ret) {
3816 		clear_bit(QM_RST_SCHED, &qm->misc_ctl);
3817 		return ret;
3818 	}
3819 
3820 	ret = qm_soft_reset(qm);
3821 	if (ret) {
3822 		pci_err(pdev, "Controller reset failed (%d)\n", ret);
3823 		clear_bit(QM_RESETTING, &qm->misc_ctl);
3824 		return ret;
3825 	}
3826 
3827 	ret = qm_controller_reset_done(qm);
3828 	if (ret) {
3829 		clear_bit(QM_RESETTING, &qm->misc_ctl);
3830 		return ret;
3831 	}
3832 
3833 	pci_info(pdev, "Controller reset complete\n");
3834 
3835 	return 0;
3836 }
3837 
3838 /**
3839  * hisi_qm_dev_slot_reset() - slot reset
3840  * @pdev: the PCIe device
3841  *
3842  * This function offers the QM-related PCIe device reset interface. Drivers
3843  * using the QM can use it as slot_reset in their struct pci_error_handlers.
3844  */
3845 pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
3846 {
3847 	struct hisi_qm *qm = pci_get_drvdata(pdev);
3848 	int ret;
3849 
3850 	if (pdev->is_virtfn)
3851 		return PCI_ERS_RESULT_RECOVERED;
3852 
3853 	pci_aer_clear_nonfatal_status(pdev);
3854 
3855 	/* reset pcie device controller */
3856 	ret = qm_controller_reset(qm);
3857 	if (ret) {
3858 		pci_err(pdev, "Controller reset failed (%d)\n", ret);
3859 		return PCI_ERS_RESULT_DISCONNECT;
3860 	}
3861 
3862 	return PCI_ERS_RESULT_RECOVERED;
3863 }
3864 EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);
3865 
3866 /* check the interrupt is ecc-mbit error or not */
3867 static int qm_check_dev_error(struct hisi_qm *qm)
3868 {
3869 	int ret;
3870 
3871 	if (qm->fun_type == QM_HW_VF)
3872 		return 0;
3873 
3874 	ret = qm_get_hw_error_status(qm) & QM_ECC_MBIT;
3875 	if (ret)
3876 		return ret;
3877 
3878 	return (qm_get_dev_err_status(qm) &
3879 		qm->err_ini->err_info.ecc_2bits_mask);
3880 }
3881 
3882 void hisi_qm_reset_prepare(struct pci_dev *pdev)
3883 {
3884 	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
3885 	struct hisi_qm *qm = pci_get_drvdata(pdev);
3886 	u32 delay = 0;
3887 	int ret;
3888 
3889 	hisi_qm_dev_err_uninit(pf_qm);
3890 
3891 	/*
3892 	 * Check whether there is an ECC mbit error. If one has occurred, wait
3893 	 * for the soft reset to fix it.
3894 	 */
3895 	while (qm_check_dev_error(pf_qm)) {
3896 		msleep(++delay);
3897 		if (delay > QM_RESET_WAIT_TIMEOUT)
3898 			return;
3899 	}
3900 
3901 	ret = qm_reset_prepare_ready(qm);
3902 	if (ret) {
3903 		pci_err(pdev, "FLR not ready!\n");
3904 		return;
3905 	}
3906 
3907 	if (qm->vfs_num) {
3908 		ret = qm_vf_reset_prepare(qm, QM_FLR);
3909 		if (ret) {
3910 			pci_err(pdev, "Failed to prepare reset, ret = %d.\n",
3911 				ret);
3912 			return;
3913 		}
3914 	}
3915 
3916 	ret = hisi_qm_stop(qm, QM_FLR);
3917 	if (ret) {
3918 		pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
3919 		return;
3920 	}
3921 
3922 	pci_info(pdev, "FLR resetting...\n");
3923 }
3924 EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);
3925 
3926 static bool qm_flr_reset_complete(struct pci_dev *pdev)
3927 {
3928 	struct pci_dev *pf_pdev = pci_physfn(pdev);
3929 	struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
3930 	u32 id;
3931 
3932 	pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
3933 	if (id == QM_PCI_COMMAND_INVALID) {
3934 		pci_err(pdev, "Device can not be used!\n");
3935 		return false;
3936 	}
3937 
3938 	return true;
3939 }
3940 
3941 void hisi_qm_reset_done(struct pci_dev *pdev)
3942 {
3943 	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
3944 	struct hisi_qm *qm = pci_get_drvdata(pdev);
3945 	int ret;
3946 
3947 	hisi_qm_dev_err_init(pf_qm);
3948 
3949 	ret = qm_restart(qm);
3950 	if (ret) {
3951 		pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
3952 		goto flr_done;
3953 	}
3954 
3955 	if (qm->fun_type == QM_HW_PF) {
3956 		ret = qm_dev_hw_init(qm);
3957 		if (ret) {
3958 			pci_err(pdev, "Failed to init PF, ret = %d.\n", ret);
3959 			goto flr_done;
3960 		}
3961 
3962 		if (!qm->vfs_num)
3963 			goto flr_done;
3964 
3965 		ret = qm_vf_q_assign(qm, qm->vfs_num);
3966 		if (ret) {
3967 			pci_err(pdev, "Failed to assign VFs, ret = %d.\n", ret);
3968 			goto flr_done;
3969 		}
3970 
3971 		ret = qm_vf_reset_done(qm);
3972 		if (ret) {
3973 			pci_err(pdev, "Failed to start VFs, ret = %d.\n", ret);
3974 			goto flr_done;
3975 		}
3976 	}
3977 
3978 flr_done:
3979 	if (qm_flr_reset_complete(pdev))
3980 		pci_info(pdev, "FLR reset complete\n");
3981 
3982 	clear_bit(QM_RESETTING, &qm->misc_ctl);
3983 }
3984 EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
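
/*
 * Illustrative sketch only (not part of this driver): the AER and FLR hooks
 * exported above are meant to be used together as a driver's
 * pci_error_handlers, roughly as the HiSilicon accelerator drivers do. The
 * structure name is hypothetical.
 */
static const struct pci_error_handlers example_acc_err_handler __maybe_unused = {
	.error_detected	= hisi_qm_dev_err_detected,
	.slot_reset	= hisi_qm_dev_slot_reset,
	.reset_prepare	= hisi_qm_reset_prepare,
	.reset_done	= hisi_qm_reset_done,
};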
3985 
3986 static irqreturn_t qm_abnormal_irq(int irq, void *data)
3987 {
3988 	struct hisi_qm *qm = data;
3989 	enum acc_err_result ret;
3990 
3991 	atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
3992 	ret = qm_process_dev_error(qm);
3993 	if (ret == ACC_ERR_NEED_RESET &&
3994 	    !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) &&
3995 	    !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl))
3996 		schedule_work(&qm->rst_work);
3997 
3998 	return IRQ_HANDLED;
3999 }
4000 
4001 static int qm_irq_register(struct hisi_qm *qm)
4002 {
4003 	struct pci_dev *pdev = qm->pdev;
4004 	int ret;
4005 
4006 	ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
4007 			  qm_irq, 0, qm->dev_name, qm);
4008 	if (ret)
4009 		return ret;
4010 
4011 	if (qm->ver != QM_HW_V1) {
4012 		ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR),
4013 				  qm_aeq_irq, 0, qm->dev_name, qm);
4014 		if (ret)
4015 			goto err_aeq_irq;
4016 
4017 		if (qm->fun_type == QM_HW_PF) {
4018 			ret = request_irq(pci_irq_vector(pdev,
4019 					  QM_ABNORMAL_EVENT_IRQ_VECTOR),
4020 					  qm_abnormal_irq, 0, qm->dev_name, qm);
4021 			if (ret)
4022 				goto err_abnormal_irq;
4023 		}
4024 	}
4025 
4026 	return 0;
4027 
4028 err_abnormal_irq:
4029 	free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
4030 err_aeq_irq:
4031 	free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
4032 	return ret;
4033 }
4034 
4035 /**
4036  * hisi_qm_dev_shutdown() - Shutdown device.
4037  * @pdev: The device to be shut down.
4038  *
4039  * This function stops the qm when the OS shuts down or reboots.
4040  */
4041 void hisi_qm_dev_shutdown(struct pci_dev *pdev)
4042 {
4043 	struct hisi_qm *qm = pci_get_drvdata(pdev);
4044 	int ret;
4045 
4046 	ret = hisi_qm_stop(qm, QM_NORMAL);
4047 	if (ret)
4048 		dev_err(&pdev->dev, "Failed to stop qm in shutdown!\n");
4049 }
4050 EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
4051 
4052 static void hisi_qm_controller_reset(struct work_struct *rst_work)
4053 {
4054 	struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
4055 	int ret;
4056 
4057 	/* reset pcie device controller */
4058 	ret = qm_controller_reset(qm);
4059 	if (ret)
4060 		dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);
4061 
4063 
4064 /**
4065  * hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list.
4066  * @qm: The qm to add.
4067  * @qm_list: The qm list.
4068  *
4069  * This function adds the qm to the qm list, and registers the algorithms
4070  * to crypto if the qm is the first one added to the list.
4071  */
4072 int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
4073 {
4074 	int flag = 0;
4075 	int ret = 0;
4076 	/* HW V2 and below cannot use uacce sva mode together with the crypto algs */
4077 	if (qm->ver <= QM_HW_V2 && qm->use_sva)
4078 		return 0;
4079 
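	/* Only the first qm added to the (previously empty) list registers the algorithms */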
4080 	mutex_lock(&qm_list->lock);
4081 	if (list_empty(&qm_list->list))
4082 		flag = 1;
4083 	list_add_tail(&qm->list, &qm_list->list);
4084 	mutex_unlock(&qm_list->lock);
4085 
4086 	if (flag) {
4087 		ret = qm_list->register_to_crypto();
4088 		if (ret) {
4089 			mutex_lock(&qm_list->lock);
4090 			list_del(&qm->list);
4091 			mutex_unlock(&qm_list->lock);
4092 		}
4093 	}
4094 
4095 	return ret;
4096 }
4097 EXPORT_SYMBOL_GPL(hisi_qm_alg_register);
4098 
4099 /**
4100  * hisi_qm_alg_unregister() - Unregister alg from crypto and delete qm from
4101  * qm list.
4102  * @qm: The qm to delete.
4103  * @qm_list: The qm list.
4104  *
4105  * This function deletes the qm from the qm list, and unregisters the
4106  * algorithms from crypto when the list becomes empty.
4107  */
4108 void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
4109 {
4110 	if (qm->ver <= QM_HW_V2 && qm->use_sva)
4111 		return;
4112 
4113 	mutex_lock(&qm_list->lock);
4114 	list_del(&qm->list);
4115 	mutex_unlock(&qm_list->lock);
4116 
4117 	if (list_empty(&qm_list->list))
4118 		qm_list->unregister_from_crypto();
4119 }
4120 EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
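
/*
 * Typical use of the alg registration helpers from an accelerator driver
 * (illustrative sketch only; the "foo" names are hypothetical):
 *
 *	static struct hisi_qm_list foo_devices = {
 *		.register_to_crypto = foo_register_to_crypto,
 *		.unregister_from_crypto = foo_unregister_from_crypto,
 *	};
 *
 *	probe:   ret = hisi_qm_alg_register(qm, &foo_devices);
 *	remove:  hisi_qm_alg_unregister(qm, &foo_devices);
 */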
4121 
4122 static int hisi_qm_pci_init(struct hisi_qm *qm)
4123 {
4124 	struct pci_dev *pdev = qm->pdev;
4125 	struct device *dev = &pdev->dev;
4126 	unsigned int num_vec;
4127 	int ret;
4128 
4129 	ret = pci_enable_device_mem(pdev);
4130 	if (ret < 0) {
4131 		dev_err(dev, "Failed to enable device mem!\n");
4132 		return ret;
4133 	}
4134 
4135 	ret = pci_request_mem_regions(pdev, qm->dev_name);
4136 	if (ret < 0) {
4137 		dev_err(dev, "Failed to request mem regions!\n");
4138 		goto err_disable_pcidev;
4139 	}
4140 
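	/* The QM registers live in BAR 2; map the whole region */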
4141 	qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
4142 	qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2);
4143 	qm->io_base = ioremap(qm->phys_base, qm->phys_size);
4144 	if (!qm->io_base) {
4145 		ret = -EIO;
4146 		goto err_release_mem_regions;
4147 	}
4148 
4149 	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4150 	if (ret < 0)
4151 		goto err_iounmap;
4152 	pci_set_master(pdev);
4153 
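	/*
	 * The number of required MSI vectors depends on the hardware version,
	 * so ask the hardware ops and allocate exactly that many.
	 */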
4154 	if (!qm->ops->get_irq_num) {
4155 		ret = -EOPNOTSUPP;
4156 		goto err_iounmap;
4157 	}
4158 	num_vec = qm->ops->get_irq_num(qm);
4159 	ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
4160 	if (ret < 0) {
4161 		dev_err(dev, "Failed to enable MSI vectors!\n");
4162 		goto err_iounmap;
4163 	}
4164 
4165 	return 0;
4166 
4167 err_iounmap:
4168 	iounmap(qm->io_base);
4169 err_release_mem_regions:
4170 	pci_release_mem_regions(pdev);
4171 err_disable_pcidev:
4172 	pci_disable_device(pdev);
4173 	return ret;
4174 }
4175 
4176 /**
4177  * hisi_qm_init() - Initialize the qm's configuration.
4178  * @qm: The qm to be initialized.
4179  *
4180  * This function initializes the qm, after which hisi_qm_start() can put it to work.
4181  */
4182 int hisi_qm_init(struct hisi_qm *qm)
4183 {
4184 	struct pci_dev *pdev = qm->pdev;
4185 	struct device *dev = &pdev->dev;
4186 	int ret;
4187 
4188 	hisi_qm_pre_init(qm);
4189 
4190 	ret = qm_alloc_uacce(qm);
4191 	if (ret < 0)
4192 		dev_warn(dev, "failed to alloc uacce (%d)\n", ret);
4193 
4194 	ret = hisi_qm_pci_init(qm);
4195 	if (ret)
4196 		goto err_remove_uacce;
4197 
4198 	ret = qm_irq_register(qm);
4199 	if (ret)
4200 		goto err_pci_uninit;
4201 
4202 	if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
4203 		/* v2 hardware starts to support getting the vft by mailbox */
4204 		ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
4205 		if (ret)
4206 			goto err_irq_unregister;
4207 	}
4208 
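	/* Allocate the queue memory (event queues, queue contexts and per-qp buffers) */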
4209 	ret = hisi_qm_memory_init(qm);
4210 	if (ret)
4211 		goto err_irq_unregister;
4212 
4213 	INIT_WORK(&qm->work, qm_work_process);
4214 	if (qm->fun_type == QM_HW_PF)
4215 		INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);
4216 
4217 	atomic_set(&qm->status.flags, QM_INIT);
4218 
4219 	return 0;
4220 
4221 err_irq_unregister:
4222 	qm_irq_unregister(qm);
4223 err_pci_uninit:
4224 	hisi_qm_pci_uninit(qm);
4225 err_remove_uacce:
4226 	uacce_remove(qm->uacce);
4227 	qm->uacce = NULL;
4228 	return ret;
4229 }
4230 EXPORT_SYMBOL_GPL(hisi_qm_init);
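
/*
 * Typical bring-up order in a driver's probe path (illustrative only):
 *
 *	ret = hisi_qm_init(qm);
 *	if (ret)
 *		return ret;
 *
 *	ret = hisi_qm_start(qm);
 *	if (ret)
 *		hisi_qm_uninit(qm);
 */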
4231 
4232 MODULE_LICENSE("GPL v2");
4233 MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
4234 MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");
4235