// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics SA 2015
 * Authors: Yannick Fertre <yannick.fertre@st.com>
 *          Hugues Fruchet <hugues.fruchet@st.com>
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
#include <linux/seq_file.h>
#endif

#include "hva.h"
#include "hva-hw.h"

/* HVA register offsets */
#define HVA_HIF_REG_RST                 0x0100U
#define HVA_HIF_REG_RST_ACK             0x0104U
#define HVA_HIF_REG_MIF_CFG             0x0108U
#define HVA_HIF_REG_HEC_MIF_CFG         0x010CU
#define HVA_HIF_REG_CFL                 0x0110U
#define HVA_HIF_FIFO_CMD                0x0114U
#define HVA_HIF_FIFO_STS                0x0118U
#define HVA_HIF_REG_SFL                 0x011CU
#define HVA_HIF_REG_IT_ACK              0x0120U
#define HVA_HIF_REG_ERR_IT_ACK          0x0124U
#define HVA_HIF_REG_LMI_ERR             0x0128U
#define HVA_HIF_REG_EMI_ERR             0x012CU
#define HVA_HIF_REG_HEC_MIF_ERR         0x0130U
#define HVA_HIF_REG_HEC_STS             0x0134U
#define HVA_HIF_REG_HVC_STS             0x0138U
#define HVA_HIF_REG_HJE_STS             0x013CU
#define HVA_HIF_REG_CNT                 0x0140U
#define HVA_HIF_REG_HEC_CHKSYN_DIS      0x0144U
#define HVA_HIF_REG_CLK_GATING          0x0148U
#define HVA_HIF_REG_VERSION             0x014CU
#define HVA_HIF_REG_BSM                 0x0150U

/* define value for version id register (HVA_HIF_REG_VERSION) */
#define VERSION_ID_MASK	0x0000FFFF

/* define values for BSM register (HVA_HIF_REG_BSM) */
#define BSM_CFG_VAL1	0x0003F000
#define BSM_CFG_VAL2	0x003F0000

/* define values for memory interface register (HVA_HIF_REG_MIF_CFG) */
#define MIF_CFG_VAL1	0x04460446
#define MIF_CFG_VAL2	0x04460806
#define MIF_CFG_VAL3	0x00000000

/* define value for HEC memory interface register (HVA_HIF_REG_HEC_MIF_CFG) */
#define HEC_MIF_CFG_VAL	0x000000C4

/* bit definitions for clock gating register (HVA_HIF_REG_CLK_GATING) */
#define CLK_GATING_HVC	BIT(0)
#define CLK_GATING_HEC	BIT(1)
#define CLK_GATING_HJE	BIT(2)

/* fixed HVA clock rate, in Hz */
#define CLK_RATE		300000000

/* autosuspend delay for runtime PM, in ms */
#define AUTOSUSPEND_DELAY_MS	3

/*
 * hw encode error values
 * NO_ERROR: Success, Task OK
 * H264_BITSTREAM_OVERSIZE: VECH264 Bitstream size > bitstream buffer
 * H264_FRAME_SKIPPED: VECH264 Frame skipped (refers to CPB Buffer Size)
 * H264_SLICE_LIMIT_SIZE: VECH264 MB > slice limit size
 * H264_MAX_SLICE_NUMBER: VECH264 max slice number reached
 * H264_SLICE_READY: VECH264 Slice ready
 * TASK_LIST_FULL: HVA/FPC task list full
 *		   (discard latest transform command)
 * UNKNOWN_COMMAND: Transform command not known by HVA/FPC
 * WRONG_CODEC_OR_RESOLUTION: Wrong Codec or Resolution Selection
 * NO_INT_COMPLETION: Time-out on interrupt completion
 * LMI_ERR: Local Memory Interface Error
 * EMI_ERR: External Memory Interface Error
 * HECMI_ERR: HEC Memory Interface Error
 */
enum hva_hw_error {
	NO_ERROR = 0x0,
	H264_BITSTREAM_OVERSIZE = 0x2,
	H264_FRAME_SKIPPED = 0x4,
	H264_SLICE_LIMIT_SIZE = 0x5,
	H264_MAX_SLICE_NUMBER = 0x7,
	H264_SLICE_READY = 0x8,
	TASK_LIST_FULL = 0xF0,
	UNKNOWN_COMMAND = 0xF1,
	WRONG_CODEC_OR_RESOLUTION = 0xF4,
	NO_INT_COMPLETION = 0x100,
	LMI_ERR = 0x101,
	EMI_ERR = 0x102,
	HECMI_ERR = 0x103,
};

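/*
 * hva_hw_its_interrupt - hard IRQ half of the status interrupt:
 * latch the status FIFO and FIFO level registers, acknowledge the
 * interrupt and wake the threaded handler that decodes the status.
 */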
static irqreturn_t hva_hw_its_interrupt(int irq, void *data)
{
	struct hva_dev *hva = data;

	/* read status registers */
	hva->sts_reg = readl_relaxed(hva->regs + HVA_HIF_FIFO_STS);
	hva->sfl_reg = readl_relaxed(hva->regs + HVA_HIF_REG_SFL);

	/* acknowledge interrupt */
	writel_relaxed(0x1, hva->regs + HVA_HIF_REG_IT_ACK);

	return IRQ_WAKE_THREAD;
}

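/*
 * hva_hw_its_irq_thread - threaded half of the status interrupt:
 * decode the latched status word, map the client identifier back to its
 * context, update the per-context hardware error flag and signal
 * completion of the current task.
 */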
static irqreturn_t hva_hw_its_irq_thread(int irq, void *arg)
{
	struct hva_dev *hva = arg;
	struct device *dev = hva_to_dev(hva);
	u32 status = hva->sts_reg & 0xFF;
	u8 ctx_id = 0;
	struct hva_ctx *ctx = NULL;

	dev_dbg(dev, "%s     %s: status: 0x%02x fifo level: 0x%02x\n",
		HVA_PREFIX, __func__, hva->sts_reg & 0xFF, hva->sfl_reg & 0xF);

	/*
	 * status: task_id[31:16] client_id[15:8] status[7:0]
	 * the context identifier is retrieved from the client identifier
	 */
	ctx_id = (hva->sts_reg & 0xFF00) >> 8;
	if (ctx_id >= HVA_MAX_INSTANCES) {
		dev_err(dev, "%s     %s: bad context identifier: %d\n",
			HVA_PREFIX, __func__, ctx_id);
		goto out;
	}

	ctx = hva->instances[ctx_id];
	if (!ctx)
		goto out;

	switch (status) {
	case NO_ERROR:
		dev_dbg(dev, "%s     %s: no error\n",
			ctx->name, __func__);
		ctx->hw_err = false;
		break;
	case H264_SLICE_READY:
		dev_dbg(dev, "%s     %s: h264 slice ready\n",
			ctx->name, __func__);
		ctx->hw_err = false;
		break;
	case H264_FRAME_SKIPPED:
		dev_dbg(dev, "%s     %s: h264 frame skipped\n",
			ctx->name, __func__);
		ctx->hw_err = false;
		break;
	case H264_BITSTREAM_OVERSIZE:
		dev_err(dev, "%s     %s: h264 bitstream oversize\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	case H264_SLICE_LIMIT_SIZE:
		dev_err(dev, "%s     %s: h264 slice limit size is reached\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	case H264_MAX_SLICE_NUMBER:
		dev_err(dev, "%s     %s: h264 max slice number is reached\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	case TASK_LIST_FULL:
		dev_err(dev, "%s     %s: task list full\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	case UNKNOWN_COMMAND:
		dev_err(dev, "%s     %s: command not known\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	case WRONG_CODEC_OR_RESOLUTION:
		dev_err(dev, "%s     %s: wrong codec or resolution\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	default:
		dev_err(dev, "%s     %s: status not recognized\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	}
out:
	complete(&hva->interrupt);

	return IRQ_HANDLED;
}

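/*
 * hva_hw_err_interrupt - hard IRQ half of the error interrupt:
 * latch the status, FIFO level and memory interface error registers,
 * acknowledge the interrupt and wake the threaded handler.
 */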
static irqreturn_t hva_hw_err_interrupt(int irq, void *data)
{
	struct hva_dev *hva = data;

	/* read status registers */
	hva->sts_reg = readl_relaxed(hva->regs + HVA_HIF_FIFO_STS);
	hva->sfl_reg = readl_relaxed(hva->regs + HVA_HIF_REG_SFL);

	/* read error registers */
	hva->lmi_err_reg = readl_relaxed(hva->regs + HVA_HIF_REG_LMI_ERR);
	hva->emi_err_reg = readl_relaxed(hva->regs + HVA_HIF_REG_EMI_ERR);
	hva->hec_mif_err_reg = readl_relaxed(hva->regs +
					     HVA_HIF_REG_HEC_MIF_ERR);

	/* acknowledge interrupt */
	writel_relaxed(0x1, hva->regs + HVA_HIF_REG_IT_ACK);

	return IRQ_WAKE_THREAD;
}

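/*
 * hva_hw_err_irq_thread - threaded half of the error interrupt:
 * report any local, external or HEC memory interface error on the
 * faulting context and signal completion of the current task.
 */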
static irqreturn_t hva_hw_err_irq_thread(int irq, void *arg)
{
	struct hva_dev *hva = arg;
	struct device *dev = hva_to_dev(hva);
	u8 ctx_id = 0;
	struct hva_ctx *ctx;

	dev_dbg(dev, "%s     status: 0x%02x fifo level: 0x%02x\n",
		HVA_PREFIX, hva->sts_reg & 0xFF, hva->sfl_reg & 0xF);

	/*
	 * status: task_id[31:16] client_id[15:8] status[7:0]
	 * the context identifier is retrieved from the client identifier
	 */
	ctx_id = (hva->sts_reg & 0xFF00) >> 8;
	if (ctx_id >= HVA_MAX_INSTANCES) {
		dev_err(dev, "%s     bad context identifier: %d\n", HVA_PREFIX,
			ctx_id);
		goto out;
	}

	ctx = hva->instances[ctx_id];
	if (!ctx)
		goto out;

	if (hva->lmi_err_reg) {
		dev_err(dev, "%s     local memory interface error: 0x%08x\n",
			ctx->name, hva->lmi_err_reg);
		ctx->hw_err = true;
	}

	if (hva->emi_err_reg) {
		dev_err(dev, "%s     external memory interface error: 0x%08x\n",
			ctx->name, hva->emi_err_reg);
		ctx->hw_err = true;
	}

	if (hva->hec_mif_err_reg) {
		dev_err(dev, "%s     hec memory interface error: 0x%08x\n",
			ctx->name, hva->hec_mif_err_reg);
		ctx->hw_err = true;
	}
out:
	complete(&hva->interrupt);

	return IRQ_HANDLED;
}

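/*
 * hva_hw_get_ip_version - read the version register and return the IP
 * hardware version, or HVA_VERSION_UNKNOWN if it is not recognized.
 */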
static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
{
	struct device *dev = hva_to_dev(hva);
	unsigned long int version;

	if (pm_runtime_resume_and_get(dev) < 0) {
		dev_err(dev, "%s     failed to get pm_runtime\n", HVA_PREFIX);
		return HVA_VERSION_UNKNOWN;
	}

	version = readl_relaxed(hva->regs + HVA_HIF_REG_VERSION) &
				VERSION_ID_MASK;

	pm_runtime_put_autosuspend(dev);

	switch (version) {
	case HVA_VERSION_V400:
		dev_dbg(dev, "%s     IP hardware version 0x%lx\n",
			HVA_PREFIX, version);
		break;
	default:
		dev_err(dev, "%s     unknown IP hardware version 0x%lx\n",
			HVA_PREFIX, version);
		version = HVA_VERSION_UNKNOWN;
		break;
	}

	return version;
}

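/*
 * hva_hw_probe - map the interface registers, retrieve the ESRAM region,
 * get the HVA clock and both interrupt lines, set up runtime PM and
 * check the IP hardware version.
 */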
int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
{
	struct device *dev = &pdev->dev;
	struct resource *esram;
	int ret;

	WARN_ON(!hva);

	/* get memory for registers */
	hva->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hva->regs)) {
		dev_err(dev, "%s     failed to get regs\n", HVA_PREFIX);
		return PTR_ERR(hva->regs);
	}

	/* get memory for esram */
	esram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!esram) {
		dev_err(dev, "%s     failed to get esram\n", HVA_PREFIX);
		return -ENODEV;
	}
	hva->esram_addr = esram->start;
	hva->esram_size = resource_size(esram);

	dev_info(dev, "%s     esram reserved for address: 0x%x size:%d\n",
		 HVA_PREFIX, hva->esram_addr, hva->esram_size);

	/* get clock resource */
	hva->clk = devm_clk_get(dev, "clk_hva");
	if (IS_ERR(hva->clk)) {
		dev_err(dev, "%s     failed to get clock\n", HVA_PREFIX);
		return PTR_ERR(hva->clk);
	}

	ret = clk_prepare(hva->clk);
	if (ret < 0) {
		dev_err(dev, "%s     failed to prepare clock\n", HVA_PREFIX);
		hva->clk = ERR_PTR(-EINVAL);
		return ret;
	}

	/* get status interrupt resource */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto err_clk;
	hva->irq_its = ret;

	ret = devm_request_threaded_irq(dev, hva->irq_its, hva_hw_its_interrupt,
					hva_hw_its_irq_thread,
					IRQF_ONESHOT,
					"hva_its_irq", hva);
	if (ret) {
		dev_err(dev, "%s     failed to install status IRQ 0x%x\n",
			HVA_PREFIX, hva->irq_its);
		goto err_clk;
	}
	disable_irq(hva->irq_its);

	/* get error interrupt resource */
	ret = platform_get_irq(pdev, 1);
	if (ret < 0)
		goto err_clk;
	hva->irq_err = ret;

	ret = devm_request_threaded_irq(dev, hva->irq_err, hva_hw_err_interrupt,
					hva_hw_err_irq_thread,
					IRQF_ONESHOT,
					"hva_err_irq", hva);
	if (ret) {
		dev_err(dev, "%s     failed to install error IRQ 0x%x\n",
			HVA_PREFIX, hva->irq_err);
		goto err_clk;
	}
	disable_irq(hva->irq_err);

	/* initialise protection mutex */
	mutex_init(&hva->protect_mutex);

	/* initialise completion signal */
	init_completion(&hva->interrupt);

	/* initialise runtime power management */
	pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_enable(dev);

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "%s     failed to set PM\n", HVA_PREFIX);
		goto err_disable;
	}

	/* check IP hardware version */
	hva->ip_version = hva_hw_get_ip_version(hva);

	if (hva->ip_version == HVA_VERSION_UNKNOWN) {
		ret = -EINVAL;
		goto err_pm;
	}

	dev_info(dev, "%s     found hva device (version 0x%lx)\n", HVA_PREFIX,
		 hva->ip_version);

	return 0;

err_pm:
	pm_runtime_put(dev);
err_disable:
	pm_runtime_disable(dev);
err_clk:
	if (hva->clk)
		clk_unprepare(hva->clk);

	return ret;
}

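/*
 * hva_hw_remove - disable both interrupt lines, drop the runtime PM
 * reference and disable runtime PM.
 */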
void hva_hw_remove(struct hva_dev *hva)
{
	struct device *dev = hva_to_dev(hva);

	disable_irq(hva->irq_its);
	disable_irq(hva->irq_err);

	pm_runtime_put_autosuspend(dev);
	pm_runtime_disable(dev);
}

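/* runtime PM suspend callback: disable the HVA clock */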
int hva_hw_runtime_suspend(struct device *dev)
{
	struct hva_dev *hva = dev_get_drvdata(dev);

	clk_disable_unprepare(hva->clk);

	return 0;
}

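/* runtime PM resume callback: enable the HVA clock and set its rate */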
int hva_hw_runtime_resume(struct device *dev)
{
	struct hva_dev *hva = dev_get_drvdata(dev);

	if (clk_prepare_enable(hva->clk)) {
		dev_err(hva->dev, "%s     failed to prepare hva clk\n",
			HVA_PREFIX);
		return -EINVAL;
	}

	if (clk_set_rate(hva->clk, CLK_RATE)) {
		dev_err(dev, "%s     failed to set clock frequency\n",
			HVA_PREFIX);
		clk_disable_unprepare(hva->clk);
		return -EINVAL;
	}

	return 0;
}

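/*
 * hva_hw_execute_task - push one encode command and its task descriptor
 * into the command FIFO, then wait up to two seconds for the interrupt
 * handlers to signal completion. Access is serialized by protect_mutex
 * and the device is kept runtime-resumed while the task runs.
 */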
int hva_hw_execute_task(struct hva_ctx *ctx, enum hva_hw_cmd_type cmd,
			struct hva_buffer *task)
{
	struct hva_dev *hva = ctx_to_hdev(ctx);
	struct device *dev = hva_to_dev(hva);
	u8 client_id = ctx->id;
	int ret;
	u32 reg = 0;
	bool got_pm = false;

	mutex_lock(&hva->protect_mutex);

	/* enable irqs */
	enable_irq(hva->irq_its);
	enable_irq(hva->irq_err);

	if (pm_runtime_resume_and_get(dev) < 0) {
		dev_err(dev, "%s     failed to get pm_runtime\n", ctx->name);
		ctx->sys_errors++;
		ret = -EFAULT;
		goto out;
	}
	got_pm = true;

	reg = readl_relaxed(hva->regs + HVA_HIF_REG_CLK_GATING);
	switch (cmd) {
	case H264_ENC:
		reg |= CLK_GATING_HVC;
		break;
	default:
		dev_dbg(dev, "%s     unknown command 0x%x\n", ctx->name, cmd);
		ctx->encode_errors++;
		ret = -EFAULT;
		goto out;
	}
	writel_relaxed(reg, hva->regs + HVA_HIF_REG_CLK_GATING);

	dev_dbg(dev, "%s     %s: write configuration registers\n", ctx->name,
		__func__);

	/* byte swap config */
	writel_relaxed(BSM_CFG_VAL1, hva->regs + HVA_HIF_REG_BSM);

	/* define Max Opcode Size and Max Message Size for LMI and EMI */
	writel_relaxed(MIF_CFG_VAL3, hva->regs + HVA_HIF_REG_MIF_CFG);
	writel_relaxed(HEC_MIF_CFG_VAL, hva->regs + HVA_HIF_REG_HEC_MIF_CFG);

	/*
	 * command FIFO: task_id[31:16] client_id[15:8] command_type[7:0]
	 * the context identifier is provided as client identifier to the
	 * hardware, and is retrieved in the interrupt functions from the
	 * status register
	 */
	dev_dbg(dev, "%s     %s: send task (cmd: %d, task_desc: %pad)\n",
		ctx->name, __func__, cmd + (client_id << 8), &task->paddr);
	writel_relaxed(cmd + (client_id << 8), hva->regs + HVA_HIF_FIFO_CMD);
	writel_relaxed(task->paddr, hva->regs + HVA_HIF_FIFO_CMD);

	if (!wait_for_completion_timeout(&hva->interrupt,
					 msecs_to_jiffies(2000))) {
		dev_err(dev, "%s     %s: time out on completion\n", ctx->name,
			__func__);
		ctx->encode_errors++;
		ret = -EFAULT;
		goto out;
	}

	/* get encoding status */
	ret = ctx->hw_err ? -EFAULT : 0;

	ctx->encode_errors += ctx->hw_err ? 1 : 0;

out:
	disable_irq(hva->irq_its);
	disable_irq(hva->irq_err);

	switch (cmd) {
	case H264_ENC:
		reg &= ~CLK_GATING_HVC;
		writel_relaxed(reg, hva->regs + HVA_HIF_REG_CLK_GATING);
		break;
	default:
		dev_dbg(dev, "%s     unknown command 0x%x\n", ctx->name, cmd);
	}

	if (got_pm)
		pm_runtime_put_autosuspend(dev);
	mutex_unlock(&hva->protect_mutex);

	return ret;
}

#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
#define DUMP(reg) seq_printf(s, "%-30s: 0x%08X\n",\
			     #reg, readl_relaxed(hva->regs + reg))

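/*
 * hva_hw_dump_regs - dump the HVA interface registers to a debugfs
 * seq_file, waking the IP through runtime PM for the duration of the
 * dump.
 */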
void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s)
{
	struct device *dev = hva_to_dev(hva);

	mutex_lock(&hva->protect_mutex);

	if (pm_runtime_resume_and_get(dev) < 0) {
		seq_puts(s, "Cannot wake up IP\n");
		mutex_unlock(&hva->protect_mutex);
		return;
	}

	seq_printf(s, "Registers:\nReg @ = 0x%p\n", hva->regs);

	DUMP(HVA_HIF_REG_RST);
	DUMP(HVA_HIF_REG_RST_ACK);
	DUMP(HVA_HIF_REG_MIF_CFG);
	DUMP(HVA_HIF_REG_HEC_MIF_CFG);
	DUMP(HVA_HIF_REG_CFL);
	DUMP(HVA_HIF_REG_SFL);
	DUMP(HVA_HIF_REG_LMI_ERR);
	DUMP(HVA_HIF_REG_EMI_ERR);
	DUMP(HVA_HIF_REG_HEC_MIF_ERR);
	DUMP(HVA_HIF_REG_HEC_STS);
	DUMP(HVA_HIF_REG_HVC_STS);
	DUMP(HVA_HIF_REG_HJE_STS);
	DUMP(HVA_HIF_REG_CNT);
	DUMP(HVA_HIF_REG_HEC_CHKSYN_DIS);
	DUMP(HVA_HIF_REG_CLK_GATING);
	DUMP(HVA_HIF_REG_VERSION);

	pm_runtime_put_autosuspend(dev);
	mutex_unlock(&hva->protect_mutex);
}
#endif