/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "vid.h"
#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"

#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618

#define VCE_V3_0_FW_SIZE	(384 * 1024)
#define VCE_V3_0_STACK_SIZE	(64 * 1024)
#define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
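
/*
 * The single VCE firmware BO holds the firmware image followed by one
 * stack + data region per instance: vce_v3_0_sw_init() allocates
 * VCE_V3_0_FW_SIZE + 2 * (VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE),
 * and vce_v3_0_mc_resume() programs the per-instance cache offsets
 * against the same layout.
 */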

static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);

/**
 * vce_v3_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_RPTR);
	else
		return RREG32(mmVCE_RB_RPTR2);
}

/**
 * vce_v3_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_WPTR);
	else
		return RREG32(mmVCE_RB_WPTR2);
}

/**
 * vce_v3_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		WREG32(mmVCE_RB_WPTR, ring->wptr);
	else
		WREG32(mmVCE_RB_WPTR2, ring->wptr);
}

/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int idx, i, j, r;

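	/*
	 * VCE instance registers are selected through GRBM_GFX_INDEX;
	 * hold grbm_idx_mutex so the selection can't be changed
	 * underneath us by another GRBM_GFX_INDEX user.
	 */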
	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {

		if (adev->vce.harvest_config & (1 << idx))
			continue;

		if (idx == 0)
			WREG32_P(mmGRBM_GFX_INDEX, 0,
				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
		else
			WREG32_P(mmGRBM_GFX_INDEX,
				GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);

		vce_v3_0_mc_resume(adev, idx);

		/* set BUSY flag */
		WREG32_P(mmVCE_STATUS, 1, ~1);
		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
		else
			WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
				~VCE_VCPU_CNTL__CLK_EN_MASK);

		WREG32_P(mmVCE_SOFT_RESET,
			 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

		mdelay(100);

		WREG32_P(mmVCE_SOFT_RESET, 0,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

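		/*
		 * Wait for the VCPU to report ready (bit 1 of VCE_STATUS),
		 * pulsing the ECPU soft reset and retrying up to 10 times
		 * if it doesn't come up.
		 */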
		for (i = 0; i < 10; ++i) {
			uint32_t status;
			for (j = 0; j < 100; ++j) {
				status = RREG32(mmVCE_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
			WREG32_P(mmVCE_SOFT_RESET,
				VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
			mdelay(10);
			WREG32_P(mmVCE_SOFT_RESET, 0,
				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
			mdelay(10);
			r = -1;
		}

		/* clear BUSY flag */
		WREG32_P(mmVCE_STATUS, 0, ~1);

		if (r) {
			DRM_ERROR("VCE not responding, giving up!!!\n");
			mutex_unlock(&adev->grbm_idx_mutex);
			return r;
		}
	}

	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
	mutex_unlock(&adev->grbm_idx_mutex);

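	/* program both ring buffers; ring 1 uses the *2 variants of the RB registers */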
	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, ring->wptr);
	WREG32(mmVCE_RB_WPTR, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, ring->wptr);
	WREG32(mmVCE_RB_WPTR2, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	return 0;
}

#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS     0xC0014074
#define VCE_HARVEST_FUSE_MACRO__SHIFT       27
#define VCE_HARVEST_FUSE_MACRO__MASK        0x18000000
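
/*
 * The harvest fuses carry a two-bit field: 1 means VCE0 is harvested,
 * 2 means VCE1 is harvested, 3 means both (decoded in the switch below).
 */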

static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
	u32 tmp;
	unsigned ret;

	/* Fiji, Stoney are single pipe */
	if ((adev->asic_type == CHIP_FIJI) ||
	    (adev->asic_type == CHIP_STONEY)) {
		ret = AMDGPU_VCE_HARVEST_VCE1;
		return ret;
	}

	/* Tonga and CZ are dual or single pipe */
	if (adev->flags & AMD_IS_APU)
		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
		       VCE_HARVEST_FUSE_MACRO__MASK) >>
			VCE_HARVEST_FUSE_MACRO__SHIFT;
	else
		tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
		       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
			CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;

	switch (tmp) {
	case 1:
		ret = AMDGPU_VCE_HARVEST_VCE0;
		break;
	case 2:
		ret = AMDGPU_VCE_HARVEST_VCE1;
		break;
	case 3:
		ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
		break;
	default:
		ret = 0;
	}

	return ret;
}

static int vce_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);

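	/* if both instances are fused off, there is no VCE block to bring up */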
	if ((adev->vce.harvest_config &
	     (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
	    (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
		return -ENOENT;

	vce_v3_0_set_ring_funcs(adev);
	vce_v3_0_set_irq_funcs(adev);

	return 0;
}

static int vce_v3_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r;

	/* VCE trap interrupt */
	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
	if (r)
		return r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	ring = &adev->vce.ring[0];
	sprintf(ring->name, "vce0");
	r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	ring = &adev->vce.ring[1];
	sprintf(ring->name, "vce1");
	r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	return r;
}

static int vce_v3_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vce_sw_fini(adev);
	if (r)
		return r;

	return r;
}

static int vce_v3_0_hw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_start(adev);
	if (r)
		return r;

	ring = &adev->vce.ring[0];
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	ring = &adev->vce.ring[1];
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

static int vce_v3_0_hw_fini(void *handle)
{
	return 0;
}

static int vce_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return r;
}

static int vce_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	r = vce_v3_0_hw_init(adev);
	if (r)
		return r;

	return r;
}

static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);
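
	/*
	 * Point the VCPU caches at the firmware BO.  Stoney and later have
	 * three separate 40-bit BAR registers; earlier parts use a single
	 * shared BAR.  Addresses are programmed in 256-byte units, hence
	 * the >> 8.
	 */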
	if (adev->asic_type >= CHIP_STONEY) {
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
	} else {
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
	}

	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

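	/*
	 * Instance 0 uses the first stack + data region after the firmware
	 * image; instance 1 skips past it to the second region.
	 */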
	if (idx == 0) {
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);

	WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
		 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
}

static bool vce_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;
	int idx;

	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		if (idx == 0)
			mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
		else
			mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
	}

	return !(RREG32(mmSRBM_STATUS2) & mask);
}

static int vce_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;
	int idx;

	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		if (idx == 0)
			mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
		else
			mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS2) & mask))
			return 0;
	}
	return -ETIMEDOUT;
}

static int vce_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;
	int idx;

	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		if (idx == 0)
			mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK;
		else
			mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;
	}
	WREG32_P(mmSRBM_SOFT_RESET, mask,
		 ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK |
		   SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK));
	mdelay(5);

	return vce_v3_0_start(adev);
}

static void vce_v3_0_print_status(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "VCE 3.0 registers\n");
	dev_info(adev->dev, "  VCE_STATUS=0x%08X\n",
		 RREG32(mmVCE_STATUS));
	dev_info(adev->dev, "  VCE_VCPU_CNTL=0x%08X\n",
		 RREG32(mmVCE_VCPU_CNTL));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET0=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET0));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE0=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE0));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET1=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET1));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE1=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE1));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET2=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET2));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE2=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE2));
	dev_info(adev->dev, "  VCE_SOFT_RESET=0x%08X\n",
		 RREG32(mmVCE_SOFT_RESET));
	dev_info(adev->dev, "  VCE_RB_BASE_LO2=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_LO2));
	dev_info(adev->dev, "  VCE_RB_BASE_HI2=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_HI2));
	dev_info(adev->dev, "  VCE_RB_SIZE2=0x%08X\n",
		 RREG32(mmVCE_RB_SIZE2));
	dev_info(adev->dev, "  VCE_RB_RPTR2=0x%08X\n",
		 RREG32(mmVCE_RB_RPTR2));
	dev_info(adev->dev, "  VCE_RB_WPTR2=0x%08X\n",
		 RREG32(mmVCE_RB_WPTR2));
	dev_info(adev->dev, "  VCE_RB_BASE_LO=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_LO));
	dev_info(adev->dev, "  VCE_RB_BASE_HI=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_HI));
	dev_info(adev->dev, "  VCE_RB_SIZE=0x%08X\n",
		 RREG32(mmVCE_RB_SIZE));
	dev_info(adev->dev, "  VCE_RB_RPTR=0x%08X\n",
		 RREG32(mmVCE_RB_RPTR));
	dev_info(adev->dev, "  VCE_RB_WPTR=0x%08X\n",
		 RREG32(mmVCE_RB_WPTR));
	dev_info(adev->dev, "  VCE_CLOCK_GATING_A=0x%08X\n",
		 RREG32(mmVCE_CLOCK_GATING_A));
	dev_info(adev->dev, "  VCE_CLOCK_GATING_B=0x%08X\n",
		 RREG32(mmVCE_CLOCK_GATING_B));
	dev_info(adev->dev, "  VCE_UENC_CLOCK_GATING=0x%08X\n",
		 RREG32(mmVCE_UENC_CLOCK_GATING));
	dev_info(adev->dev, "  VCE_UENC_REG_CLOCK_GATING=0x%08X\n",
		 RREG32(mmVCE_UENC_REG_CLOCK_GATING));
	dev_info(adev->dev, "  VCE_SYS_INT_EN=0x%08X\n",
		 RREG32(mmVCE_SYS_INT_EN));
	dev_info(adev->dev, "  VCE_LMI_CTRL2=0x%08X\n",
		 RREG32(mmVCE_LMI_CTRL2));
	dev_info(adev->dev, "  VCE_LMI_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_CTRL));
	dev_info(adev->dev, "  VCE_LMI_VM_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_VM_CTRL));
	dev_info(adev->dev, "  VCE_LMI_SWAP_CNTL=0x%08X\n",
		 RREG32(mmVCE_LMI_SWAP_CNTL));
	dev_info(adev->dev, "  VCE_LMI_SWAP_CNTL1=0x%08X\n",
		 RREG32(mmVCE_LMI_SWAP_CNTL1));
	dev_info(adev->dev, "  VCE_LMI_CACHE_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_CACHE_CTRL));
}

static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");

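	/* acknowledge the trap interrupt before processing the fence */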
	WREG32_P(mmVCE_SYS_INT_STATUS,
		VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
		~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);

	switch (entry->src_data) {
	case 0:
		amdgpu_fence_process(&adev->vce.ring[0]);
		break;
	case 1:
		amdgpu_fence_process(&adev->vce.ring[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data);
		break;
	}

	return 0;
}

static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int vce_v3_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE)
		/* XXX do we need a vce_v3_0_stop()? */
		return 0;
	else
		return vce_v3_0_start(adev);
}

const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.early_init = vce_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v3_0_sw_init,
	.sw_fini = vce_v3_0_sw_fini,
	.hw_init = vce_v3_0_hw_init,
	.hw_fini = vce_v3_0_hw_fini,
	.suspend = vce_v3_0_suspend,
	.resume = vce_v3_0_resume,
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.soft_reset = vce_v3_0_soft_reset,
	.print_status = vce_v3_0_print_status,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.emit_semaphore = amdgpu_vce_ring_emit_semaphore,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
};

static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs;
	adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
	.set = vce_v3_0_set_interrupt_state,
	.process = vce_v3_0_process_interrupt,
};

static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
}