xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c (revision 3c0ff9f1)
1aaa36a97SAlex Deucher /*
2aaa36a97SAlex Deucher  * Copyright 2014 Advanced Micro Devices, Inc.
3aaa36a97SAlex Deucher  * All Rights Reserved.
4aaa36a97SAlex Deucher  *
5aaa36a97SAlex Deucher  * Permission is hereby granted, free of charge, to any person obtaining a
6aaa36a97SAlex Deucher  * copy of this software and associated documentation files (the
7aaa36a97SAlex Deucher  * "Software"), to deal in the Software without restriction, including
8aaa36a97SAlex Deucher  * without limitation the rights to use, copy, modify, merge, publish,
9aaa36a97SAlex Deucher  * distribute, sub license, and/or sell copies of the Software, and to
10aaa36a97SAlex Deucher  * permit persons to whom the Software is furnished to do so, subject to
11aaa36a97SAlex Deucher  * the following conditions:
12aaa36a97SAlex Deucher  *
13aaa36a97SAlex Deucher  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14aaa36a97SAlex Deucher  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15aaa36a97SAlex Deucher  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16aaa36a97SAlex Deucher  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17aaa36a97SAlex Deucher  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18aaa36a97SAlex Deucher  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19aaa36a97SAlex Deucher  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20aaa36a97SAlex Deucher  *
21aaa36a97SAlex Deucher  * The above copyright notice and this permission notice (including the
22aaa36a97SAlex Deucher  * next paragraph) shall be included in all copies or substantial portions
23aaa36a97SAlex Deucher  * of the Software.
24aaa36a97SAlex Deucher  *
25aaa36a97SAlex Deucher  * Authors: Christian König <christian.koenig@amd.com>
26aaa36a97SAlex Deucher  */
27aaa36a97SAlex Deucher 
28aaa36a97SAlex Deucher #include <linux/firmware.h>
29aaa36a97SAlex Deucher #include <drm/drmP.h>
30aaa36a97SAlex Deucher #include "amdgpu.h"
31aaa36a97SAlex Deucher #include "amdgpu_vce.h"
32aaa36a97SAlex Deucher #include "vid.h"
33aaa36a97SAlex Deucher #include "vce/vce_3_0_d.h"
34aaa36a97SAlex Deucher #include "vce/vce_3_0_sh_mask.h"
35be4f38e2SAlex Deucher #include "oss/oss_3_0_d.h"
36be4f38e2SAlex Deucher #include "oss/oss_3_0_sh_mask.h"
375bbc553aSLeo Liu #include "gca/gfx_8_0_d.h"
386a585777SAlex Deucher #include "smu/smu_7_1_2_d.h"
396a585777SAlex Deucher #include "smu/smu_7_1_2_sh_mask.h"
405bbc553aSLeo Liu 
415bbc553aSLeo Liu #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
425bbc553aSLeo Liu #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
433c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 	0x8616
443c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 	0x8617
453c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 	0x8618
46aaa36a97SAlex Deucher 
47e9822622SLeo Liu #define VCE_V3_0_FW_SIZE	(384 * 1024)
48e9822622SLeo Liu #define VCE_V3_0_STACK_SIZE	(64 * 1024)
49e9822622SLeo Liu #define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
50e9822622SLeo Liu 
515bbc553aSLeo Liu static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
52aaa36a97SAlex Deucher static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
53aaa36a97SAlex Deucher static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
54aaa36a97SAlex Deucher 
55aaa36a97SAlex Deucher /**
56aaa36a97SAlex Deucher  * vce_v3_0_ring_get_rptr - get read pointer
57aaa36a97SAlex Deucher  *
58aaa36a97SAlex Deucher  * @ring: amdgpu_ring pointer
59aaa36a97SAlex Deucher  *
60aaa36a97SAlex Deucher  * Returns the current hardware read pointer
61aaa36a97SAlex Deucher  */
62aaa36a97SAlex Deucher static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
63aaa36a97SAlex Deucher {
64aaa36a97SAlex Deucher 	struct amdgpu_device *adev = ring->adev;
65aaa36a97SAlex Deucher 
66aaa36a97SAlex Deucher 	if (ring == &adev->vce.ring[0])
67aaa36a97SAlex Deucher 		return RREG32(mmVCE_RB_RPTR);
68aaa36a97SAlex Deucher 	else
69aaa36a97SAlex Deucher 		return RREG32(mmVCE_RB_RPTR2);
70aaa36a97SAlex Deucher }
71aaa36a97SAlex Deucher 
72aaa36a97SAlex Deucher /**
73aaa36a97SAlex Deucher  * vce_v3_0_ring_get_wptr - get write pointer
74aaa36a97SAlex Deucher  *
75aaa36a97SAlex Deucher  * @ring: amdgpu_ring pointer
76aaa36a97SAlex Deucher  *
77aaa36a97SAlex Deucher  * Returns the current hardware write pointer
78aaa36a97SAlex Deucher  */
79aaa36a97SAlex Deucher static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
80aaa36a97SAlex Deucher {
81aaa36a97SAlex Deucher 	struct amdgpu_device *adev = ring->adev;
82aaa36a97SAlex Deucher 
83aaa36a97SAlex Deucher 	if (ring == &adev->vce.ring[0])
84aaa36a97SAlex Deucher 		return RREG32(mmVCE_RB_WPTR);
85aaa36a97SAlex Deucher 	else
86aaa36a97SAlex Deucher 		return RREG32(mmVCE_RB_WPTR2);
87aaa36a97SAlex Deucher }
88aaa36a97SAlex Deucher 
89aaa36a97SAlex Deucher /**
90aaa36a97SAlex Deucher  * vce_v3_0_ring_set_wptr - set write pointer
91aaa36a97SAlex Deucher  *
92aaa36a97SAlex Deucher  * @ring: amdgpu_ring pointer
93aaa36a97SAlex Deucher  *
94aaa36a97SAlex Deucher  * Commits the write pointer to the hardware
95aaa36a97SAlex Deucher  */
96aaa36a97SAlex Deucher static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
97aaa36a97SAlex Deucher {
98aaa36a97SAlex Deucher 	struct amdgpu_device *adev = ring->adev;
99aaa36a97SAlex Deucher 
100aaa36a97SAlex Deucher 	if (ring == &adev->vce.ring[0])
101aaa36a97SAlex Deucher 		WREG32(mmVCE_RB_WPTR, ring->wptr);
102aaa36a97SAlex Deucher 	else
103aaa36a97SAlex Deucher 		WREG32(mmVCE_RB_WPTR2, ring->wptr);
104aaa36a97SAlex Deucher }
105aaa36a97SAlex Deucher 
/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block: for each non-harvested instance, select
 * it via GRBM_GFX_INDEX, program its memory controller, release the ECPU
 * from soft reset and wait for the firmware to report ready (bit 1 of
 * VCE_STATUS), then program both ring buffers.
 *
 * Returns 0 on success, -1 if the firmware never reports ready.
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int idx, i, j, r;

	/* GRBM_GFX_INDEX is shared state; serialize instance selection */
	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {

		/* skip instances fused off on this chip */
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		/* route register accesses to VCE instance idx */
		if(idx == 0)
			WREG32_P(mmGRBM_GFX_INDEX, 0,
				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
		else
			WREG32_P(mmGRBM_GFX_INDEX,
				GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);

		vce_v3_0_mc_resume(adev, idx);

		/* set BUSY flag */
		WREG32_P(mmVCE_STATUS, 1, ~1);
		/* Stoney and newer use an extra bit in VCPU_CNTL */
		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
		else
			WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
				~VCE_VCPU_CNTL__CLK_EN_MASK);

		/* assert, hold, then release ECPU soft reset to boot the fw */
		WREG32_P(mmVCE_SOFT_RESET,
			 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

		mdelay(100);

		WREG32_P(mmVCE_SOFT_RESET, 0,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

		/* up to 10 attempts, each polling up to 100 * 10ms for the
		 * firmware-ready bit (bit 1) in VCE_STATUS */
		for (i = 0; i < 10; ++i) {
			uint32_t status;
			for (j = 0; j < 100; ++j) {
				status = RREG32(mmVCE_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			/* not ready yet: pulse the ECPU reset and retry */
			DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
			WREG32_P(mmVCE_SOFT_RESET,
				VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
			mdelay(10);
			WREG32_P(mmVCE_SOFT_RESET, 0,
				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
			mdelay(10);
			r = -1;
		}

		/* clear BUSY flag */
		WREG32_P(mmVCE_STATUS, 0, ~1);

		if (r) {
			DRM_ERROR("VCE not responding, giving up!!!\n");
			mutex_unlock(&adev->grbm_idx_mutex);
			return r;
		}
	}

	/* restore instance 0 selection before dropping the lock */
	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
	mutex_unlock(&adev->grbm_idx_mutex);

	/* program ring buffer 0 registers */
	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, ring->wptr);
	WREG32(mmVCE_RB_WPTR, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

	/* program ring buffer 1 registers */
	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, ring->wptr);
	WREG32(mmVCE_RB_WPTR2, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	return 0;
}
203aaa36a97SAlex Deucher 
2046a585777SAlex Deucher #define ixVCE_HARVEST_FUSE_MACRO__ADDRESS     0xC0014074
2056a585777SAlex Deucher #define VCE_HARVEST_FUSE_MACRO__SHIFT       27
2066a585777SAlex Deucher #define VCE_HARVEST_FUSE_MACRO__MASK        0x18000000
2076a585777SAlex Deucher 
2086a585777SAlex Deucher static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
2096a585777SAlex Deucher {
2106a585777SAlex Deucher 	u32 tmp;
2116a585777SAlex Deucher 	unsigned ret;
2126a585777SAlex Deucher 
213cfaba566SSamuel Li 	/* Fiji, Stoney are single pipe */
214cfaba566SSamuel Li 	if ((adev->asic_type == CHIP_FIJI) ||
215cfaba566SSamuel Li 	    (adev->asic_type == CHIP_STONEY)){
216188a9bcdSAlex Deucher 		ret = AMDGPU_VCE_HARVEST_VCE1;
217188a9bcdSAlex Deucher 		return ret;
218188a9bcdSAlex Deucher 	}
219188a9bcdSAlex Deucher 
220188a9bcdSAlex Deucher 	/* Tonga and CZ are dual or single pipe */
2212f7d10b3SJammy Zhou 	if (adev->flags & AMD_IS_APU)
2226a585777SAlex Deucher 		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
2236a585777SAlex Deucher 		       VCE_HARVEST_FUSE_MACRO__MASK) >>
2246a585777SAlex Deucher 			VCE_HARVEST_FUSE_MACRO__SHIFT;
2256a585777SAlex Deucher 	else
2266a585777SAlex Deucher 		tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
2276a585777SAlex Deucher 		       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
2286a585777SAlex Deucher 			CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;
2296a585777SAlex Deucher 
2306a585777SAlex Deucher 	switch (tmp) {
2316a585777SAlex Deucher 	case 1:
2326a585777SAlex Deucher 		ret = AMDGPU_VCE_HARVEST_VCE0;
2336a585777SAlex Deucher 		break;
2346a585777SAlex Deucher 	case 2:
2356a585777SAlex Deucher 		ret = AMDGPU_VCE_HARVEST_VCE1;
2366a585777SAlex Deucher 		break;
2376a585777SAlex Deucher 	case 3:
2386a585777SAlex Deucher 		ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
2396a585777SAlex Deucher 		break;
2406a585777SAlex Deucher 	default:
2416a585777SAlex Deucher 		ret = 0;
2426a585777SAlex Deucher 	}
2436a585777SAlex Deucher 
2446a585777SAlex Deucher 	return ret;
2456a585777SAlex Deucher }
2466a585777SAlex Deucher 
2475fc3aeebSyanyang1 static int vce_v3_0_early_init(void *handle)
248aaa36a97SAlex Deucher {
2495fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2505fc3aeebSyanyang1 
2516a585777SAlex Deucher 	adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
2526a585777SAlex Deucher 
2536a585777SAlex Deucher 	if ((adev->vce.harvest_config &
2546a585777SAlex Deucher 	     (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
2556a585777SAlex Deucher 	    (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
2566a585777SAlex Deucher 		return -ENOENT;
2576a585777SAlex Deucher 
258aaa36a97SAlex Deucher 	vce_v3_0_set_ring_funcs(adev);
259aaa36a97SAlex Deucher 	vce_v3_0_set_irq_funcs(adev);
260aaa36a97SAlex Deucher 
261aaa36a97SAlex Deucher 	return 0;
262aaa36a97SAlex Deucher }
263aaa36a97SAlex Deucher 
2645fc3aeebSyanyang1 static int vce_v3_0_sw_init(void *handle)
265aaa36a97SAlex Deucher {
2665fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
267aaa36a97SAlex Deucher 	struct amdgpu_ring *ring;
268aaa36a97SAlex Deucher 	int r;
269aaa36a97SAlex Deucher 
270aaa36a97SAlex Deucher 	/* VCE */
271aaa36a97SAlex Deucher 	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
272aaa36a97SAlex Deucher 	if (r)
273aaa36a97SAlex Deucher 		return r;
274aaa36a97SAlex Deucher 
275e9822622SLeo Liu 	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
276e9822622SLeo Liu 		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
277aaa36a97SAlex Deucher 	if (r)
278aaa36a97SAlex Deucher 		return r;
279aaa36a97SAlex Deucher 
280aaa36a97SAlex Deucher 	r = amdgpu_vce_resume(adev);
281aaa36a97SAlex Deucher 	if (r)
282aaa36a97SAlex Deucher 		return r;
283aaa36a97SAlex Deucher 
284aaa36a97SAlex Deucher 	ring = &adev->vce.ring[0];
285aaa36a97SAlex Deucher 	sprintf(ring->name, "vce0");
286aaa36a97SAlex Deucher 	r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
287aaa36a97SAlex Deucher 			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
288aaa36a97SAlex Deucher 	if (r)
289aaa36a97SAlex Deucher 		return r;
290aaa36a97SAlex Deucher 
291aaa36a97SAlex Deucher 	ring = &adev->vce.ring[1];
292aaa36a97SAlex Deucher 	sprintf(ring->name, "vce1");
293aaa36a97SAlex Deucher 	r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
294aaa36a97SAlex Deucher 			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
295aaa36a97SAlex Deucher 	if (r)
296aaa36a97SAlex Deucher 		return r;
297aaa36a97SAlex Deucher 
298aaa36a97SAlex Deucher 	return r;
299aaa36a97SAlex Deucher }
300aaa36a97SAlex Deucher 
/**
 * vce_v3_0_sw_fini - software teardown
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Suspends the VCE common code, then releases its software state.
 */
static int vce_v3_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return amdgpu_vce_sw_fini(adev);
}
316aaa36a97SAlex Deucher 
3175fc3aeebSyanyang1 static int vce_v3_0_hw_init(void *handle)
318aaa36a97SAlex Deucher {
319aaa36a97SAlex Deucher 	struct amdgpu_ring *ring;
320aaa36a97SAlex Deucher 	int r;
3215fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
322aaa36a97SAlex Deucher 
323aaa36a97SAlex Deucher 	r = vce_v3_0_start(adev);
324aaa36a97SAlex Deucher 	if (r)
325aaa36a97SAlex Deucher 		return r;
326aaa36a97SAlex Deucher 
327aaa36a97SAlex Deucher 	ring = &adev->vce.ring[0];
328aaa36a97SAlex Deucher 	ring->ready = true;
329aaa36a97SAlex Deucher 	r = amdgpu_ring_test_ring(ring);
330aaa36a97SAlex Deucher 	if (r) {
331aaa36a97SAlex Deucher 		ring->ready = false;
332aaa36a97SAlex Deucher 		return r;
333aaa36a97SAlex Deucher 	}
334aaa36a97SAlex Deucher 
335aaa36a97SAlex Deucher 	ring = &adev->vce.ring[1];
336aaa36a97SAlex Deucher 	ring->ready = true;
337aaa36a97SAlex Deucher 	r = amdgpu_ring_test_ring(ring);
338aaa36a97SAlex Deucher 	if (r) {
339aaa36a97SAlex Deucher 		ring->ready = false;
340aaa36a97SAlex Deucher 		return r;
341aaa36a97SAlex Deucher 	}
342aaa36a97SAlex Deucher 
343aaa36a97SAlex Deucher 	DRM_INFO("VCE initialized successfully.\n");
344aaa36a97SAlex Deucher 
345aaa36a97SAlex Deucher 	return 0;
346aaa36a97SAlex Deucher }
347aaa36a97SAlex Deucher 
/* hw_fini stub: nothing is torn down here in this version; the block is
 * (re)initialized through vce_v3_0_start() and power handling is left to
 * the dpm code (see vce_v3_0_set_powergating_state). */
static int vce_v3_0_hw_fini(void *handle)
{
	return 0;
}
352aaa36a97SAlex Deucher 
/**
 * vce_v3_0_suspend - suspend callback
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Runs the hw_fini path, then suspends the VCE common code.
 */
static int vce_v3_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_vce_suspend(adev);
}
368aaa36a97SAlex Deucher 
/**
 * vce_v3_0_resume - resume callback
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Resumes the VCE common code, then re-runs hardware init.
 */
static int vce_v3_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	return vce_v3_0_hw_init(adev);
}
384aaa36a97SAlex Deucher 
/**
 * vce_v3_0_mc_resume - program the VCE memory controller
 *
 * @adev: amdgpu_device pointer
 * @idx: VCE instance (0 or 1); caller must have already selected this
 *       instance via GRBM_GFX_INDEX (see vce_v3_0_start)
 *
 * Programs clock gating, LMI and VCPU cache registers so the selected
 * instance can fetch its firmware, stack and data regions from the
 * shared VCE BO. Instance 1's regions sit after instance 0's in the BO.
 */
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	/* clock gating setup */
	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

	/* local memory interface setup */
	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);
	/* Stoney+ exposes three 40-bit BAR registers; older parts one */
	if (adev->asic_type >= CHIP_STONEY) {
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
	} else
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
	/* region 0: the firmware image (shared by both instances) */
	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	if (idx == 0) {
		/* regions 1/2: instance 0 stack and data follow the fw */
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		/* instance 1's regions start after instance 0's */
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		/* NOTE(review): instance 1 masks offsets with 0xfffffff vs
		 * 0x7fffffff for instance 0 — looks intentional but verify
		 * against the register spec */
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);

	/* enable the system trap interrupt */
	WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
		 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
}
435aaa36a97SAlex Deucher 
4365fc3aeebSyanyang1 static bool vce_v3_0_is_idle(void *handle)
437aaa36a97SAlex Deucher {
4385fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
439be4f38e2SAlex Deucher 	u32 mask = 0;
440be4f38e2SAlex Deucher 	int idx;
4415fc3aeebSyanyang1 
442be4f38e2SAlex Deucher 	for (idx = 0; idx < 2; ++idx) {
443be4f38e2SAlex Deucher 		if (adev->vce.harvest_config & (1 << idx))
444be4f38e2SAlex Deucher 			continue;
445be4f38e2SAlex Deucher 
446be4f38e2SAlex Deucher 		if (idx == 0)
447be4f38e2SAlex Deucher 			mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
448be4f38e2SAlex Deucher 		else
449be4f38e2SAlex Deucher 			mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
450be4f38e2SAlex Deucher 	}
451be4f38e2SAlex Deucher 
452be4f38e2SAlex Deucher 	return !(RREG32(mmSRBM_STATUS2) & mask);
453aaa36a97SAlex Deucher }
454aaa36a97SAlex Deucher 
4555fc3aeebSyanyang1 static int vce_v3_0_wait_for_idle(void *handle)
456aaa36a97SAlex Deucher {
457aaa36a97SAlex Deucher 	unsigned i;
4585fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
459be4f38e2SAlex Deucher 	u32 mask = 0;
460be4f38e2SAlex Deucher 	int idx;
461be4f38e2SAlex Deucher 
462be4f38e2SAlex Deucher 	for (idx = 0; idx < 2; ++idx) {
463be4f38e2SAlex Deucher 		if (adev->vce.harvest_config & (1 << idx))
464be4f38e2SAlex Deucher 			continue;
465be4f38e2SAlex Deucher 
466be4f38e2SAlex Deucher 		if (idx == 0)
467be4f38e2SAlex Deucher 			mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
468be4f38e2SAlex Deucher 		else
469be4f38e2SAlex Deucher 			mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
470be4f38e2SAlex Deucher 	}
471aaa36a97SAlex Deucher 
472aaa36a97SAlex Deucher 	for (i = 0; i < adev->usec_timeout; i++) {
473be4f38e2SAlex Deucher 		if (!(RREG32(mmSRBM_STATUS2) & mask))
474aaa36a97SAlex Deucher 			return 0;
475aaa36a97SAlex Deucher 	}
476aaa36a97SAlex Deucher 	return -ETIMEDOUT;
477aaa36a97SAlex Deucher }
478aaa36a97SAlex Deucher 
4795fc3aeebSyanyang1 static int vce_v3_0_soft_reset(void *handle)
480aaa36a97SAlex Deucher {
4815fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
482be4f38e2SAlex Deucher 	u32 mask = 0;
483be4f38e2SAlex Deucher 	int idx;
4845fc3aeebSyanyang1 
485be4f38e2SAlex Deucher 	for (idx = 0; idx < 2; ++idx) {
486be4f38e2SAlex Deucher 		if (adev->vce.harvest_config & (1 << idx))
487be4f38e2SAlex Deucher 			continue;
488be4f38e2SAlex Deucher 
489be4f38e2SAlex Deucher 		if (idx == 0)
490be4f38e2SAlex Deucher 			mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK;
491be4f38e2SAlex Deucher 		else
492be4f38e2SAlex Deucher 			mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;
493be4f38e2SAlex Deucher 	}
494be4f38e2SAlex Deucher 	WREG32_P(mmSRBM_SOFT_RESET, mask,
495be4f38e2SAlex Deucher 		 ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK |
496be4f38e2SAlex Deucher 		   SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK));
497aaa36a97SAlex Deucher 	mdelay(5);
498aaa36a97SAlex Deucher 
499aaa36a97SAlex Deucher 	return vce_v3_0_start(adev);
500aaa36a97SAlex Deucher }
501aaa36a97SAlex Deucher 
/* Dump the VCE 3.0 register state for debugging.  Reads only the
 * currently-selected instance's registers (whichever GRBM_GFX_INDEX
 * last pointed at). */
static void vce_v3_0_print_status(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "VCE 3.0 registers\n");
	dev_info(adev->dev, "  VCE_STATUS=0x%08X\n",
		 RREG32(mmVCE_STATUS));
	dev_info(adev->dev, "  VCE_VCPU_CNTL=0x%08X\n",
		 RREG32(mmVCE_VCPU_CNTL));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET0=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET0));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE0=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE0));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET1=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET1));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE1=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE1));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET2=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET2));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE2=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE2));
	dev_info(adev->dev, "  VCE_SOFT_RESET=0x%08X\n",
		 RREG32(mmVCE_SOFT_RESET));
	dev_info(adev->dev, "  VCE_RB_BASE_LO2=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_LO2));
	dev_info(adev->dev, "  VCE_RB_BASE_HI2=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_HI2));
	dev_info(adev->dev, "  VCE_RB_SIZE2=0x%08X\n",
		 RREG32(mmVCE_RB_SIZE2));
	dev_info(adev->dev, "  VCE_RB_RPTR2=0x%08X\n",
		 RREG32(mmVCE_RB_RPTR2));
	dev_info(adev->dev, "  VCE_RB_WPTR2=0x%08X\n",
		 RREG32(mmVCE_RB_WPTR2));
	dev_info(adev->dev, "  VCE_RB_BASE_LO=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_LO));
	dev_info(adev->dev, "  VCE_RB_BASE_HI=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_HI));
	dev_info(adev->dev, "  VCE_RB_SIZE=0x%08X\n",
		 RREG32(mmVCE_RB_SIZE));
	dev_info(adev->dev, "  VCE_RB_RPTR=0x%08X\n",
		 RREG32(mmVCE_RB_RPTR));
	dev_info(adev->dev, "  VCE_RB_WPTR=0x%08X\n",
		 RREG32(mmVCE_RB_WPTR));
	dev_info(adev->dev, "  VCE_CLOCK_GATING_A=0x%08X\n",
		 RREG32(mmVCE_CLOCK_GATING_A));
	dev_info(adev->dev, "  VCE_CLOCK_GATING_B=0x%08X\n",
		 RREG32(mmVCE_CLOCK_GATING_B));
	dev_info(adev->dev, "  VCE_UENC_CLOCK_GATING=0x%08X\n",
		 RREG32(mmVCE_UENC_CLOCK_GATING));
	dev_info(adev->dev, "  VCE_UENC_REG_CLOCK_GATING=0x%08X\n",
		 RREG32(mmVCE_UENC_REG_CLOCK_GATING));
	dev_info(adev->dev, "  VCE_SYS_INT_EN=0x%08X\n",
		 RREG32(mmVCE_SYS_INT_EN));
	dev_info(adev->dev, "  VCE_LMI_CTRL2=0x%08X\n",
		 RREG32(mmVCE_LMI_CTRL2));
	dev_info(adev->dev, "  VCE_LMI_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_CTRL));
	dev_info(adev->dev, "  VCE_LMI_VM_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_VM_CTRL));
	dev_info(adev->dev, "  VCE_LMI_SWAP_CNTL=0x%08X\n",
		 RREG32(mmVCE_LMI_SWAP_CNTL));
	dev_info(adev->dev, "  VCE_LMI_SWAP_CNTL1=0x%08X\n",
		 RREG32(mmVCE_LMI_SWAP_CNTL1));
	dev_info(adev->dev, "  VCE_LMI_CACHE_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_CACHE_CTRL));
}
568aaa36a97SAlex Deucher 
569aaa36a97SAlex Deucher static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
570aaa36a97SAlex Deucher 					struct amdgpu_irq_src *source,
571aaa36a97SAlex Deucher 					unsigned type,
572aaa36a97SAlex Deucher 					enum amdgpu_interrupt_state state)
573aaa36a97SAlex Deucher {
574aaa36a97SAlex Deucher 	uint32_t val = 0;
575aaa36a97SAlex Deucher 
576aaa36a97SAlex Deucher 	if (state == AMDGPU_IRQ_STATE_ENABLE)
577aaa36a97SAlex Deucher 		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
578aaa36a97SAlex Deucher 
579aaa36a97SAlex Deucher 	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
580aaa36a97SAlex Deucher 	return 0;
581aaa36a97SAlex Deucher }
582aaa36a97SAlex Deucher 
583aaa36a97SAlex Deucher static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
584aaa36a97SAlex Deucher 				      struct amdgpu_irq_src *source,
585aaa36a97SAlex Deucher 				      struct amdgpu_iv_entry *entry)
586aaa36a97SAlex Deucher {
587aaa36a97SAlex Deucher 	DRM_DEBUG("IH: VCE\n");
588d6c29c30SLeo Liu 
589d6c29c30SLeo Liu 	WREG32_P(mmVCE_SYS_INT_STATUS,
590d6c29c30SLeo Liu 		VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
591d6c29c30SLeo Liu 		~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);
592d6c29c30SLeo Liu 
593aaa36a97SAlex Deucher 	switch (entry->src_data) {
594aaa36a97SAlex Deucher 	case 0:
595aaa36a97SAlex Deucher 		amdgpu_fence_process(&adev->vce.ring[0]);
596aaa36a97SAlex Deucher 		break;
597aaa36a97SAlex Deucher 	case 1:
598aaa36a97SAlex Deucher 		amdgpu_fence_process(&adev->vce.ring[1]);
599aaa36a97SAlex Deucher 		break;
600aaa36a97SAlex Deucher 	default:
601aaa36a97SAlex Deucher 		DRM_ERROR("Unhandled interrupt: %d %d\n",
602aaa36a97SAlex Deucher 			  entry->src_id, entry->src_data);
603aaa36a97SAlex Deucher 		break;
604aaa36a97SAlex Deucher 	}
605aaa36a97SAlex Deucher 
606aaa36a97SAlex Deucher 	return 0;
607aaa36a97SAlex Deucher }
608aaa36a97SAlex Deucher 
/* Clockgating stub: VCE 3.0 clock gating is not managed through this
 * callback in this version; always reports success. */
static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}
614aaa36a97SAlex Deucher 
6155fc3aeebSyanyang1 static int vce_v3_0_set_powergating_state(void *handle,
6165fc3aeebSyanyang1 					  enum amd_powergating_state state)
617aaa36a97SAlex Deucher {
618aaa36a97SAlex Deucher 	/* This doesn't actually powergate the VCE block.
619aaa36a97SAlex Deucher 	 * That's done in the dpm code via the SMC.  This
620aaa36a97SAlex Deucher 	 * just re-inits the block as necessary.  The actual
621aaa36a97SAlex Deucher 	 * gating still happens in the dpm code.  We should
622aaa36a97SAlex Deucher 	 * revisit this when there is a cleaner line between
623aaa36a97SAlex Deucher 	 * the smc and the hw blocks
624aaa36a97SAlex Deucher 	 */
6255fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6265fc3aeebSyanyang1 
6275fc3aeebSyanyang1 	if (state == AMD_PG_STATE_GATE)
628aaa36a97SAlex Deucher 		/* XXX do we need a vce_v3_0_stop()? */
629aaa36a97SAlex Deucher 		return 0;
630aaa36a97SAlex Deucher 	else
631aaa36a97SAlex Deucher 		return vce_v3_0_start(adev);
632aaa36a97SAlex Deucher }
633aaa36a97SAlex Deucher 
/* Common IP-block callback table wiring VCE 3.0 into the amdgpu
 * init/suspend/reset framework. */
const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.early_init = vce_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v3_0_sw_init,
	.sw_fini = vce_v3_0_sw_fini,
	.hw_init = vce_v3_0_hw_init,
	.hw_fini = vce_v3_0_hw_fini,
	.suspend = vce_v3_0_suspend,
	.resume = vce_v3_0_resume,
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.soft_reset = vce_v3_0_soft_reset,
	.print_status = vce_v3_0_print_status,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
};
650aaa36a97SAlex Deucher 
/* Ring callback table shared by both VCE rings; pointer handling is
 * VCE-3.0-specific, the rest reuses the common amdgpu_vce helpers. */
static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.emit_semaphore = amdgpu_vce_ring_emit_semaphore,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
};
663aaa36a97SAlex Deucher 
664aaa36a97SAlex Deucher static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
665aaa36a97SAlex Deucher {
666aaa36a97SAlex Deucher 	adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs;
667aaa36a97SAlex Deucher 	adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs;
668aaa36a97SAlex Deucher }
669aaa36a97SAlex Deucher 
/* Interrupt-source callbacks for the VCE trap interrupt. */
static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
	.set = vce_v3_0_set_interrupt_state,
	.process = vce_v3_0_process_interrupt,
};
674aaa36a97SAlex Deucher 
675aaa36a97SAlex Deucher static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
676aaa36a97SAlex Deucher {
677aaa36a97SAlex Deucher 	adev->vce.irq.num_types = 1;
678aaa36a97SAlex Deucher 	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
679aaa36a97SAlex Deucher };
680