xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c (revision 03ab8e6297acd1bc0eedaa050e2a1635c576fd11)
1a2e73f56SAlex Deucher /*
2a2e73f56SAlex Deucher  * Copyright 2013 Advanced Micro Devices, Inc.
3a2e73f56SAlex Deucher  * All Rights Reserved.
4a2e73f56SAlex Deucher  *
5a2e73f56SAlex Deucher  * Permission is hereby granted, free of charge, to any person obtaining a
6a2e73f56SAlex Deucher  * copy of this software and associated documentation files (the
7a2e73f56SAlex Deucher  * "Software"), to deal in the Software without restriction, including
8a2e73f56SAlex Deucher  * without limitation the rights to use, copy, modify, merge, publish,
9a2e73f56SAlex Deucher  * distribute, sub license, and/or sell copies of the Software, and to
10a2e73f56SAlex Deucher  * permit persons to whom the Software is furnished to do so, subject to
11a2e73f56SAlex Deucher  * the following conditions:
12a2e73f56SAlex Deucher  *
13a2e73f56SAlex Deucher  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14a2e73f56SAlex Deucher  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15a2e73f56SAlex Deucher  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16a2e73f56SAlex Deucher  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17a2e73f56SAlex Deucher  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18a2e73f56SAlex Deucher  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19a2e73f56SAlex Deucher  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20a2e73f56SAlex Deucher  *
21a2e73f56SAlex Deucher  * The above copyright notice and this permission notice (including the
22a2e73f56SAlex Deucher  * next paragraph) shall be included in all copies or substantial portions
23a2e73f56SAlex Deucher  * of the Software.
24a2e73f56SAlex Deucher  *
25a2e73f56SAlex Deucher  * Authors: Christian König <christian.koenig@amd.com>
26a2e73f56SAlex Deucher  */
27a2e73f56SAlex Deucher 
28a2e73f56SAlex Deucher #include <linux/firmware.h>
2947b757fbSSam Ravnborg 
30a2e73f56SAlex Deucher #include "amdgpu.h"
31a2e73f56SAlex Deucher #include "amdgpu_vce.h"
32a2e73f56SAlex Deucher #include "cikd.h"
33a2e73f56SAlex Deucher #include "vce/vce_2_0_d.h"
34a2e73f56SAlex Deucher #include "vce/vce_2_0_sh_mask.h"
35cbb2fe8eSRex Zhu #include "smu/smu_7_0_1_d.h"
36cbb2fe8eSRex Zhu #include "smu/smu_7_0_1_sh_mask.h"
37a2e73f56SAlex Deucher #include "oss/oss_2_0_d.h"
38a2e73f56SAlex Deucher #include "oss/oss_2_0_sh_mask.h"
39a2e73f56SAlex Deucher 
40e9822622SLeo Liu #define VCE_V2_0_FW_SIZE	(256 * 1024)
41e9822622SLeo Liu #define VCE_V2_0_STACK_SIZE	(64 * 1024)
42e9822622SLeo Liu #define VCE_V2_0_DATA_SIZE	(23552 * AMDGPU_MAX_VCE_HANDLES)
43abc8c1ceSjimqu #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02
44e9822622SLeo Liu 
45a2e73f56SAlex Deucher static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
46a2e73f56SAlex Deucher static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);
47beeea981SRex Zhu 
48a2e73f56SAlex Deucher /**
49a2e73f56SAlex Deucher  * vce_v2_0_ring_get_rptr - get read pointer
50a2e73f56SAlex Deucher  *
51a2e73f56SAlex Deucher  * @ring: amdgpu_ring pointer
52a2e73f56SAlex Deucher  *
53a2e73f56SAlex Deucher  * Returns the current hardware read pointer
54a2e73f56SAlex Deucher  */
vce_v2_0_ring_get_rptr(struct amdgpu_ring * ring)55536fbf94SKen Wang static uint64_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
56a2e73f56SAlex Deucher {
57a2e73f56SAlex Deucher 	struct amdgpu_device *adev = ring->adev;
58a2e73f56SAlex Deucher 
595d4af988SAlex Deucher 	if (ring->me == 0)
60a2e73f56SAlex Deucher 		return RREG32(mmVCE_RB_RPTR);
61a2e73f56SAlex Deucher 	else
62a2e73f56SAlex Deucher 		return RREG32(mmVCE_RB_RPTR2);
63a2e73f56SAlex Deucher }
64a2e73f56SAlex Deucher 
65a2e73f56SAlex Deucher /**
66a2e73f56SAlex Deucher  * vce_v2_0_ring_get_wptr - get write pointer
67a2e73f56SAlex Deucher  *
68a2e73f56SAlex Deucher  * @ring: amdgpu_ring pointer
69a2e73f56SAlex Deucher  *
70a2e73f56SAlex Deucher  * Returns the current hardware write pointer
71a2e73f56SAlex Deucher  */
vce_v2_0_ring_get_wptr(struct amdgpu_ring * ring)72536fbf94SKen Wang static uint64_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
73a2e73f56SAlex Deucher {
74a2e73f56SAlex Deucher 	struct amdgpu_device *adev = ring->adev;
75a2e73f56SAlex Deucher 
765d4af988SAlex Deucher 	if (ring->me == 0)
77a2e73f56SAlex Deucher 		return RREG32(mmVCE_RB_WPTR);
78a2e73f56SAlex Deucher 	else
79a2e73f56SAlex Deucher 		return RREG32(mmVCE_RB_WPTR2);
80a2e73f56SAlex Deucher }
81a2e73f56SAlex Deucher 
82a2e73f56SAlex Deucher /**
83a2e73f56SAlex Deucher  * vce_v2_0_ring_set_wptr - set write pointer
84a2e73f56SAlex Deucher  *
85a2e73f56SAlex Deucher  * @ring: amdgpu_ring pointer
86a2e73f56SAlex Deucher  *
87a2e73f56SAlex Deucher  * Commits the write pointer to the hardware
88a2e73f56SAlex Deucher  */
vce_v2_0_ring_set_wptr(struct amdgpu_ring * ring)89a2e73f56SAlex Deucher static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
90a2e73f56SAlex Deucher {
91a2e73f56SAlex Deucher 	struct amdgpu_device *adev = ring->adev;
92a2e73f56SAlex Deucher 
935d4af988SAlex Deucher 	if (ring->me == 0)
94536fbf94SKen Wang 		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
95a2e73f56SAlex Deucher 	else
96536fbf94SKen Wang 		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
97a2e73f56SAlex Deucher }
98a2e73f56SAlex Deucher 
vce_v2_0_lmi_clean(struct amdgpu_device * adev)99abc8c1ceSjimqu static int vce_v2_0_lmi_clean(struct amdgpu_device *adev)
100abc8c1ceSjimqu {
101abc8c1ceSjimqu 	int i, j;
102abc8c1ceSjimqu 
103abc8c1ceSjimqu 	for (i = 0; i < 10; ++i) {
104abc8c1ceSjimqu 		for (j = 0; j < 100; ++j) {
105abc8c1ceSjimqu 			uint32_t status = RREG32(mmVCE_LMI_STATUS);
106abc8c1ceSjimqu 
107abc8c1ceSjimqu 			if (status & 0x337f)
108abc8c1ceSjimqu 				return 0;
109abc8c1ceSjimqu 			mdelay(10);
110abc8c1ceSjimqu 		}
111abc8c1ceSjimqu 	}
112abc8c1ceSjimqu 
113abc8c1ceSjimqu 	return -ETIMEDOUT;
114abc8c1ceSjimqu }
115abc8c1ceSjimqu 
/*
 * Wait for the VCE VCPU firmware to report that it has loaded.
 *
 * Each of up to 10 outer attempts polls VCE_STATUS for ~1s (100 x 10ms).
 * If the firmware has not come up after an attempt, the ECPU is pulsed
 * through soft reset and polling restarts.
 *
 * Returns 0 once the FW-loaded bit is seen, -ETIMEDOUT otherwise.
 */
static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status = RREG32(mmVCE_STATUS);

			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
				return 0;
			mdelay(10);
		}

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		/* assert ECPU soft reset ... */
		WREG32_P(mmVCE_SOFT_RESET,
			VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
		/* ... then release it and give the FW another chance to boot */
		WREG32_P(mmVCE_SOFT_RESET, 0,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
	}

	return -ETIMEDOUT;
}
141abc8c1ceSjimqu 
/* Force the clock-gating override bits on (value 7 = lowest three
 * override bits), effectively disabling VCE clock gating. */
static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
{
	WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
}
146f1ea278dSRex Zhu 
/*
 * Program the static clock-gating configuration used when the VCE block
 * is brought up.  The magic field values below come from the hardware
 * programming guide; they are applied verbatim here.
 */
static void vce_v2_0_init_cg(struct amdgpu_device *adev)
{
	u32 tmp;

	/* low 12 bits of CLOCK_GATING_A: clear, then set field at bits 4..7
	 * to 4; bit 18 (0x40000) is additionally set */
	tmp = RREG32(mmVCE_CLOCK_GATING_A);
	tmp &= ~0xfff;
	tmp |= ((0 << 0) | (4 << 4));
	tmp |= 0x40000;
	WREG32(mmVCE_CLOCK_GATING_A, tmp);

	/* same low-field programming for the UENC clock gating register */
	tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
	tmp &= ~0xfff;
	tmp |= ((0 << 0) | (4 << 4));
	WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

	/* CLOCK_GATING_B: set bit 4, clear bit 20 */
	tmp = RREG32(mmVCE_CLOCK_GATING_B);
	tmp |= 0x10;
	tmp &= ~0x100000;
	WREG32(mmVCE_CLOCK_GATING_B, tmp);
}
167f1ea278dSRex Zhu 
/*
 * Program the VCE memory controller interface after resume:
 * clock-gating overrides, LMI (local memory interface) setup, and the
 * VCPU cache windows for the firmware, stack and data segments laid out
 * consecutively from AMDGPU_VCE_FIRMWARE_OFFSET in the VCE BO.
 */
static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size, offset;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);

	/* 40-bit base address of the VCE BO, in 256-byte units */
	WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));

	/* window 0: firmware image */
	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V2_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	/* window 1: stack, directly after the firmware */
	offset += size;
	size = VCE_V2_0_STACK_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE1, size);

	/* window 2: data segment, after the stack */
	offset += size;
	size = VCE_V2_0_DATA_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE2, size);

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}
203f1ea278dSRex Zhu 
vce_v2_0_is_idle(void * handle)204f1ea278dSRex Zhu static bool vce_v2_0_is_idle(void *handle)
205f1ea278dSRex Zhu {
206f1ea278dSRex Zhu 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
207f1ea278dSRex Zhu 
208f1ea278dSRex Zhu 	return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
209f1ea278dSRex Zhu }
210f1ea278dSRex Zhu 
vce_v2_0_wait_for_idle(void * handle)211f1ea278dSRex Zhu static int vce_v2_0_wait_for_idle(void *handle)
212f1ea278dSRex Zhu {
213f1ea278dSRex Zhu 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
214f1ea278dSRex Zhu 	unsigned i;
215f1ea278dSRex Zhu 
216f1ea278dSRex Zhu 	for (i = 0; i < adev->usec_timeout; i++) {
217f1ea278dSRex Zhu 		if (vce_v2_0_is_idle(handle))
218f1ea278dSRex Zhu 			return 0;
219f1ea278dSRex Zhu 	}
220f1ea278dSRex Zhu 	return -ETIMEDOUT;
221f1ea278dSRex Zhu }
222f1ea278dSRex Zhu 
223a2e73f56SAlex Deucher /**
224a2e73f56SAlex Deucher  * vce_v2_0_start - start VCE block
225a2e73f56SAlex Deucher  *
226a2e73f56SAlex Deucher  * @adev: amdgpu_device pointer
227a2e73f56SAlex Deucher  *
228a2e73f56SAlex Deucher  * Setup and start the VCE block
229a2e73f56SAlex Deucher  */
vce_v2_0_start(struct amdgpu_device * adev)230a2e73f56SAlex Deucher static int vce_v2_0_start(struct amdgpu_device *adev)
231a2e73f56SAlex Deucher {
232a2e73f56SAlex Deucher 	struct amdgpu_ring *ring;
233abc8c1ceSjimqu 	int r;
234a2e73f56SAlex Deucher 
235a2e73f56SAlex Deucher 	/* set BUSY flag */
236a2e73f56SAlex Deucher 	WREG32_P(mmVCE_STATUS, 1, ~1);
237a2e73f56SAlex Deucher 
238beeea981SRex Zhu 	vce_v2_0_init_cg(adev);
239beeea981SRex Zhu 	vce_v2_0_disable_cg(adev);
240beeea981SRex Zhu 
241beeea981SRex Zhu 	vce_v2_0_mc_resume(adev);
242beeea981SRex Zhu 
243a2e73f56SAlex Deucher 	ring = &adev->vce.ring[0];
244536fbf94SKen Wang 	WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
245536fbf94SKen Wang 	WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
246a2e73f56SAlex Deucher 	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
247a2e73f56SAlex Deucher 	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
248a2e73f56SAlex Deucher 	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
249a2e73f56SAlex Deucher 
250a2e73f56SAlex Deucher 	ring = &adev->vce.ring[1];
251536fbf94SKen Wang 	WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
252536fbf94SKen Wang 	WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
253a2e73f56SAlex Deucher 	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
254a2e73f56SAlex Deucher 	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
255a2e73f56SAlex Deucher 	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
256a2e73f56SAlex Deucher 
25721d3cbbeSTom St Denis 	WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
25821d3cbbeSTom St Denis 	WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
259a2e73f56SAlex Deucher 	mdelay(100);
26021d3cbbeSTom St Denis 	WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
261a2e73f56SAlex Deucher 
262abc8c1ceSjimqu 	r = vce_v2_0_firmware_loaded(adev);
263a2e73f56SAlex Deucher 
264a2e73f56SAlex Deucher 	/* clear BUSY flag */
265a2e73f56SAlex Deucher 	WREG32_P(mmVCE_STATUS, 0, ~1);
266a2e73f56SAlex Deucher 
267a2e73f56SAlex Deucher 	if (r) {
268a2e73f56SAlex Deucher 		DRM_ERROR("VCE not responding, giving up!!!\n");
269a2e73f56SAlex Deucher 		return r;
270a2e73f56SAlex Deucher 	}
271a2e73f56SAlex Deucher 
272a2e73f56SAlex Deucher 	return 0;
273a2e73f56SAlex Deucher }
274a2e73f56SAlex Deucher 
vce_v2_0_stop(struct amdgpu_device * adev)275beeea981SRex Zhu static int vce_v2_0_stop(struct amdgpu_device *adev)
276beeea981SRex Zhu {
277f917c2adSColin Ian King 	int i;
278beeea981SRex Zhu 	int status;
279beeea981SRex Zhu 
280beeea981SRex Zhu 	if (vce_v2_0_lmi_clean(adev)) {
281beeea981SRex Zhu 		DRM_INFO("vce is not idle \n");
282beeea981SRex Zhu 		return 0;
283beeea981SRex Zhu 	}
284f917c2adSColin Ian King 
285beeea981SRex Zhu 	if (vce_v2_0_wait_for_idle(adev)) {
286f4895610SColin Ian King 		DRM_INFO("VCE is busy, Can't set clock gating");
287beeea981SRex Zhu 		return 0;
288beeea981SRex Zhu 	}
289beeea981SRex Zhu 
290beeea981SRex Zhu 	/* Stall UMC and register bus before resetting VCPU */
291beeea981SRex Zhu 	WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8));
292beeea981SRex Zhu 
293f917c2adSColin Ian King 	for (i = 0; i < 100; ++i) {
294beeea981SRex Zhu 		status = RREG32(mmVCE_LMI_STATUS);
295beeea981SRex Zhu 		if (status & 0x240)
296beeea981SRex Zhu 			break;
297beeea981SRex Zhu 		mdelay(1);
298beeea981SRex Zhu 	}
299beeea981SRex Zhu 
300beeea981SRex Zhu 	WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x80001);
301beeea981SRex Zhu 
302beeea981SRex Zhu 	/* put LMI, VCPU, RBC etc... into reset */
303beeea981SRex Zhu 	WREG32_P(mmVCE_SOFT_RESET, 1, ~0x1);
304beeea981SRex Zhu 
305beeea981SRex Zhu 	WREG32(mmVCE_STATUS, 0);
306beeea981SRex Zhu 
307beeea981SRex Zhu 	return 0;
308beeea981SRex Zhu }
309beeea981SRex Zhu 
/*
 * Apply software-controlled clock gating.
 *
 * @gated: true to gate (stop) the sub-block clocks, false to ungate.
 *
 * The bit masks mirror each other between the two branches: the gated
 * path sets the high "gate" fields and clears the override, while the
 * ungated path sets the low "run" fields instead.
 */
static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
{
	u32 tmp;

	if (gated) {
		tmp = RREG32(mmVCE_CLOCK_GATING_B);
		tmp |= 0xe70000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);

		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
		tmp |= 0xff000000;
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		tmp &= ~0x3fc;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

		/* clear the clock-gating override when fully gated */
		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
	} else {
		tmp = RREG32(mmVCE_CLOCK_GATING_B);
		tmp |= 0xe7;
		tmp &= ~0xe70000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);

		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
		tmp |= 0x1fe000;
		tmp &= ~0xff000000;
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		tmp |= 0x3fc;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
	}
}
344f1ea278dSRex Zhu 
/*
 * Apply dynamic (hardware-controlled) clock gating.
 *
 * @gated: true to gate, false to ungate the SW-controlled exception
 *         blocks (ECPU, IH, SEM, SYS).
 */
static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
	u32 orig, tmp;

/* LMI_MC/LMI_UMC always set in dynamic,
 * set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0}
 */
	tmp = RREG32(mmVCE_CLOCK_GATING_B);
	tmp &= ~0x00060006;

/* Exception for ECPU, IH, SEM, SYS blocks needs to be turned on/off by SW */
	if (gated) {
		tmp |= 0xe10000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);
	} else {
		tmp |= 0xe1;
		tmp &= ~0xe10000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);
	}

	/* only write back UENC gating registers when they actually change */
	orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
	tmp &= ~0x1fe000;
	tmp &= ~0xff000000;
	if (tmp != orig)
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

	orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
	tmp &= ~0x3fc;
	if (tmp != orig)
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

	/* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
	WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);

	if(gated)
		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
}
382f1ea278dSRex Zhu 
/*
 * Enable or disable medium-grain clock gating (MGCG).
 *
 * @enable: requested gating state; only honoured when the device
 *          advertises AMD_CG_SUPPORT_VCE_MGCG.
 * @sw_cg:  select the software-controlled gating path instead of the
 *          dynamic (hardware) one.
 */
static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
								bool sw_cg)
{
	bool gate = enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG);

	/* when ungating, force the clock-gating override off first */
	if (!gate)
		vce_v2_0_disable_cg(adev);

	if (sw_cg)
		vce_v2_0_set_sw_cg(adev, gate);
	else
		vce_v2_0_set_dyn_cg(adev, gate);
}
400f1ea278dSRex Zhu 
vce_v2_0_early_init(void * handle)4015fc3aeebSyanyang1 static int vce_v2_0_early_init(void *handle)
402a2e73f56SAlex Deucher {
4035fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4045fc3aeebSyanyang1 
40575c65480SAlex Deucher 	adev->vce.num_rings = 2;
40675c65480SAlex Deucher 
407a2e73f56SAlex Deucher 	vce_v2_0_set_ring_funcs(adev);
408a2e73f56SAlex Deucher 	vce_v2_0_set_irq_funcs(adev);
409a2e73f56SAlex Deucher 
410a2e73f56SAlex Deucher 	return 0;
411a2e73f56SAlex Deucher }
412a2e73f56SAlex Deucher 
/*
 * Software init: register the VCE interrupt source (legacy client,
 * src id 167), allocate the firmware/stack/data BO, resume the VCE
 * common code and create both rings.  Returns 0 or a negative errno.
 */
static int vce_v2_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCE */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq);
	if (r)
		return r;

	/* BO sized for firmware + stack + per-handle data segments */
	r = amdgpu_vce_sw_init(adev, VCE_V2_0_FW_SIZE +
		VCE_V2_0_STACK_SIZE + VCE_V2_0_DATA_SIZE);
	if (r)
		return r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vce.num_rings; i++) {
		enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);

		ring = &adev->vce.ring[i];
		sprintf(ring->name, "vce%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
				     hw_prio, NULL);
		if (r)
			return r;
	}

	r = amdgpu_vce_entity_init(adev);

	return r;
}
448a2e73f56SAlex Deucher 
/* Software teardown: suspend VCE first, then release the common VCE
 * software state.  Returns the first error encountered. */
static int vce_v2_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = amdgpu_vce_suspend(adev);

	if (r)
		return r;

	return amdgpu_vce_sw_fini(adev);
}
460a2e73f56SAlex Deucher 
vce_v2_0_hw_init(void * handle)4615fc3aeebSyanyang1 static int vce_v2_0_hw_init(void *handle)
462a2e73f56SAlex Deucher {
46375c65480SAlex Deucher 	int r, i;
4645fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
465a2e73f56SAlex Deucher 
466beeea981SRex Zhu 	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
467beeea981SRex Zhu 	vce_v2_0_enable_mgcg(adev, true, false);
468a2e73f56SAlex Deucher 
46975c65480SAlex Deucher 	for (i = 0; i < adev->vce.num_rings; i++) {
470c66ed765SAndrey Grodzovsky 		r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
47175c65480SAlex Deucher 		if (r)
472a2e73f56SAlex Deucher 			return r;
473a2e73f56SAlex Deucher 	}
474a2e73f56SAlex Deucher 
475a2e73f56SAlex Deucher 	DRM_INFO("VCE initialized successfully.\n");
476a2e73f56SAlex Deucher 
477a2e73f56SAlex Deucher 	return 0;
478a2e73f56SAlex Deucher }
479a2e73f56SAlex Deucher 
vce_v2_0_hw_fini(void * handle)4805fc3aeebSyanyang1 static int vce_v2_0_hw_fini(void *handle)
481a2e73f56SAlex Deucher {
482859e4659SEvan Quan 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
483859e4659SEvan Quan 
484*d82e2c24SAndrey Grodzovsky 	cancel_delayed_work_sync(&adev->vce.idle_work);
485*d82e2c24SAndrey Grodzovsky 
486*d82e2c24SAndrey Grodzovsky 	return 0;
487*d82e2c24SAndrey Grodzovsky }
488*d82e2c24SAndrey Grodzovsky 
/*
 * Suspend the VCE block: cancel the idle worker, gate power/clocks
 * (via dpm when enabled, otherwise through the IP set_*_state hooks),
 * halt the hardware and save the common VCE state.
 */
static int vce_v2_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;


	/*
	 * Proper cleanups before halting the HW engine:
	 *   - cancel the delayed idle work
	 *   - enable powergating
	 *   - enable clockgating
	 *   - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->vce.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_vce(adev, false);
	} else {
		amdgpu_asic_set_vce_clocks(adev, 0, 0);
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						       AMD_CG_STATE_GATE);
	}

	r = vce_v2_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_vce_suspend(adev);
}
524a2e73f56SAlex Deucher 
/* Resume the VCE block: restore the common VCE state, then re-run the
 * full hardware init.  Returns the first error encountered. */
static int vce_v2_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = amdgpu_vce_resume(adev);

	if (r)
		return r;

	return vce_v2_0_hw_init(adev);
}
536a2e73f56SAlex Deucher 
vce_v2_0_soft_reset(void * handle)5375fc3aeebSyanyang1 static int vce_v2_0_soft_reset(void *handle)
538a2e73f56SAlex Deucher {
5395fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5405fc3aeebSyanyang1 
54121d3cbbeSTom St Denis 	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1);
542a2e73f56SAlex Deucher 	mdelay(5);
543a2e73f56SAlex Deucher 
544a2e73f56SAlex Deucher 	return vce_v2_0_start(adev);
545a2e73f56SAlex Deucher }
546a2e73f56SAlex Deucher 
vce_v2_0_set_interrupt_state(struct amdgpu_device * adev,struct amdgpu_irq_src * source,unsigned type,enum amdgpu_interrupt_state state)547a2e73f56SAlex Deucher static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev,
548a2e73f56SAlex Deucher 					struct amdgpu_irq_src *source,
549a2e73f56SAlex Deucher 					unsigned type,
550a2e73f56SAlex Deucher 					enum amdgpu_interrupt_state state)
551a2e73f56SAlex Deucher {
552a2e73f56SAlex Deucher 	uint32_t val = 0;
553a2e73f56SAlex Deucher 
554a2e73f56SAlex Deucher 	if (state == AMDGPU_IRQ_STATE_ENABLE)
555a2e73f56SAlex Deucher 		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
556a2e73f56SAlex Deucher 
557a2e73f56SAlex Deucher 	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
558a2e73f56SAlex Deucher 	return 0;
559a2e73f56SAlex Deucher }
560a2e73f56SAlex Deucher 
vce_v2_0_process_interrupt(struct amdgpu_device * adev,struct amdgpu_irq_src * source,struct amdgpu_iv_entry * entry)561a2e73f56SAlex Deucher static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
562a2e73f56SAlex Deucher 				      struct amdgpu_irq_src *source,
563a2e73f56SAlex Deucher 				      struct amdgpu_iv_entry *entry)
564a2e73f56SAlex Deucher {
565a2e73f56SAlex Deucher 	DRM_DEBUG("IH: VCE\n");
5667ccf5aa8SAlex Deucher 	switch (entry->src_data[0]) {
567a2e73f56SAlex Deucher 	case 0:
568a2e73f56SAlex Deucher 	case 1:
5697ccf5aa8SAlex Deucher 		amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
570a2e73f56SAlex Deucher 		break;
571a2e73f56SAlex Deucher 	default:
572a2e73f56SAlex Deucher 		DRM_ERROR("Unhandled interrupt: %d %d\n",
5737ccf5aa8SAlex Deucher 			  entry->src_id, entry->src_data[0]);
574a2e73f56SAlex Deucher 		break;
575a2e73f56SAlex Deucher 	}
576a2e73f56SAlex Deucher 
577a2e73f56SAlex Deucher 	return 0;
578a2e73f56SAlex Deucher }
579a2e73f56SAlex Deucher 
vce_v2_0_set_clockgating_state(void * handle,enum amd_clockgating_state state)5805fc3aeebSyanyang1 static int vce_v2_0_set_clockgating_state(void *handle,
5815fc3aeebSyanyang1 					  enum amd_clockgating_state state)
582a2e73f56SAlex Deucher {
583a2e73f56SAlex Deucher 	bool gate = false;
584beeea981SRex Zhu 	bool sw_cg = false;
585beeea981SRex Zhu 
5865fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
587cbb2fe8eSRex Zhu 
588beeea981SRex Zhu 	if (state == AMD_CG_STATE_GATE) {
589a2e73f56SAlex Deucher 		gate = true;
590beeea981SRex Zhu 		sw_cg = true;
591beeea981SRex Zhu 	}
592a2e73f56SAlex Deucher 
593beeea981SRex Zhu 	vce_v2_0_enable_mgcg(adev, gate, sw_cg);
594a2e73f56SAlex Deucher 
595a2e73f56SAlex Deucher 	return 0;
596a2e73f56SAlex Deucher }
597a2e73f56SAlex Deucher 
vce_v2_0_set_powergating_state(void * handle,enum amd_powergating_state state)5985fc3aeebSyanyang1 static int vce_v2_0_set_powergating_state(void *handle,
5995fc3aeebSyanyang1 					  enum amd_powergating_state state)
600a2e73f56SAlex Deucher {
601a2e73f56SAlex Deucher 	/* This doesn't actually powergate the VCE block.
602a2e73f56SAlex Deucher 	 * That's done in the dpm code via the SMC.  This
603a2e73f56SAlex Deucher 	 * just re-inits the block as necessary.  The actual
604a2e73f56SAlex Deucher 	 * gating still happens in the dpm code.  We should
605a2e73f56SAlex Deucher 	 * revisit this when there is a cleaner line between
606a2e73f56SAlex Deucher 	 * the smc and the hw blocks
607a2e73f56SAlex Deucher 	 */
6085fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6095fc3aeebSyanyang1 
6105fc3aeebSyanyang1 	if (state == AMD_PG_STATE_GATE)
611beeea981SRex Zhu 		return vce_v2_0_stop(adev);
612a2e73f56SAlex Deucher 	else
613a2e73f56SAlex Deucher 		return vce_v2_0_start(adev);
614a2e73f56SAlex Deucher }
615a2e73f56SAlex Deucher 
616a1255107SAlex Deucher static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
61788a907d6STom St Denis 	.name = "vce_v2_0",
618a2e73f56SAlex Deucher 	.early_init = vce_v2_0_early_init,
619a2e73f56SAlex Deucher 	.late_init = NULL,
620a2e73f56SAlex Deucher 	.sw_init = vce_v2_0_sw_init,
621a2e73f56SAlex Deucher 	.sw_fini = vce_v2_0_sw_fini,
622a2e73f56SAlex Deucher 	.hw_init = vce_v2_0_hw_init,
623a2e73f56SAlex Deucher 	.hw_fini = vce_v2_0_hw_fini,
624a2e73f56SAlex Deucher 	.suspend = vce_v2_0_suspend,
625a2e73f56SAlex Deucher 	.resume = vce_v2_0_resume,
626a2e73f56SAlex Deucher 	.is_idle = vce_v2_0_is_idle,
627a2e73f56SAlex Deucher 	.wait_for_idle = vce_v2_0_wait_for_idle,
628a2e73f56SAlex Deucher 	.soft_reset = vce_v2_0_soft_reset,
629a2e73f56SAlex Deucher 	.set_clockgating_state = vce_v2_0_set_clockgating_state,
630a2e73f56SAlex Deucher 	.set_powergating_state = vce_v2_0_set_powergating_state,
631a2e73f56SAlex Deucher };
632a2e73f56SAlex Deucher 
633a2e73f56SAlex Deucher static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
63421cd942eSChristian König 	.type = AMDGPU_RING_TYPE_VCE,
63579887142SChristian König 	.align_mask = 0xf,
63679887142SChristian König 	.nop = VCE_CMD_NO_OP,
637536fbf94SKen Wang 	.support_64bit_ptrs = false,
638f61334b5SLeo Liu 	.no_user_fence = true,
639a2e73f56SAlex Deucher 	.get_rptr = vce_v2_0_ring_get_rptr,
640a2e73f56SAlex Deucher 	.get_wptr = vce_v2_0_ring_get_wptr,
641a2e73f56SAlex Deucher 	.set_wptr = vce_v2_0_ring_set_wptr,
642a2e73f56SAlex Deucher 	.parse_cs = amdgpu_vce_ring_parse_cs,
643e12f3d7aSChristian König 	.emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence  x1 no user fence */
644e12f3d7aSChristian König 	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
645a2e73f56SAlex Deucher 	.emit_ib = amdgpu_vce_ring_emit_ib,
646a2e73f56SAlex Deucher 	.emit_fence = amdgpu_vce_ring_emit_fence,
647a2e73f56SAlex Deucher 	.test_ring = amdgpu_vce_ring_test_ring,
648a2e73f56SAlex Deucher 	.test_ib = amdgpu_vce_ring_test_ib,
649edff0e28SJammy Zhou 	.insert_nop = amdgpu_ring_insert_nop,
6509e5d5309SChristian König 	.pad_ib = amdgpu_ring_generic_pad_ib,
651ebff485eSChristian König 	.begin_use = amdgpu_vce_ring_begin_use,
652ebff485eSChristian König 	.end_use = amdgpu_vce_ring_end_use,
653a2e73f56SAlex Deucher };
654a2e73f56SAlex Deucher 
vce_v2_0_set_ring_funcs(struct amdgpu_device * adev)655a2e73f56SAlex Deucher static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
656a2e73f56SAlex Deucher {
65775c65480SAlex Deucher 	int i;
65875c65480SAlex Deucher 
6595d4af988SAlex Deucher 	for (i = 0; i < adev->vce.num_rings; i++) {
66075c65480SAlex Deucher 		adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
6615d4af988SAlex Deucher 		adev->vce.ring[i].me = i;
6625d4af988SAlex Deucher 	}
663a2e73f56SAlex Deucher }
664a2e73f56SAlex Deucher 
665a2e73f56SAlex Deucher static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
666a2e73f56SAlex Deucher 	.set = vce_v2_0_set_interrupt_state,
667a2e73f56SAlex Deucher 	.process = vce_v2_0_process_interrupt,
668a2e73f56SAlex Deucher };
669a2e73f56SAlex Deucher 
vce_v2_0_set_irq_funcs(struct amdgpu_device * adev)670a2e73f56SAlex Deucher static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
671a2e73f56SAlex Deucher {
672a2e73f56SAlex Deucher 	adev->vce.irq.num_types = 1;
673a2e73f56SAlex Deucher 	adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
674a2e73f56SAlex Deucher };
675a1255107SAlex Deucher 
676a1255107SAlex Deucher const struct amdgpu_ip_block_version vce_v2_0_ip_block =
677a1255107SAlex Deucher {
678a1255107SAlex Deucher 		.type = AMD_IP_BLOCK_TYPE_VCE,
679a1255107SAlex Deucher 		.major = 2,
680a1255107SAlex Deucher 		.minor = 0,
681a1255107SAlex Deucher 		.rev = 0,
682a1255107SAlex Deucher 		.funcs = &vce_v2_0_ip_funcs,
683a1255107SAlex Deucher };
684