xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c (revision d50e5c24)
1aaa36a97SAlex Deucher /*
2aaa36a97SAlex Deucher  * Copyright 2014 Advanced Micro Devices, Inc.
3aaa36a97SAlex Deucher  * All Rights Reserved.
4aaa36a97SAlex Deucher  *
5aaa36a97SAlex Deucher  * Permission is hereby granted, free of charge, to any person obtaining a
6aaa36a97SAlex Deucher  * copy of this software and associated documentation files (the
7aaa36a97SAlex Deucher  * "Software"), to deal in the Software without restriction, including
8aaa36a97SAlex Deucher  * without limitation the rights to use, copy, modify, merge, publish,
9aaa36a97SAlex Deucher  * distribute, sub license, and/or sell copies of the Software, and to
10aaa36a97SAlex Deucher  * permit persons to whom the Software is furnished to do so, subject to
11aaa36a97SAlex Deucher  * the following conditions:
12aaa36a97SAlex Deucher  *
13aaa36a97SAlex Deucher  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14aaa36a97SAlex Deucher  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15aaa36a97SAlex Deucher  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16aaa36a97SAlex Deucher  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17aaa36a97SAlex Deucher  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18aaa36a97SAlex Deucher  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19aaa36a97SAlex Deucher  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20aaa36a97SAlex Deucher  *
21aaa36a97SAlex Deucher  * The above copyright notice and this permission notice (including the
22aaa36a97SAlex Deucher  * next paragraph) shall be included in all copies or substantial portions
23aaa36a97SAlex Deucher  * of the Software.
24aaa36a97SAlex Deucher  *
25aaa36a97SAlex Deucher  * Authors: Christian König <christian.koenig@amd.com>
26aaa36a97SAlex Deucher  */
27aaa36a97SAlex Deucher 
28aaa36a97SAlex Deucher #include <linux/firmware.h>
29aaa36a97SAlex Deucher #include <drm/drmP.h>
30aaa36a97SAlex Deucher #include "amdgpu.h"
31aaa36a97SAlex Deucher #include "amdgpu_vce.h"
32aaa36a97SAlex Deucher #include "vid.h"
33aaa36a97SAlex Deucher #include "vce/vce_3_0_d.h"
34aaa36a97SAlex Deucher #include "vce/vce_3_0_sh_mask.h"
35be4f38e2SAlex Deucher #include "oss/oss_3_0_d.h"
36be4f38e2SAlex Deucher #include "oss/oss_3_0_sh_mask.h"
375bbc553aSLeo Liu #include "gca/gfx_8_0_d.h"
386a585777SAlex Deucher #include "smu/smu_7_1_2_d.h"
396a585777SAlex Deucher #include "smu/smu_7_1_2_sh_mask.h"
40115933a5SChunming Zhou #include "gca/gfx_8_0_d.h"
41115933a5SChunming Zhou #include "gca/gfx_8_0_sh_mask.h"
42115933a5SChunming Zhou 
435bbc553aSLeo Liu 
445bbc553aSLeo Liu #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
455bbc553aSLeo Liu #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
4650a1ebc7SRex Zhu #define GRBM_GFX_INDEX__VCE_ALL_PIPE		0x07
4750a1ebc7SRex Zhu 
483c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
493c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
503c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
5150a1ebc7SRex Zhu #define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000
5250a1ebc7SRex Zhu 
53567e6e29Sjimqu #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02
54aaa36a97SAlex Deucher 
55e9822622SLeo Liu #define VCE_V3_0_FW_SIZE	(384 * 1024)
56e9822622SLeo Liu #define VCE_V3_0_STACK_SIZE	(64 * 1024)
57e9822622SLeo Liu #define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
58e9822622SLeo Liu 
59ef6239e0SAlex Deucher #define FW_52_8_3	((52 << 24) | (8 << 16) | (3 << 8))
60ef6239e0SAlex Deucher 
6150a1ebc7SRex Zhu #define GET_VCE_INSTANCE(i)  ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
6250a1ebc7SRex Zhu 					| GRBM_GFX_INDEX__VCE_ALL_PIPE)
6350a1ebc7SRex Zhu 
645bbc553aSLeo Liu static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
65aaa36a97SAlex Deucher static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
66aaa36a97SAlex Deucher static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
67567e6e29Sjimqu static int vce_v3_0_wait_for_idle(void *handle);
68aaa36a97SAlex Deucher 
69aaa36a97SAlex Deucher /**
70aaa36a97SAlex Deucher  * vce_v3_0_ring_get_rptr - get read pointer
71aaa36a97SAlex Deucher  *
72aaa36a97SAlex Deucher  * @ring: amdgpu_ring pointer
73aaa36a97SAlex Deucher  *
74aaa36a97SAlex Deucher  * Returns the current hardware read pointer
75aaa36a97SAlex Deucher  */
76aaa36a97SAlex Deucher static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
77aaa36a97SAlex Deucher {
78aaa36a97SAlex Deucher 	struct amdgpu_device *adev = ring->adev;
79aaa36a97SAlex Deucher 
80aaa36a97SAlex Deucher 	if (ring == &adev->vce.ring[0])
81aaa36a97SAlex Deucher 		return RREG32(mmVCE_RB_RPTR);
826f0359ffSAlex Deucher 	else if (ring == &adev->vce.ring[1])
83aaa36a97SAlex Deucher 		return RREG32(mmVCE_RB_RPTR2);
846f0359ffSAlex Deucher 	else
856f0359ffSAlex Deucher 		return RREG32(mmVCE_RB_RPTR3);
86aaa36a97SAlex Deucher }
87aaa36a97SAlex Deucher 
88aaa36a97SAlex Deucher /**
89aaa36a97SAlex Deucher  * vce_v3_0_ring_get_wptr - get write pointer
90aaa36a97SAlex Deucher  *
91aaa36a97SAlex Deucher  * @ring: amdgpu_ring pointer
92aaa36a97SAlex Deucher  *
93aaa36a97SAlex Deucher  * Returns the current hardware write pointer
94aaa36a97SAlex Deucher  */
95aaa36a97SAlex Deucher static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
96aaa36a97SAlex Deucher {
97aaa36a97SAlex Deucher 	struct amdgpu_device *adev = ring->adev;
98aaa36a97SAlex Deucher 
99aaa36a97SAlex Deucher 	if (ring == &adev->vce.ring[0])
100aaa36a97SAlex Deucher 		return RREG32(mmVCE_RB_WPTR);
1016f0359ffSAlex Deucher 	else if (ring == &adev->vce.ring[1])
102aaa36a97SAlex Deucher 		return RREG32(mmVCE_RB_WPTR2);
1036f0359ffSAlex Deucher 	else
1046f0359ffSAlex Deucher 		return RREG32(mmVCE_RB_WPTR3);
105aaa36a97SAlex Deucher }
106aaa36a97SAlex Deucher 
107aaa36a97SAlex Deucher /**
108aaa36a97SAlex Deucher  * vce_v3_0_ring_set_wptr - set write pointer
109aaa36a97SAlex Deucher  *
110aaa36a97SAlex Deucher  * @ring: amdgpu_ring pointer
111aaa36a97SAlex Deucher  *
112aaa36a97SAlex Deucher  * Commits the write pointer to the hardware
113aaa36a97SAlex Deucher  */
114aaa36a97SAlex Deucher static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
115aaa36a97SAlex Deucher {
116aaa36a97SAlex Deucher 	struct amdgpu_device *adev = ring->adev;
117aaa36a97SAlex Deucher 
118aaa36a97SAlex Deucher 	if (ring == &adev->vce.ring[0])
119aaa36a97SAlex Deucher 		WREG32(mmVCE_RB_WPTR, ring->wptr);
1206f0359ffSAlex Deucher 	else if (ring == &adev->vce.ring[1])
121aaa36a97SAlex Deucher 		WREG32(mmVCE_RB_WPTR2, ring->wptr);
1226f0359ffSAlex Deucher 	else
1236f0359ffSAlex Deucher 		WREG32(mmVCE_RB_WPTR3, ring->wptr);
124aaa36a97SAlex Deucher }
125aaa36a97SAlex Deucher 
/* Set or clear the VCE_CGTT_OVERRIDE bit in VCE_RB_ARB_CTRL, which
 * forces clocks on (override = true) so registers can be programmed
 * safely while clock gating would otherwise throttle them.
 */
static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
	WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
}
1300689a570SEric Huang 
/* Program the VCE medium-grain clock-gating (MGCG) registers.
 *
 * @adev:  amdgpu_device pointer
 * @gated: false = force clocks on (MGCG disabled), true = let the
 *         firmware gate clocks dynamically.
 *
 * The CGTT override is asserted for the duration of the register
 * writes and released at the end, so the sequence takes effect
 * atomically from the hardware's point of view.
 *
 * NOTE(review): the individual bit masks below are raw magic values
 * taken from the register spec; their per-bit meaning is not visible
 * here — confirm against the VCE 3.0 register documentation before
 * changing any of them.
 */
static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
					     bool gated)
{
	u32 data;

	/* Set Override to disable Clock Gating */
	vce_v3_0_override_vce_clock_gating(adev, true);

	/* This function enables MGCG which is controlled by firmware.
	   With the clocks in the gated state the core is still
	   accessible but the firmware will throttle the clocks on the
	   fly as necessary.
	*/
	if (!gated) {
		/* Ungated path: force the various unit clocks on. */
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data |= 0x1ff;
		data &= ~0xef0000;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0x3ff000;
		data &= ~0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x2;
		data &= ~0x00010000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data |= 0x37f;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		/* Force the DMA and register clocks on as well. */
		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
			0x8;
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	} else {
		/* Gated path: hand clock control back to the firmware. */
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data &= ~0x80010;
		data |= 0xe70008;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x10000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data &= ~0x3ff;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		/* Release the FORCEON bits so the DMA clocks may gate. */
		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
			  0x8);
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	}
	vce_v3_0_override_vce_clock_gating(adev, false);
}
1970689a570SEric Huang 
198567e6e29Sjimqu static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
199567e6e29Sjimqu {
200567e6e29Sjimqu 	int i, j;
201567e6e29Sjimqu 
202567e6e29Sjimqu 	for (i = 0; i < 10; ++i) {
203567e6e29Sjimqu 		for (j = 0; j < 100; ++j) {
204b7e2e9f7Sjimqu 			uint32_t status = RREG32(mmVCE_STATUS);
205b7e2e9f7Sjimqu 
206567e6e29Sjimqu 			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
207567e6e29Sjimqu 				return 0;
208567e6e29Sjimqu 			mdelay(10);
209567e6e29Sjimqu 		}
210567e6e29Sjimqu 
211567e6e29Sjimqu 		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
212f3f0ea95STom St Denis 		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
213567e6e29Sjimqu 		mdelay(10);
214f3f0ea95STom St Denis 		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
215567e6e29Sjimqu 		mdelay(10);
216567e6e29Sjimqu 	}
217567e6e29Sjimqu 
218567e6e29Sjimqu 	return -ETIMEDOUT;
219567e6e29Sjimqu }
220567e6e29Sjimqu 
/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the three VCE ring buffers, then brings up each
 * non-harvested VCE instance (memory controller resume, clock
 * enable, ECPU out of reset) and waits for its firmware to load.
 *
 * Returns 0 on success or the error from vce_v3_0_firmware_loaded().
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int idx, r;

	/* Program base/size and pointers for all three rings first;
	 * the ring registers are shared across instances. */
	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, ring->wptr);
	WREG32(mmVCE_RB_WPTR, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, ring->wptr);
	WREG32(mmVCE_RB_WPTR2, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	ring = &adev->vce.ring[2];
	WREG32(mmVCE_RB_RPTR3, ring->wptr);
	WREG32(mmVCE_RB_WPTR3, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);

	/* GRBM_GFX_INDEX selects which VCE instance the following
	 * register accesses hit; serialize against other users. */
	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
		vce_v3_0_mc_resume(adev, idx);
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);

		/* Stoney and newer use a raw mask; older parts have a
		 * named CLK_EN field. */
		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);

		/* Release the ECPU from reset and give it time to boot. */
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
		mdelay(100);

		r = vce_v3_0_firmware_loaded(adev);

		/* clear BUSY flag */
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);

		if (r) {
			DRM_ERROR("VCE not responding, giving up!!!\n");
			mutex_unlock(&adev->grbm_idx_mutex);
			return r;
		}
	}

	/* Restore broadcast GRBM index before releasing the mutex. */
	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
2885bbc553aSLeo Liu 
/* Stop the VCE block: for each non-harvested instance, disable the
 * VCPU clock, hold the ECPU in reset, clear the busy flag and
 * (when supported) turn software clock gating off.
 *
 * Always returns 0.
 */
static int vce_v3_0_stop(struct amdgpu_device *adev)
{
	int idx;

	/* GRBM_GFX_INDEX selects the instance; serialize access. */
	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));

		/* Stoney and newer use a raw mask; older parts have a
		 * named CLK_EN field. */
		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);

		/* hold on ECPU */
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);

		/* clear BUSY flag */
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);

		/* Set Clock-Gating off */
		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
			vce_v3_0_set_vce_sw_clock_gating(adev, false);
	}

	/* Restore broadcast GRBM index before releasing the mutex. */
	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
321aaa36a97SAlex Deucher 
3226a585777SAlex Deucher #define ixVCE_HARVEST_FUSE_MACRO__ADDRESS     0xC0014074
3236a585777SAlex Deucher #define VCE_HARVEST_FUSE_MACRO__SHIFT       27
3246a585777SAlex Deucher #define VCE_HARVEST_FUSE_MACRO__MASK        0x18000000
3256a585777SAlex Deucher 
3266a585777SAlex Deucher static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
3276a585777SAlex Deucher {
3286a585777SAlex Deucher 	u32 tmp;
3296a585777SAlex Deucher 
330c4642a47SJunwei Zhang 	/* Fiji, Stoney, Polaris10, Polaris11, Polaris12 are single pipe */
331cfaba566SSamuel Li 	if ((adev->asic_type == CHIP_FIJI) ||
3321b4eeea5SSonny Jiang 	    (adev->asic_type == CHIP_STONEY) ||
3332cc0c0b5SFlora Cui 	    (adev->asic_type == CHIP_POLARIS10) ||
334c4642a47SJunwei Zhang 	    (adev->asic_type == CHIP_POLARIS11) ||
335c4642a47SJunwei Zhang 	    (adev->asic_type == CHIP_POLARIS12))
3361dab5f06STom St Denis 		return AMDGPU_VCE_HARVEST_VCE1;
337188a9bcdSAlex Deucher 
338188a9bcdSAlex Deucher 	/* Tonga and CZ are dual or single pipe */
3392f7d10b3SJammy Zhou 	if (adev->flags & AMD_IS_APU)
3406a585777SAlex Deucher 		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
3416a585777SAlex Deucher 		       VCE_HARVEST_FUSE_MACRO__MASK) >>
3426a585777SAlex Deucher 			VCE_HARVEST_FUSE_MACRO__SHIFT;
3436a585777SAlex Deucher 	else
3446a585777SAlex Deucher 		tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
3456a585777SAlex Deucher 		       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
3466a585777SAlex Deucher 			CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;
3476a585777SAlex Deucher 
3486a585777SAlex Deucher 	switch (tmp) {
3496a585777SAlex Deucher 	case 1:
3501dab5f06STom St Denis 		return AMDGPU_VCE_HARVEST_VCE0;
3516a585777SAlex Deucher 	case 2:
3521dab5f06STom St Denis 		return AMDGPU_VCE_HARVEST_VCE1;
3536a585777SAlex Deucher 	case 3:
3541dab5f06STom St Denis 		return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
3556a585777SAlex Deucher 	default:
3561dab5f06STom St Denis 		return 0;
3576a585777SAlex Deucher 	}
3586a585777SAlex Deucher }
3596a585777SAlex Deucher 
3605fc3aeebSyanyang1 static int vce_v3_0_early_init(void *handle)
361aaa36a97SAlex Deucher {
3625fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3635fc3aeebSyanyang1 
3646a585777SAlex Deucher 	adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
3656a585777SAlex Deucher 
3666a585777SAlex Deucher 	if ((adev->vce.harvest_config &
3676a585777SAlex Deucher 	     (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
3686a585777SAlex Deucher 	    (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
3696a585777SAlex Deucher 		return -ENOENT;
3706a585777SAlex Deucher 
3716f0359ffSAlex Deucher 	adev->vce.num_rings = 3;
37275c65480SAlex Deucher 
373aaa36a97SAlex Deucher 	vce_v3_0_set_ring_funcs(adev);
374aaa36a97SAlex Deucher 	vce_v3_0_set_irq_funcs(adev);
375aaa36a97SAlex Deucher 
376aaa36a97SAlex Deucher 	return 0;
377aaa36a97SAlex Deucher }
378aaa36a97SAlex Deucher 
3795fc3aeebSyanyang1 static int vce_v3_0_sw_init(void *handle)
380aaa36a97SAlex Deucher {
3815fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
382aaa36a97SAlex Deucher 	struct amdgpu_ring *ring;
38375c65480SAlex Deucher 	int r, i;
384aaa36a97SAlex Deucher 
385aaa36a97SAlex Deucher 	/* VCE */
386aaa36a97SAlex Deucher 	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
387aaa36a97SAlex Deucher 	if (r)
388aaa36a97SAlex Deucher 		return r;
389aaa36a97SAlex Deucher 
390e9822622SLeo Liu 	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
391e9822622SLeo Liu 		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
392aaa36a97SAlex Deucher 	if (r)
393aaa36a97SAlex Deucher 		return r;
394aaa36a97SAlex Deucher 
395ef6239e0SAlex Deucher 	/* 52.8.3 required for 3 ring support */
396ef6239e0SAlex Deucher 	if (adev->vce.fw_version < FW_52_8_3)
397ef6239e0SAlex Deucher 		adev->vce.num_rings = 2;
398ef6239e0SAlex Deucher 
399aaa36a97SAlex Deucher 	r = amdgpu_vce_resume(adev);
400aaa36a97SAlex Deucher 	if (r)
401aaa36a97SAlex Deucher 		return r;
402aaa36a97SAlex Deucher 
40375c65480SAlex Deucher 	for (i = 0; i < adev->vce.num_rings; i++) {
40475c65480SAlex Deucher 		ring = &adev->vce.ring[i];
40575c65480SAlex Deucher 		sprintf(ring->name, "vce%d", i);
40679887142SChristian König 		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
407aaa36a97SAlex Deucher 		if (r)
408aaa36a97SAlex Deucher 			return r;
40975c65480SAlex Deucher 	}
410aaa36a97SAlex Deucher 
411aaa36a97SAlex Deucher 	return r;
412aaa36a97SAlex Deucher }
413aaa36a97SAlex Deucher 
/* Software teardown: suspend VCE, then free the common VCE state.
 * Returns 0 on success or the first error encountered.
 */
static int vce_v3_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return amdgpu_vce_sw_fini(adev);
}
429aaa36a97SAlex Deucher 
4305fc3aeebSyanyang1 static int vce_v3_0_hw_init(void *handle)
431aaa36a97SAlex Deucher {
432691ca86aSTom St Denis 	int r, i;
4335fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
434aaa36a97SAlex Deucher 
4356fc11b0eSRex Zhu 	vce_v3_0_override_vce_clock_gating(adev, true);
4366fc11b0eSRex Zhu 	if (!(adev->flags & AMD_IS_APU))
4376fc11b0eSRex Zhu 		amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
438aaa36a97SAlex Deucher 
43975c65480SAlex Deucher 	for (i = 0; i < adev->vce.num_rings; i++)
44075c65480SAlex Deucher 		adev->vce.ring[i].ready = false;
441aaa36a97SAlex Deucher 
44275c65480SAlex Deucher 	for (i = 0; i < adev->vce.num_rings; i++) {
443691ca86aSTom St Denis 		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
444691ca86aSTom St Denis 		if (r)
445aaa36a97SAlex Deucher 			return r;
446691ca86aSTom St Denis 		else
447691ca86aSTom St Denis 			adev->vce.ring[i].ready = true;
448aaa36a97SAlex Deucher 	}
449aaa36a97SAlex Deucher 
450aaa36a97SAlex Deucher 	DRM_INFO("VCE initialized successfully.\n");
451aaa36a97SAlex Deucher 
452aaa36a97SAlex Deucher 	return 0;
453aaa36a97SAlex Deucher }
454aaa36a97SAlex Deucher 
/* Hardware teardown: wait for the block to go idle, then stop it.
 * Returns 0 on success or the error from the idle wait.
 */
static int vce_v3_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = vce_v3_0_wait_for_idle(handle);

	if (r)
		return r;

	return vce_v3_0_stop(adev);
}
466aaa36a97SAlex Deucher 
/* Suspend: shut the hardware down, then save common VCE state.
 * Returns 0 on success or the first error encountered.
 */
static int vce_v3_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_vce_suspend(adev);
}
482aaa36a97SAlex Deucher 
/* Resume: restore common VCE state, then re-initialize the hardware.
 * Returns 0 on success or the first error encountered.
 */
static int vce_v3_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	return vce_v3_0_hw_init(adev);
}
498aaa36a97SAlex Deucher 
/* Program the memory controller / LMI registers of the currently
 * selected VCE instance (selected earlier via GRBM_GFX_INDEX) and
 * point the VCPU caches at the firmware, stack and data regions of
 * the shared VCE BO.
 *
 * @adev: amdgpu_device pointer
 * @idx:  VCE instance index (0 or 1); instance 1 uses the second
 *        stack/data region within the BO.
 */
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	/* Clock-gating setup before touching the LMI. */
	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);

	/* LMI (local memory interface) configuration. */
	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);
	WREG32_OR(mmVCE_VCPU_CNTL, 0x00100000);

	/* Stoney and newer have three per-cache 40-bit BARs; older
	 * parts share a single BAR. Address is in 256-byte units. */
	if (adev->asic_type >= CHIP_STONEY) {
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
	} else
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
	/* Cache 0 always maps the firmware image. */
	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	/* NOTE(review): instance 0 masks offsets with 0x7fffffff while
	 * instance 1 uses 0xfffffff — presumably a per-instance offset
	 * field width; confirm against the VCE 3.0 register spec. */
	if (idx == 0) {
		/* Instance 0: first stack/data region follows the FW. */
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		/* Instance 1: skip over instance 0's stack/data region. */
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}
549aaa36a97SAlex Deucher 
5505fc3aeebSyanyang1 static bool vce_v3_0_is_idle(void *handle)
551aaa36a97SAlex Deucher {
5525fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
553be4f38e2SAlex Deucher 	u32 mask = 0;
5545fc3aeebSyanyang1 
55574af1276STom St Denis 	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
55674af1276STom St Denis 	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;
557be4f38e2SAlex Deucher 
558be4f38e2SAlex Deucher 	return !(RREG32(mmSRBM_STATUS2) & mask);
559aaa36a97SAlex Deucher }
560aaa36a97SAlex Deucher 
5615fc3aeebSyanyang1 static int vce_v3_0_wait_for_idle(void *handle)
562aaa36a97SAlex Deucher {
563aaa36a97SAlex Deucher 	unsigned i;
5645fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
565be4f38e2SAlex Deucher 
56692988e60STom St Denis 	for (i = 0; i < adev->usec_timeout; i++)
56792988e60STom St Denis 		if (vce_v3_0_is_idle(handle))
568aaa36a97SAlex Deucher 			return 0;
56992988e60STom St Denis 
570aaa36a97SAlex Deucher 	return -ETIMEDOUT;
571aaa36a97SAlex Deucher }
572aaa36a97SAlex Deucher 
573ac8e3f30SRex Zhu #define  VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK  0x00000008L   /* AUTO_BUSY */
574ac8e3f30SRex Zhu #define  VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK   0x00000010L   /* RB0_BUSY */
575ac8e3f30SRex Zhu #define  VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK   0x00000020L   /* RB1_BUSY */
576ac8e3f30SRex Zhu #define  AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
577ac8e3f30SRex Zhu 				      VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
578115933a5SChunming Zhou 
/* Check whether either VCE instance is stuck busy and therefore
 * needs a soft reset. Records the required SRBM soft-reset bits in
 * adev->vce.srbm_soft_reset and returns true when a reset is needed.
 */
static bool vce_v3_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	/* According to VCE team , we should use VCE_STATUS instead
	 * SRBM_STATUS.VCE_BUSY bit for busy status checking.
	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
	 * instance's registers are accessed
	 * (0 for 1st instance, 10 for 2nd instance).
	 *
	 *VCE_STATUS
	 *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
	 *|----+----+-----------+----+----+----+----------+---------+----|
	 *|bit8|bit7|    bit6   |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
	 *
	 * VCE team suggest use bit 3--bit 6 for busy status check
	 */
	mutex_lock(&adev->grbm_idx_mutex);
	/* Probe instance 0; a hang on either instance resets both. */
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	/* Probe instance 1. */
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	/* Leave the index pointing back at instance 0. */
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	mutex_unlock(&adev->grbm_idx_mutex);

	if (srbm_soft_reset) {
		adev->vce.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->vce.srbm_soft_reset = 0;
		return false;
	}
}
619115933a5SChunming Zhou 
/* Apply the SRBM soft reset recorded by vce_v3_0_check_soft_reset():
 * pulse the reset bits in SRBM_SOFT_RESET (set, settle, clear,
 * settle). A no-op when no reset was flagged. Always returns 0.
 */
static int vce_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->vce.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->vce.srbm_soft_reset;

	/* srbm_soft_reset is known nonzero here; the guard is kept
	 * for symmetry with the other soft-reset handlers. */
	if (srbm_soft_reset) {
		u32 tmp;

		/* Assert the reset bits and read back to post the write. */
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		/* Deassert the reset bits. */
		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}
650115933a5SChunming Zhou 
651115933a5SChunming Zhou static int vce_v3_0_pre_soft_reset(void *handle)
652115933a5SChunming Zhou {
653115933a5SChunming Zhou 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
654115933a5SChunming Zhou 
655da146d3bSAlex Deucher 	if (!adev->vce.srbm_soft_reset)
656115933a5SChunming Zhou 		return 0;
657115933a5SChunming Zhou 
658aaa36a97SAlex Deucher 	mdelay(5);
659aaa36a97SAlex Deucher 
660115933a5SChunming Zhou 	return vce_v3_0_suspend(adev);
661115933a5SChunming Zhou }
662115933a5SChunming Zhou 
663115933a5SChunming Zhou 
664115933a5SChunming Zhou static int vce_v3_0_post_soft_reset(void *handle)
665115933a5SChunming Zhou {
666115933a5SChunming Zhou 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
667115933a5SChunming Zhou 
668da146d3bSAlex Deucher 	if (!adev->vce.srbm_soft_reset)
669115933a5SChunming Zhou 		return 0;
670115933a5SChunming Zhou 
671115933a5SChunming Zhou 	mdelay(5);
672115933a5SChunming Zhou 
673115933a5SChunming Zhou 	return vce_v3_0_resume(adev);
674aaa36a97SAlex Deucher }
675aaa36a97SAlex Deucher 
676aaa36a97SAlex Deucher static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
677aaa36a97SAlex Deucher 					struct amdgpu_irq_src *source,
678aaa36a97SAlex Deucher 					unsigned type,
679aaa36a97SAlex Deucher 					enum amdgpu_interrupt_state state)
680aaa36a97SAlex Deucher {
681aaa36a97SAlex Deucher 	uint32_t val = 0;
682aaa36a97SAlex Deucher 
683aaa36a97SAlex Deucher 	if (state == AMDGPU_IRQ_STATE_ENABLE)
684aaa36a97SAlex Deucher 		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
685aaa36a97SAlex Deucher 
686aaa36a97SAlex Deucher 	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
687aaa36a97SAlex Deucher 	return 0;
688aaa36a97SAlex Deucher }
689aaa36a97SAlex Deucher 
/*
 * vce_v3_0_process_interrupt - handle a VCE interrupt vector entry
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source descriptor (unused)
 * @entry: decoded interrupt vector entry
 *
 * Acknowledges the trap interrupt, then signals fence completion on the
 * ring selected by entry->src_data (0..2 map to the VCE rings); any
 * other value is logged as unhandled.  Always returns 0.
 */
static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");

	/* ack the trap interrupt before dispatching */
	WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);

	switch (entry->src_data) {
	case 0:
	case 1:
	case 2:
		/* src_data selects which VCE ring's fences completed */
		amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data);
		break;
	}

	return 0;
}
712aaa36a97SAlex Deucher 
7135fc3aeebSyanyang1 static int vce_v3_0_set_clockgating_state(void *handle,
7145fc3aeebSyanyang1 					  enum amd_clockgating_state state)
715aaa36a97SAlex Deucher {
7160689a570SEric Huang 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7170689a570SEric Huang 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
7180689a570SEric Huang 	int i;
7190689a570SEric Huang 
720e3b04bc7SAlex Deucher 	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
7210689a570SEric Huang 		return 0;
7220689a570SEric Huang 
7230689a570SEric Huang 	mutex_lock(&adev->grbm_idx_mutex);
7240689a570SEric Huang 	for (i = 0; i < 2; i++) {
7250689a570SEric Huang 		/* Program VCE Instance 0 or 1 if not harvested */
7260689a570SEric Huang 		if (adev->vce.harvest_config & (1 << i))
7270689a570SEric Huang 			continue;
7280689a570SEric Huang 
72950a1ebc7SRex Zhu 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));
7300689a570SEric Huang 
7310689a570SEric Huang 		if (enable) {
7320689a570SEric Huang 			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
7330689a570SEric Huang 			uint32_t data = RREG32(mmVCE_CLOCK_GATING_A);
7340689a570SEric Huang 			data &= ~(0xf | 0xff0);
7350689a570SEric Huang 			data |= ((0x0 << 0) | (0x04 << 4));
7360689a570SEric Huang 			WREG32(mmVCE_CLOCK_GATING_A, data);
7370689a570SEric Huang 
7380689a570SEric Huang 			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
7390689a570SEric Huang 			data = RREG32(mmVCE_UENC_CLOCK_GATING);
7400689a570SEric Huang 			data &= ~(0xf | 0xff0);
7410689a570SEric Huang 			data |= ((0x0 << 0) | (0x04 << 4));
7420689a570SEric Huang 			WREG32(mmVCE_UENC_CLOCK_GATING, data);
7430689a570SEric Huang 		}
7440689a570SEric Huang 
7450689a570SEric Huang 		vce_v3_0_set_vce_sw_clock_gating(adev, enable);
7460689a570SEric Huang 	}
7470689a570SEric Huang 
74850a1ebc7SRex Zhu 	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
7490689a570SEric Huang 	mutex_unlock(&adev->grbm_idx_mutex);
7500689a570SEric Huang 
751aaa36a97SAlex Deucher 	return 0;
752aaa36a97SAlex Deucher }
753aaa36a97SAlex Deucher 
7545fc3aeebSyanyang1 static int vce_v3_0_set_powergating_state(void *handle,
7555fc3aeebSyanyang1 					  enum amd_powergating_state state)
756aaa36a97SAlex Deucher {
757aaa36a97SAlex Deucher 	/* This doesn't actually powergate the VCE block.
758aaa36a97SAlex Deucher 	 * That's done in the dpm code via the SMC.  This
759aaa36a97SAlex Deucher 	 * just re-inits the block as necessary.  The actual
760aaa36a97SAlex Deucher 	 * gating still happens in the dpm code.  We should
761aaa36a97SAlex Deucher 	 * revisit this when there is a cleaner line between
762aaa36a97SAlex Deucher 	 * the smc and the hw blocks
763aaa36a97SAlex Deucher 	 */
7645fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
765c79b5561SHuang Rui 	int ret = 0;
7665fc3aeebSyanyang1 
767c79b5561SHuang Rui 	if (state == AMD_PG_STATE_GATE) {
7686fc11b0eSRex Zhu 		ret = vce_v3_0_stop(adev);
7696fc11b0eSRex Zhu 		if (ret)
7706fc11b0eSRex Zhu 			goto out;
771c79b5561SHuang Rui 		adev->vce.is_powergated = true;
772c79b5561SHuang Rui 	} else {
773c79b5561SHuang Rui 		ret = vce_v3_0_start(adev);
774c79b5561SHuang Rui 		if (ret)
775c79b5561SHuang Rui 			goto out;
776c79b5561SHuang Rui 		adev->vce.is_powergated = false;
777c79b5561SHuang Rui 	}
778c79b5561SHuang Rui 
779c79b5561SHuang Rui out:
780c79b5561SHuang Rui 	return ret;
781c79b5561SHuang Rui }
782c79b5561SHuang Rui 
/*
 * vce_v3_0_get_clockgating_state - report active VCE clockgating flags
 *
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 * @flags: out-param; AMD_CG_SUPPORT_VCE_MGCG is OR'ed in when active
 *
 * Skips the register read while the block is powergated (the registers
 * would not be accessible) and only inspects instance 0.  Serialized by
 * pm.mutex against concurrent powergating changes.
 */
static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->vce.is_powergated) {
		DRM_INFO("Cannot get clockgating state when VCE is powergated.\n");
		goto out;
	}

	/* select VCE instance 0 before reading its registers */
	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);

	/* AMD_CG_SUPPORT_VCE_MGCG */
	data = RREG32(mmVCE_CLOCK_GATING_A);
	if (data & (0x04 << 4))
		*flags |= AMD_CG_SUPPORT_VCE_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}
805aaa36a97SAlex Deucher 
806ea4a8c1dSMaruthi Srinivas Bayyavarapu static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
807ea4a8c1dSMaruthi Srinivas Bayyavarapu 		struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
808ea4a8c1dSMaruthi Srinivas Bayyavarapu {
809ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
810ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, vm_id);
811ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
812ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
813ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, ib->length_dw);
814ea4a8c1dSMaruthi Srinivas Bayyavarapu }
815ea4a8c1dSMaruthi Srinivas Bayyavarapu 
/*
 * vce_v3_0_emit_vm_flush - emit a page-table update + TLB flush
 *
 * @ring: VCE ring
 * @vm_id: VMID whose mappings changed
 * @pd_addr: new page-directory base address
 *
 * Points the VMID at its new page directory (PTB takes the address in
 * 4K-page units, hence the >> 12), then invalidates that VMID's TLB
 * entries and terminates the packet stream with VCE_CMD_END.
 */
static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
			 unsigned int vm_id, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vm_id);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vm_id);
	amdgpu_ring_write(ring, VCE_CMD_END);
}
827ea4a8c1dSMaruthi Srinivas Bayyavarapu 
/*
 * vce_v3_0_emit_pipeline_sync - wait for all prior work on the ring
 *
 * @ring: VCE ring
 *
 * Emits a WAIT_GE on the ring's own fence address against the latest
 * synced sequence number, so following packets only run once every
 * previously emitted fence has signaled.  Four dwords.
 */
static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, VCE_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}
838ea4a8c1dSMaruthi Srinivas Bayyavarapu 
/* amd_ip_funcs dispatch table binding the VCE 3.x IP block to its
 * lifecycle, reset, clockgating and powergating callbacks. */
static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.name = "vce_v3_0",
	.early_init = vce_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v3_0_sw_init,
	.sw_fini = vce_v3_0_sw_fini,
	.hw_init = vce_v3_0_hw_init,
	.hw_fini = vce_v3_0_hw_fini,
	.suspend = vce_v3_0_suspend,
	.resume = vce_v3_0_resume,
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.check_soft_reset = vce_v3_0_check_soft_reset,
	.pre_soft_reset = vce_v3_0_pre_soft_reset,
	.soft_reset = vce_v3_0_soft_reset,
	.post_soft_reset = vce_v3_0_post_soft_reset,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
	.get_clockgating_state = vce_v3_0_get_clockgating_state,
};
859aaa36a97SAlex Deucher 
860ea4a8c1dSMaruthi Srinivas Bayyavarapu static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
86121cd942eSChristian König 	.type = AMDGPU_RING_TYPE_VCE,
86279887142SChristian König 	.align_mask = 0xf,
86379887142SChristian König 	.nop = VCE_CMD_NO_OP,
864aaa36a97SAlex Deucher 	.get_rptr = vce_v3_0_ring_get_rptr,
865aaa36a97SAlex Deucher 	.get_wptr = vce_v3_0_ring_get_wptr,
866aaa36a97SAlex Deucher 	.set_wptr = vce_v3_0_ring_set_wptr,
867aaa36a97SAlex Deucher 	.parse_cs = amdgpu_vce_ring_parse_cs,
868e12f3d7aSChristian König 	.emit_frame_size =
869e12f3d7aSChristian König 		4 + /* vce_v3_0_emit_pipeline_sync */
870e12f3d7aSChristian König 		6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
871e12f3d7aSChristian König 	.emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
872aaa36a97SAlex Deucher 	.emit_ib = amdgpu_vce_ring_emit_ib,
873aaa36a97SAlex Deucher 	.emit_fence = amdgpu_vce_ring_emit_fence,
874aaa36a97SAlex Deucher 	.test_ring = amdgpu_vce_ring_test_ring,
875aaa36a97SAlex Deucher 	.test_ib = amdgpu_vce_ring_test_ib,
876edff0e28SJammy Zhou 	.insert_nop = amdgpu_ring_insert_nop,
8779e5d5309SChristian König 	.pad_ib = amdgpu_ring_generic_pad_ib,
878ebff485eSChristian König 	.begin_use = amdgpu_vce_ring_begin_use,
879ebff485eSChristian König 	.end_use = amdgpu_vce_ring_end_use,
880aaa36a97SAlex Deucher };
881aaa36a97SAlex Deucher 
882ea4a8c1dSMaruthi Srinivas Bayyavarapu static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
88321cd942eSChristian König 	.type = AMDGPU_RING_TYPE_VCE,
88479887142SChristian König 	.align_mask = 0xf,
88579887142SChristian König 	.nop = VCE_CMD_NO_OP,
886ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.get_rptr = vce_v3_0_ring_get_rptr,
887ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.get_wptr = vce_v3_0_ring_get_wptr,
888ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.set_wptr = vce_v3_0_ring_set_wptr,
88998614701SChristian König 	.parse_cs = amdgpu_vce_ring_parse_cs_vm,
890e12f3d7aSChristian König 	.emit_frame_size =
891e12f3d7aSChristian König 		6 + /* vce_v3_0_emit_vm_flush */
892e12f3d7aSChristian König 		4 + /* vce_v3_0_emit_pipeline_sync */
893e12f3d7aSChristian König 		6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
894e12f3d7aSChristian König 	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
895ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.emit_ib = vce_v3_0_ring_emit_ib,
896ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.emit_vm_flush = vce_v3_0_emit_vm_flush,
897ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
898ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.emit_fence = amdgpu_vce_ring_emit_fence,
899ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.test_ring = amdgpu_vce_ring_test_ring,
900ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.test_ib = amdgpu_vce_ring_test_ib,
901ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.insert_nop = amdgpu_ring_insert_nop,
902ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.pad_ib = amdgpu_ring_generic_pad_ib,
903ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.begin_use = amdgpu_vce_ring_begin_use,
904ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.end_use = amdgpu_vce_ring_end_use,
905ea4a8c1dSMaruthi Srinivas Bayyavarapu };
906ea4a8c1dSMaruthi Srinivas Bayyavarapu 
907aaa36a97SAlex Deucher static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
908aaa36a97SAlex Deucher {
90975c65480SAlex Deucher 	int i;
91075c65480SAlex Deucher 
911ea4a8c1dSMaruthi Srinivas Bayyavarapu 	if (adev->asic_type >= CHIP_STONEY) {
91275c65480SAlex Deucher 		for (i = 0; i < adev->vce.num_rings; i++)
913ea4a8c1dSMaruthi Srinivas Bayyavarapu 			adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
914ea4a8c1dSMaruthi Srinivas Bayyavarapu 		DRM_INFO("VCE enabled in VM mode\n");
915ea4a8c1dSMaruthi Srinivas Bayyavarapu 	} else {
916ea4a8c1dSMaruthi Srinivas Bayyavarapu 		for (i = 0; i < adev->vce.num_rings; i++)
917ea4a8c1dSMaruthi Srinivas Bayyavarapu 			adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
918ea4a8c1dSMaruthi Srinivas Bayyavarapu 		DRM_INFO("VCE enabled in physical mode\n");
919ea4a8c1dSMaruthi Srinivas Bayyavarapu 	}
920aaa36a97SAlex Deucher }
921aaa36a97SAlex Deucher 
/* Interrupt source callbacks: mask/unmask and service the VCE trap. */
static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
	.set = vce_v3_0_set_interrupt_state,
	.process = vce_v3_0_process_interrupt,
};
926aaa36a97SAlex Deucher 
927aaa36a97SAlex Deucher static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
928aaa36a97SAlex Deucher {
929aaa36a97SAlex Deucher 	adev->vce.irq.num_types = 1;
930aaa36a97SAlex Deucher 	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
931aaa36a97SAlex Deucher };
932a1255107SAlex Deucher 
/* IP-block descriptor for VCE 3.0 hardware. */
const struct amdgpu_ip_block_version vce_v3_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &vce_v3_0_ip_funcs,
};
941a1255107SAlex Deucher 
/* IP-block descriptor for VCE 3.1 hardware (same callbacks as 3.0). */
const struct amdgpu_ip_block_version vce_v3_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &vce_v3_0_ip_funcs,
};
950a1255107SAlex Deucher 
/* IP-block descriptor for VCE 3.4 hardware (same callbacks as 3.0). */
const struct amdgpu_ip_block_version vce_v3_4_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 3,
	.minor = 4,
	.rev = 0,
	.funcs = &vce_v3_0_ip_funcs,
};
959