xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c (revision 1c622002)
1aaa36a97SAlex Deucher /*
2aaa36a97SAlex Deucher  * Copyright 2014 Advanced Micro Devices, Inc.
3aaa36a97SAlex Deucher  * All Rights Reserved.
4aaa36a97SAlex Deucher  *
5aaa36a97SAlex Deucher  * Permission is hereby granted, free of charge, to any person obtaining a
6aaa36a97SAlex Deucher  * copy of this software and associated documentation files (the
7aaa36a97SAlex Deucher  * "Software"), to deal in the Software without restriction, including
8aaa36a97SAlex Deucher  * without limitation the rights to use, copy, modify, merge, publish,
9aaa36a97SAlex Deucher  * distribute, sub license, and/or sell copies of the Software, and to
10aaa36a97SAlex Deucher  * permit persons to whom the Software is furnished to do so, subject to
11aaa36a97SAlex Deucher  * the following conditions:
12aaa36a97SAlex Deucher  *
13aaa36a97SAlex Deucher  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14aaa36a97SAlex Deucher  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15aaa36a97SAlex Deucher  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16aaa36a97SAlex Deucher  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17aaa36a97SAlex Deucher  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18aaa36a97SAlex Deucher  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19aaa36a97SAlex Deucher  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20aaa36a97SAlex Deucher  *
21aaa36a97SAlex Deucher  * The above copyright notice and this permission notice (including the
22aaa36a97SAlex Deucher  * next paragraph) shall be included in all copies or substantial portions
23aaa36a97SAlex Deucher  * of the Software.
24aaa36a97SAlex Deucher  *
25aaa36a97SAlex Deucher  * Authors: Christian König <christian.koenig@amd.com>
26aaa36a97SAlex Deucher  */
27aaa36a97SAlex Deucher 
28aaa36a97SAlex Deucher #include <linux/firmware.h>
29aaa36a97SAlex Deucher #include <drm/drmP.h>
30aaa36a97SAlex Deucher #include "amdgpu.h"
31aaa36a97SAlex Deucher #include "amdgpu_vce.h"
32aaa36a97SAlex Deucher #include "vid.h"
33aaa36a97SAlex Deucher #include "vce/vce_3_0_d.h"
34aaa36a97SAlex Deucher #include "vce/vce_3_0_sh_mask.h"
35be4f38e2SAlex Deucher #include "oss/oss_3_0_d.h"
36be4f38e2SAlex Deucher #include "oss/oss_3_0_sh_mask.h"
375bbc553aSLeo Liu #include "gca/gfx_8_0_d.h"
386a585777SAlex Deucher #include "smu/smu_7_1_2_d.h"
396a585777SAlex Deucher #include "smu/smu_7_1_2_sh_mask.h"
40115933a5SChunming Zhou #include "gca/gfx_8_0_d.h"
41115933a5SChunming Zhou #include "gca/gfx_8_0_sh_mask.h"
42115933a5SChunming Zhou 
435bbc553aSLeo Liu 
445bbc553aSLeo Liu #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
455bbc553aSLeo Liu #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
4650a1ebc7SRex Zhu #define GRBM_GFX_INDEX__VCE_ALL_PIPE		0x07
4750a1ebc7SRex Zhu 
483c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
493c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
503c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
5150a1ebc7SRex Zhu #define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000
5250a1ebc7SRex Zhu 
53567e6e29Sjimqu #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02
54aaa36a97SAlex Deucher 
55e9822622SLeo Liu #define VCE_V3_0_FW_SIZE	(384 * 1024)
56e9822622SLeo Liu #define VCE_V3_0_STACK_SIZE	(64 * 1024)
57e9822622SLeo Liu #define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
58e9822622SLeo Liu 
59ef6239e0SAlex Deucher #define FW_52_8_3	((52 << 24) | (8 << 16) | (3 << 8))
60ef6239e0SAlex Deucher 
6150a1ebc7SRex Zhu #define GET_VCE_INSTANCE(i)  ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
6250a1ebc7SRex Zhu 					| GRBM_GFX_INDEX__VCE_ALL_PIPE)
6350a1ebc7SRex Zhu 
645bbc553aSLeo Liu static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
65aaa36a97SAlex Deucher static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
66aaa36a97SAlex Deucher static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
67567e6e29Sjimqu static int vce_v3_0_wait_for_idle(void *handle);
6826679899SRex Zhu static int vce_v3_0_set_clockgating_state(void *handle,
6926679899SRex Zhu 					  enum amd_clockgating_state state);
70aaa36a97SAlex Deucher /**
71aaa36a97SAlex Deucher  * vce_v3_0_ring_get_rptr - get read pointer
72aaa36a97SAlex Deucher  *
73aaa36a97SAlex Deucher  * @ring: amdgpu_ring pointer
74aaa36a97SAlex Deucher  *
75aaa36a97SAlex Deucher  * Returns the current hardware read pointer
76aaa36a97SAlex Deucher  */
77536fbf94SKen Wang static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
78aaa36a97SAlex Deucher {
79aaa36a97SAlex Deucher 	struct amdgpu_device *adev = ring->adev;
80aaa36a97SAlex Deucher 
81aaa36a97SAlex Deucher 	if (ring == &adev->vce.ring[0])
82aaa36a97SAlex Deucher 		return RREG32(mmVCE_RB_RPTR);
836f0359ffSAlex Deucher 	else if (ring == &adev->vce.ring[1])
84aaa36a97SAlex Deucher 		return RREG32(mmVCE_RB_RPTR2);
856f0359ffSAlex Deucher 	else
866f0359ffSAlex Deucher 		return RREG32(mmVCE_RB_RPTR3);
87aaa36a97SAlex Deucher }
88aaa36a97SAlex Deucher 
89aaa36a97SAlex Deucher /**
90aaa36a97SAlex Deucher  * vce_v3_0_ring_get_wptr - get write pointer
91aaa36a97SAlex Deucher  *
92aaa36a97SAlex Deucher  * @ring: amdgpu_ring pointer
93aaa36a97SAlex Deucher  *
94aaa36a97SAlex Deucher  * Returns the current hardware write pointer
95aaa36a97SAlex Deucher  */
96536fbf94SKen Wang static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
97aaa36a97SAlex Deucher {
98aaa36a97SAlex Deucher 	struct amdgpu_device *adev = ring->adev;
99aaa36a97SAlex Deucher 
100aaa36a97SAlex Deucher 	if (ring == &adev->vce.ring[0])
101aaa36a97SAlex Deucher 		return RREG32(mmVCE_RB_WPTR);
1026f0359ffSAlex Deucher 	else if (ring == &adev->vce.ring[1])
103aaa36a97SAlex Deucher 		return RREG32(mmVCE_RB_WPTR2);
1046f0359ffSAlex Deucher 	else
1056f0359ffSAlex Deucher 		return RREG32(mmVCE_RB_WPTR3);
106aaa36a97SAlex Deucher }
107aaa36a97SAlex Deucher 
108aaa36a97SAlex Deucher /**
109aaa36a97SAlex Deucher  * vce_v3_0_ring_set_wptr - set write pointer
110aaa36a97SAlex Deucher  *
111aaa36a97SAlex Deucher  * @ring: amdgpu_ring pointer
112aaa36a97SAlex Deucher  *
113aaa36a97SAlex Deucher  * Commits the write pointer to the hardware
114aaa36a97SAlex Deucher  */
115aaa36a97SAlex Deucher static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
116aaa36a97SAlex Deucher {
117aaa36a97SAlex Deucher 	struct amdgpu_device *adev = ring->adev;
118aaa36a97SAlex Deucher 
119aaa36a97SAlex Deucher 	if (ring == &adev->vce.ring[0])
120536fbf94SKen Wang 		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
1216f0359ffSAlex Deucher 	else if (ring == &adev->vce.ring[1])
122536fbf94SKen Wang 		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
1236f0359ffSAlex Deucher 	else
124536fbf94SKen Wang 		WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
125aaa36a97SAlex Deucher }
126aaa36a97SAlex Deucher 
/* Force (or release) the VCE clock-gating override bit so that the
 * software clock-gating register writes that follow take effect
 * regardless of the firmware-managed gating state. */
static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
	WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
}
1310689a570SEric Huang 
/* Program the VCE software/medium-grain clock-gating registers.
 * @gated == false: force the relevant clocks on; @gated == true: hand
 * throttling control back to the firmware.  The magic masks come from
 * the hardware programming guide — do not "simplify" them.
 * NOTE(review): bit meanings of 0x1ff/0xef0000 etc. are not derivable
 * from this file; confirm against the VCE register spec before editing. */
static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
					     bool gated)
{
	u32 data;

	/* Set Override to disable Clock Gating */
	vce_v3_0_override_vce_clock_gating(adev, true);

	/* This function enables MGCG which is controlled by firmware.
	   With the clocks in the gated state the core is still
	   accessible but the firmware will throttle the clocks on the
	   fly as necessary.
	*/
	if (!gated) {
		/* Ungated path: force clocks on in each gating block. */
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data |= 0x1ff;
		data &= ~0xef0000;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0x3ff000;
		data &= ~0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x2;
		data &= ~0x00010000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data |= 0x37f;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		/* Force the UENC DMA write/read/register clocks on. */
		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
			0x8;
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	} else {
		/* Gated path: undo the force-on bits set above. */
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data &= ~0x80010;
		data |= 0xe70008;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x10000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data &= ~0x3ff;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
			  0x8);
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	}
	/* Release the override so the firmware regains gating control. */
	vce_v3_0_override_vce_clock_gating(adev, false);
}
1980689a570SEric Huang 
199567e6e29Sjimqu static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
200567e6e29Sjimqu {
201567e6e29Sjimqu 	int i, j;
202567e6e29Sjimqu 
203567e6e29Sjimqu 	for (i = 0; i < 10; ++i) {
204567e6e29Sjimqu 		for (j = 0; j < 100; ++j) {
205b7e2e9f7Sjimqu 			uint32_t status = RREG32(mmVCE_STATUS);
206b7e2e9f7Sjimqu 
207567e6e29Sjimqu 			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
208567e6e29Sjimqu 				return 0;
209567e6e29Sjimqu 			mdelay(10);
210567e6e29Sjimqu 		}
211567e6e29Sjimqu 
212567e6e29Sjimqu 		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
213f3f0ea95STom St Denis 		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
214567e6e29Sjimqu 		mdelay(10);
215f3f0ea95STom St Denis 		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
216567e6e29Sjimqu 		mdelay(10);
217567e6e29Sjimqu 	}
218567e6e29Sjimqu 
219567e6e29Sjimqu 	return -ETIMEDOUT;
220567e6e29Sjimqu }
221567e6e29Sjimqu 
/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block: program all three ring buffers, then
 * bring up each non-harvested VCE instance (clock enable, release the
 * ECPU from reset, wait for the firmware-loaded handshake).
 *
 * Returns 0 on success or the error from vce_v3_0_firmware_loaded().
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int idx, r;

	/* Program ring 0 registers (base, size, both pointers). */
	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

	/* Ring 1. */
	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	/* Ring 2. */
	ring = &adev->vce.ring[2];
	WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
	WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
	WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);

	/* GRBM_GFX_INDEX banks the per-instance registers; serialize. */
	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		/* Skip fused-off (harvested) instances. */
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
		vce_v3_0_mc_resume(adev, idx);
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);

		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);

		/* Release the ECPU from reset and give it time to boot. */
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
		mdelay(100);

		r = vce_v3_0_firmware_loaded(adev);

		/* clear BUSY flag */
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);

		if (r) {
			DRM_ERROR("VCE not responding, giving up!!!\n");
			mutex_unlock(&adev->grbm_idx_mutex);
			return r;
		}
	}

	/* Restore broadcast register indexing. */
	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
2895bbc553aSLeo Liu 
/* Stop both VCE instances: disable the VCPU clock, hold the ECPU in
 * soft reset, and clear VCE_STATUS.  Always returns 0. */
static int vce_v3_0_stop(struct amdgpu_device *adev)
{
	int idx;

	/* Per-instance registers are banked via GRBM_GFX_INDEX. */
	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		/* Skip fused-off (harvested) instances. */
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));

		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);

		/* hold on ECPU */
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);

		/* clear VCE STATUS */
		WREG32(mmVCE_STATUS, 0);
	}

	/* Restore broadcast register indexing. */
	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
318aaa36a97SAlex Deucher 
3196a585777SAlex Deucher #define ixVCE_HARVEST_FUSE_MACRO__ADDRESS     0xC0014074
3206a585777SAlex Deucher #define VCE_HARVEST_FUSE_MACRO__SHIFT       27
3216a585777SAlex Deucher #define VCE_HARVEST_FUSE_MACRO__MASK        0x18000000
3226a585777SAlex Deucher 
3236a585777SAlex Deucher static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
3246a585777SAlex Deucher {
3256a585777SAlex Deucher 	u32 tmp;
3266a585777SAlex Deucher 
327c4642a47SJunwei Zhang 	/* Fiji, Stoney, Polaris10, Polaris11, Polaris12 are single pipe */
328cfaba566SSamuel Li 	if ((adev->asic_type == CHIP_FIJI) ||
3291b4eeea5SSonny Jiang 	    (adev->asic_type == CHIP_STONEY) ||
3302cc0c0b5SFlora Cui 	    (adev->asic_type == CHIP_POLARIS10) ||
331c4642a47SJunwei Zhang 	    (adev->asic_type == CHIP_POLARIS11) ||
332c4642a47SJunwei Zhang 	    (adev->asic_type == CHIP_POLARIS12))
3331dab5f06STom St Denis 		return AMDGPU_VCE_HARVEST_VCE1;
334188a9bcdSAlex Deucher 
335188a9bcdSAlex Deucher 	/* Tonga and CZ are dual or single pipe */
3362f7d10b3SJammy Zhou 	if (adev->flags & AMD_IS_APU)
3376a585777SAlex Deucher 		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
3386a585777SAlex Deucher 		       VCE_HARVEST_FUSE_MACRO__MASK) >>
3396a585777SAlex Deucher 			VCE_HARVEST_FUSE_MACRO__SHIFT;
3406a585777SAlex Deucher 	else
3416a585777SAlex Deucher 		tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
3426a585777SAlex Deucher 		       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
3436a585777SAlex Deucher 			CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;
3446a585777SAlex Deucher 
3456a585777SAlex Deucher 	switch (tmp) {
3466a585777SAlex Deucher 	case 1:
3471dab5f06STom St Denis 		return AMDGPU_VCE_HARVEST_VCE0;
3486a585777SAlex Deucher 	case 2:
3491dab5f06STom St Denis 		return AMDGPU_VCE_HARVEST_VCE1;
3506a585777SAlex Deucher 	case 3:
3511dab5f06STom St Denis 		return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
3526a585777SAlex Deucher 	default:
3531dab5f06STom St Denis 		return 0;
3546a585777SAlex Deucher 	}
3556a585777SAlex Deucher }
3566a585777SAlex Deucher 
3575fc3aeebSyanyang1 static int vce_v3_0_early_init(void *handle)
358aaa36a97SAlex Deucher {
3595fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3605fc3aeebSyanyang1 
3616a585777SAlex Deucher 	adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
3626a585777SAlex Deucher 
3636a585777SAlex Deucher 	if ((adev->vce.harvest_config &
3646a585777SAlex Deucher 	     (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
3656a585777SAlex Deucher 	    (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
3666a585777SAlex Deucher 		return -ENOENT;
3676a585777SAlex Deucher 
3686f0359ffSAlex Deucher 	adev->vce.num_rings = 3;
36975c65480SAlex Deucher 
370aaa36a97SAlex Deucher 	vce_v3_0_set_ring_funcs(adev);
371aaa36a97SAlex Deucher 	vce_v3_0_set_irq_funcs(adev);
372aaa36a97SAlex Deucher 
373aaa36a97SAlex Deucher 	return 0;
374aaa36a97SAlex Deucher }
375aaa36a97SAlex Deucher 
3765fc3aeebSyanyang1 static int vce_v3_0_sw_init(void *handle)
377aaa36a97SAlex Deucher {
3785fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
379aaa36a97SAlex Deucher 	struct amdgpu_ring *ring;
38075c65480SAlex Deucher 	int r, i;
381aaa36a97SAlex Deucher 
382aaa36a97SAlex Deucher 	/* VCE */
383d766e6a3SAlex Deucher 	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 167, &adev->vce.irq);
384aaa36a97SAlex Deucher 	if (r)
385aaa36a97SAlex Deucher 		return r;
386aaa36a97SAlex Deucher 
387e9822622SLeo Liu 	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
388e9822622SLeo Liu 		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
389aaa36a97SAlex Deucher 	if (r)
390aaa36a97SAlex Deucher 		return r;
391aaa36a97SAlex Deucher 
392ef6239e0SAlex Deucher 	/* 52.8.3 required for 3 ring support */
393ef6239e0SAlex Deucher 	if (adev->vce.fw_version < FW_52_8_3)
394ef6239e0SAlex Deucher 		adev->vce.num_rings = 2;
395ef6239e0SAlex Deucher 
396aaa36a97SAlex Deucher 	r = amdgpu_vce_resume(adev);
397aaa36a97SAlex Deucher 	if (r)
398aaa36a97SAlex Deucher 		return r;
399aaa36a97SAlex Deucher 
40075c65480SAlex Deucher 	for (i = 0; i < adev->vce.num_rings; i++) {
40175c65480SAlex Deucher 		ring = &adev->vce.ring[i];
40275c65480SAlex Deucher 		sprintf(ring->name, "vce%d", i);
40379887142SChristian König 		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
404aaa36a97SAlex Deucher 		if (r)
405aaa36a97SAlex Deucher 			return r;
40675c65480SAlex Deucher 	}
407aaa36a97SAlex Deucher 
408aaa36a97SAlex Deucher 	return r;
409aaa36a97SAlex Deucher }
410aaa36a97SAlex Deucher 
/* Software teardown: suspend VCE state, then free the SW structures. */
static int vce_v3_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return amdgpu_vce_sw_fini(adev);
}
426aaa36a97SAlex Deucher 
4275fc3aeebSyanyang1 static int vce_v3_0_hw_init(void *handle)
428aaa36a97SAlex Deucher {
429691ca86aSTom St Denis 	int r, i;
4305fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
431aaa36a97SAlex Deucher 
4326fc11b0eSRex Zhu 	vce_v3_0_override_vce_clock_gating(adev, true);
4336fc11b0eSRex Zhu 	if (!(adev->flags & AMD_IS_APU))
4346fc11b0eSRex Zhu 		amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
435aaa36a97SAlex Deucher 
43675c65480SAlex Deucher 	for (i = 0; i < adev->vce.num_rings; i++)
43775c65480SAlex Deucher 		adev->vce.ring[i].ready = false;
438aaa36a97SAlex Deucher 
43975c65480SAlex Deucher 	for (i = 0; i < adev->vce.num_rings; i++) {
440691ca86aSTom St Denis 		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
441691ca86aSTom St Denis 		if (r)
442aaa36a97SAlex Deucher 			return r;
443691ca86aSTom St Denis 		else
444691ca86aSTom St Denis 			adev->vce.ring[i].ready = true;
445aaa36a97SAlex Deucher 	}
446aaa36a97SAlex Deucher 
447aaa36a97SAlex Deucher 	DRM_INFO("VCE initialized successfully.\n");
448aaa36a97SAlex Deucher 
449aaa36a97SAlex Deucher 	return 0;
450aaa36a97SAlex Deucher }
451aaa36a97SAlex Deucher 
4525fc3aeebSyanyang1 static int vce_v3_0_hw_fini(void *handle)
453aaa36a97SAlex Deucher {
454567e6e29Sjimqu 	int r;
455567e6e29Sjimqu 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
456567e6e29Sjimqu 
457567e6e29Sjimqu 	r = vce_v3_0_wait_for_idle(handle);
458567e6e29Sjimqu 	if (r)
459567e6e29Sjimqu 		return r;
460567e6e29Sjimqu 
46126679899SRex Zhu 	vce_v3_0_stop(adev);
46226679899SRex Zhu 	return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
463aaa36a97SAlex Deucher }
464aaa36a97SAlex Deucher 
/* Suspend: bring down the hardware, then save the VCE state. */
static int vce_v3_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_vce_suspend(adev);
}
480aaa36a97SAlex Deucher 
/* Resume: restore the VCE state, then re-initialize the hardware. */
static int vce_v3_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	return vce_v3_0_hw_init(adev);
}
496aaa36a97SAlex Deucher 
/* Program the memory-controller-facing registers of the currently
 * selected VCE instance (@idx, chosen via GRBM_GFX_INDEX by the
 * caller): clock-gating setup, LMI configuration, the 40-bit VCPU
 * cache base, and the firmware/stack/data cache windows.
 * Instance 1's stack/data live after instance 0's in the shared BO. */
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);

	/* LMI (local memory interface) setup; magic values are from the
	 * programming guide — NOTE(review): not derivable from this file. */
	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);
	WREG32_OR(mmVCE_VCPU_CNTL, 0x00100000);

	/* Stoney and newer expose three 40-bit BAR registers. */
	if (adev->asic_type >= CHIP_STONEY) {
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
	} else
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
	/* Window 0: the firmware image (shared by both instances). */
	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	if (idx == 0) {
		/* Instance 0: stack then data immediately after the fw. */
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		/* Instance 1: skip instance 0's stack+data first.
		 * NOTE(review): mask is 0xfffffff here vs 0x7fffffff
		 * above — confirm against the register spec. */
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}
547aaa36a97SAlex Deucher 
5485fc3aeebSyanyang1 static bool vce_v3_0_is_idle(void *handle)
549aaa36a97SAlex Deucher {
5505fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
551be4f38e2SAlex Deucher 	u32 mask = 0;
5525fc3aeebSyanyang1 
55374af1276STom St Denis 	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
55474af1276STom St Denis 	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;
555be4f38e2SAlex Deucher 
556be4f38e2SAlex Deucher 	return !(RREG32(mmSRBM_STATUS2) & mask);
557aaa36a97SAlex Deucher }
558aaa36a97SAlex Deucher 
5595fc3aeebSyanyang1 static int vce_v3_0_wait_for_idle(void *handle)
560aaa36a97SAlex Deucher {
561aaa36a97SAlex Deucher 	unsigned i;
5625fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
563be4f38e2SAlex Deucher 
56492988e60STom St Denis 	for (i = 0; i < adev->usec_timeout; i++)
56592988e60STom St Denis 		if (vce_v3_0_is_idle(handle))
566aaa36a97SAlex Deucher 			return 0;
56792988e60STom St Denis 
568aaa36a97SAlex Deucher 	return -ETIMEDOUT;
569aaa36a97SAlex Deucher }
570aaa36a97SAlex Deucher 
571ac8e3f30SRex Zhu #define  VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK  0x00000008L   /* AUTO_BUSY */
572ac8e3f30SRex Zhu #define  VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK   0x00000010L   /* RB0_BUSY */
573ac8e3f30SRex Zhu #define  VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK   0x00000020L   /* RB1_BUSY */
574ac8e3f30SRex Zhu #define  AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
575ac8e3f30SRex Zhu 				      VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
576115933a5SChunming Zhou 
/* Determine whether VCE needs a soft reset.  Checks each instance's
 * VCE_STATUS busy bits (banked via GRBM_GFX_INDEX) and, if either is
 * busy, records the SRBM reset bits in adev->vce.srbm_soft_reset for
 * the soft-reset hooks below.  Returns true when a reset is needed. */
static bool vce_v3_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	/* According to VCE team , we should use VCE_STATUS instead
	 * SRBM_STATUS.VCE_BUSY bit for busy status checking.
	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
	 * instance's registers are accessed
	 * (0 for 1st instance, 10 for 2nd instance).
	 *
	 *VCE_STATUS
	 *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
	 *|----+----+-----------+----+----+----+----------+---------+----|
	 *|bit8|bit7|    bit6   |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
	 *
	 * VCE team suggest use bit 3--bit 6 for busy status check
	 */
	mutex_lock(&adev->grbm_idx_mutex);
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	/* Restore instance 0 selection before dropping the lock. */
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	mutex_unlock(&adev->grbm_idx_mutex);

	if (srbm_soft_reset) {
		adev->vce.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->vce.srbm_soft_reset = 0;
		return false;
	}
}
617115933a5SChunming Zhou 
/* Pulse the SRBM soft-reset bits recorded by check_soft_reset():
 * assert, settle 50 us, deassert, settle another 50 us.  A no-op when
 * no reset was flagged.  Always returns 0. */
static int vce_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->vce.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->vce.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		/* Assert the reset bits (read back to post the write). */
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		/* Deassert. */
		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}
648115933a5SChunming Zhou 
/* Called before the SRBM soft reset is applied: quiesce VCE by suspending
 * it so the reset does not hit a block with work in flight.
 */
static int vce_v3_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Only act if check_soft_reset flagged VCE for a reset. */
	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v3_0_suspend(adev);
}
660115933a5SChunming Zhou 
661115933a5SChunming Zhou 
/* Called after the SRBM soft reset completed: bring VCE back up by
 * resuming it (mirror of vce_v3_0_pre_soft_reset).
 */
static int vce_v3_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Only act if check_soft_reset flagged VCE for a reset. */
	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v3_0_resume(adev);
}
673aaa36a97SAlex Deucher 
674aaa36a97SAlex Deucher static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
675aaa36a97SAlex Deucher 					struct amdgpu_irq_src *source,
676aaa36a97SAlex Deucher 					unsigned type,
677aaa36a97SAlex Deucher 					enum amdgpu_interrupt_state state)
678aaa36a97SAlex Deucher {
679aaa36a97SAlex Deucher 	uint32_t val = 0;
680aaa36a97SAlex Deucher 
681aaa36a97SAlex Deucher 	if (state == AMDGPU_IRQ_STATE_ENABLE)
682aaa36a97SAlex Deucher 		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
683aaa36a97SAlex Deucher 
684aaa36a97SAlex Deucher 	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
685aaa36a97SAlex Deucher 	return 0;
686aaa36a97SAlex Deucher }
687aaa36a97SAlex Deucher 
/* Interrupt handler for VCE: acknowledge the trap interrupt and kick
 * fence processing on the ring identified by the IV entry.
 */
static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");

	/* Ack the interrupt in VCE_SYS_INT_STATUS before dispatching. */
	WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);

	/* src_data[0] selects which of the up-to-three VCE rings signalled. */
	switch (entry->src_data[0]) {
	case 0:
	case 1:
	case 2:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
710aaa36a97SAlex Deucher 
7115fc3aeebSyanyang1 static int vce_v3_0_set_clockgating_state(void *handle,
7125fc3aeebSyanyang1 					  enum amd_clockgating_state state)
713aaa36a97SAlex Deucher {
7140689a570SEric Huang 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7150689a570SEric Huang 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
7160689a570SEric Huang 	int i;
7170689a570SEric Huang 
718e3b04bc7SAlex Deucher 	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
7190689a570SEric Huang 		return 0;
7200689a570SEric Huang 
7210689a570SEric Huang 	mutex_lock(&adev->grbm_idx_mutex);
7220689a570SEric Huang 	for (i = 0; i < 2; i++) {
7230689a570SEric Huang 		/* Program VCE Instance 0 or 1 if not harvested */
7240689a570SEric Huang 		if (adev->vce.harvest_config & (1 << i))
7250689a570SEric Huang 			continue;
7260689a570SEric Huang 
72750a1ebc7SRex Zhu 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));
7280689a570SEric Huang 
72926679899SRex Zhu 		if (!enable) {
7300689a570SEric Huang 			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
7310689a570SEric Huang 			uint32_t data = RREG32(mmVCE_CLOCK_GATING_A);
7320689a570SEric Huang 			data &= ~(0xf | 0xff0);
7330689a570SEric Huang 			data |= ((0x0 << 0) | (0x04 << 4));
7340689a570SEric Huang 			WREG32(mmVCE_CLOCK_GATING_A, data);
7350689a570SEric Huang 
7360689a570SEric Huang 			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
7370689a570SEric Huang 			data = RREG32(mmVCE_UENC_CLOCK_GATING);
7380689a570SEric Huang 			data &= ~(0xf | 0xff0);
7390689a570SEric Huang 			data |= ((0x0 << 0) | (0x04 << 4));
7400689a570SEric Huang 			WREG32(mmVCE_UENC_CLOCK_GATING, data);
7410689a570SEric Huang 		}
7420689a570SEric Huang 
7430689a570SEric Huang 		vce_v3_0_set_vce_sw_clock_gating(adev, enable);
7440689a570SEric Huang 	}
7450689a570SEric Huang 
74650a1ebc7SRex Zhu 	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
7470689a570SEric Huang 	mutex_unlock(&adev->grbm_idx_mutex);
7480689a570SEric Huang 
749aaa36a97SAlex Deucher 	return 0;
750aaa36a97SAlex Deucher }
751aaa36a97SAlex Deucher 
7525fc3aeebSyanyang1 static int vce_v3_0_set_powergating_state(void *handle,
7535fc3aeebSyanyang1 					  enum amd_powergating_state state)
754aaa36a97SAlex Deucher {
755aaa36a97SAlex Deucher 	/* This doesn't actually powergate the VCE block.
756aaa36a97SAlex Deucher 	 * That's done in the dpm code via the SMC.  This
757aaa36a97SAlex Deucher 	 * just re-inits the block as necessary.  The actual
758aaa36a97SAlex Deucher 	 * gating still happens in the dpm code.  We should
759aaa36a97SAlex Deucher 	 * revisit this when there is a cleaner line between
760aaa36a97SAlex Deucher 	 * the smc and the hw blocks
761aaa36a97SAlex Deucher 	 */
7625fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
763c79b5561SHuang Rui 	int ret = 0;
7645fc3aeebSyanyang1 
765c79b5561SHuang Rui 	if (state == AMD_PG_STATE_GATE) {
7666fc11b0eSRex Zhu 		ret = vce_v3_0_stop(adev);
7676fc11b0eSRex Zhu 		if (ret)
7686fc11b0eSRex Zhu 			goto out;
769c79b5561SHuang Rui 	} else {
770c79b5561SHuang Rui 		ret = vce_v3_0_start(adev);
771c79b5561SHuang Rui 		if (ret)
772c79b5561SHuang Rui 			goto out;
773c79b5561SHuang Rui 	}
774c79b5561SHuang Rui 
775c79b5561SHuang Rui out:
776c79b5561SHuang Rui 	return ret;
777c79b5561SHuang Rui }
778c79b5561SHuang Rui 
/* Report the currently-active VCE clockgating features in @flags.
 * Bails out early if VCE is powergated, since touching its registers
 * in that state is not safe.
 */
static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	/* APUs expose the powergate status in a different SMC register. */
	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__VCE_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when VCE is powergated.\n");
		goto out;
	}

	/* Select VCE instance 0 for the register read below. */
	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);

	/* AMD_CG_SUPPORT_VCE_MGCG */
	data = RREG32(mmVCE_CLOCK_GATING_A);
	if (data & (0x04 << 4))
		*flags |= AMD_CG_SUPPORT_VCE_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}
806aaa36a97SAlex Deucher 
/* Emit an indirect buffer on the ring in VM mode: the IB_VM command,
 * the VM id, the 64-bit IB GPU address split low/high, and the IB
 * length in dwords.  The write order is the hardware command format.
 */
static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
		struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
	amdgpu_ring_write(ring, vm_id);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
816ea4a8c1dSMaruthi Srinivas Bayyavarapu 
/* Point the VCE VM at a new page-table base and flush its TLB.
 * pd_addr is page-aligned, hence the >> 12 when writing the PTB.
 */
static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
			 unsigned int vm_id, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vm_id);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vm_id);
	amdgpu_ring_write(ring, VCE_CMD_END);
}
828ea4a8c1dSMaruthi Srinivas Bayyavarapu 
/* Emit a wait-greater-equal: the ring stalls until the fence value at
 * the fence driver's GPU address reaches the current sync sequence.
 */
static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, VCE_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}
839ea4a8c1dSMaruthi Srinivas Bayyavarapu 
/* IP-block callback table shared by all VCE 3.x variants (v3.0/3.1/3.4). */
static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.name = "vce_v3_0",
	.early_init = vce_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v3_0_sw_init,
	.sw_fini = vce_v3_0_sw_fini,
	.hw_init = vce_v3_0_hw_init,
	.hw_fini = vce_v3_0_hw_fini,
	.suspend = vce_v3_0_suspend,
	.resume = vce_v3_0_resume,
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.check_soft_reset = vce_v3_0_check_soft_reset,
	.pre_soft_reset = vce_v3_0_pre_soft_reset,
	.soft_reset = vce_v3_0_soft_reset,
	.post_soft_reset = vce_v3_0_post_soft_reset,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
	.get_clockgating_state = vce_v3_0_get_clockgating_state,
};
860aaa36a97SAlex Deucher 
/* Ring callbacks for physical-address mode (pre-STONEY ASICs):
 * IBs are emitted by the generic VCE helper and command streams are
 * fully parsed/patched by amdgpu_vce_ring_parse_cs.
 */
static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_VCE,
	.align_mask = 0xf,
	.nop = VCE_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_frame_size =
		4 + /* vce_v3_0_emit_pipeline_sync */
		6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
	.emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
};
883aaa36a97SAlex Deucher 
/* Ring callbacks for VM mode (STONEY and newer): adds VM flush and
 * pipeline sync, uses the VM-aware CS parser and the local emit_ib.
 */
static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCE,
	.align_mask = 0xf,
	.nop = VCE_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs_vm,
	.emit_frame_size =
		6 + /* vce_v3_0_emit_vm_flush */
		4 + /* vce_v3_0_emit_pipeline_sync */
		6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
	.emit_ib = vce_v3_0_ring_emit_ib,
	.emit_vm_flush = vce_v3_0_emit_vm_flush,
	.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
};
909ea4a8c1dSMaruthi Srinivas Bayyavarapu 
910aaa36a97SAlex Deucher static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
911aaa36a97SAlex Deucher {
91275c65480SAlex Deucher 	int i;
91375c65480SAlex Deucher 
914ea4a8c1dSMaruthi Srinivas Bayyavarapu 	if (adev->asic_type >= CHIP_STONEY) {
91575c65480SAlex Deucher 		for (i = 0; i < adev->vce.num_rings; i++)
916ea4a8c1dSMaruthi Srinivas Bayyavarapu 			adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
917ea4a8c1dSMaruthi Srinivas Bayyavarapu 		DRM_INFO("VCE enabled in VM mode\n");
918ea4a8c1dSMaruthi Srinivas Bayyavarapu 	} else {
919ea4a8c1dSMaruthi Srinivas Bayyavarapu 		for (i = 0; i < adev->vce.num_rings; i++)
920ea4a8c1dSMaruthi Srinivas Bayyavarapu 			adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
921ea4a8c1dSMaruthi Srinivas Bayyavarapu 		DRM_INFO("VCE enabled in physical mode\n");
922ea4a8c1dSMaruthi Srinivas Bayyavarapu 	}
923aaa36a97SAlex Deucher }
924aaa36a97SAlex Deucher 
/* IRQ source callbacks: enable/disable and service the VCE trap interrupt. */
static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
	.set = vce_v3_0_set_interrupt_state,
	.process = vce_v3_0_process_interrupt,
};
929aaa36a97SAlex Deucher 
930aaa36a97SAlex Deucher static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
931aaa36a97SAlex Deucher {
932aaa36a97SAlex Deucher 	adev->vce.irq.num_types = 1;
933aaa36a97SAlex Deucher 	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
934aaa36a97SAlex Deucher };
935a1255107SAlex Deucher 
/* Exported IP block descriptors.  All three VCE 3.x minor revisions
 * share the same callback table; only the version numbers differ.
 */
const struct amdgpu_ip_block_version vce_v3_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &vce_v3_0_ip_funcs,
};

const struct amdgpu_ip_block_version vce_v3_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &vce_v3_0_ip_funcs,
};

const struct amdgpu_ip_block_version vce_v3_4_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 3,
	.minor = 4,
	.rev = 0,
	.funcs = &vce_v3_0_ip_funcs,
};
962