xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c (revision 20acbed4)
1aaa36a97SAlex Deucher /*
2aaa36a97SAlex Deucher  * Copyright 2014 Advanced Micro Devices, Inc.
3aaa36a97SAlex Deucher  * All Rights Reserved.
4aaa36a97SAlex Deucher  *
5aaa36a97SAlex Deucher  * Permission is hereby granted, free of charge, to any person obtaining a
6aaa36a97SAlex Deucher  * copy of this software and associated documentation files (the
7aaa36a97SAlex Deucher  * "Software"), to deal in the Software without restriction, including
8aaa36a97SAlex Deucher  * without limitation the rights to use, copy, modify, merge, publish,
9aaa36a97SAlex Deucher  * distribute, sub license, and/or sell copies of the Software, and to
10aaa36a97SAlex Deucher  * permit persons to whom the Software is furnished to do so, subject to
11aaa36a97SAlex Deucher  * the following conditions:
12aaa36a97SAlex Deucher  *
13aaa36a97SAlex Deucher  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14aaa36a97SAlex Deucher  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15aaa36a97SAlex Deucher  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16aaa36a97SAlex Deucher  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17aaa36a97SAlex Deucher  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18aaa36a97SAlex Deucher  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19aaa36a97SAlex Deucher  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20aaa36a97SAlex Deucher  *
21aaa36a97SAlex Deucher  * The above copyright notice and this permission notice (including the
22aaa36a97SAlex Deucher  * next paragraph) shall be included in all copies or substantial portions
23aaa36a97SAlex Deucher  * of the Software.
24aaa36a97SAlex Deucher  *
25aaa36a97SAlex Deucher  * Authors: Christian König <christian.koenig@amd.com>
26aaa36a97SAlex Deucher  */
27aaa36a97SAlex Deucher 
28aaa36a97SAlex Deucher #include <linux/firmware.h>
29aaa36a97SAlex Deucher #include <drm/drmP.h>
30aaa36a97SAlex Deucher #include "amdgpu.h"
31aaa36a97SAlex Deucher #include "amdgpu_vce.h"
32aaa36a97SAlex Deucher #include "vid.h"
33aaa36a97SAlex Deucher #include "vce/vce_3_0_d.h"
34aaa36a97SAlex Deucher #include "vce/vce_3_0_sh_mask.h"
35be4f38e2SAlex Deucher #include "oss/oss_3_0_d.h"
36be4f38e2SAlex Deucher #include "oss/oss_3_0_sh_mask.h"
375bbc553aSLeo Liu #include "gca/gfx_8_0_d.h"
386a585777SAlex Deucher #include "smu/smu_7_1_2_d.h"
396a585777SAlex Deucher #include "smu/smu_7_1_2_sh_mask.h"
40115933a5SChunming Zhou #include "gca/gfx_8_0_d.h"
41115933a5SChunming Zhou #include "gca/gfx_8_0_sh_mask.h"
42091aec0bSAndrey Grodzovsky #include "ivsrcid/ivsrcid_vislands30.h"
43115933a5SChunming Zhou 
445bbc553aSLeo Liu 
455bbc553aSLeo Liu #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
465bbc553aSLeo Liu #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
4750a1ebc7SRex Zhu #define GRBM_GFX_INDEX__VCE_ALL_PIPE		0x07
4850a1ebc7SRex Zhu 
493c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
503c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
513c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
5250a1ebc7SRex Zhu #define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000
5350a1ebc7SRex Zhu 
54567e6e29Sjimqu #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02
55aaa36a97SAlex Deucher 
56e9822622SLeo Liu #define VCE_V3_0_FW_SIZE	(384 * 1024)
57e9822622SLeo Liu #define VCE_V3_0_STACK_SIZE	(64 * 1024)
58e9822622SLeo Liu #define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
59e9822622SLeo Liu 
60ef6239e0SAlex Deucher #define FW_52_8_3	((52 << 24) | (8 << 16) | (3 << 8))
61ef6239e0SAlex Deucher 
6250a1ebc7SRex Zhu #define GET_VCE_INSTANCE(i)  ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
6350a1ebc7SRex Zhu 					| GRBM_GFX_INDEX__VCE_ALL_PIPE)
6450a1ebc7SRex Zhu 
655bbc553aSLeo Liu static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
66aaa36a97SAlex Deucher static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
67aaa36a97SAlex Deucher static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
68567e6e29Sjimqu static int vce_v3_0_wait_for_idle(void *handle);
6926679899SRex Zhu static int vce_v3_0_set_clockgating_state(void *handle,
7026679899SRex Zhu 					  enum amd_clockgating_state state);
71aaa36a97SAlex Deucher /**
72aaa36a97SAlex Deucher  * vce_v3_0_ring_get_rptr - get read pointer
73aaa36a97SAlex Deucher  *
74aaa36a97SAlex Deucher  * @ring: amdgpu_ring pointer
75aaa36a97SAlex Deucher  *
76aaa36a97SAlex Deucher  * Returns the current hardware read pointer
77aaa36a97SAlex Deucher  */
78536fbf94SKen Wang static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
79aaa36a97SAlex Deucher {
80aaa36a97SAlex Deucher 	struct amdgpu_device *adev = ring->adev;
8145cc6586SLeo Liu 	u32 v;
8245cc6586SLeo Liu 
	/* Point GRBM_GFX_INDEX at the register window of the VCE instance
	 * actually in use: instance 0 when both instances are present or only
	 * VCE1 is harvested, instance 1 when VCE0 is harvested. */
8345cc6586SLeo Liu 	mutex_lock(&adev->grbm_idx_mutex);
8445cc6586SLeo Liu 	if (adev->vce.harvest_config == 0 ||
8545cc6586SLeo Liu 		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
8645cc6586SLeo Liu 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
8745cc6586SLeo Liu 	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
8845cc6586SLeo Liu 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
89aaa36a97SAlex Deucher 
	/* ring->me selects which of the three hardware rings this is. */
905d4af988SAlex Deucher 	if (ring->me == 0)
9145cc6586SLeo Liu 		v = RREG32(mmVCE_RB_RPTR);
925d4af988SAlex Deucher 	else if (ring->me == 1)
9345cc6586SLeo Liu 		v = RREG32(mmVCE_RB_RPTR2);
946f0359ffSAlex Deucher 	else
9545cc6586SLeo Liu 		v = RREG32(mmVCE_RB_RPTR3);
9645cc6586SLeo Liu 
	/* Restore the default GRBM window before dropping the lock. */
9745cc6586SLeo Liu 	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
9845cc6586SLeo Liu 	mutex_unlock(&adev->grbm_idx_mutex);
9945cc6586SLeo Liu 
10045cc6586SLeo Liu 	return v;
101aaa36a97SAlex Deucher }
102aaa36a97SAlex Deucher 
103aaa36a97SAlex Deucher /**
104aaa36a97SAlex Deucher  * vce_v3_0_ring_get_wptr - get write pointer
105aaa36a97SAlex Deucher  *
106aaa36a97SAlex Deucher  * @ring: amdgpu_ring pointer
107aaa36a97SAlex Deucher  *
108aaa36a97SAlex Deucher  * Returns the current hardware write pointer
109aaa36a97SAlex Deucher  */
110536fbf94SKen Wang static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
111aaa36a97SAlex Deucher {
112aaa36a97SAlex Deucher 	struct amdgpu_device *adev = ring->adev;
11345cc6586SLeo Liu 	u32 v;
11445cc6586SLeo Liu 
	/* Select the usable VCE instance's register window (same scheme as
	 * vce_v3_0_ring_get_rptr): instance 1 only when VCE0 is harvested. */
11545cc6586SLeo Liu 	mutex_lock(&adev->grbm_idx_mutex);
11645cc6586SLeo Liu 	if (adev->vce.harvest_config == 0 ||
11745cc6586SLeo Liu 		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
11845cc6586SLeo Liu 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
11945cc6586SLeo Liu 	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
12045cc6586SLeo Liu 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
121aaa36a97SAlex Deucher 
	/* One WPTR register per hardware ring, indexed by ring->me. */
1225d4af988SAlex Deucher 	if (ring->me == 0)
12345cc6586SLeo Liu 		v = RREG32(mmVCE_RB_WPTR);
1245d4af988SAlex Deucher 	else if (ring->me == 1)
12545cc6586SLeo Liu 		v = RREG32(mmVCE_RB_WPTR2);
1266f0359ffSAlex Deucher 	else
12745cc6586SLeo Liu 		v = RREG32(mmVCE_RB_WPTR3);
12845cc6586SLeo Liu 
	/* Restore the default GRBM window before dropping the lock. */
12945cc6586SLeo Liu 	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
13045cc6586SLeo Liu 	mutex_unlock(&adev->grbm_idx_mutex);
13145cc6586SLeo Liu 
13245cc6586SLeo Liu 	return v;
133aaa36a97SAlex Deucher }
134aaa36a97SAlex Deucher 
135aaa36a97SAlex Deucher /**
136aaa36a97SAlex Deucher  * vce_v3_0_ring_set_wptr - set write pointer
137aaa36a97SAlex Deucher  *
138aaa36a97SAlex Deucher  * @ring: amdgpu_ring pointer
139aaa36a97SAlex Deucher  *
140aaa36a97SAlex Deucher  * Commits the write pointer to the hardware
141aaa36a97SAlex Deucher  */
142aaa36a97SAlex Deucher static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
143aaa36a97SAlex Deucher {
144aaa36a97SAlex Deucher 	struct amdgpu_device *adev = ring->adev;
145aaa36a97SAlex Deucher 
	/* Select the usable VCE instance's register window (see
	 * vce_v3_0_ring_get_rptr for the harvest-config rules). */
14645cc6586SLeo Liu 	mutex_lock(&adev->grbm_idx_mutex);
14745cc6586SLeo Liu 	if (adev->vce.harvest_config == 0 ||
14845cc6586SLeo Liu 		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
14945cc6586SLeo Liu 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
15045cc6586SLeo Liu 	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
15145cc6586SLeo Liu 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
15245cc6586SLeo Liu 
	/* Only the low 32 bits of the software wptr are written to hardware. */
1535d4af988SAlex Deucher 	if (ring->me == 0)
154536fbf94SKen Wang 		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
1555d4af988SAlex Deucher 	else if (ring->me == 1)
156536fbf94SKen Wang 		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
1576f0359ffSAlex Deucher 	else
158536fbf94SKen Wang 		WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
15945cc6586SLeo Liu 
	/* Restore the default GRBM window before dropping the lock. */
16045cc6586SLeo Liu 	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
16145cc6586SLeo Liu 	mutex_unlock(&adev->grbm_idx_mutex);
162aaa36a97SAlex Deucher }
163aaa36a97SAlex Deucher 
/* Force (override=true) or release (override=false) the CGTT clock-gating
 * override bit in VCE_RB_ARB_CTRL, temporarily disabling hardware clock
 * gating while other gating registers are reprogrammed. */
1640689a570SEric Huang static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
1650689a570SEric Huang {
166f3f0ea95STom St Denis 	WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
1670689a570SEric Huang }
1680689a570SEric Huang 
/* Program the VCE medium-grain clock-gating registers for the gated or
 * ungated state.  The override bit is held while the registers are updated
 * and released afterwards.  The magic constants are raw register bit
 * patterns; assumed to come from the hardware programming guide — TODO
 * confirm the individual field meanings against the register spec. */
1690689a570SEric Huang static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
1700689a570SEric Huang 					     bool gated)
1710689a570SEric Huang {
172f3f0ea95STom St Denis 	u32 data;
173f16fe6d3STom St Denis 
1740689a570SEric Huang 	/* Set Override to disable Clock Gating */
1750689a570SEric Huang 	vce_v3_0_override_vce_clock_gating(adev, true);
1760689a570SEric Huang 
1776f906814STom St Denis 	/* This function enables MGCG which is controlled by firmware.
1786f906814STom St Denis 	   With the clocks in the gated state the core is still
1796f906814STom St Denis 	   accessible but the firmware will throttle the clocks on the
1806f906814STom St Denis 	   fly as necessary.
1810689a570SEric Huang 	*/
182ecc2cf7cSMaruthi Srinivas Bayyavarapu 	if (!gated) {
	/* Ungated: force the relevant clocks on. */
183f3f0ea95STom St Denis 		data = RREG32(mmVCE_CLOCK_GATING_B);
1840689a570SEric Huang 		data |= 0x1ff;
1850689a570SEric Huang 		data &= ~0xef0000;
1860689a570SEric Huang 		WREG32(mmVCE_CLOCK_GATING_B, data);
1870689a570SEric Huang 
188f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_CLOCK_GATING);
1890689a570SEric Huang 		data |= 0x3ff000;
1900689a570SEric Huang 		data &= ~0xffc00000;
1910689a570SEric Huang 		WREG32(mmVCE_UENC_CLOCK_GATING, data);
1920689a570SEric Huang 
193f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
1940689a570SEric Huang 		data |= 0x2;
1956f906814STom St Denis 		data &= ~0x00010000;
1960689a570SEric Huang 		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
1970689a570SEric Huang 
198f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
1990689a570SEric Huang 		data |= 0x37f;
2000689a570SEric Huang 		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
2010689a570SEric Huang 
202f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
2030689a570SEric Huang 		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
2040689a570SEric Huang 			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
2050689a570SEric Huang 			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
2060689a570SEric Huang 			0x8;
2070689a570SEric Huang 		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
2080689a570SEric Huang 	} else {
	/* Gated: clear the force-on bits and enable firmware gating. */
209f3f0ea95STom St Denis 		data = RREG32(mmVCE_CLOCK_GATING_B);
2100689a570SEric Huang 		data &= ~0x80010;
2110689a570SEric Huang 		data |= 0xe70008;
2120689a570SEric Huang 		WREG32(mmVCE_CLOCK_GATING_B, data);
2136f906814STom St Denis 
214f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_CLOCK_GATING);
2150689a570SEric Huang 		data |= 0xffc00000;
2160689a570SEric Huang 		WREG32(mmVCE_UENC_CLOCK_GATING, data);
2176f906814STom St Denis 
218f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
2190689a570SEric Huang 		data |= 0x10000;
2200689a570SEric Huang 		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
2216f906814STom St Denis 
222f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
223e05208deSRex Zhu 		data &= ~0x3ff;
2240689a570SEric Huang 		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
2256f906814STom St Denis 
226f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
2270689a570SEric Huang 		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
2280689a570SEric Huang 			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
2290689a570SEric Huang 			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
2300689a570SEric Huang 			  0x8);
2310689a570SEric Huang 		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
2320689a570SEric Huang 	}
	/* Release the override so the programmed gating state takes effect. */
2330689a570SEric Huang 	vce_v3_0_override_vce_clock_gating(adev, false);
2340689a570SEric Huang }
2350689a570SEric Huang 
/* Poll VCE_STATUS until the firmware reports itself loaded.
 *
 * Inner loop polls up to 100 times with 10 ms delays (~1 s); after each
 * failed pass the ECPU is pulsed through soft reset and the wait is retried,
 * up to 10 times total.  Returns 0 on success, -ETIMEDOUT if the firmware
 * never comes up.
 */
236567e6e29Sjimqu static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
237567e6e29Sjimqu {
238567e6e29Sjimqu 	int i, j;
239567e6e29Sjimqu 
240567e6e29Sjimqu 	for (i = 0; i < 10; ++i) {
241567e6e29Sjimqu 		for (j = 0; j < 100; ++j) {
242b7e2e9f7Sjimqu 			uint32_t status = RREG32(mmVCE_STATUS);
243b7e2e9f7Sjimqu 
244567e6e29Sjimqu 			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
245567e6e29Sjimqu 				return 0;
246567e6e29Sjimqu 			mdelay(10);
247567e6e29Sjimqu 		}
248567e6e29Sjimqu 
		/* Firmware did not report in; pulse the ECPU soft reset and retry. */
249567e6e29Sjimqu 		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
250f3f0ea95STom St Denis 		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
251567e6e29Sjimqu 		mdelay(10);
252f3f0ea95STom St Denis 		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
253567e6e29Sjimqu 		mdelay(10);
254567e6e29Sjimqu 	}
255567e6e29Sjimqu 
256567e6e29Sjimqu 	return -ETIMEDOUT;
257567e6e29Sjimqu }
258567e6e29Sjimqu 
259aaa36a97SAlex Deucher /**
260aaa36a97SAlex Deucher  * vce_v3_0_start - start VCE block
261aaa36a97SAlex Deucher  *
262aaa36a97SAlex Deucher  * @adev: amdgpu_device pointer
263aaa36a97SAlex Deucher  *
264aaa36a97SAlex Deucher  * Setup and start the VCE block
265aaa36a97SAlex Deucher  */
266aaa36a97SAlex Deucher static int vce_v3_0_start(struct amdgpu_device *adev)
267aaa36a97SAlex Deucher {
268aaa36a97SAlex Deucher 	struct amdgpu_ring *ring;
269567e6e29Sjimqu 	int idx, r;
270567e6e29Sjimqu 
	/* Bring up every non-harvested VCE instance under the GRBM index lock. */
27145cc6586SLeo Liu 	mutex_lock(&adev->grbm_idx_mutex);
27245cc6586SLeo Liu 	for (idx = 0; idx < 2; ++idx) {
27345cc6586SLeo Liu 		if (adev->vce.harvest_config & (1 << idx))
27445cc6586SLeo Liu 			continue;
27545cc6586SLeo Liu 
27645cc6586SLeo Liu 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
27745cc6586SLeo Liu 
27845cc6586SLeo Liu 		/* Program instance 0 reg space for two instances or instance 0 case
27945cc6586SLeo Liu 		program instance 1 reg space for only instance 1 available case */
28045cc6586SLeo Liu 		if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) {
			/* Program base/size/pointers for all three rings. */
281567e6e29Sjimqu 			ring = &adev->vce.ring[0];
282536fbf94SKen Wang 			WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
283536fbf94SKen Wang 			WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
284567e6e29Sjimqu 			WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
285567e6e29Sjimqu 			WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
286567e6e29Sjimqu 			WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
287567e6e29Sjimqu 
288567e6e29Sjimqu 			ring = &adev->vce.ring[1];
289536fbf94SKen Wang 			WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
290536fbf94SKen Wang 			WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
291567e6e29Sjimqu 			WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
292567e6e29Sjimqu 			WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
293567e6e29Sjimqu 			WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
294aaa36a97SAlex Deucher 
2956f0359ffSAlex Deucher 			ring = &adev->vce.ring[2];
296536fbf94SKen Wang 			WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
297536fbf94SKen Wang 			WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
2986f0359ffSAlex Deucher 			WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
2996f0359ffSAlex Deucher 			WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
3006f0359ffSAlex Deucher 			WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
30145cc6586SLeo Liu 		}
3026f0359ffSAlex Deucher 
3035bbc553aSLeo Liu 		vce_v3_0_mc_resume(adev, idx);
304f3f0ea95STom St Denis 		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
305567e6e29Sjimqu 
		/* STONEY and later use a raw mask write instead of the CLK_EN field. */
3063c0ff9f1SLeo Liu 		if (adev->asic_type >= CHIP_STONEY)
3073c0ff9f1SLeo Liu 			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
3083c0ff9f1SLeo Liu 		else
309f3f0ea95STom St Denis 			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
310aaa36a97SAlex Deucher 
		/* Release the ECPU from reset and give the firmware time to boot. */
311f3f0ea95STom St Denis 		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
312aaa36a97SAlex Deucher 		mdelay(100);
313aaa36a97SAlex Deucher 
314567e6e29Sjimqu 		r = vce_v3_0_firmware_loaded(adev);
315aaa36a97SAlex Deucher 
316aaa36a97SAlex Deucher 		/* clear BUSY flag */
317f3f0ea95STom St Denis 		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);
318aaa36a97SAlex Deucher 
319aaa36a97SAlex Deucher 		if (r) {
320aaa36a97SAlex Deucher 			DRM_ERROR("VCE not responding, giving up!!!\n");
3215bbc553aSLeo Liu 			mutex_unlock(&adev->grbm_idx_mutex);
322aaa36a97SAlex Deucher 			return r;
323aaa36a97SAlex Deucher 		}
3245bbc553aSLeo Liu 	}
3255bbc553aSLeo Liu 
32650a1ebc7SRex Zhu 	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
3275bbc553aSLeo Liu 	mutex_unlock(&adev->grbm_idx_mutex);
3285bbc553aSLeo Liu 
329567e6e29Sjimqu 	return 0;
330567e6e29Sjimqu }
3315bbc553aSLeo Liu 
/* Stop both VCE instances: gate the VCPU clock, hold the ECPU in soft
 * reset, and clear VCE_STATUS for each non-harvested instance. */
332567e6e29Sjimqu static int vce_v3_0_stop(struct amdgpu_device *adev)
333567e6e29Sjimqu {
334567e6e29Sjimqu 	int idx;
335567e6e29Sjimqu 
336567e6e29Sjimqu 	mutex_lock(&adev->grbm_idx_mutex);
337567e6e29Sjimqu 	for (idx = 0; idx < 2; ++idx) {
338567e6e29Sjimqu 		if (adev->vce.harvest_config & (1 << idx))
339567e6e29Sjimqu 			continue;
340567e6e29Sjimqu 
34150a1ebc7SRex Zhu 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
342567e6e29Sjimqu 
		/* Mirror of the start path: raw mask write on STONEY+. */
343567e6e29Sjimqu 		if (adev->asic_type >= CHIP_STONEY)
344567e6e29Sjimqu 			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
345567e6e29Sjimqu 		else
346f3f0ea95STom St Denis 			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);
347f3f0ea95STom St Denis 
348567e6e29Sjimqu 		/* hold on ECPU */
349f3f0ea95STom St Denis 		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
350567e6e29Sjimqu 
35126679899SRex Zhu 		/* clear VCE STATUS */
35226679899SRex Zhu 		WREG32(mmVCE_STATUS, 0);
353567e6e29Sjimqu 	}
354567e6e29Sjimqu 
35550a1ebc7SRex Zhu 	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
356567e6e29Sjimqu 	mutex_unlock(&adev->grbm_idx_mutex);
357aaa36a97SAlex Deucher 
358aaa36a97SAlex Deucher 	return 0;
359aaa36a97SAlex Deucher }
360aaa36a97SAlex Deucher 
3616a585777SAlex Deucher #define ixVCE_HARVEST_FUSE_MACRO__ADDRESS     0xC0014074
3626a585777SAlex Deucher #define VCE_HARVEST_FUSE_MACRO__SHIFT       27
3636a585777SAlex Deucher #define VCE_HARVEST_FUSE_MACRO__MASK        0x18000000
3646a585777SAlex Deucher 
/* Determine which VCE instances are harvested (fused off).
 *
 * Fiji and Stoney always report VCE1 harvested.  Otherwise the harvest
 * fuses are read from SMC space — a different fuse register for APUs than
 * for dGPUs — and mapped to AMDGPU_VCE_HARVEST_* flags.  For an
 * unrecognized fuse value, Polaris10/11/12 and VegaM fall back to VCE1
 * harvested; everything else reports no harvesting. */
3656a585777SAlex Deucher static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
3666a585777SAlex Deucher {
3676a585777SAlex Deucher 	u32 tmp;
3686a585777SAlex Deucher 
369cfaba566SSamuel Li 	if ((adev->asic_type == CHIP_FIJI) ||
37032bec2afSLeo Liu 	    (adev->asic_type == CHIP_STONEY))
3711dab5f06STom St Denis 		return AMDGPU_VCE_HARVEST_VCE1;
372188a9bcdSAlex Deucher 
3732f7d10b3SJammy Zhou 	if (adev->flags & AMD_IS_APU)
3746a585777SAlex Deucher 		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
3756a585777SAlex Deucher 		       VCE_HARVEST_FUSE_MACRO__MASK) >>
3766a585777SAlex Deucher 			VCE_HARVEST_FUSE_MACRO__SHIFT;
3776a585777SAlex Deucher 	else
3786a585777SAlex Deucher 		tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
3796a585777SAlex Deucher 		       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
3806a585777SAlex Deucher 			CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;
3816a585777SAlex Deucher 
3826a585777SAlex Deucher 	switch (tmp) {
3836a585777SAlex Deucher 	case 1:
3841dab5f06STom St Denis 		return AMDGPU_VCE_HARVEST_VCE0;
3856a585777SAlex Deucher 	case 2:
3861dab5f06STom St Denis 		return AMDGPU_VCE_HARVEST_VCE1;
3876a585777SAlex Deucher 	case 3:
3881dab5f06STom St Denis 		return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
3896a585777SAlex Deucher 	default:
39032bec2afSLeo Liu 		if ((adev->asic_type == CHIP_POLARIS10) ||
39132bec2afSLeo Liu 		    (adev->asic_type == CHIP_POLARIS11) ||
392a7712897SLeo Liu 		    (adev->asic_type == CHIP_POLARIS12) ||
393a7712897SLeo Liu 		    (adev->asic_type == CHIP_VEGAM))
39432bec2afSLeo Liu 			return AMDGPU_VCE_HARVEST_VCE1;
39532bec2afSLeo Liu 
3961dab5f06STom St Denis 		return 0;
3976a585777SAlex Deucher 	}
3986a585777SAlex Deucher }
3996a585777SAlex Deucher 
/* IP-block early init: read the harvest fuses, bail out with -ENOENT if
 * both VCE instances are fused off, and install the ring/irq callbacks.
 * The ring count defaults to 3; sw_init may lower it based on fw version. */
4005fc3aeebSyanyang1 static int vce_v3_0_early_init(void *handle)
401aaa36a97SAlex Deucher {
4025fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4035fc3aeebSyanyang1 
4046a585777SAlex Deucher 	adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
4056a585777SAlex Deucher 
4066a585777SAlex Deucher 	if ((adev->vce.harvest_config &
4076a585777SAlex Deucher 	     (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
4086a585777SAlex Deucher 	    (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
4096a585777SAlex Deucher 		return -ENOENT;
4106a585777SAlex Deucher 
4116f0359ffSAlex Deucher 	adev->vce.num_rings = 3;
41275c65480SAlex Deucher 
413aaa36a97SAlex Deucher 	vce_v3_0_set_ring_funcs(adev);
414aaa36a97SAlex Deucher 	vce_v3_0_set_irq_funcs(adev);
415aaa36a97SAlex Deucher 
416aaa36a97SAlex Deucher 	return 0;
417aaa36a97SAlex Deucher }
418aaa36a97SAlex Deucher 
/* IP-block sw init: register the VCE trap IRQ, allocate the firmware/stack/
 * data BO (double stack+data for the two instances), resume saved state,
 * and initialize the rings.  Returns 0 or a negative errno. */
4195fc3aeebSyanyang1 static int vce_v3_0_sw_init(void *handle)
420aaa36a97SAlex Deucher {
4215fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
422aaa36a97SAlex Deucher 	struct amdgpu_ring *ring;
42375c65480SAlex Deucher 	int r, i;
424aaa36a97SAlex Deucher 
425aaa36a97SAlex Deucher 	/* VCE */
426091aec0bSAndrey Grodzovsky 	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
427aaa36a97SAlex Deucher 	if (r)
428aaa36a97SAlex Deucher 		return r;
429aaa36a97SAlex Deucher 
430e9822622SLeo Liu 	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
431e9822622SLeo Liu 		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
432aaa36a97SAlex Deucher 	if (r)
433aaa36a97SAlex Deucher 		return r;
434aaa36a97SAlex Deucher 
435ef6239e0SAlex Deucher 	/* 52.8.3 required for 3 ring support */
436ef6239e0SAlex Deucher 	if (adev->vce.fw_version < FW_52_8_3)
437ef6239e0SAlex Deucher 		adev->vce.num_rings = 2;
438ef6239e0SAlex Deucher 
439aaa36a97SAlex Deucher 	r = amdgpu_vce_resume(adev);
440aaa36a97SAlex Deucher 	if (r)
441aaa36a97SAlex Deucher 		return r;
442aaa36a97SAlex Deucher 
44375c65480SAlex Deucher 	for (i = 0; i < adev->vce.num_rings; i++) {
44475c65480SAlex Deucher 		ring = &adev->vce.ring[i];
44575c65480SAlex Deucher 		sprintf(ring->name, "vce%d", i);
44679887142SChristian König 		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
447aaa36a97SAlex Deucher 		if (r)
448aaa36a97SAlex Deucher 			return r;
44975c65480SAlex Deucher 	}
450aaa36a97SAlex Deucher 
45120acbed4SEmily Deng 	r = amdgpu_vce_entity_init(adev);
45220acbed4SEmily Deng 
453aaa36a97SAlex Deucher 	return r;
454aaa36a97SAlex Deucher }
455aaa36a97SAlex Deucher 
/* IP-block sw fini: save VCE state, then tear down the VCE software
 * structures.  An error from suspend aborts the teardown. */
4565fc3aeebSyanyang1 static int vce_v3_0_sw_fini(void *handle)
457aaa36a97SAlex Deucher {
458aaa36a97SAlex Deucher 	int r;
4595fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
460aaa36a97SAlex Deucher 
461aaa36a97SAlex Deucher 	r = amdgpu_vce_suspend(adev);
462aaa36a97SAlex Deucher 	if (r)
463aaa36a97SAlex Deucher 		return r;
464aaa36a97SAlex Deucher 
46550237287SRex Zhu 	return amdgpu_vce_sw_fini(adev);
466aaa36a97SAlex Deucher }
467aaa36a97SAlex Deucher 
/* IP-block hw init: override clock gating, request VCE clocks, then ring-
 * test each enabled ring and mark it ready.  Fails on the first ring-test
 * error. */
4685fc3aeebSyanyang1 static int vce_v3_0_hw_init(void *handle)
469aaa36a97SAlex Deucher {
470691ca86aSTom St Denis 	int r, i;
4715fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
472aaa36a97SAlex Deucher 
4736fc11b0eSRex Zhu 	vce_v3_0_override_vce_clock_gating(adev, true);
47408ebb6e9SRex Zhu 
4756fc11b0eSRex Zhu 	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
476aaa36a97SAlex Deucher 
47775c65480SAlex Deucher 	for (i = 0; i < adev->vce.num_rings; i++)
47875c65480SAlex Deucher 		adev->vce.ring[i].ready = false;
479aaa36a97SAlex Deucher 
48075c65480SAlex Deucher 	for (i = 0; i < adev->vce.num_rings; i++) {
481691ca86aSTom St Denis 		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
482691ca86aSTom St Denis 		if (r)
483aaa36a97SAlex Deucher 			return r;
484691ca86aSTom St Denis 		else
485691ca86aSTom St Denis 			adev->vce.ring[i].ready = true;
486aaa36a97SAlex Deucher 	}
487aaa36a97SAlex Deucher 
488aaa36a97SAlex Deucher 	DRM_INFO("VCE initialized successfully.\n");
489aaa36a97SAlex Deucher 
490aaa36a97SAlex Deucher 	return 0;
491aaa36a97SAlex Deucher }
492aaa36a97SAlex Deucher 
/* IP-block hw fini: wait for the engine to go idle, stop it, then gate
 * the clocks.  The wait error is propagated before anything is stopped. */
4935fc3aeebSyanyang1 static int vce_v3_0_hw_fini(void *handle)
494aaa36a97SAlex Deucher {
495567e6e29Sjimqu 	int r;
496567e6e29Sjimqu 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
497567e6e29Sjimqu 
498567e6e29Sjimqu 	r = vce_v3_0_wait_for_idle(handle);
499567e6e29Sjimqu 	if (r)
500567e6e29Sjimqu 		return r;
501567e6e29Sjimqu 
50226679899SRex Zhu 	vce_v3_0_stop(adev);
50326679899SRex Zhu 	return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
504aaa36a97SAlex Deucher }
505aaa36a97SAlex Deucher 
/* IP-block suspend: shut the hardware down, then save VCE software state. */
5065fc3aeebSyanyang1 static int vce_v3_0_suspend(void *handle)
507aaa36a97SAlex Deucher {
508aaa36a97SAlex Deucher 	int r;
5095fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
510aaa36a97SAlex Deucher 
511aaa36a97SAlex Deucher 	r = vce_v3_0_hw_fini(adev);
512aaa36a97SAlex Deucher 	if (r)
513aaa36a97SAlex Deucher 		return r;
514aaa36a97SAlex Deucher 
51550237287SRex Zhu 	return amdgpu_vce_suspend(adev);
516aaa36a97SAlex Deucher }
517aaa36a97SAlex Deucher 
/* IP-block resume: restore VCE software state, then re-init the hardware. */
5185fc3aeebSyanyang1 static int vce_v3_0_resume(void *handle)
519aaa36a97SAlex Deucher {
520aaa36a97SAlex Deucher 	int r;
5215fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
522aaa36a97SAlex Deucher 
523aaa36a97SAlex Deucher 	r = amdgpu_vce_resume(adev);
524aaa36a97SAlex Deucher 	if (r)
525aaa36a97SAlex Deucher 		return r;
526aaa36a97SAlex Deucher 
52750237287SRex Zhu 	return vce_v3_0_hw_init(adev);
528aaa36a97SAlex Deucher }
529aaa36a97SAlex Deucher 
/* Program the memory-controller interface of one VCE instance (@idx):
 * clock-gating setup, LMI control, VCPU cache base addresses, and the
 * firmware/stack/data cache windows.  Instance 1 uses offsets placed after
 * instance 0's stack+data region within the shared firmware BO.  Caller
 * must have already selected the instance via GRBM_GFX_INDEX. */
5305bbc553aSLeo Liu static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
531aaa36a97SAlex Deucher {
532aaa36a97SAlex Deucher 	uint32_t offset, size;
533aaa36a97SAlex Deucher 
534aaa36a97SAlex Deucher 	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
535aaa36a97SAlex Deucher 	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
536aaa36a97SAlex Deucher 	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
5376f906814STom St Denis 	WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);
538aaa36a97SAlex Deucher 
539aaa36a97SAlex Deucher 	WREG32(mmVCE_LMI_CTRL, 0x00398000);
540aaa36a97SAlex Deucher 	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
541aaa36a97SAlex Deucher 	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
542aaa36a97SAlex Deucher 	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
543aaa36a97SAlex Deucher 	WREG32(mmVCE_LMI_VM_CTRL, 0);
544d50e5c24SAlan Harrison 	WREG32_OR(mmVCE_VCPU_CNTL, 0x00100000);
545d50e5c24SAlan Harrison 
	/* STONEY+ splits the 40-bit cache BAR into three per-window registers. */
5463c0ff9f1SLeo Liu 	if (adev->asic_type >= CHIP_STONEY) {
5473c0ff9f1SLeo Liu 		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
5483c0ff9f1SLeo Liu 		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
5493c0ff9f1SLeo Liu 		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
5503c0ff9f1SLeo Liu 	} else
551aaa36a97SAlex Deucher 		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
	/* Window 0: shared firmware image. */
552aaa36a97SAlex Deucher 	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
553e9822622SLeo Liu 	size = VCE_V3_0_FW_SIZE;
554aaa36a97SAlex Deucher 	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
555aaa36a97SAlex Deucher 	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);
556aaa36a97SAlex Deucher 
	/* Windows 1/2: per-instance stack and data regions. */
5575bbc553aSLeo Liu 	if (idx == 0) {
558aaa36a97SAlex Deucher 		offset += size;
559e9822622SLeo Liu 		size = VCE_V3_0_STACK_SIZE;
560aaa36a97SAlex Deucher 		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
561aaa36a97SAlex Deucher 		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
562aaa36a97SAlex Deucher 		offset += size;
563e9822622SLeo Liu 		size = VCE_V3_0_DATA_SIZE;
564aaa36a97SAlex Deucher 		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
565aaa36a97SAlex Deucher 		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
5665bbc553aSLeo Liu 	} else {
5675bbc553aSLeo Liu 		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
5685bbc553aSLeo Liu 		size = VCE_V3_0_STACK_SIZE;
5695bbc553aSLeo Liu 		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
5705bbc553aSLeo Liu 		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
5715bbc553aSLeo Liu 		offset += size;
5725bbc553aSLeo Liu 		size = VCE_V3_0_DATA_SIZE;
5735bbc553aSLeo Liu 		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
5745bbc553aSLeo Liu 		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
5755bbc553aSLeo Liu 	}
576aaa36a97SAlex Deucher 
577aaa36a97SAlex Deucher 	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
578f3f0ea95STom St Denis 	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
579aaa36a97SAlex Deucher }
580aaa36a97SAlex Deucher 
/* Report idle by checking SRBM_STATUS2 busy bits, masking out harvested
 * instances so a fused-off engine never reads as busy. */
5815fc3aeebSyanyang1 static bool vce_v3_0_is_idle(void *handle)
582aaa36a97SAlex Deucher {
5835fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
584be4f38e2SAlex Deucher 	u32 mask = 0;
5855fc3aeebSyanyang1 
58674af1276STom St Denis 	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
58774af1276STom St Denis 	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;
588be4f38e2SAlex Deucher 
589be4f38e2SAlex Deucher 	return !(RREG32(mmSRBM_STATUS2) & mask);
590aaa36a97SAlex Deucher }
591aaa36a97SAlex Deucher 
/* Busy-poll vce_v3_0_is_idle() up to adev->usec_timeout iterations.
 * Returns 0 once idle, -ETIMEDOUT otherwise. */
5925fc3aeebSyanyang1 static int vce_v3_0_wait_for_idle(void *handle)
593aaa36a97SAlex Deucher {
594aaa36a97SAlex Deucher 	unsigned i;
5955fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
596be4f38e2SAlex Deucher 
59792988e60STom St Denis 	for (i = 0; i < adev->usec_timeout; i++)
59892988e60STom St Denis 		if (vce_v3_0_is_idle(handle))
599aaa36a97SAlex Deucher 			return 0;
60092988e60STom St Denis 
601aaa36a97SAlex Deucher 	return -ETIMEDOUT;
602aaa36a97SAlex Deucher }
603aaa36a97SAlex Deucher 
604ac8e3f30SRex Zhu #define  VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK  0x00000008L   /* AUTO_BUSY */
605ac8e3f30SRex Zhu #define  VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK   0x00000010L   /* RB0_BUSY */
606ac8e3f30SRex Zhu #define  VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK   0x00000020L   /* RB1_BUSY */
607ac8e3f30SRex Zhu #define  AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
608ac8e3f30SRex Zhu 				      VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
609115933a5SChunming Zhou 
/* Decide whether a soft reset is needed by sampling VCE_STATUS of both
 * instances.  If either reports busy (bits 3..6 per the table below), both
 * SOFT_RESET_VCE0 and SOFT_RESET_VCE1 are requested in
 * adev->vce.srbm_soft_reset for the later soft_reset step.  Returns true
 * when a reset is pending. */
610da146d3bSAlex Deucher static bool vce_v3_0_check_soft_reset(void *handle)
611115933a5SChunming Zhou {
612115933a5SChunming Zhou 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
613115933a5SChunming Zhou 	u32 srbm_soft_reset = 0;
614115933a5SChunming Zhou 
615115933a5SChunming Zhou 	/* According to VCE team , we should use VCE_STATUS instead
616115933a5SChunming Zhou 	 * SRBM_STATUS.VCE_BUSY bit for busy status checking.
617115933a5SChunming Zhou 	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
618115933a5SChunming Zhou 	 * instance's registers are accessed
619115933a5SChunming Zhou 	 * (0 for 1st instance, 10 for 2nd instance).
620115933a5SChunming Zhou 	 *
621115933a5SChunming Zhou 	 *VCE_STATUS
622115933a5SChunming Zhou 	 *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
623115933a5SChunming Zhou 	 *|----+----+-----------+----+----+----+----------+---------+----|
624115933a5SChunming Zhou 	 *|bit8|bit7|    bit6   |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
625115933a5SChunming Zhou 	 *
626115933a5SChunming Zhou 	 * VCE team suggest use bit 3--bit 6 for busy status check
627115933a5SChunming Zhou 	 */
6289aeb774cSTom St Denis 	mutex_lock(&adev->grbm_idx_mutex);
62950a1ebc7SRex Zhu 	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
630115933a5SChunming Zhou 	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
631115933a5SChunming Zhou 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
632115933a5SChunming Zhou 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
633115933a5SChunming Zhou 	}
63450a1ebc7SRex Zhu 	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
635115933a5SChunming Zhou 	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
636115933a5SChunming Zhou 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
637115933a5SChunming Zhou 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
638115933a5SChunming Zhou 	}
	/* Leave the window pointed at instance 0 before unlocking. */
63950a1ebc7SRex Zhu 	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
640da146d3bSAlex Deucher 	mutex_unlock(&adev->grbm_idx_mutex);
641115933a5SChunming Zhou 
642115933a5SChunming Zhou 	if (srbm_soft_reset) {
643115933a5SChunming Zhou 		adev->vce.srbm_soft_reset = srbm_soft_reset;
644da146d3bSAlex Deucher 		return true;
645115933a5SChunming Zhou 	} else {
646115933a5SChunming Zhou 		adev->vce.srbm_soft_reset = 0;
647da146d3bSAlex Deucher 		return false;
648115933a5SChunming Zhou 	}
649115933a5SChunming Zhou }
650115933a5SChunming Zhou 
6515fc3aeebSyanyang1 static int vce_v3_0_soft_reset(void *handle)
652aaa36a97SAlex Deucher {
6535fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
654115933a5SChunming Zhou 	u32 srbm_soft_reset;
6555fc3aeebSyanyang1 
656da146d3bSAlex Deucher 	if (!adev->vce.srbm_soft_reset)
657115933a5SChunming Zhou 		return 0;
658115933a5SChunming Zhou 	srbm_soft_reset = adev->vce.srbm_soft_reset;
659be4f38e2SAlex Deucher 
660115933a5SChunming Zhou 	if (srbm_soft_reset) {
661115933a5SChunming Zhou 		u32 tmp;
662115933a5SChunming Zhou 
663115933a5SChunming Zhou 		tmp = RREG32(mmSRBM_SOFT_RESET);
664115933a5SChunming Zhou 		tmp |= srbm_soft_reset;
665115933a5SChunming Zhou 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
666115933a5SChunming Zhou 		WREG32(mmSRBM_SOFT_RESET, tmp);
667115933a5SChunming Zhou 		tmp = RREG32(mmSRBM_SOFT_RESET);
668115933a5SChunming Zhou 
669115933a5SChunming Zhou 		udelay(50);
670115933a5SChunming Zhou 
671115933a5SChunming Zhou 		tmp &= ~srbm_soft_reset;
672115933a5SChunming Zhou 		WREG32(mmSRBM_SOFT_RESET, tmp);
673115933a5SChunming Zhou 		tmp = RREG32(mmSRBM_SOFT_RESET);
674115933a5SChunming Zhou 
675115933a5SChunming Zhou 		/* Wait a little for things to settle down */
676115933a5SChunming Zhou 		udelay(50);
677115933a5SChunming Zhou 	}
678115933a5SChunming Zhou 
679115933a5SChunming Zhou 	return 0;
680115933a5SChunming Zhou }
681115933a5SChunming Zhou 
682115933a5SChunming Zhou static int vce_v3_0_pre_soft_reset(void *handle)
683115933a5SChunming Zhou {
684115933a5SChunming Zhou 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
685115933a5SChunming Zhou 
686da146d3bSAlex Deucher 	if (!adev->vce.srbm_soft_reset)
687115933a5SChunming Zhou 		return 0;
688115933a5SChunming Zhou 
689aaa36a97SAlex Deucher 	mdelay(5);
690aaa36a97SAlex Deucher 
691115933a5SChunming Zhou 	return vce_v3_0_suspend(adev);
692115933a5SChunming Zhou }
693115933a5SChunming Zhou 
694115933a5SChunming Zhou 
695115933a5SChunming Zhou static int vce_v3_0_post_soft_reset(void *handle)
696115933a5SChunming Zhou {
697115933a5SChunming Zhou 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
698115933a5SChunming Zhou 
699da146d3bSAlex Deucher 	if (!adev->vce.srbm_soft_reset)
700115933a5SChunming Zhou 		return 0;
701115933a5SChunming Zhou 
702115933a5SChunming Zhou 	mdelay(5);
703115933a5SChunming Zhou 
704115933a5SChunming Zhou 	return vce_v3_0_resume(adev);
705aaa36a97SAlex Deucher }
706aaa36a97SAlex Deucher 
707aaa36a97SAlex Deucher static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
708aaa36a97SAlex Deucher 					struct amdgpu_irq_src *source,
709aaa36a97SAlex Deucher 					unsigned type,
710aaa36a97SAlex Deucher 					enum amdgpu_interrupt_state state)
711aaa36a97SAlex Deucher {
712aaa36a97SAlex Deucher 	uint32_t val = 0;
713aaa36a97SAlex Deucher 
714aaa36a97SAlex Deucher 	if (state == AMDGPU_IRQ_STATE_ENABLE)
715aaa36a97SAlex Deucher 		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
716aaa36a97SAlex Deucher 
717aaa36a97SAlex Deucher 	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
718aaa36a97SAlex Deucher 	return 0;
719aaa36a97SAlex Deucher }
720aaa36a97SAlex Deucher 
721aaa36a97SAlex Deucher static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
722aaa36a97SAlex Deucher 				      struct amdgpu_irq_src *source,
723aaa36a97SAlex Deucher 				      struct amdgpu_iv_entry *entry)
724aaa36a97SAlex Deucher {
725aaa36a97SAlex Deucher 	DRM_DEBUG("IH: VCE\n");
726d6c29c30SLeo Liu 
727f3f0ea95STom St Denis 	WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);
728d6c29c30SLeo Liu 
7297ccf5aa8SAlex Deucher 	switch (entry->src_data[0]) {
730aaa36a97SAlex Deucher 	case 0:
731aaa36a97SAlex Deucher 	case 1:
7326f0359ffSAlex Deucher 	case 2:
7337ccf5aa8SAlex Deucher 		amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
734aaa36a97SAlex Deucher 		break;
735aaa36a97SAlex Deucher 	default:
736aaa36a97SAlex Deucher 		DRM_ERROR("Unhandled interrupt: %d %d\n",
7377ccf5aa8SAlex Deucher 			  entry->src_id, entry->src_data[0]);
738aaa36a97SAlex Deucher 		break;
739aaa36a97SAlex Deucher 	}
740aaa36a97SAlex Deucher 
741aaa36a97SAlex Deucher 	return 0;
742aaa36a97SAlex Deucher }
743aaa36a97SAlex Deucher 
7445fc3aeebSyanyang1 static int vce_v3_0_set_clockgating_state(void *handle,
7455fc3aeebSyanyang1 					  enum amd_clockgating_state state)
746aaa36a97SAlex Deucher {
7470689a570SEric Huang 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7480689a570SEric Huang 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
7490689a570SEric Huang 	int i;
7500689a570SEric Huang 
751e3b04bc7SAlex Deucher 	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
7520689a570SEric Huang 		return 0;
7530689a570SEric Huang 
7540689a570SEric Huang 	mutex_lock(&adev->grbm_idx_mutex);
7550689a570SEric Huang 	for (i = 0; i < 2; i++) {
7560689a570SEric Huang 		/* Program VCE Instance 0 or 1 if not harvested */
7570689a570SEric Huang 		if (adev->vce.harvest_config & (1 << i))
7580689a570SEric Huang 			continue;
7590689a570SEric Huang 
76050a1ebc7SRex Zhu 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));
7610689a570SEric Huang 
76226679899SRex Zhu 		if (!enable) {
7630689a570SEric Huang 			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
7640689a570SEric Huang 			uint32_t data = RREG32(mmVCE_CLOCK_GATING_A);
7650689a570SEric Huang 			data &= ~(0xf | 0xff0);
7660689a570SEric Huang 			data |= ((0x0 << 0) | (0x04 << 4));
7670689a570SEric Huang 			WREG32(mmVCE_CLOCK_GATING_A, data);
7680689a570SEric Huang 
7690689a570SEric Huang 			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
7700689a570SEric Huang 			data = RREG32(mmVCE_UENC_CLOCK_GATING);
7710689a570SEric Huang 			data &= ~(0xf | 0xff0);
7720689a570SEric Huang 			data |= ((0x0 << 0) | (0x04 << 4));
7730689a570SEric Huang 			WREG32(mmVCE_UENC_CLOCK_GATING, data);
7740689a570SEric Huang 		}
7750689a570SEric Huang 
7760689a570SEric Huang 		vce_v3_0_set_vce_sw_clock_gating(adev, enable);
7770689a570SEric Huang 	}
7780689a570SEric Huang 
77950a1ebc7SRex Zhu 	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
7800689a570SEric Huang 	mutex_unlock(&adev->grbm_idx_mutex);
7810689a570SEric Huang 
782aaa36a97SAlex Deucher 	return 0;
783aaa36a97SAlex Deucher }
784aaa36a97SAlex Deucher 
7855fc3aeebSyanyang1 static int vce_v3_0_set_powergating_state(void *handle,
7865fc3aeebSyanyang1 					  enum amd_powergating_state state)
787aaa36a97SAlex Deucher {
788aaa36a97SAlex Deucher 	/* This doesn't actually powergate the VCE block.
789aaa36a97SAlex Deucher 	 * That's done in the dpm code via the SMC.  This
790aaa36a97SAlex Deucher 	 * just re-inits the block as necessary.  The actual
791aaa36a97SAlex Deucher 	 * gating still happens in the dpm code.  We should
792aaa36a97SAlex Deucher 	 * revisit this when there is a cleaner line between
793aaa36a97SAlex Deucher 	 * the smc and the hw blocks
794aaa36a97SAlex Deucher 	 */
7955fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
796c79b5561SHuang Rui 	int ret = 0;
7975fc3aeebSyanyang1 
798c79b5561SHuang Rui 	if (state == AMD_PG_STATE_GATE) {
7996fc11b0eSRex Zhu 		ret = vce_v3_0_stop(adev);
8006fc11b0eSRex Zhu 		if (ret)
8016fc11b0eSRex Zhu 			goto out;
802c79b5561SHuang Rui 	} else {
803c79b5561SHuang Rui 		ret = vce_v3_0_start(adev);
804c79b5561SHuang Rui 		if (ret)
805c79b5561SHuang Rui 			goto out;
806c79b5561SHuang Rui 	}
807c79b5561SHuang Rui 
808c79b5561SHuang Rui out:
809c79b5561SHuang Rui 	return ret;
810c79b5561SHuang Rui }
811c79b5561SHuang Rui 
812c79b5561SHuang Rui static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags)
813c79b5561SHuang Rui {
814c79b5561SHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
815c79b5561SHuang Rui 	int data;
816c79b5561SHuang Rui 
817c79b5561SHuang Rui 	mutex_lock(&adev->pm.mutex);
818c79b5561SHuang Rui 
8191c622002SRex Zhu 	if (adev->flags & AMD_IS_APU)
8201c622002SRex Zhu 		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
8211c622002SRex Zhu 	else
8221c622002SRex Zhu 		data = RREG32_SMC(ixCURRENT_PG_STATUS);
8231c622002SRex Zhu 
8241c622002SRex Zhu 	if (data & CURRENT_PG_STATUS__VCE_PG_STATUS_MASK) {
825c79b5561SHuang Rui 		DRM_INFO("Cannot get clockgating state when VCE is powergated.\n");
826c79b5561SHuang Rui 		goto out;
827c79b5561SHuang Rui 	}
828c79b5561SHuang Rui 
829c79b5561SHuang Rui 	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
830c79b5561SHuang Rui 
831c79b5561SHuang Rui 	/* AMD_CG_SUPPORT_VCE_MGCG */
832c79b5561SHuang Rui 	data = RREG32(mmVCE_CLOCK_GATING_A);
833c79b5561SHuang Rui 	if (data & (0x04 << 4))
834c79b5561SHuang Rui 		*flags |= AMD_CG_SUPPORT_VCE_MGCG;
835c79b5561SHuang Rui 
836c79b5561SHuang Rui out:
837c79b5561SHuang Rui 	mutex_unlock(&adev->pm.mutex);
838aaa36a97SAlex Deucher }
839aaa36a97SAlex Deucher 
840ea4a8c1dSMaruthi Srinivas Bayyavarapu static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
841c4f46f22SChristian König 		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
842ea4a8c1dSMaruthi Srinivas Bayyavarapu {
843ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
844c4f46f22SChristian König 	amdgpu_ring_write(ring, vmid);
845ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
846ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
847ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, ib->length_dw);
848ea4a8c1dSMaruthi Srinivas Bayyavarapu }
849ea4a8c1dSMaruthi Srinivas Bayyavarapu 
850ea4a8c1dSMaruthi Srinivas Bayyavarapu static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
851c633c00bSChristian König 				   unsigned int vmid, uint64_t pd_addr)
852ea4a8c1dSMaruthi Srinivas Bayyavarapu {
853ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
854c4f46f22SChristian König 	amdgpu_ring_write(ring, vmid);
855ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, pd_addr >> 12);
856ea4a8c1dSMaruthi Srinivas Bayyavarapu 
857ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
858c4f46f22SChristian König 	amdgpu_ring_write(ring, vmid);
859ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, VCE_CMD_END);
860ea4a8c1dSMaruthi Srinivas Bayyavarapu }
861ea4a8c1dSMaruthi Srinivas Bayyavarapu 
862ea4a8c1dSMaruthi Srinivas Bayyavarapu static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
863ea4a8c1dSMaruthi Srinivas Bayyavarapu {
864ea4a8c1dSMaruthi Srinivas Bayyavarapu 	uint32_t seq = ring->fence_drv.sync_seq;
865ea4a8c1dSMaruthi Srinivas Bayyavarapu 	uint64_t addr = ring->fence_drv.gpu_addr;
866ea4a8c1dSMaruthi Srinivas Bayyavarapu 
867ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, VCE_CMD_WAIT_GE);
868ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, lower_32_bits(addr));
869ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, upper_32_bits(addr));
870ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, seq);
871ea4a8c1dSMaruthi Srinivas Bayyavarapu }
872ea4a8c1dSMaruthi Srinivas Bayyavarapu 
873a1255107SAlex Deucher static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
87488a907d6STom St Denis 	.name = "vce_v3_0",
875aaa36a97SAlex Deucher 	.early_init = vce_v3_0_early_init,
876aaa36a97SAlex Deucher 	.late_init = NULL,
877aaa36a97SAlex Deucher 	.sw_init = vce_v3_0_sw_init,
878aaa36a97SAlex Deucher 	.sw_fini = vce_v3_0_sw_fini,
879aaa36a97SAlex Deucher 	.hw_init = vce_v3_0_hw_init,
880aaa36a97SAlex Deucher 	.hw_fini = vce_v3_0_hw_fini,
881aaa36a97SAlex Deucher 	.suspend = vce_v3_0_suspend,
882aaa36a97SAlex Deucher 	.resume = vce_v3_0_resume,
883aaa36a97SAlex Deucher 	.is_idle = vce_v3_0_is_idle,
884aaa36a97SAlex Deucher 	.wait_for_idle = vce_v3_0_wait_for_idle,
885115933a5SChunming Zhou 	.check_soft_reset = vce_v3_0_check_soft_reset,
886115933a5SChunming Zhou 	.pre_soft_reset = vce_v3_0_pre_soft_reset,
887aaa36a97SAlex Deucher 	.soft_reset = vce_v3_0_soft_reset,
888115933a5SChunming Zhou 	.post_soft_reset = vce_v3_0_post_soft_reset,
889aaa36a97SAlex Deucher 	.set_clockgating_state = vce_v3_0_set_clockgating_state,
890aaa36a97SAlex Deucher 	.set_powergating_state = vce_v3_0_set_powergating_state,
891c79b5561SHuang Rui 	.get_clockgating_state = vce_v3_0_get_clockgating_state,
892aaa36a97SAlex Deucher };
893aaa36a97SAlex Deucher 
894ea4a8c1dSMaruthi Srinivas Bayyavarapu static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
89521cd942eSChristian König 	.type = AMDGPU_RING_TYPE_VCE,
89679887142SChristian König 	.align_mask = 0xf,
89779887142SChristian König 	.nop = VCE_CMD_NO_OP,
898536fbf94SKen Wang 	.support_64bit_ptrs = false,
899aaa36a97SAlex Deucher 	.get_rptr = vce_v3_0_ring_get_rptr,
900aaa36a97SAlex Deucher 	.get_wptr = vce_v3_0_ring_get_wptr,
901aaa36a97SAlex Deucher 	.set_wptr = vce_v3_0_ring_set_wptr,
902aaa36a97SAlex Deucher 	.parse_cs = amdgpu_vce_ring_parse_cs,
903e12f3d7aSChristian König 	.emit_frame_size =
904e12f3d7aSChristian König 		4 + /* vce_v3_0_emit_pipeline_sync */
905e12f3d7aSChristian König 		6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
9063413accbSAlex Deucher 	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
907aaa36a97SAlex Deucher 	.emit_ib = amdgpu_vce_ring_emit_ib,
908aaa36a97SAlex Deucher 	.emit_fence = amdgpu_vce_ring_emit_fence,
909aaa36a97SAlex Deucher 	.test_ring = amdgpu_vce_ring_test_ring,
910aaa36a97SAlex Deucher 	.test_ib = amdgpu_vce_ring_test_ib,
911edff0e28SJammy Zhou 	.insert_nop = amdgpu_ring_insert_nop,
9129e5d5309SChristian König 	.pad_ib = amdgpu_ring_generic_pad_ib,
913ebff485eSChristian König 	.begin_use = amdgpu_vce_ring_begin_use,
914ebff485eSChristian König 	.end_use = amdgpu_vce_ring_end_use,
915aaa36a97SAlex Deucher };
916aaa36a97SAlex Deucher 
917ea4a8c1dSMaruthi Srinivas Bayyavarapu static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
91821cd942eSChristian König 	.type = AMDGPU_RING_TYPE_VCE,
91979887142SChristian König 	.align_mask = 0xf,
92079887142SChristian König 	.nop = VCE_CMD_NO_OP,
921536fbf94SKen Wang 	.support_64bit_ptrs = false,
922ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.get_rptr = vce_v3_0_ring_get_rptr,
923ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.get_wptr = vce_v3_0_ring_get_wptr,
924ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.set_wptr = vce_v3_0_ring_set_wptr,
92598614701SChristian König 	.parse_cs = amdgpu_vce_ring_parse_cs_vm,
926e12f3d7aSChristian König 	.emit_frame_size =
927e12f3d7aSChristian König 		6 + /* vce_v3_0_emit_vm_flush */
928e12f3d7aSChristian König 		4 + /* vce_v3_0_emit_pipeline_sync */
929e12f3d7aSChristian König 		6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
9303413accbSAlex Deucher 	.emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
931ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.emit_ib = vce_v3_0_ring_emit_ib,
932ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.emit_vm_flush = vce_v3_0_emit_vm_flush,
933ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
934ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.emit_fence = amdgpu_vce_ring_emit_fence,
935ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.test_ring = amdgpu_vce_ring_test_ring,
936ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.test_ib = amdgpu_vce_ring_test_ib,
937ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.insert_nop = amdgpu_ring_insert_nop,
938ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.pad_ib = amdgpu_ring_generic_pad_ib,
939ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.begin_use = amdgpu_vce_ring_begin_use,
940ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.end_use = amdgpu_vce_ring_end_use,
941ea4a8c1dSMaruthi Srinivas Bayyavarapu };
942ea4a8c1dSMaruthi Srinivas Bayyavarapu 
943aaa36a97SAlex Deucher static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
944aaa36a97SAlex Deucher {
94575c65480SAlex Deucher 	int i;
94675c65480SAlex Deucher 
947ea4a8c1dSMaruthi Srinivas Bayyavarapu 	if (adev->asic_type >= CHIP_STONEY) {
9485d4af988SAlex Deucher 		for (i = 0; i < adev->vce.num_rings; i++) {
949ea4a8c1dSMaruthi Srinivas Bayyavarapu 			adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
9505d4af988SAlex Deucher 			adev->vce.ring[i].me = i;
9515d4af988SAlex Deucher 		}
952ea4a8c1dSMaruthi Srinivas Bayyavarapu 		DRM_INFO("VCE enabled in VM mode\n");
953ea4a8c1dSMaruthi Srinivas Bayyavarapu 	} else {
9545d4af988SAlex Deucher 		for (i = 0; i < adev->vce.num_rings; i++) {
955ea4a8c1dSMaruthi Srinivas Bayyavarapu 			adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
9565d4af988SAlex Deucher 			adev->vce.ring[i].me = i;
9575d4af988SAlex Deucher 		}
958ea4a8c1dSMaruthi Srinivas Bayyavarapu 		DRM_INFO("VCE enabled in physical mode\n");
959ea4a8c1dSMaruthi Srinivas Bayyavarapu 	}
960aaa36a97SAlex Deucher }
961aaa36a97SAlex Deucher 
962aaa36a97SAlex Deucher static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
963aaa36a97SAlex Deucher 	.set = vce_v3_0_set_interrupt_state,
964aaa36a97SAlex Deucher 	.process = vce_v3_0_process_interrupt,
965aaa36a97SAlex Deucher };
966aaa36a97SAlex Deucher 
967aaa36a97SAlex Deucher static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
968aaa36a97SAlex Deucher {
969aaa36a97SAlex Deucher 	adev->vce.irq.num_types = 1;
970aaa36a97SAlex Deucher 	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
971aaa36a97SAlex Deucher };
972a1255107SAlex Deucher 
973a1255107SAlex Deucher const struct amdgpu_ip_block_version vce_v3_0_ip_block =
974a1255107SAlex Deucher {
975a1255107SAlex Deucher 	.type = AMD_IP_BLOCK_TYPE_VCE,
976a1255107SAlex Deucher 	.major = 3,
977a1255107SAlex Deucher 	.minor = 0,
978a1255107SAlex Deucher 	.rev = 0,
979a1255107SAlex Deucher 	.funcs = &vce_v3_0_ip_funcs,
980a1255107SAlex Deucher };
981a1255107SAlex Deucher 
982a1255107SAlex Deucher const struct amdgpu_ip_block_version vce_v3_1_ip_block =
983a1255107SAlex Deucher {
984a1255107SAlex Deucher 	.type = AMD_IP_BLOCK_TYPE_VCE,
985a1255107SAlex Deucher 	.major = 3,
986a1255107SAlex Deucher 	.minor = 1,
987a1255107SAlex Deucher 	.rev = 0,
988a1255107SAlex Deucher 	.funcs = &vce_v3_0_ip_funcs,
989a1255107SAlex Deucher };
990a1255107SAlex Deucher 
991a1255107SAlex Deucher const struct amdgpu_ip_block_version vce_v3_4_ip_block =
992a1255107SAlex Deucher {
993a1255107SAlex Deucher 	.type = AMD_IP_BLOCK_TYPE_VCE,
994a1255107SAlex Deucher 	.major = 3,
995a1255107SAlex Deucher 	.minor = 4,
996a1255107SAlex Deucher 	.rev = 0,
997a1255107SAlex Deucher 	.funcs = &vce_v3_0_ip_funcs,
998a1255107SAlex Deucher };
999