xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c (revision 45cc6586)
1aaa36a97SAlex Deucher /*
2aaa36a97SAlex Deucher  * Copyright 2014 Advanced Micro Devices, Inc.
3aaa36a97SAlex Deucher  * All Rights Reserved.
4aaa36a97SAlex Deucher  *
5aaa36a97SAlex Deucher  * Permission is hereby granted, free of charge, to any person obtaining a
6aaa36a97SAlex Deucher  * copy of this software and associated documentation files (the
7aaa36a97SAlex Deucher  * "Software"), to deal in the Software without restriction, including
8aaa36a97SAlex Deucher  * without limitation the rights to use, copy, modify, merge, publish,
9aaa36a97SAlex Deucher  * distribute, sub license, and/or sell copies of the Software, and to
10aaa36a97SAlex Deucher  * permit persons to whom the Software is furnished to do so, subject to
11aaa36a97SAlex Deucher  * the following conditions:
12aaa36a97SAlex Deucher  *
13aaa36a97SAlex Deucher  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14aaa36a97SAlex Deucher  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15aaa36a97SAlex Deucher  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16aaa36a97SAlex Deucher  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17aaa36a97SAlex Deucher  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18aaa36a97SAlex Deucher  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19aaa36a97SAlex Deucher  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20aaa36a97SAlex Deucher  *
21aaa36a97SAlex Deucher  * The above copyright notice and this permission notice (including the
22aaa36a97SAlex Deucher  * next paragraph) shall be included in all copies or substantial portions
23aaa36a97SAlex Deucher  * of the Software.
24aaa36a97SAlex Deucher  *
25aaa36a97SAlex Deucher  * Authors: Christian König <christian.koenig@amd.com>
26aaa36a97SAlex Deucher  */
27aaa36a97SAlex Deucher 
28aaa36a97SAlex Deucher #include <linux/firmware.h>
29aaa36a97SAlex Deucher #include <drm/drmP.h>
30aaa36a97SAlex Deucher #include "amdgpu.h"
31aaa36a97SAlex Deucher #include "amdgpu_vce.h"
32aaa36a97SAlex Deucher #include "vid.h"
33aaa36a97SAlex Deucher #include "vce/vce_3_0_d.h"
34aaa36a97SAlex Deucher #include "vce/vce_3_0_sh_mask.h"
35be4f38e2SAlex Deucher #include "oss/oss_3_0_d.h"
36be4f38e2SAlex Deucher #include "oss/oss_3_0_sh_mask.h"
375bbc553aSLeo Liu #include "gca/gfx_8_0_d.h"
386a585777SAlex Deucher #include "smu/smu_7_1_2_d.h"
396a585777SAlex Deucher #include "smu/smu_7_1_2_sh_mask.h"
41115933a5SChunming Zhou #include "gca/gfx_8_0_sh_mask.h"
42115933a5SChunming Zhou 
445bbc553aSLeo Liu #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
455bbc553aSLeo Liu #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
4650a1ebc7SRex Zhu #define GRBM_GFX_INDEX__VCE_ALL_PIPE		0x07
4750a1ebc7SRex Zhu 
483c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
493c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
503c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
5150a1ebc7SRex Zhu #define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000
5250a1ebc7SRex Zhu 
53567e6e29Sjimqu #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02
54aaa36a97SAlex Deucher 
55e9822622SLeo Liu #define VCE_V3_0_FW_SIZE	(384 * 1024)
56e9822622SLeo Liu #define VCE_V3_0_STACK_SIZE	(64 * 1024)
57e9822622SLeo Liu #define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
58e9822622SLeo Liu 
59ef6239e0SAlex Deucher #define FW_52_8_3	((52 << 24) | (8 << 16) | (3 << 8))
60ef6239e0SAlex Deucher 
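/*
 * Build a GRBM_GFX_INDEX value that routes register accesses to VCE
 * instance 'i' with all VCE pipes selected; the result is written to
 * mmGRBM_GFX_INDEX before any per-instance VCE register is touched.
 */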
6150a1ebc7SRex Zhu #define GET_VCE_INSTANCE(i)  ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
6250a1ebc7SRex Zhu 					| GRBM_GFX_INDEX__VCE_ALL_PIPE)
6350a1ebc7SRex Zhu 
645bbc553aSLeo Liu static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
65aaa36a97SAlex Deucher static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
66aaa36a97SAlex Deucher static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
67567e6e29Sjimqu static int vce_v3_0_wait_for_idle(void *handle);
6826679899SRex Zhu static int vce_v3_0_set_clockgating_state(void *handle,
6926679899SRex Zhu 					  enum amd_clockgating_state state);
70aaa36a97SAlex Deucher /**
71aaa36a97SAlex Deucher  * vce_v3_0_ring_get_rptr - get read pointer
72aaa36a97SAlex Deucher  *
73aaa36a97SAlex Deucher  * @ring: amdgpu_ring pointer
74aaa36a97SAlex Deucher  *
75aaa36a97SAlex Deucher  * Returns the current hardware read pointer
76aaa36a97SAlex Deucher  */
77536fbf94SKen Wang static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
78aaa36a97SAlex Deucher {
79aaa36a97SAlex Deucher 	struct amdgpu_device *adev = ring->adev;
8045cc6586SLeo Liu 	u32 v;
8145cc6586SLeo Liu 
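	/*
	 * The ring registers are per-instance, so route register accesses
	 * to whichever instance is actually present before reading, and
	 * restore the default GRBM routing afterwards.
	 */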
8245cc6586SLeo Liu 	mutex_lock(&adev->grbm_idx_mutex);
8345cc6586SLeo Liu 	if (adev->vce.harvest_config == 0 ||
8445cc6586SLeo Liu 		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
8545cc6586SLeo Liu 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
8645cc6586SLeo Liu 	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
8745cc6586SLeo Liu 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
88aaa36a97SAlex Deucher 
89aaa36a97SAlex Deucher 	if (ring == &adev->vce.ring[0])
9045cc6586SLeo Liu 		v = RREG32(mmVCE_RB_RPTR);
916f0359ffSAlex Deucher 	else if (ring == &adev->vce.ring[1])
9245cc6586SLeo Liu 		v = RREG32(mmVCE_RB_RPTR2);
936f0359ffSAlex Deucher 	else
9445cc6586SLeo Liu 		v = RREG32(mmVCE_RB_RPTR3);
9545cc6586SLeo Liu 
9645cc6586SLeo Liu 	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
9745cc6586SLeo Liu 	mutex_unlock(&adev->grbm_idx_mutex);
9845cc6586SLeo Liu 
9945cc6586SLeo Liu 	return v;
100aaa36a97SAlex Deucher }
101aaa36a97SAlex Deucher 
102aaa36a97SAlex Deucher /**
103aaa36a97SAlex Deucher  * vce_v3_0_ring_get_wptr - get write pointer
104aaa36a97SAlex Deucher  *
105aaa36a97SAlex Deucher  * @ring: amdgpu_ring pointer
106aaa36a97SAlex Deucher  *
107aaa36a97SAlex Deucher  * Returns the current hardware write pointer
108aaa36a97SAlex Deucher  */
109536fbf94SKen Wang static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
110aaa36a97SAlex Deucher {
111aaa36a97SAlex Deucher 	struct amdgpu_device *adev = ring->adev;
11245cc6586SLeo Liu 	u32 v;
11345cc6586SLeo Liu 
11445cc6586SLeo Liu 	mutex_lock(&adev->grbm_idx_mutex);
11545cc6586SLeo Liu 	if (adev->vce.harvest_config == 0 ||
11645cc6586SLeo Liu 		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
11745cc6586SLeo Liu 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
11845cc6586SLeo Liu 	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
11945cc6586SLeo Liu 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
120aaa36a97SAlex Deucher 
121aaa36a97SAlex Deucher 	if (ring == &adev->vce.ring[0])
12245cc6586SLeo Liu 		v = RREG32(mmVCE_RB_WPTR);
1236f0359ffSAlex Deucher 	else if (ring == &adev->vce.ring[1])
12445cc6586SLeo Liu 		v = RREG32(mmVCE_RB_WPTR2);
1256f0359ffSAlex Deucher 	else
12645cc6586SLeo Liu 		v = RREG32(mmVCE_RB_WPTR3);
12745cc6586SLeo Liu 
12845cc6586SLeo Liu 	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
12945cc6586SLeo Liu 	mutex_unlock(&adev->grbm_idx_mutex);
13045cc6586SLeo Liu 
13145cc6586SLeo Liu 	return v;
132aaa36a97SAlex Deucher }
133aaa36a97SAlex Deucher 
134aaa36a97SAlex Deucher /**
135aaa36a97SAlex Deucher  * vce_v3_0_ring_set_wptr - set write pointer
136aaa36a97SAlex Deucher  *
137aaa36a97SAlex Deucher  * @ring: amdgpu_ring pointer
138aaa36a97SAlex Deucher  *
139aaa36a97SAlex Deucher  * Commits the write pointer to the hardware
140aaa36a97SAlex Deucher  */
141aaa36a97SAlex Deucher static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
142aaa36a97SAlex Deucher {
143aaa36a97SAlex Deucher 	struct amdgpu_device *adev = ring->adev;
144aaa36a97SAlex Deucher 
14545cc6586SLeo Liu 	mutex_lock(&adev->grbm_idx_mutex);
14645cc6586SLeo Liu 	if (adev->vce.harvest_config == 0 ||
14745cc6586SLeo Liu 		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
14845cc6586SLeo Liu 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
14945cc6586SLeo Liu 	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
15045cc6586SLeo Liu 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
15145cc6586SLeo Liu 
152aaa36a97SAlex Deucher 	if (ring == &adev->vce.ring[0])
153536fbf94SKen Wang 		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
1546f0359ffSAlex Deucher 	else if (ring == &adev->vce.ring[1])
155536fbf94SKen Wang 		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
1566f0359ffSAlex Deucher 	else
157536fbf94SKen Wang 		WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
15845cc6586SLeo Liu 
15945cc6586SLeo Liu 	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
16045cc6586SLeo Liu 	mutex_unlock(&adev->grbm_idx_mutex);
161aaa36a97SAlex Deucher }
162aaa36a97SAlex Deucher 
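/*
 * Set or clear the CGTT override in VCE_RB_ARB_CTRL so clock gating is
 * forced off while the gating registers below are being reprogrammed.
 */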
1630689a570SEric Huang static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
1640689a570SEric Huang {
165f3f0ea95STom St Denis 	WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
1660689a570SEric Huang }
1670689a570SEric Huang 
1680689a570SEric Huang static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
1690689a570SEric Huang 					     bool gated)
1700689a570SEric Huang {
171f3f0ea95STom St Denis 	u32 data;
172f16fe6d3STom St Denis 
1730689a570SEric Huang 	/* Set Override to disable Clock Gating */
1740689a570SEric Huang 	vce_v3_0_override_vce_clock_gating(adev, true);
1750689a570SEric Huang 
1766f906814STom St Denis 	/*
1776f906814STom St Denis 	 * This function enables MGCG which is controlled by firmware. With
1786f906814STom St Denis 	 * the clocks in the gated state the core is still accessible but
1796f906814STom St Denis 	 * the firmware will throttle the clocks on the fly as necessary.
1800689a570SEric Huang 	 */
181ecc2cf7cSMaruthi Srinivas Bayyavarapu 	if (!gated) {
182f3f0ea95STom St Denis 		data = RREG32(mmVCE_CLOCK_GATING_B);
1830689a570SEric Huang 		data |= 0x1ff;
1840689a570SEric Huang 		data &= ~0xef0000;
1850689a570SEric Huang 		WREG32(mmVCE_CLOCK_GATING_B, data);
1860689a570SEric Huang 
187f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_CLOCK_GATING);
1880689a570SEric Huang 		data |= 0x3ff000;
1890689a570SEric Huang 		data &= ~0xffc00000;
1900689a570SEric Huang 		WREG32(mmVCE_UENC_CLOCK_GATING, data);
1910689a570SEric Huang 
192f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
1930689a570SEric Huang 		data |= 0x2;
1946f906814STom St Denis 		data &= ~0x00010000;
1950689a570SEric Huang 		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
1960689a570SEric Huang 
197f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
1980689a570SEric Huang 		data |= 0x37f;
1990689a570SEric Huang 		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
2000689a570SEric Huang 
201f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
2020689a570SEric Huang 		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
2030689a570SEric Huang 			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
2040689a570SEric Huang 			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
2050689a570SEric Huang 			0x8;
2060689a570SEric Huang 		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
2070689a570SEric Huang 	} else {
208f3f0ea95STom St Denis 		data = RREG32(mmVCE_CLOCK_GATING_B);
2090689a570SEric Huang 		data &= ~0x80010;
2100689a570SEric Huang 		data |= 0xe70008;
2110689a570SEric Huang 		WREG32(mmVCE_CLOCK_GATING_B, data);
2126f906814STom St Denis 
213f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_CLOCK_GATING);
2140689a570SEric Huang 		data |= 0xffc00000;
2150689a570SEric Huang 		WREG32(mmVCE_UENC_CLOCK_GATING, data);
2166f906814STom St Denis 
217f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
2180689a570SEric Huang 		data |= 0x10000;
2190689a570SEric Huang 		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
2206f906814STom St Denis 
221f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
222e05208deSRex Zhu 		data &= ~0x3ff;
2230689a570SEric Huang 		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
2246f906814STom St Denis 
225f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
2260689a570SEric Huang 		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
2270689a570SEric Huang 			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
2280689a570SEric Huang 			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
2290689a570SEric Huang 			  0x8);
2300689a570SEric Huang 		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
2310689a570SEric Huang 	}
2320689a570SEric Huang 	vce_v3_0_override_vce_clock_gating(adev, false);
2330689a570SEric Huang }
2340689a570SEric Huang 
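/*
 * Poll VCE_STATUS until the firmware reports itself loaded.  Each of the
 * ten attempts polls for up to a second; if the bit never sets, the ECPU
 * is soft-reset and the poll retried.  Returns 0 on success or
 * -ETIMEDOUT if the firmware never comes up.
 */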
235567e6e29Sjimqu static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
236567e6e29Sjimqu {
237567e6e29Sjimqu 	int i, j;
238567e6e29Sjimqu 
239567e6e29Sjimqu 	for (i = 0; i < 10; ++i) {
240567e6e29Sjimqu 		for (j = 0; j < 100; ++j) {
241b7e2e9f7Sjimqu 			uint32_t status = RREG32(mmVCE_STATUS);
242b7e2e9f7Sjimqu 
243567e6e29Sjimqu 			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
244567e6e29Sjimqu 				return 0;
245567e6e29Sjimqu 			mdelay(10);
246567e6e29Sjimqu 		}
247567e6e29Sjimqu 
248567e6e29Sjimqu 		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
249f3f0ea95STom St Denis 		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
250567e6e29Sjimqu 		mdelay(10);
251f3f0ea95STom St Denis 		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
252567e6e29Sjimqu 		mdelay(10);
253567e6e29Sjimqu 	}
254567e6e29Sjimqu 
255567e6e29Sjimqu 	return -ETIMEDOUT;
256567e6e29Sjimqu }
257567e6e29Sjimqu 
258aaa36a97SAlex Deucher /**
259aaa36a97SAlex Deucher  * vce_v3_0_start - start VCE block
260aaa36a97SAlex Deucher  *
261aaa36a97SAlex Deucher  * @adev: amdgpu_device pointer
262aaa36a97SAlex Deucher  *
263aaa36a97SAlex Deucher  * Setup and start the VCE block
264aaa36a97SAlex Deucher  */
265aaa36a97SAlex Deucher static int vce_v3_0_start(struct amdgpu_device *adev)
266aaa36a97SAlex Deucher {
267aaa36a97SAlex Deucher 	struct amdgpu_ring *ring;
268567e6e29Sjimqu 	int idx, r;
269567e6e29Sjimqu 
27045cc6586SLeo Liu 	mutex_lock(&adev->grbm_idx_mutex);
27145cc6586SLeo Liu 	for (idx = 0; idx < 2; ++idx) {
27245cc6586SLeo Liu 		if (adev->vce.harvest_config & (1 << idx))
27345cc6586SLeo Liu 			continue;
27445cc6586SLeo Liu 
27545cc6586SLeo Liu 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
27645cc6586SLeo Liu 
27745cc6586SLeo Liu 		/* Program the ring registers in instance 0's register space whenever
27845cc6586SLeo Liu 		 * instance 0 is present; use instance 1's space only when it is the sole instance. */
27945cc6586SLeo Liu 		if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) {
280567e6e29Sjimqu 			ring = &adev->vce.ring[0];
281536fbf94SKen Wang 			WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
282536fbf94SKen Wang 			WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
283567e6e29Sjimqu 			WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
284567e6e29Sjimqu 			WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
285567e6e29Sjimqu 			WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
286567e6e29Sjimqu 
287567e6e29Sjimqu 			ring = &adev->vce.ring[1];
288536fbf94SKen Wang 			WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
289536fbf94SKen Wang 			WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
290567e6e29Sjimqu 			WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
291567e6e29Sjimqu 			WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
292567e6e29Sjimqu 			WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
293aaa36a97SAlex Deucher 
2946f0359ffSAlex Deucher 			ring = &adev->vce.ring[2];
295536fbf94SKen Wang 			WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
296536fbf94SKen Wang 			WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
2976f0359ffSAlex Deucher 			WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
2986f0359ffSAlex Deucher 			WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
2996f0359ffSAlex Deucher 			WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
30045cc6586SLeo Liu 		}
3016f0359ffSAlex Deucher 
3025bbc553aSLeo Liu 		vce_v3_0_mc_resume(adev, idx);
303f3f0ea95STom St Denis 		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
304567e6e29Sjimqu 
3053c0ff9f1SLeo Liu 		if (adev->asic_type >= CHIP_STONEY)
3063c0ff9f1SLeo Liu 			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
3073c0ff9f1SLeo Liu 		else
308f3f0ea95STom St Denis 			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
309aaa36a97SAlex Deucher 
310f3f0ea95STom St Denis 		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
311aaa36a97SAlex Deucher 		mdelay(100);
312aaa36a97SAlex Deucher 
313567e6e29Sjimqu 		r = vce_v3_0_firmware_loaded(adev);
314aaa36a97SAlex Deucher 
315aaa36a97SAlex Deucher 		/* clear BUSY flag */
316f3f0ea95STom St Denis 		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);
317aaa36a97SAlex Deucher 
318aaa36a97SAlex Deucher 		if (r) {
319aaa36a97SAlex Deucher 			DRM_ERROR("VCE not responding, giving up!!!\n");
3205bbc553aSLeo Liu 			mutex_unlock(&adev->grbm_idx_mutex);
321aaa36a97SAlex Deucher 			return r;
322aaa36a97SAlex Deucher 		}
3235bbc553aSLeo Liu 	}
3245bbc553aSLeo Liu 
32550a1ebc7SRex Zhu 	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
3265bbc553aSLeo Liu 	mutex_unlock(&adev->grbm_idx_mutex);
3275bbc553aSLeo Liu 
328567e6e29Sjimqu 	return 0;
329567e6e29Sjimqu }
3305bbc553aSLeo Liu 
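/*
 * Stop every non-harvested instance: gate the VCPU clock, hold the ECPU
 * in reset and clear VCE_STATUS.
 */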
331567e6e29Sjimqu static int vce_v3_0_stop(struct amdgpu_device *adev)
332567e6e29Sjimqu {
333567e6e29Sjimqu 	int idx;
334567e6e29Sjimqu 
335567e6e29Sjimqu 	mutex_lock(&adev->grbm_idx_mutex);
336567e6e29Sjimqu 	for (idx = 0; idx < 2; ++idx) {
337567e6e29Sjimqu 		if (adev->vce.harvest_config & (1 << idx))
338567e6e29Sjimqu 			continue;
339567e6e29Sjimqu 
34050a1ebc7SRex Zhu 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
341567e6e29Sjimqu 
342567e6e29Sjimqu 		if (adev->asic_type >= CHIP_STONEY)
343567e6e29Sjimqu 			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
344567e6e29Sjimqu 		else
345f3f0ea95STom St Denis 			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);
346f3f0ea95STom St Denis 
347567e6e29Sjimqu 		/* hold the ECPU in reset */
348f3f0ea95STom St Denis 		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
349567e6e29Sjimqu 
35026679899SRex Zhu 		/* clear VCE STATUS */
35126679899SRex Zhu 		WREG32(mmVCE_STATUS, 0);
352567e6e29Sjimqu 	}
353567e6e29Sjimqu 
35450a1ebc7SRex Zhu 	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
355567e6e29Sjimqu 	mutex_unlock(&adev->grbm_idx_mutex);
356aaa36a97SAlex Deucher 
357aaa36a97SAlex Deucher 	return 0;
358aaa36a97SAlex Deucher }
359aaa36a97SAlex Deucher 
3606a585777SAlex Deucher #define ixVCE_HARVEST_FUSE_MACRO__ADDRESS     0xC0014074
3616a585777SAlex Deucher #define VCE_HARVEST_FUSE_MACRO__SHIFT       27
3626a585777SAlex Deucher #define VCE_HARVEST_FUSE_MACRO__MASK        0x18000000
3636a585777SAlex Deucher 
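/*
 * Work out which VCE instances have been fused off.  Single-pipe parts
 * always report VCE1 as harvested; other parts read the harvest fuses
 * through the SMC (the fuse macro on APUs, CC_HARVEST_FUSES on dGPUs).
 */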
3646a585777SAlex Deucher static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
3656a585777SAlex Deucher {
3666a585777SAlex Deucher 	u32 tmp;
3676a585777SAlex Deucher 
368c4642a47SJunwei Zhang 	/* Fiji, Stoney, Polaris10, Polaris11, Polaris12 are single pipe */
369cfaba566SSamuel Li 	if ((adev->asic_type == CHIP_FIJI) ||
3701b4eeea5SSonny Jiang 	    (adev->asic_type == CHIP_STONEY) ||
3712cc0c0b5SFlora Cui 	    (adev->asic_type == CHIP_POLARIS10) ||
372c4642a47SJunwei Zhang 	    (adev->asic_type == CHIP_POLARIS11) ||
373c4642a47SJunwei Zhang 	    (adev->asic_type == CHIP_POLARIS12))
3741dab5f06STom St Denis 		return AMDGPU_VCE_HARVEST_VCE1;
375188a9bcdSAlex Deucher 
376188a9bcdSAlex Deucher 	/* Tonga and CZ are dual or single pipe */
3772f7d10b3SJammy Zhou 	if (adev->flags & AMD_IS_APU)
3786a585777SAlex Deucher 		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
3796a585777SAlex Deucher 		       VCE_HARVEST_FUSE_MACRO__MASK) >>
3806a585777SAlex Deucher 			VCE_HARVEST_FUSE_MACRO__SHIFT;
3816a585777SAlex Deucher 	else
3826a585777SAlex Deucher 		tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
3836a585777SAlex Deucher 		       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
3846a585777SAlex Deucher 			CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;
3856a585777SAlex Deucher 
3866a585777SAlex Deucher 	switch (tmp) {
3876a585777SAlex Deucher 	case 1:
3881dab5f06STom St Denis 		return AMDGPU_VCE_HARVEST_VCE0;
3896a585777SAlex Deucher 	case 2:
3901dab5f06STom St Denis 		return AMDGPU_VCE_HARVEST_VCE1;
3916a585777SAlex Deucher 	case 3:
3921dab5f06STom St Denis 		return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
3936a585777SAlex Deucher 	default:
3941dab5f06STom St Denis 		return 0;
3956a585777SAlex Deucher 	}
3966a585777SAlex Deucher }
3976a585777SAlex Deucher 
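/*
 * Record the harvest configuration; returning -ENOENT when both
 * instances are harvested makes the core skip this IP block entirely.
 */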
3985fc3aeebSyanyang1 static int vce_v3_0_early_init(void *handle)
399aaa36a97SAlex Deucher {
4005fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4015fc3aeebSyanyang1 
4026a585777SAlex Deucher 	adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
4036a585777SAlex Deucher 
4046a585777SAlex Deucher 	if ((adev->vce.harvest_config &
4056a585777SAlex Deucher 	     (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
4066a585777SAlex Deucher 	    (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
4076a585777SAlex Deucher 		return -ENOENT;
4086a585777SAlex Deucher 
4096f0359ffSAlex Deucher 	adev->vce.num_rings = 3;
41075c65480SAlex Deucher 
411aaa36a97SAlex Deucher 	vce_v3_0_set_ring_funcs(adev);
412aaa36a97SAlex Deucher 	vce_v3_0_set_irq_funcs(adev);
413aaa36a97SAlex Deucher 
414aaa36a97SAlex Deucher 	return 0;
415aaa36a97SAlex Deucher }
416aaa36a97SAlex Deucher 
4175fc3aeebSyanyang1 static int vce_v3_0_sw_init(void *handle)
418aaa36a97SAlex Deucher {
4195fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
420aaa36a97SAlex Deucher 	struct amdgpu_ring *ring;
42175c65480SAlex Deucher 	int r, i;
422aaa36a97SAlex Deucher 
423aaa36a97SAlex Deucher 	/* VCE */
424d766e6a3SAlex Deucher 	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 167, &adev->vce.irq);
425aaa36a97SAlex Deucher 	if (r)
426aaa36a97SAlex Deucher 		return r;
427aaa36a97SAlex Deucher 
428e9822622SLeo Liu 	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
429e9822622SLeo Liu 		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
430aaa36a97SAlex Deucher 	if (r)
431aaa36a97SAlex Deucher 		return r;
432aaa36a97SAlex Deucher 
433ef6239e0SAlex Deucher 	/* 52.8.3 required for 3 ring support */
434ef6239e0SAlex Deucher 	if (adev->vce.fw_version < FW_52_8_3)
435ef6239e0SAlex Deucher 		adev->vce.num_rings = 2;
436ef6239e0SAlex Deucher 
437aaa36a97SAlex Deucher 	r = amdgpu_vce_resume(adev);
438aaa36a97SAlex Deucher 	if (r)
439aaa36a97SAlex Deucher 		return r;
440aaa36a97SAlex Deucher 
44175c65480SAlex Deucher 	for (i = 0; i < adev->vce.num_rings; i++) {
44275c65480SAlex Deucher 		ring = &adev->vce.ring[i];
44375c65480SAlex Deucher 		sprintf(ring->name, "vce%d", i);
44479887142SChristian König 		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
445aaa36a97SAlex Deucher 		if (r)
446aaa36a97SAlex Deucher 			return r;
44775c65480SAlex Deucher 	}
448aaa36a97SAlex Deucher 
449aaa36a97SAlex Deucher 	return r;
450aaa36a97SAlex Deucher }
451aaa36a97SAlex Deucher 
4525fc3aeebSyanyang1 static int vce_v3_0_sw_fini(void *handle)
453aaa36a97SAlex Deucher {
454aaa36a97SAlex Deucher 	int r;
4555fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
456aaa36a97SAlex Deucher 
457aaa36a97SAlex Deucher 	r = amdgpu_vce_suspend(adev);
458aaa36a97SAlex Deucher 	if (r)
459aaa36a97SAlex Deucher 		return r;
460aaa36a97SAlex Deucher 
46150237287SRex Zhu 	return amdgpu_vce_sw_fini(adev);
462aaa36a97SAlex Deucher }
463aaa36a97SAlex Deucher 
4645fc3aeebSyanyang1 static int vce_v3_0_hw_init(void *handle)
465aaa36a97SAlex Deucher {
466691ca86aSTom St Denis 	int r, i;
4675fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
468aaa36a97SAlex Deucher 
4696fc11b0eSRex Zhu 	vce_v3_0_override_vce_clock_gating(adev, true);
4706fc11b0eSRex Zhu 	if (!(adev->flags & AMD_IS_APU))
4716fc11b0eSRex Zhu 		amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
472aaa36a97SAlex Deucher 
47375c65480SAlex Deucher 	for (i = 0; i < adev->vce.num_rings; i++)
47475c65480SAlex Deucher 		adev->vce.ring[i].ready = false;
475aaa36a97SAlex Deucher 
47675c65480SAlex Deucher 	for (i = 0; i < adev->vce.num_rings; i++) {
477691ca86aSTom St Denis 		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
478691ca86aSTom St Denis 		if (r)
479aaa36a97SAlex Deucher 			return r;
480691ca86aSTom St Denis 		else
481691ca86aSTom St Denis 			adev->vce.ring[i].ready = true;
482aaa36a97SAlex Deucher 	}
483aaa36a97SAlex Deucher 
484aaa36a97SAlex Deucher 	DRM_INFO("VCE initialized successfully.\n");
485aaa36a97SAlex Deucher 
486aaa36a97SAlex Deucher 	return 0;
487aaa36a97SAlex Deucher }
488aaa36a97SAlex Deucher 
4895fc3aeebSyanyang1 static int vce_v3_0_hw_fini(void *handle)
490aaa36a97SAlex Deucher {
491567e6e29Sjimqu 	int r;
492567e6e29Sjimqu 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
493567e6e29Sjimqu 
494567e6e29Sjimqu 	r = vce_v3_0_wait_for_idle(handle);
495567e6e29Sjimqu 	if (r)
496567e6e29Sjimqu 		return r;
497567e6e29Sjimqu 
49826679899SRex Zhu 	vce_v3_0_stop(adev);
49926679899SRex Zhu 	return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
500aaa36a97SAlex Deucher }
501aaa36a97SAlex Deucher 
5025fc3aeebSyanyang1 static int vce_v3_0_suspend(void *handle)
503aaa36a97SAlex Deucher {
504aaa36a97SAlex Deucher 	int r;
5055fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
506aaa36a97SAlex Deucher 
507aaa36a97SAlex Deucher 	r = vce_v3_0_hw_fini(adev);
508aaa36a97SAlex Deucher 	if (r)
509aaa36a97SAlex Deucher 		return r;
510aaa36a97SAlex Deucher 
51150237287SRex Zhu 	return amdgpu_vce_suspend(adev);
512aaa36a97SAlex Deucher }
513aaa36a97SAlex Deucher 
5145fc3aeebSyanyang1 static int vce_v3_0_resume(void *handle)
515aaa36a97SAlex Deucher {
516aaa36a97SAlex Deucher 	int r;
5175fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
518aaa36a97SAlex Deucher 
519aaa36a97SAlex Deucher 	r = amdgpu_vce_resume(adev);
520aaa36a97SAlex Deucher 	if (r)
521aaa36a97SAlex Deucher 		return r;
522aaa36a97SAlex Deucher 
52350237287SRex Zhu 	return vce_v3_0_hw_init(adev);
524aaa36a97SAlex Deucher }
525aaa36a97SAlex Deucher 
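/*
 * Program the LMI and VCPU cache registers of the currently selected
 * instance so the firmware, stack and data regions of the shared VCE BO
 * are mapped in.  Instance 1 uses its own stack/data area placed after
 * instance 0's within the same BO.
 */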
5265bbc553aSLeo Liu static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
527aaa36a97SAlex Deucher {
528aaa36a97SAlex Deucher 	uint32_t offset, size;
529aaa36a97SAlex Deucher 
530aaa36a97SAlex Deucher 	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
531aaa36a97SAlex Deucher 	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
532aaa36a97SAlex Deucher 	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
5336f906814STom St Denis 	WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);
534aaa36a97SAlex Deucher 
535aaa36a97SAlex Deucher 	WREG32(mmVCE_LMI_CTRL, 0x00398000);
536aaa36a97SAlex Deucher 	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
537aaa36a97SAlex Deucher 	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
538aaa36a97SAlex Deucher 	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
539aaa36a97SAlex Deucher 	WREG32(mmVCE_LMI_VM_CTRL, 0);
540d50e5c24SAlan Harrison 	WREG32_OR(mmVCE_VCPU_CNTL, 0x00100000);
541d50e5c24SAlan Harrison 
5423c0ff9f1SLeo Liu 	if (adev->asic_type >= CHIP_STONEY) {
5433c0ff9f1SLeo Liu 		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
5443c0ff9f1SLeo Liu 		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
5453c0ff9f1SLeo Liu 		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
5463c0ff9f1SLeo Liu 	} else
547aaa36a97SAlex Deucher 		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
548aaa36a97SAlex Deucher 	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
549e9822622SLeo Liu 	size = VCE_V3_0_FW_SIZE;
550aaa36a97SAlex Deucher 	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
551aaa36a97SAlex Deucher 	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);
552aaa36a97SAlex Deucher 
5535bbc553aSLeo Liu 	if (idx == 0) {
554aaa36a97SAlex Deucher 		offset += size;
555e9822622SLeo Liu 		size = VCE_V3_0_STACK_SIZE;
556aaa36a97SAlex Deucher 		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
557aaa36a97SAlex Deucher 		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
558aaa36a97SAlex Deucher 		offset += size;
559e9822622SLeo Liu 		size = VCE_V3_0_DATA_SIZE;
560aaa36a97SAlex Deucher 		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
561aaa36a97SAlex Deucher 		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
5625bbc553aSLeo Liu 	} else {
5635bbc553aSLeo Liu 		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
5645bbc553aSLeo Liu 		size = VCE_V3_0_STACK_SIZE;
5655bbc553aSLeo Liu 		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
5665bbc553aSLeo Liu 		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
5675bbc553aSLeo Liu 		offset += size;
5685bbc553aSLeo Liu 		size = VCE_V3_0_DATA_SIZE;
5695bbc553aSLeo Liu 		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
5705bbc553aSLeo Liu 		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
5715bbc553aSLeo Liu 	}
572aaa36a97SAlex Deucher 
573aaa36a97SAlex Deucher 	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
574f3f0ea95STom St Denis 	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
575aaa36a97SAlex Deucher }
576aaa36a97SAlex Deucher 
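/*
 * VCE busy status is reported through SRBM_STATUS2; only instances that
 * are not harvested contribute to the busy mask.
 */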
5775fc3aeebSyanyang1 static bool vce_v3_0_is_idle(void *handle)
578aaa36a97SAlex Deucher {
5795fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
580be4f38e2SAlex Deucher 	u32 mask = 0;
5815fc3aeebSyanyang1 
58274af1276STom St Denis 	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
58374af1276STom St Denis 	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;
584be4f38e2SAlex Deucher 
585be4f38e2SAlex Deucher 	return !(RREG32(mmSRBM_STATUS2) & mask);
586aaa36a97SAlex Deucher }
587aaa36a97SAlex Deucher 
5885fc3aeebSyanyang1 static int vce_v3_0_wait_for_idle(void *handle)
589aaa36a97SAlex Deucher {
590aaa36a97SAlex Deucher 	unsigned i;
5915fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
592be4f38e2SAlex Deucher 
59392988e60STom St Denis 	for (i = 0; i < adev->usec_timeout; i++)
59492988e60STom St Denis 		if (vce_v3_0_is_idle(handle))
595aaa36a97SAlex Deucher 			return 0;
59692988e60STom St Denis 
597aaa36a97SAlex Deucher 	return -ETIMEDOUT;
598aaa36a97SAlex Deucher }
599aaa36a97SAlex Deucher 
600ac8e3f30SRex Zhu #define  VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK  0x00000008L   /* AUTO_BUSY */
601ac8e3f30SRex Zhu #define  VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK   0x00000010L   /* RB0_BUSY */
602ac8e3f30SRex Zhu #define  VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK   0x00000020L   /* RB1_BUSY */
603ac8e3f30SRex Zhu #define  AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
604ac8e3f30SRex Zhu 				      VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
605115933a5SChunming Zhou 
606da146d3bSAlex Deucher static bool vce_v3_0_check_soft_reset(void *handle)
607115933a5SChunming Zhou {
608115933a5SChunming Zhou 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
609115933a5SChunming Zhou 	u32 srbm_soft_reset = 0;
610115933a5SChunming Zhou 
611115933a5SChunming Zhou 	/* According to the VCE team, we should use VCE_STATUS instead of
612115933a5SChunming Zhou 	 * the SRBM_STATUS.VCE_BUSY bit for busy status checking.
613115933a5SChunming Zhou 	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
614115933a5SChunming Zhou 	 * instance's registers are accessed
615115933a5SChunming Zhou 	 * (0 for 1st instance, 10 for 2nd instance).
616115933a5SChunming Zhou 	 *
617115933a5SChunming Zhou 	 * VCE_STATUS
618115933a5SChunming Zhou 	 * |UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
619115933a5SChunming Zhou 	 * |----+----+-----------+----+----+----+----------+---------+----|
620115933a5SChunming Zhou 	 * |bit8|bit7|    bit6   |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
621115933a5SChunming Zhou 	 *
622115933a5SChunming Zhou 	 * The VCE team suggests using bits 3--6 for the busy status check.
623115933a5SChunming Zhou 	 */
6249aeb774cSTom St Denis 	mutex_lock(&adev->grbm_idx_mutex);
62550a1ebc7SRex Zhu 	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
626115933a5SChunming Zhou 	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
627115933a5SChunming Zhou 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
628115933a5SChunming Zhou 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
629115933a5SChunming Zhou 	}
63050a1ebc7SRex Zhu 	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
631115933a5SChunming Zhou 	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
632115933a5SChunming Zhou 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
633115933a5SChunming Zhou 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
634115933a5SChunming Zhou 	}
63550a1ebc7SRex Zhu 	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
636da146d3bSAlex Deucher 	mutex_unlock(&adev->grbm_idx_mutex);
637115933a5SChunming Zhou 
638115933a5SChunming Zhou 	if (srbm_soft_reset) {
639115933a5SChunming Zhou 		adev->vce.srbm_soft_reset = srbm_soft_reset;
640da146d3bSAlex Deucher 		return true;
641115933a5SChunming Zhou 	} else {
642115933a5SChunming Zhou 		adev->vce.srbm_soft_reset = 0;
643da146d3bSAlex Deucher 		return false;
644115933a5SChunming Zhou 	}
645115933a5SChunming Zhou }
646115933a5SChunming Zhou 
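/*
 * Apply the SRBM reset mask computed by vce_v3_0_check_soft_reset():
 * assert the VCE soft reset bits, wait briefly, then deassert them and
 * give the block a moment to settle.
 */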
6475fc3aeebSyanyang1 static int vce_v3_0_soft_reset(void *handle)
648aaa36a97SAlex Deucher {
6495fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
650115933a5SChunming Zhou 	u32 srbm_soft_reset;
6515fc3aeebSyanyang1 
652da146d3bSAlex Deucher 	if (!adev->vce.srbm_soft_reset)
653115933a5SChunming Zhou 		return 0;
654115933a5SChunming Zhou 	srbm_soft_reset = adev->vce.srbm_soft_reset;
655be4f38e2SAlex Deucher 
656115933a5SChunming Zhou 	if (srbm_soft_reset) {
657115933a5SChunming Zhou 		u32 tmp;
658115933a5SChunming Zhou 
659115933a5SChunming Zhou 		tmp = RREG32(mmSRBM_SOFT_RESET);
660115933a5SChunming Zhou 		tmp |= srbm_soft_reset;
661115933a5SChunming Zhou 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
662115933a5SChunming Zhou 		WREG32(mmSRBM_SOFT_RESET, tmp);
663115933a5SChunming Zhou 		tmp = RREG32(mmSRBM_SOFT_RESET);
664115933a5SChunming Zhou 
665115933a5SChunming Zhou 		udelay(50);
666115933a5SChunming Zhou 
667115933a5SChunming Zhou 		tmp &= ~srbm_soft_reset;
668115933a5SChunming Zhou 		WREG32(mmSRBM_SOFT_RESET, tmp);
669115933a5SChunming Zhou 		tmp = RREG32(mmSRBM_SOFT_RESET);
670115933a5SChunming Zhou 
671115933a5SChunming Zhou 		/* Wait a little for things to settle down */
672115933a5SChunming Zhou 		udelay(50);
673115933a5SChunming Zhou 	}
674115933a5SChunming Zhou 
675115933a5SChunming Zhou 	return 0;
676115933a5SChunming Zhou }
677115933a5SChunming Zhou 
678115933a5SChunming Zhou static int vce_v3_0_pre_soft_reset(void *handle)
679115933a5SChunming Zhou {
680115933a5SChunming Zhou 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
681115933a5SChunming Zhou 
682da146d3bSAlex Deucher 	if (!adev->vce.srbm_soft_reset)
683115933a5SChunming Zhou 		return 0;
684115933a5SChunming Zhou 
685aaa36a97SAlex Deucher 	mdelay(5);
686aaa36a97SAlex Deucher 
687115933a5SChunming Zhou 	return vce_v3_0_suspend(adev);
688115933a5SChunming Zhou }
689115933a5SChunming Zhou 
690115933a5SChunming Zhou 
691115933a5SChunming Zhou static int vce_v3_0_post_soft_reset(void *handle)
692115933a5SChunming Zhou {
693115933a5SChunming Zhou 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
694115933a5SChunming Zhou 
695da146d3bSAlex Deucher 	if (!adev->vce.srbm_soft_reset)
696115933a5SChunming Zhou 		return 0;
697115933a5SChunming Zhou 
698115933a5SChunming Zhou 	mdelay(5);
699115933a5SChunming Zhou 
700115933a5SChunming Zhou 	return vce_v3_0_resume(adev);
701aaa36a97SAlex Deucher }
702aaa36a97SAlex Deucher 
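/* Enable or disable the VCE system (trap) interrupt at the block level. */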
703aaa36a97SAlex Deucher static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
704aaa36a97SAlex Deucher 					struct amdgpu_irq_src *source,
705aaa36a97SAlex Deucher 					unsigned type,
706aaa36a97SAlex Deucher 					enum amdgpu_interrupt_state state)
707aaa36a97SAlex Deucher {
708aaa36a97SAlex Deucher 	uint32_t val = 0;
709aaa36a97SAlex Deucher 
710aaa36a97SAlex Deucher 	if (state == AMDGPU_IRQ_STATE_ENABLE)
711aaa36a97SAlex Deucher 		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
712aaa36a97SAlex Deucher 
713aaa36a97SAlex Deucher 	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
714aaa36a97SAlex Deucher 	return 0;
715aaa36a97SAlex Deucher }
716aaa36a97SAlex Deucher 
717aaa36a97SAlex Deucher static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
718aaa36a97SAlex Deucher 				      struct amdgpu_irq_src *source,
719aaa36a97SAlex Deucher 				      struct amdgpu_iv_entry *entry)
720aaa36a97SAlex Deucher {
721aaa36a97SAlex Deucher 	DRM_DEBUG("IH: VCE\n");
722d6c29c30SLeo Liu 
723f3f0ea95STom St Denis 	WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);
724d6c29c30SLeo Liu 
7257ccf5aa8SAlex Deucher 	switch (entry->src_data[0]) {
726aaa36a97SAlex Deucher 	case 0:
727aaa36a97SAlex Deucher 	case 1:
7286f0359ffSAlex Deucher 	case 2:
7297ccf5aa8SAlex Deucher 		amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
730aaa36a97SAlex Deucher 		break;
731aaa36a97SAlex Deucher 	default:
732aaa36a97SAlex Deucher 		DRM_ERROR("Unhandled interrupt: %d %d\n",
7337ccf5aa8SAlex Deucher 			  entry->src_id, entry->src_data[0]);
734aaa36a97SAlex Deucher 		break;
735aaa36a97SAlex Deucher 	}
736aaa36a97SAlex Deucher 
737aaa36a97SAlex Deucher 	return 0;
738aaa36a97SAlex Deucher }
739aaa36a97SAlex Deucher 
7405fc3aeebSyanyang1 static int vce_v3_0_set_clockgating_state(void *handle,
7415fc3aeebSyanyang1 					  enum amd_clockgating_state state)
742aaa36a97SAlex Deucher {
7430689a570SEric Huang 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7440689a570SEric Huang 	bool enable = (state == AMD_CG_STATE_GATE);
7450689a570SEric Huang 	int i;
7460689a570SEric Huang 
747e3b04bc7SAlex Deucher 	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
7480689a570SEric Huang 		return 0;
7490689a570SEric Huang 
7500689a570SEric Huang 	mutex_lock(&adev->grbm_idx_mutex);
7510689a570SEric Huang 	for (i = 0; i < 2; i++) {
7520689a570SEric Huang 		/* Program VCE Instance 0 or 1 if not harvested */
7530689a570SEric Huang 		if (adev->vce.harvest_config & (1 << i))
7540689a570SEric Huang 			continue;
7550689a570SEric Huang 
75650a1ebc7SRex Zhu 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));
7570689a570SEric Huang 
75826679899SRex Zhu 		if (!enable) {
7590689a570SEric Huang 			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
7600689a570SEric Huang 			uint32_t data = RREG32(mmVCE_CLOCK_GATING_A);
7610689a570SEric Huang 			data &= ~(0xf | 0xff0);
7620689a570SEric Huang 			data |= ((0x0 << 0) | (0x04 << 4));
7630689a570SEric Huang 			WREG32(mmVCE_CLOCK_GATING_A, data);
7640689a570SEric Huang 
7650689a570SEric Huang 			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
7660689a570SEric Huang 			data = RREG32(mmVCE_UENC_CLOCK_GATING);
7670689a570SEric Huang 			data &= ~(0xf | 0xff0);
7680689a570SEric Huang 			data |= ((0x0 << 0) | (0x04 << 4));
7690689a570SEric Huang 			WREG32(mmVCE_UENC_CLOCK_GATING, data);
7700689a570SEric Huang 		}
7710689a570SEric Huang 
7720689a570SEric Huang 		vce_v3_0_set_vce_sw_clock_gating(adev, enable);
7730689a570SEric Huang 	}
7740689a570SEric Huang 
77550a1ebc7SRex Zhu 	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
7760689a570SEric Huang 	mutex_unlock(&adev->grbm_idx_mutex);
7770689a570SEric Huang 
778aaa36a97SAlex Deucher 	return 0;
779aaa36a97SAlex Deucher }
780aaa36a97SAlex Deucher 
7815fc3aeebSyanyang1 static int vce_v3_0_set_powergating_state(void *handle,
7825fc3aeebSyanyang1 					  enum amd_powergating_state state)
783aaa36a97SAlex Deucher {
784aaa36a97SAlex Deucher 	/* This doesn't actually powergate the VCE block.
785aaa36a97SAlex Deucher 	 * That's done in the dpm code via the SMC.  This
786aaa36a97SAlex Deucher 	 * just re-inits the block as necessary.  The actual
787aaa36a97SAlex Deucher 	 * gating still happens in the dpm code.  We should
788aaa36a97SAlex Deucher 	 * revisit this when there is a cleaner line between
789aaa36a97SAlex Deucher 	 * the SMC and the HW blocks.
790aaa36a97SAlex Deucher 	 */
7915fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
792c79b5561SHuang Rui 	int ret = 0;
7935fc3aeebSyanyang1 
794c79b5561SHuang Rui 	if (state == AMD_PG_STATE_GATE) {
7956fc11b0eSRex Zhu 		ret = vce_v3_0_stop(adev);
7966fc11b0eSRex Zhu 		if (ret)
7976fc11b0eSRex Zhu 			goto out;
798c79b5561SHuang Rui 	} else {
799c79b5561SHuang Rui 		ret = vce_v3_0_start(adev);
800c79b5561SHuang Rui 		if (ret)
801c79b5561SHuang Rui 			goto out;
802c79b5561SHuang Rui 	}
803c79b5561SHuang Rui 
804c79b5561SHuang Rui out:
805c79b5561SHuang Rui 	return ret;
806c79b5561SHuang Rui }
807c79b5561SHuang Rui 
808c79b5561SHuang Rui static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags)
809c79b5561SHuang Rui {
810c79b5561SHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
811c79b5561SHuang Rui 	int data;
812c79b5561SHuang Rui 
813c79b5561SHuang Rui 	mutex_lock(&adev->pm.mutex);
814c79b5561SHuang Rui 
8151c622002SRex Zhu 	if (adev->flags & AMD_IS_APU)
8161c622002SRex Zhu 		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
8171c622002SRex Zhu 	else
8181c622002SRex Zhu 		data = RREG32_SMC(ixCURRENT_PG_STATUS);
8191c622002SRex Zhu 
8201c622002SRex Zhu 	if (data & CURRENT_PG_STATUS__VCE_PG_STATUS_MASK) {
821c79b5561SHuang Rui 		DRM_INFO("Cannot get clockgating state when VCE is powergated.\n");
822c79b5561SHuang Rui 		goto out;
823c79b5561SHuang Rui 	}
824c79b5561SHuang Rui 
825c79b5561SHuang Rui 	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
826c79b5561SHuang Rui 
827c79b5561SHuang Rui 	/* AMD_CG_SUPPORT_VCE_MGCG */
828c79b5561SHuang Rui 	data = RREG32(mmVCE_CLOCK_GATING_A);
829c79b5561SHuang Rui 	if (data & (0x04 << 4))
830c79b5561SHuang Rui 		*flags |= AMD_CG_SUPPORT_VCE_MGCG;
831c79b5561SHuang Rui 
832c79b5561SHuang Rui out:
833c79b5561SHuang Rui 	mutex_unlock(&adev->pm.mutex);
834aaa36a97SAlex Deucher }
835aaa36a97SAlex Deucher 
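/*
 * Emit an indirect buffer on a VM-mode ring: VCE_CMD_IB_VM followed by
 * the VM id, the 64-bit IB address and the IB length in dwords.
 */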
836ea4a8c1dSMaruthi Srinivas Bayyavarapu static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
837ea4a8c1dSMaruthi Srinivas Bayyavarapu 		struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
838ea4a8c1dSMaruthi Srinivas Bayyavarapu {
839ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
840ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, vm_id);
841ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
842ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
843ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, ib->length_dw);
844ea4a8c1dSMaruthi Srinivas Bayyavarapu }
845ea4a8c1dSMaruthi Srinivas Bayyavarapu 
846ea4a8c1dSMaruthi Srinivas Bayyavarapu static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
847ea4a8c1dSMaruthi Srinivas Bayyavarapu 			 unsigned int vm_id, uint64_t pd_addr)
848ea4a8c1dSMaruthi Srinivas Bayyavarapu {
849ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
850ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, vm_id);
851ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, pd_addr >> 12);
852ea4a8c1dSMaruthi Srinivas Bayyavarapu 
853ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
854ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, vm_id);
855ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, VCE_CMD_END);
856ea4a8c1dSMaruthi Srinivas Bayyavarapu }
857ea4a8c1dSMaruthi Srinivas Bayyavarapu 
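/*
 * Make the engine wait (VCE_CMD_WAIT_GE) until the last emitted fence
 * value has been written to the fence address before starting the next
 * job.
 */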
858ea4a8c1dSMaruthi Srinivas Bayyavarapu static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
859ea4a8c1dSMaruthi Srinivas Bayyavarapu {
860ea4a8c1dSMaruthi Srinivas Bayyavarapu 	uint32_t seq = ring->fence_drv.sync_seq;
861ea4a8c1dSMaruthi Srinivas Bayyavarapu 	uint64_t addr = ring->fence_drv.gpu_addr;
862ea4a8c1dSMaruthi Srinivas Bayyavarapu 
863ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, VCE_CMD_WAIT_GE);
864ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, lower_32_bits(addr));
865ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, upper_32_bits(addr));
866ea4a8c1dSMaruthi Srinivas Bayyavarapu 	amdgpu_ring_write(ring, seq);
867ea4a8c1dSMaruthi Srinivas Bayyavarapu }
868ea4a8c1dSMaruthi Srinivas Bayyavarapu 
869a1255107SAlex Deucher static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
87088a907d6STom St Denis 	.name = "vce_v3_0",
871aaa36a97SAlex Deucher 	.early_init = vce_v3_0_early_init,
872aaa36a97SAlex Deucher 	.late_init = NULL,
873aaa36a97SAlex Deucher 	.sw_init = vce_v3_0_sw_init,
874aaa36a97SAlex Deucher 	.sw_fini = vce_v3_0_sw_fini,
875aaa36a97SAlex Deucher 	.hw_init = vce_v3_0_hw_init,
876aaa36a97SAlex Deucher 	.hw_fini = vce_v3_0_hw_fini,
877aaa36a97SAlex Deucher 	.suspend = vce_v3_0_suspend,
878aaa36a97SAlex Deucher 	.resume = vce_v3_0_resume,
879aaa36a97SAlex Deucher 	.is_idle = vce_v3_0_is_idle,
880aaa36a97SAlex Deucher 	.wait_for_idle = vce_v3_0_wait_for_idle,
881115933a5SChunming Zhou 	.check_soft_reset = vce_v3_0_check_soft_reset,
882115933a5SChunming Zhou 	.pre_soft_reset = vce_v3_0_pre_soft_reset,
883aaa36a97SAlex Deucher 	.soft_reset = vce_v3_0_soft_reset,
884115933a5SChunming Zhou 	.post_soft_reset = vce_v3_0_post_soft_reset,
885aaa36a97SAlex Deucher 	.set_clockgating_state = vce_v3_0_set_clockgating_state,
886aaa36a97SAlex Deucher 	.set_powergating_state = vce_v3_0_set_powergating_state,
887c79b5561SHuang Rui 	.get_clockgating_state = vce_v3_0_get_clockgating_state,
888aaa36a97SAlex Deucher };
889aaa36a97SAlex Deucher 
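/*
 * Two ring function tables are provided: the "phys" variant is used on
 * pre-Stoney parts, where the kernel parses and patches the command
 * stream for physical addressing, while the "vm" variant is used from
 * Stoney onwards and lets VCE work directly with GPU VM addresses.
 */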
890ea4a8c1dSMaruthi Srinivas Bayyavarapu static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
89121cd942eSChristian König 	.type = AMDGPU_RING_TYPE_VCE,
89279887142SChristian König 	.align_mask = 0xf,
89379887142SChristian König 	.nop = VCE_CMD_NO_OP,
894536fbf94SKen Wang 	.support_64bit_ptrs = false,
895aaa36a97SAlex Deucher 	.get_rptr = vce_v3_0_ring_get_rptr,
896aaa36a97SAlex Deucher 	.get_wptr = vce_v3_0_ring_get_wptr,
897aaa36a97SAlex Deucher 	.set_wptr = vce_v3_0_ring_set_wptr,
898aaa36a97SAlex Deucher 	.parse_cs = amdgpu_vce_ring_parse_cs,
899e12f3d7aSChristian König 	.emit_frame_size =
900e12f3d7aSChristian König 		4 + /* vce_v3_0_emit_pipeline_sync */
901e12f3d7aSChristian König 		6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
902e12f3d7aSChristian König 	.emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
903aaa36a97SAlex Deucher 	.emit_ib = amdgpu_vce_ring_emit_ib,
904aaa36a97SAlex Deucher 	.emit_fence = amdgpu_vce_ring_emit_fence,
905aaa36a97SAlex Deucher 	.test_ring = amdgpu_vce_ring_test_ring,
906aaa36a97SAlex Deucher 	.test_ib = amdgpu_vce_ring_test_ib,
907edff0e28SJammy Zhou 	.insert_nop = amdgpu_ring_insert_nop,
9089e5d5309SChristian König 	.pad_ib = amdgpu_ring_generic_pad_ib,
909ebff485eSChristian König 	.begin_use = amdgpu_vce_ring_begin_use,
910ebff485eSChristian König 	.end_use = amdgpu_vce_ring_end_use,
911aaa36a97SAlex Deucher };
912aaa36a97SAlex Deucher 
913ea4a8c1dSMaruthi Srinivas Bayyavarapu static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
91421cd942eSChristian König 	.type = AMDGPU_RING_TYPE_VCE,
91579887142SChristian König 	.align_mask = 0xf,
91679887142SChristian König 	.nop = VCE_CMD_NO_OP,
917536fbf94SKen Wang 	.support_64bit_ptrs = false,
918ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.get_rptr = vce_v3_0_ring_get_rptr,
919ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.get_wptr = vce_v3_0_ring_get_wptr,
920ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.set_wptr = vce_v3_0_ring_set_wptr,
92198614701SChristian König 	.parse_cs = amdgpu_vce_ring_parse_cs_vm,
922e12f3d7aSChristian König 	.emit_frame_size =
923e12f3d7aSChristian König 		6 + /* vce_v3_0_emit_vm_flush */
924e12f3d7aSChristian König 		4 + /* vce_v3_0_emit_pipeline_sync */
925e12f3d7aSChristian König 		6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
926e12f3d7aSChristian König 	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
927ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.emit_ib = vce_v3_0_ring_emit_ib,
928ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.emit_vm_flush = vce_v3_0_emit_vm_flush,
929ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
930ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.emit_fence = amdgpu_vce_ring_emit_fence,
931ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.test_ring = amdgpu_vce_ring_test_ring,
932ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.test_ib = amdgpu_vce_ring_test_ib,
933ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.insert_nop = amdgpu_ring_insert_nop,
934ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.pad_ib = amdgpu_ring_generic_pad_ib,
935ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.begin_use = amdgpu_vce_ring_begin_use,
936ea4a8c1dSMaruthi Srinivas Bayyavarapu 	.end_use = amdgpu_vce_ring_end_use,
937ea4a8c1dSMaruthi Srinivas Bayyavarapu };
938ea4a8c1dSMaruthi Srinivas Bayyavarapu 
939aaa36a97SAlex Deucher static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
940aaa36a97SAlex Deucher {
94175c65480SAlex Deucher 	int i;
94275c65480SAlex Deucher 
943ea4a8c1dSMaruthi Srinivas Bayyavarapu 	if (adev->asic_type >= CHIP_STONEY) {
94475c65480SAlex Deucher 		for (i = 0; i < adev->vce.num_rings; i++)
945ea4a8c1dSMaruthi Srinivas Bayyavarapu 			adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
946ea4a8c1dSMaruthi Srinivas Bayyavarapu 		DRM_INFO("VCE enabled in VM mode\n");
947ea4a8c1dSMaruthi Srinivas Bayyavarapu 	} else {
948ea4a8c1dSMaruthi Srinivas Bayyavarapu 		for (i = 0; i < adev->vce.num_rings; i++)
949ea4a8c1dSMaruthi Srinivas Bayyavarapu 			adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
950ea4a8c1dSMaruthi Srinivas Bayyavarapu 		DRM_INFO("VCE enabled in physical mode\n");
951ea4a8c1dSMaruthi Srinivas Bayyavarapu 	}
952aaa36a97SAlex Deucher }
953aaa36a97SAlex Deucher 
954aaa36a97SAlex Deucher static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
955aaa36a97SAlex Deucher 	.set = vce_v3_0_set_interrupt_state,
956aaa36a97SAlex Deucher 	.process = vce_v3_0_process_interrupt,
957aaa36a97SAlex Deucher };
958aaa36a97SAlex Deucher 
959aaa36a97SAlex Deucher static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
960aaa36a97SAlex Deucher {
961aaa36a97SAlex Deucher 	adev->vce.irq.num_types = 1;
962aaa36a97SAlex Deucher 	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
963aaa36a97SAlex Deucher };
964a1255107SAlex Deucher 
965a1255107SAlex Deucher const struct amdgpu_ip_block_version vce_v3_0_ip_block =
966a1255107SAlex Deucher {
967a1255107SAlex Deucher 	.type = AMD_IP_BLOCK_TYPE_VCE,
968a1255107SAlex Deucher 	.major = 3,
969a1255107SAlex Deucher 	.minor = 0,
970a1255107SAlex Deucher 	.rev = 0,
971a1255107SAlex Deucher 	.funcs = &vce_v3_0_ip_funcs,
972a1255107SAlex Deucher };
973a1255107SAlex Deucher 
974a1255107SAlex Deucher const struct amdgpu_ip_block_version vce_v3_1_ip_block =
975a1255107SAlex Deucher {
976a1255107SAlex Deucher 	.type = AMD_IP_BLOCK_TYPE_VCE,
977a1255107SAlex Deucher 	.major = 3,
978a1255107SAlex Deucher 	.minor = 1,
979a1255107SAlex Deucher 	.rev = 0,
980a1255107SAlex Deucher 	.funcs = &vce_v3_0_ip_funcs,
981a1255107SAlex Deucher };
982a1255107SAlex Deucher 
983a1255107SAlex Deucher const struct amdgpu_ip_block_version vce_v3_4_ip_block =
984a1255107SAlex Deucher {
985a1255107SAlex Deucher 	.type = AMD_IP_BLOCK_TYPE_VCE,
986a1255107SAlex Deucher 	.major = 3,
987a1255107SAlex Deucher 	.minor = 4,
988a1255107SAlex Deucher 	.rev = 0,
989a1255107SAlex Deucher 	.funcs = &vce_v3_0_ip_funcs,
990a1255107SAlex Deucher };