xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c (revision f3f0ea95)
1aaa36a97SAlex Deucher /*
2aaa36a97SAlex Deucher  * Copyright 2014 Advanced Micro Devices, Inc.
3aaa36a97SAlex Deucher  * All Rights Reserved.
4aaa36a97SAlex Deucher  *
5aaa36a97SAlex Deucher  * Permission is hereby granted, free of charge, to any person obtaining a
6aaa36a97SAlex Deucher  * copy of this software and associated documentation files (the
7aaa36a97SAlex Deucher  * "Software"), to deal in the Software without restriction, including
8aaa36a97SAlex Deucher  * without limitation the rights to use, copy, modify, merge, publish,
9aaa36a97SAlex Deucher  * distribute, sub license, and/or sell copies of the Software, and to
10aaa36a97SAlex Deucher  * permit persons to whom the Software is furnished to do so, subject to
11aaa36a97SAlex Deucher  * the following conditions:
12aaa36a97SAlex Deucher  *
13aaa36a97SAlex Deucher  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14aaa36a97SAlex Deucher  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15aaa36a97SAlex Deucher  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16aaa36a97SAlex Deucher  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17aaa36a97SAlex Deucher  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18aaa36a97SAlex Deucher  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19aaa36a97SAlex Deucher  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20aaa36a97SAlex Deucher  *
21aaa36a97SAlex Deucher  * The above copyright notice and this permission notice (including the
22aaa36a97SAlex Deucher  * next paragraph) shall be included in all copies or substantial portions
23aaa36a97SAlex Deucher  * of the Software.
24aaa36a97SAlex Deucher  *
25aaa36a97SAlex Deucher  * Authors: Christian König <christian.koenig@amd.com>
26aaa36a97SAlex Deucher  */
27aaa36a97SAlex Deucher 
28aaa36a97SAlex Deucher #include <linux/firmware.h>
29aaa36a97SAlex Deucher #include <drm/drmP.h>
30aaa36a97SAlex Deucher #include "amdgpu.h"
31aaa36a97SAlex Deucher #include "amdgpu_vce.h"
32aaa36a97SAlex Deucher #include "vid.h"
33aaa36a97SAlex Deucher #include "vce/vce_3_0_d.h"
34aaa36a97SAlex Deucher #include "vce/vce_3_0_sh_mask.h"
35be4f38e2SAlex Deucher #include "oss/oss_3_0_d.h"
36be4f38e2SAlex Deucher #include "oss/oss_3_0_sh_mask.h"
375bbc553aSLeo Liu #include "gca/gfx_8_0_d.h"
386a585777SAlex Deucher #include "smu/smu_7_1_2_d.h"
396a585777SAlex Deucher #include "smu/smu_7_1_2_sh_mask.h"
40115933a5SChunming Zhou #include "gca/gfx_8_0_d.h"
41115933a5SChunming Zhou #include "gca/gfx_8_0_sh_mask.h"
42115933a5SChunming Zhou 
435bbc553aSLeo Liu 
445bbc553aSLeo Liu #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
455bbc553aSLeo Liu #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
463c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
473c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
483c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
49567e6e29Sjimqu #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02
50aaa36a97SAlex Deucher 
51e9822622SLeo Liu #define VCE_V3_0_FW_SIZE	(384 * 1024)
52e9822622SLeo Liu #define VCE_V3_0_STACK_SIZE	(64 * 1024)
53e9822622SLeo Liu #define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
54e9822622SLeo Liu 
555bbc553aSLeo Liu static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
56aaa36a97SAlex Deucher static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
57aaa36a97SAlex Deucher static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
58567e6e29Sjimqu static int vce_v3_0_wait_for_idle(void *handle);
59aaa36a97SAlex Deucher 
60aaa36a97SAlex Deucher /**
61aaa36a97SAlex Deucher  * vce_v3_0_ring_get_rptr - get read pointer
62aaa36a97SAlex Deucher  *
63aaa36a97SAlex Deucher  * @ring: amdgpu_ring pointer
64aaa36a97SAlex Deucher  *
65aaa36a97SAlex Deucher  * Returns the current hardware read pointer
66aaa36a97SAlex Deucher  */
67aaa36a97SAlex Deucher static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
68aaa36a97SAlex Deucher {
69aaa36a97SAlex Deucher 	struct amdgpu_device *adev = ring->adev;
70aaa36a97SAlex Deucher 
71aaa36a97SAlex Deucher 	if (ring == &adev->vce.ring[0])
72aaa36a97SAlex Deucher 		return RREG32(mmVCE_RB_RPTR);
73aaa36a97SAlex Deucher 	else
74aaa36a97SAlex Deucher 		return RREG32(mmVCE_RB_RPTR2);
75aaa36a97SAlex Deucher }
76aaa36a97SAlex Deucher 
77aaa36a97SAlex Deucher /**
78aaa36a97SAlex Deucher  * vce_v3_0_ring_get_wptr - get write pointer
79aaa36a97SAlex Deucher  *
80aaa36a97SAlex Deucher  * @ring: amdgpu_ring pointer
81aaa36a97SAlex Deucher  *
82aaa36a97SAlex Deucher  * Returns the current hardware write pointer
83aaa36a97SAlex Deucher  */
84aaa36a97SAlex Deucher static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
85aaa36a97SAlex Deucher {
86aaa36a97SAlex Deucher 	struct amdgpu_device *adev = ring->adev;
87aaa36a97SAlex Deucher 
88aaa36a97SAlex Deucher 	if (ring == &adev->vce.ring[0])
89aaa36a97SAlex Deucher 		return RREG32(mmVCE_RB_WPTR);
90aaa36a97SAlex Deucher 	else
91aaa36a97SAlex Deucher 		return RREG32(mmVCE_RB_WPTR2);
92aaa36a97SAlex Deucher }
93aaa36a97SAlex Deucher 
94aaa36a97SAlex Deucher /**
95aaa36a97SAlex Deucher  * vce_v3_0_ring_set_wptr - set write pointer
96aaa36a97SAlex Deucher  *
97aaa36a97SAlex Deucher  * @ring: amdgpu_ring pointer
98aaa36a97SAlex Deucher  *
99aaa36a97SAlex Deucher  * Commits the write pointer to the hardware
100aaa36a97SAlex Deucher  */
101aaa36a97SAlex Deucher static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
102aaa36a97SAlex Deucher {
103aaa36a97SAlex Deucher 	struct amdgpu_device *adev = ring->adev;
104aaa36a97SAlex Deucher 
105aaa36a97SAlex Deucher 	if (ring == &adev->vce.ring[0])
106aaa36a97SAlex Deucher 		WREG32(mmVCE_RB_WPTR, ring->wptr);
107aaa36a97SAlex Deucher 	else
108aaa36a97SAlex Deucher 		WREG32(mmVCE_RB_WPTR2, ring->wptr);
109aaa36a97SAlex Deucher }
110aaa36a97SAlex Deucher 
1110689a570SEric Huang static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
1120689a570SEric Huang {
113f3f0ea95STom St Denis 	WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
1140689a570SEric Huang }
1150689a570SEric Huang 
1160689a570SEric Huang static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
1170689a570SEric Huang 					     bool gated)
1180689a570SEric Huang {
119f3f0ea95STom St Denis 	u32 data;
120f16fe6d3STom St Denis 
1210689a570SEric Huang 	/* Set Override to disable Clock Gating */
1220689a570SEric Huang 	vce_v3_0_override_vce_clock_gating(adev, true);
1230689a570SEric Huang 
1246f906814STom St Denis 	/* This function enables MGCG which is controlled by firmware.
1256f906814STom St Denis 	   With the clocks in the gated state the core is still
1266f906814STom St Denis 	   accessible but the firmware will throttle the clocks on the
1276f906814STom St Denis 	   fly as necessary.
1280689a570SEric Huang 	*/
1296f906814STom St Denis 	if (gated) {
130f3f0ea95STom St Denis 		data = RREG32(mmVCE_CLOCK_GATING_B);
1310689a570SEric Huang 		data |= 0x1ff;
1320689a570SEric Huang 		data &= ~0xef0000;
1330689a570SEric Huang 		WREG32(mmVCE_CLOCK_GATING_B, data);
1340689a570SEric Huang 
135f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_CLOCK_GATING);
1360689a570SEric Huang 		data |= 0x3ff000;
1370689a570SEric Huang 		data &= ~0xffc00000;
1380689a570SEric Huang 		WREG32(mmVCE_UENC_CLOCK_GATING, data);
1390689a570SEric Huang 
140f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
1410689a570SEric Huang 		data |= 0x2;
1426f906814STom St Denis 		data &= ~0x00010000;
1430689a570SEric Huang 		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
1440689a570SEric Huang 
145f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
1460689a570SEric Huang 		data |= 0x37f;
1470689a570SEric Huang 		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
1480689a570SEric Huang 
149f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
1500689a570SEric Huang 		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
1510689a570SEric Huang 			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
1520689a570SEric Huang 			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
1530689a570SEric Huang 			0x8;
1540689a570SEric Huang 		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
1550689a570SEric Huang 	} else {
156f3f0ea95STom St Denis 		data = RREG32(mmVCE_CLOCK_GATING_B);
1570689a570SEric Huang 		data &= ~0x80010;
1580689a570SEric Huang 		data |= 0xe70008;
1590689a570SEric Huang 		WREG32(mmVCE_CLOCK_GATING_B, data);
1606f906814STom St Denis 
161f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_CLOCK_GATING);
1620689a570SEric Huang 		data |= 0xffc00000;
1630689a570SEric Huang 		WREG32(mmVCE_UENC_CLOCK_GATING, data);
1646f906814STom St Denis 
165f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
1660689a570SEric Huang 		data |= 0x10000;
1670689a570SEric Huang 		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
1686f906814STom St Denis 
169f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
1700689a570SEric Huang 		data &= ~0xffc00000;
1710689a570SEric Huang 		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
1726f906814STom St Denis 
173f3f0ea95STom St Denis 		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
1740689a570SEric Huang 		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
1750689a570SEric Huang 			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
1760689a570SEric Huang 			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
1770689a570SEric Huang 			  0x8);
1780689a570SEric Huang 		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
1790689a570SEric Huang 	}
1800689a570SEric Huang 	vce_v3_0_override_vce_clock_gating(adev, false);
1810689a570SEric Huang }
1820689a570SEric Huang 
183567e6e29Sjimqu static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
184567e6e29Sjimqu {
185567e6e29Sjimqu 	int i, j;
186567e6e29Sjimqu 
187567e6e29Sjimqu 	for (i = 0; i < 10; ++i) {
188567e6e29Sjimqu 		for (j = 0; j < 100; ++j) {
189b7e2e9f7Sjimqu 			uint32_t status = RREG32(mmVCE_STATUS);
190b7e2e9f7Sjimqu 
191567e6e29Sjimqu 			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
192567e6e29Sjimqu 				return 0;
193567e6e29Sjimqu 			mdelay(10);
194567e6e29Sjimqu 		}
195567e6e29Sjimqu 
196567e6e29Sjimqu 		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
197f3f0ea95STom St Denis 		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
198567e6e29Sjimqu 		mdelay(10);
199f3f0ea95STom St Denis 		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
200567e6e29Sjimqu 		mdelay(10);
201567e6e29Sjimqu 	}
202567e6e29Sjimqu 
203567e6e29Sjimqu 	return -ETIMEDOUT;
204567e6e29Sjimqu }
205567e6e29Sjimqu 
206aaa36a97SAlex Deucher /**
207aaa36a97SAlex Deucher  * vce_v3_0_start - start VCE block
208aaa36a97SAlex Deucher  *
209aaa36a97SAlex Deucher  * @adev: amdgpu_device pointer
210aaa36a97SAlex Deucher  *
211aaa36a97SAlex Deucher  * Setup and start the VCE block
212aaa36a97SAlex Deucher  */
213aaa36a97SAlex Deucher static int vce_v3_0_start(struct amdgpu_device *adev)
214aaa36a97SAlex Deucher {
215aaa36a97SAlex Deucher 	struct amdgpu_ring *ring;
216567e6e29Sjimqu 	int idx, r;
217567e6e29Sjimqu 
218567e6e29Sjimqu 	ring = &adev->vce.ring[0];
219567e6e29Sjimqu 	WREG32(mmVCE_RB_RPTR, ring->wptr);
220567e6e29Sjimqu 	WREG32(mmVCE_RB_WPTR, ring->wptr);
221567e6e29Sjimqu 	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
222567e6e29Sjimqu 	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
223567e6e29Sjimqu 	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
224567e6e29Sjimqu 
225567e6e29Sjimqu 	ring = &adev->vce.ring[1];
226567e6e29Sjimqu 	WREG32(mmVCE_RB_RPTR2, ring->wptr);
227567e6e29Sjimqu 	WREG32(mmVCE_RB_WPTR2, ring->wptr);
228567e6e29Sjimqu 	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
229567e6e29Sjimqu 	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
230567e6e29Sjimqu 	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
231aaa36a97SAlex Deucher 
2325bbc553aSLeo Liu 	mutex_lock(&adev->grbm_idx_mutex);
2335bbc553aSLeo Liu 	for (idx = 0; idx < 2; ++idx) {
2346a585777SAlex Deucher 		if (adev->vce.harvest_config & (1 << idx))
2356a585777SAlex Deucher 			continue;
2366a585777SAlex Deucher 
237f3f0ea95STom St Denis 		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
2385bbc553aSLeo Liu 		vce_v3_0_mc_resume(adev, idx);
239f3f0ea95STom St Denis 		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
240567e6e29Sjimqu 
2413c0ff9f1SLeo Liu 		if (adev->asic_type >= CHIP_STONEY)
2423c0ff9f1SLeo Liu 			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
2433c0ff9f1SLeo Liu 		else
244f3f0ea95STom St Denis 			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
245aaa36a97SAlex Deucher 
246f3f0ea95STom St Denis 		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
247aaa36a97SAlex Deucher 		mdelay(100);
248aaa36a97SAlex Deucher 
249567e6e29Sjimqu 		r = vce_v3_0_firmware_loaded(adev);
250aaa36a97SAlex Deucher 
251aaa36a97SAlex Deucher 		/* clear BUSY flag */
252f3f0ea95STom St Denis 		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);
253aaa36a97SAlex Deucher 
254aaa36a97SAlex Deucher 		if (r) {
255aaa36a97SAlex Deucher 			DRM_ERROR("VCE not responding, giving up!!!\n");
2565bbc553aSLeo Liu 			mutex_unlock(&adev->grbm_idx_mutex);
257aaa36a97SAlex Deucher 			return r;
258aaa36a97SAlex Deucher 		}
2595bbc553aSLeo Liu 	}
2605bbc553aSLeo Liu 
261f3f0ea95STom St Denis 	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
2625bbc553aSLeo Liu 	mutex_unlock(&adev->grbm_idx_mutex);
2635bbc553aSLeo Liu 
264567e6e29Sjimqu 	return 0;
265567e6e29Sjimqu }
2665bbc553aSLeo Liu 
267567e6e29Sjimqu static int vce_v3_0_stop(struct amdgpu_device *adev)
268567e6e29Sjimqu {
269567e6e29Sjimqu 	int idx;
270567e6e29Sjimqu 
271567e6e29Sjimqu 	mutex_lock(&adev->grbm_idx_mutex);
272567e6e29Sjimqu 	for (idx = 0; idx < 2; ++idx) {
273567e6e29Sjimqu 		if (adev->vce.harvest_config & (1 << idx))
274567e6e29Sjimqu 			continue;
275567e6e29Sjimqu 
276f3f0ea95STom St Denis 		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
277567e6e29Sjimqu 
278567e6e29Sjimqu 		if (adev->asic_type >= CHIP_STONEY)
279567e6e29Sjimqu 			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
280567e6e29Sjimqu 		else
281f3f0ea95STom St Denis 			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);
282f3f0ea95STom St Denis 
283567e6e29Sjimqu 		/* hold on ECPU */
284f3f0ea95STom St Denis 		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
285567e6e29Sjimqu 
286567e6e29Sjimqu 		/* clear BUSY flag */
287f3f0ea95STom St Denis 		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);
288567e6e29Sjimqu 
289567e6e29Sjimqu 		/* Set Clock-Gating off */
290567e6e29Sjimqu 		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
291567e6e29Sjimqu 			vce_v3_0_set_vce_sw_clock_gating(adev, false);
292567e6e29Sjimqu 	}
293567e6e29Sjimqu 
294f3f0ea95STom St Denis 	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
295567e6e29Sjimqu 	mutex_unlock(&adev->grbm_idx_mutex);
296aaa36a97SAlex Deucher 
297aaa36a97SAlex Deucher 	return 0;
298aaa36a97SAlex Deucher }
299aaa36a97SAlex Deucher 
3006a585777SAlex Deucher #define ixVCE_HARVEST_FUSE_MACRO__ADDRESS     0xC0014074
3016a585777SAlex Deucher #define VCE_HARVEST_FUSE_MACRO__SHIFT       27
3026a585777SAlex Deucher #define VCE_HARVEST_FUSE_MACRO__MASK        0x18000000
3036a585777SAlex Deucher 
3046a585777SAlex Deucher static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
3056a585777SAlex Deucher {
3066a585777SAlex Deucher 	u32 tmp;
3076a585777SAlex Deucher 
3082cc0c0b5SFlora Cui 	/* Fiji, Stoney, Polaris10, Polaris11 are single pipe */
309cfaba566SSamuel Li 	if ((adev->asic_type == CHIP_FIJI) ||
3101b4eeea5SSonny Jiang 	    (adev->asic_type == CHIP_STONEY) ||
3112cc0c0b5SFlora Cui 	    (adev->asic_type == CHIP_POLARIS10) ||
3122cc0c0b5SFlora Cui 	    (adev->asic_type == CHIP_POLARIS11))
3131dab5f06STom St Denis 		return AMDGPU_VCE_HARVEST_VCE1;
314188a9bcdSAlex Deucher 
315188a9bcdSAlex Deucher 	/* Tonga and CZ are dual or single pipe */
3162f7d10b3SJammy Zhou 	if (adev->flags & AMD_IS_APU)
3176a585777SAlex Deucher 		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
3186a585777SAlex Deucher 		       VCE_HARVEST_FUSE_MACRO__MASK) >>
3196a585777SAlex Deucher 			VCE_HARVEST_FUSE_MACRO__SHIFT;
3206a585777SAlex Deucher 	else
3216a585777SAlex Deucher 		tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
3226a585777SAlex Deucher 		       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
3236a585777SAlex Deucher 			CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;
3246a585777SAlex Deucher 
3256a585777SAlex Deucher 	switch (tmp) {
3266a585777SAlex Deucher 	case 1:
3271dab5f06STom St Denis 		return AMDGPU_VCE_HARVEST_VCE0;
3286a585777SAlex Deucher 	case 2:
3291dab5f06STom St Denis 		return AMDGPU_VCE_HARVEST_VCE1;
3306a585777SAlex Deucher 	case 3:
3311dab5f06STom St Denis 		return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
3326a585777SAlex Deucher 	default:
3331dab5f06STom St Denis 		return 0;
3346a585777SAlex Deucher 	}
3356a585777SAlex Deucher }
3366a585777SAlex Deucher 
3375fc3aeebSyanyang1 static int vce_v3_0_early_init(void *handle)
338aaa36a97SAlex Deucher {
3395fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3405fc3aeebSyanyang1 
3416a585777SAlex Deucher 	adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
3426a585777SAlex Deucher 
3436a585777SAlex Deucher 	if ((adev->vce.harvest_config &
3446a585777SAlex Deucher 	     (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
3456a585777SAlex Deucher 	    (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
3466a585777SAlex Deucher 		return -ENOENT;
3476a585777SAlex Deucher 
348aaa36a97SAlex Deucher 	vce_v3_0_set_ring_funcs(adev);
349aaa36a97SAlex Deucher 	vce_v3_0_set_irq_funcs(adev);
350aaa36a97SAlex Deucher 
351aaa36a97SAlex Deucher 	return 0;
352aaa36a97SAlex Deucher }
353aaa36a97SAlex Deucher 
3545fc3aeebSyanyang1 static int vce_v3_0_sw_init(void *handle)
355aaa36a97SAlex Deucher {
3565fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
357aaa36a97SAlex Deucher 	struct amdgpu_ring *ring;
358aaa36a97SAlex Deucher 	int r;
359aaa36a97SAlex Deucher 
360aaa36a97SAlex Deucher 	/* VCE */
361aaa36a97SAlex Deucher 	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
362aaa36a97SAlex Deucher 	if (r)
363aaa36a97SAlex Deucher 		return r;
364aaa36a97SAlex Deucher 
365e9822622SLeo Liu 	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
366e9822622SLeo Liu 		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
367aaa36a97SAlex Deucher 	if (r)
368aaa36a97SAlex Deucher 		return r;
369aaa36a97SAlex Deucher 
370aaa36a97SAlex Deucher 	r = amdgpu_vce_resume(adev);
371aaa36a97SAlex Deucher 	if (r)
372aaa36a97SAlex Deucher 		return r;
373aaa36a97SAlex Deucher 
374aaa36a97SAlex Deucher 	ring = &adev->vce.ring[0];
375aaa36a97SAlex Deucher 	sprintf(ring->name, "vce0");
376a3f1cf35SChristian König 	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
377aaa36a97SAlex Deucher 			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
378aaa36a97SAlex Deucher 	if (r)
379aaa36a97SAlex Deucher 		return r;
380aaa36a97SAlex Deucher 
381aaa36a97SAlex Deucher 	ring = &adev->vce.ring[1];
382aaa36a97SAlex Deucher 	sprintf(ring->name, "vce1");
383a3f1cf35SChristian König 	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
384aaa36a97SAlex Deucher 			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
385aaa36a97SAlex Deucher 	if (r)
386aaa36a97SAlex Deucher 		return r;
387aaa36a97SAlex Deucher 
388aaa36a97SAlex Deucher 	return r;
389aaa36a97SAlex Deucher }
390aaa36a97SAlex Deucher 
static int vce_v3_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return amdgpu_vce_sw_fini(adev);
}
406aaa36a97SAlex Deucher 
4075fc3aeebSyanyang1 static int vce_v3_0_hw_init(void *handle)
408aaa36a97SAlex Deucher {
409691ca86aSTom St Denis 	int r, i;
4105fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
411aaa36a97SAlex Deucher 
412aaa36a97SAlex Deucher 	r = vce_v3_0_start(adev);
413aaa36a97SAlex Deucher 	if (r)
414aaa36a97SAlex Deucher 		return r;
415aaa36a97SAlex Deucher 
416691ca86aSTom St Denis 	adev->vce.ring[0].ready = false;
417691ca86aSTom St Denis 	adev->vce.ring[1].ready = false;
418aaa36a97SAlex Deucher 
419691ca86aSTom St Denis 	for (i = 0; i < 2; i++) {
420691ca86aSTom St Denis 		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
421691ca86aSTom St Denis 		if (r)
422aaa36a97SAlex Deucher 			return r;
423691ca86aSTom St Denis 		else
424691ca86aSTom St Denis 			adev->vce.ring[i].ready = true;
425aaa36a97SAlex Deucher 	}
426aaa36a97SAlex Deucher 
427aaa36a97SAlex Deucher 	DRM_INFO("VCE initialized successfully.\n");
428aaa36a97SAlex Deucher 
429aaa36a97SAlex Deucher 	return 0;
430aaa36a97SAlex Deucher }
431aaa36a97SAlex Deucher 
static int vce_v3_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* drain outstanding work before stopping the block */
	r = vce_v3_0_wait_for_idle(handle);
	if (r)
		return r;

	return vce_v3_0_stop(adev);
}
443aaa36a97SAlex Deucher 
static int vce_v3_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_vce_suspend(adev);
}
459aaa36a97SAlex Deucher 
static int vce_v3_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	return vce_v3_0_hw_init(adev);
}
475aaa36a97SAlex Deucher 
4765bbc553aSLeo Liu static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
477aaa36a97SAlex Deucher {
478aaa36a97SAlex Deucher 	uint32_t offset, size;
479aaa36a97SAlex Deucher 
480aaa36a97SAlex Deucher 	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
481aaa36a97SAlex Deucher 	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
482aaa36a97SAlex Deucher 	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
4836f906814STom St Denis 	WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);
484aaa36a97SAlex Deucher 
485aaa36a97SAlex Deucher 	WREG32(mmVCE_LMI_CTRL, 0x00398000);
486aaa36a97SAlex Deucher 	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
487aaa36a97SAlex Deucher 	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
488aaa36a97SAlex Deucher 	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
489aaa36a97SAlex Deucher 	WREG32(mmVCE_LMI_VM_CTRL, 0);
4903c0ff9f1SLeo Liu 	if (adev->asic_type >= CHIP_STONEY) {
4913c0ff9f1SLeo Liu 		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
4923c0ff9f1SLeo Liu 		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
4933c0ff9f1SLeo Liu 		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
4943c0ff9f1SLeo Liu 	} else
495aaa36a97SAlex Deucher 		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
496aaa36a97SAlex Deucher 	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
497e9822622SLeo Liu 	size = VCE_V3_0_FW_SIZE;
498aaa36a97SAlex Deucher 	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
499aaa36a97SAlex Deucher 	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);
500aaa36a97SAlex Deucher 
5015bbc553aSLeo Liu 	if (idx == 0) {
502aaa36a97SAlex Deucher 		offset += size;
503e9822622SLeo Liu 		size = VCE_V3_0_STACK_SIZE;
504aaa36a97SAlex Deucher 		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
505aaa36a97SAlex Deucher 		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
506aaa36a97SAlex Deucher 		offset += size;
507e9822622SLeo Liu 		size = VCE_V3_0_DATA_SIZE;
508aaa36a97SAlex Deucher 		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
509aaa36a97SAlex Deucher 		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
5105bbc553aSLeo Liu 	} else {
5115bbc553aSLeo Liu 		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
5125bbc553aSLeo Liu 		size = VCE_V3_0_STACK_SIZE;
5135bbc553aSLeo Liu 		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
5145bbc553aSLeo Liu 		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
5155bbc553aSLeo Liu 		offset += size;
5165bbc553aSLeo Liu 		size = VCE_V3_0_DATA_SIZE;
5175bbc553aSLeo Liu 		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
5185bbc553aSLeo Liu 		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
5195bbc553aSLeo Liu 	}
520aaa36a97SAlex Deucher 
521aaa36a97SAlex Deucher 	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
522f3f0ea95STom St Denis 	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
523aaa36a97SAlex Deucher }
524aaa36a97SAlex Deucher 
5255fc3aeebSyanyang1 static bool vce_v3_0_is_idle(void *handle)
526aaa36a97SAlex Deucher {
5275fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
528be4f38e2SAlex Deucher 	u32 mask = 0;
5295fc3aeebSyanyang1 
53074af1276STom St Denis 	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
53174af1276STom St Denis 	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;
532be4f38e2SAlex Deucher 
533be4f38e2SAlex Deucher 	return !(RREG32(mmSRBM_STATUS2) & mask);
534aaa36a97SAlex Deucher }
535aaa36a97SAlex Deucher 
5365fc3aeebSyanyang1 static int vce_v3_0_wait_for_idle(void *handle)
537aaa36a97SAlex Deucher {
538aaa36a97SAlex Deucher 	unsigned i;
5395fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
540be4f38e2SAlex Deucher 
54192988e60STom St Denis 	for (i = 0; i < adev->usec_timeout; i++)
54292988e60STom St Denis 		if (vce_v3_0_is_idle(handle))
543aaa36a97SAlex Deucher 			return 0;
54492988e60STom St Denis 
545aaa36a97SAlex Deucher 	return -ETIMEDOUT;
546aaa36a97SAlex Deucher }
547aaa36a97SAlex Deucher 
548ac8e3f30SRex Zhu #define  VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK  0x00000008L   /* AUTO_BUSY */
549ac8e3f30SRex Zhu #define  VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK   0x00000010L   /* RB0_BUSY */
550ac8e3f30SRex Zhu #define  VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK   0x00000020L   /* RB1_BUSY */
551ac8e3f30SRex Zhu #define  AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
552ac8e3f30SRex Zhu 				      VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
553115933a5SChunming Zhou 
554115933a5SChunming Zhou static int vce_v3_0_check_soft_reset(void *handle)
555115933a5SChunming Zhou {
556115933a5SChunming Zhou 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
557115933a5SChunming Zhou 	u32 srbm_soft_reset = 0;
558115933a5SChunming Zhou 
559115933a5SChunming Zhou 	/* According to VCE team , we should use VCE_STATUS instead
560115933a5SChunming Zhou 	 * SRBM_STATUS.VCE_BUSY bit for busy status checking.
561115933a5SChunming Zhou 	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
562115933a5SChunming Zhou 	 * instance's registers are accessed
563115933a5SChunming Zhou 	 * (0 for 1st instance, 10 for 2nd instance).
564115933a5SChunming Zhou 	 *
565115933a5SChunming Zhou 	 *VCE_STATUS
566115933a5SChunming Zhou 	 *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
567115933a5SChunming Zhou 	 *|----+----+-----------+----+----+----+----------+---------+----|
568115933a5SChunming Zhou 	 *|bit8|bit7|    bit6   |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
569115933a5SChunming Zhou 	 *
570115933a5SChunming Zhou 	 * VCE team suggest use bit 3--bit 6 for busy status check
571115933a5SChunming Zhou 	 */
572f3f0ea95STom St Denis 	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
573115933a5SChunming Zhou 	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
574115933a5SChunming Zhou 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
575115933a5SChunming Zhou 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
576115933a5SChunming Zhou 	}
577f3f0ea95STom St Denis 	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
578115933a5SChunming Zhou 	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
579115933a5SChunming Zhou 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
580115933a5SChunming Zhou 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
581115933a5SChunming Zhou 	}
582f3f0ea95STom St Denis 	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
583115933a5SChunming Zhou 
584115933a5SChunming Zhou 	if (srbm_soft_reset) {
585115933a5SChunming Zhou 		adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true;
586115933a5SChunming Zhou 		adev->vce.srbm_soft_reset = srbm_soft_reset;
587115933a5SChunming Zhou 	} else {
588115933a5SChunming Zhou 		adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = false;
589115933a5SChunming Zhou 		adev->vce.srbm_soft_reset = 0;
590115933a5SChunming Zhou 	}
591115933a5SChunming Zhou 	return 0;
592115933a5SChunming Zhou }
593115933a5SChunming Zhou 
5945fc3aeebSyanyang1 static int vce_v3_0_soft_reset(void *handle)
595aaa36a97SAlex Deucher {
5965fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
597115933a5SChunming Zhou 	u32 srbm_soft_reset;
5985fc3aeebSyanyang1 
599115933a5SChunming Zhou 	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
600115933a5SChunming Zhou 		return 0;
601115933a5SChunming Zhou 	srbm_soft_reset = adev->vce.srbm_soft_reset;
602be4f38e2SAlex Deucher 
603115933a5SChunming Zhou 	if (srbm_soft_reset) {
604115933a5SChunming Zhou 		u32 tmp;
605115933a5SChunming Zhou 
606115933a5SChunming Zhou 		tmp = RREG32(mmSRBM_SOFT_RESET);
607115933a5SChunming Zhou 		tmp |= srbm_soft_reset;
608115933a5SChunming Zhou 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
609115933a5SChunming Zhou 		WREG32(mmSRBM_SOFT_RESET, tmp);
610115933a5SChunming Zhou 		tmp = RREG32(mmSRBM_SOFT_RESET);
611115933a5SChunming Zhou 
612115933a5SChunming Zhou 		udelay(50);
613115933a5SChunming Zhou 
614115933a5SChunming Zhou 		tmp &= ~srbm_soft_reset;
615115933a5SChunming Zhou 		WREG32(mmSRBM_SOFT_RESET, tmp);
616115933a5SChunming Zhou 		tmp = RREG32(mmSRBM_SOFT_RESET);
617115933a5SChunming Zhou 
618115933a5SChunming Zhou 		/* Wait a little for things to settle down */
619115933a5SChunming Zhou 		udelay(50);
620115933a5SChunming Zhou 	}
621115933a5SChunming Zhou 
622115933a5SChunming Zhou 	return 0;
623115933a5SChunming Zhou }
624115933a5SChunming Zhou 
625115933a5SChunming Zhou static int vce_v3_0_pre_soft_reset(void *handle)
626115933a5SChunming Zhou {
627115933a5SChunming Zhou 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
628115933a5SChunming Zhou 
629115933a5SChunming Zhou 	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
630115933a5SChunming Zhou 		return 0;
631115933a5SChunming Zhou 
632aaa36a97SAlex Deucher 	mdelay(5);
633aaa36a97SAlex Deucher 
634115933a5SChunming Zhou 	return vce_v3_0_suspend(adev);
635115933a5SChunming Zhou }
636115933a5SChunming Zhou 
637115933a5SChunming Zhou 
638115933a5SChunming Zhou static int vce_v3_0_post_soft_reset(void *handle)
639115933a5SChunming Zhou {
640115933a5SChunming Zhou 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
641115933a5SChunming Zhou 
642115933a5SChunming Zhou 	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
643115933a5SChunming Zhou 		return 0;
644115933a5SChunming Zhou 
645115933a5SChunming Zhou 	mdelay(5);
646115933a5SChunming Zhou 
647115933a5SChunming Zhou 	return vce_v3_0_resume(adev);
648aaa36a97SAlex Deucher }
649aaa36a97SAlex Deucher 
650aaa36a97SAlex Deucher static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
651aaa36a97SAlex Deucher 					struct amdgpu_irq_src *source,
652aaa36a97SAlex Deucher 					unsigned type,
653aaa36a97SAlex Deucher 					enum amdgpu_interrupt_state state)
654aaa36a97SAlex Deucher {
655aaa36a97SAlex Deucher 	uint32_t val = 0;
656aaa36a97SAlex Deucher 
657aaa36a97SAlex Deucher 	if (state == AMDGPU_IRQ_STATE_ENABLE)
658aaa36a97SAlex Deucher 		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
659aaa36a97SAlex Deucher 
660aaa36a97SAlex Deucher 	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
661aaa36a97SAlex Deucher 	return 0;
662aaa36a97SAlex Deucher }
663aaa36a97SAlex Deucher 
664aaa36a97SAlex Deucher static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
665aaa36a97SAlex Deucher 				      struct amdgpu_irq_src *source,
666aaa36a97SAlex Deucher 				      struct amdgpu_iv_entry *entry)
667aaa36a97SAlex Deucher {
668aaa36a97SAlex Deucher 	DRM_DEBUG("IH: VCE\n");
669d6c29c30SLeo Liu 
670f3f0ea95STom St Denis 	WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);
671d6c29c30SLeo Liu 
672aaa36a97SAlex Deucher 	switch (entry->src_data) {
673aaa36a97SAlex Deucher 	case 0:
674aaa36a97SAlex Deucher 	case 1:
67581da2edeSTom St Denis 		amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
676aaa36a97SAlex Deucher 		break;
677aaa36a97SAlex Deucher 	default:
678aaa36a97SAlex Deucher 		DRM_ERROR("Unhandled interrupt: %d %d\n",
679aaa36a97SAlex Deucher 			  entry->src_id, entry->src_data);
680aaa36a97SAlex Deucher 		break;
681aaa36a97SAlex Deucher 	}
682aaa36a97SAlex Deucher 
683aaa36a97SAlex Deucher 	return 0;
684aaa36a97SAlex Deucher }
685aaa36a97SAlex Deucher 
686ec38f188SRex Zhu static void vce_v3_set_bypass_mode(struct amdgpu_device *adev, bool enable)
687ec38f188SRex Zhu {
688ec38f188SRex Zhu 	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
689ec38f188SRex Zhu 
690ec38f188SRex Zhu 	if (enable)
691ec38f188SRex Zhu 		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
692ec38f188SRex Zhu 	else
693ec38f188SRex Zhu 		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
694ec38f188SRex Zhu 
695ec38f188SRex Zhu 	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
696ec38f188SRex Zhu }
697ec38f188SRex Zhu 
6985fc3aeebSyanyang1 static int vce_v3_0_set_clockgating_state(void *handle,
6995fc3aeebSyanyang1 					  enum amd_clockgating_state state)
700aaa36a97SAlex Deucher {
7010689a570SEric Huang 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7020689a570SEric Huang 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
7030689a570SEric Huang 	int i;
7040689a570SEric Huang 
705ec38f188SRex Zhu 	if (adev->asic_type == CHIP_POLARIS10)
706ec38f188SRex Zhu 		vce_v3_set_bypass_mode(adev, enable);
707ec38f188SRex Zhu 
708e3b04bc7SAlex Deucher 	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
7090689a570SEric Huang 		return 0;
7100689a570SEric Huang 
7110689a570SEric Huang 	mutex_lock(&adev->grbm_idx_mutex);
7120689a570SEric Huang 	for (i = 0; i < 2; i++) {
7130689a570SEric Huang 		/* Program VCE Instance 0 or 1 if not harvested */
7140689a570SEric Huang 		if (adev->vce.harvest_config & (1 << i))
7150689a570SEric Huang 			continue;
7160689a570SEric Huang 
717f3f0ea95STom St Denis 		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
7180689a570SEric Huang 
7190689a570SEric Huang 		if (enable) {
7200689a570SEric Huang 			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
7210689a570SEric Huang 			uint32_t data = RREG32(mmVCE_CLOCK_GATING_A);
7220689a570SEric Huang 			data &= ~(0xf | 0xff0);
7230689a570SEric Huang 			data |= ((0x0 << 0) | (0x04 << 4));
7240689a570SEric Huang 			WREG32(mmVCE_CLOCK_GATING_A, data);
7250689a570SEric Huang 
7260689a570SEric Huang 			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
7270689a570SEric Huang 			data = RREG32(mmVCE_UENC_CLOCK_GATING);
7280689a570SEric Huang 			data &= ~(0xf | 0xff0);
7290689a570SEric Huang 			data |= ((0x0 << 0) | (0x04 << 4));
7300689a570SEric Huang 			WREG32(mmVCE_UENC_CLOCK_GATING, data);
7310689a570SEric Huang 		}
7320689a570SEric Huang 
7330689a570SEric Huang 		vce_v3_0_set_vce_sw_clock_gating(adev, enable);
7340689a570SEric Huang 	}
7350689a570SEric Huang 
736f3f0ea95STom St Denis 	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
7370689a570SEric Huang 	mutex_unlock(&adev->grbm_idx_mutex);
7380689a570SEric Huang 
739aaa36a97SAlex Deucher 	return 0;
740aaa36a97SAlex Deucher }
741aaa36a97SAlex Deucher 
7425fc3aeebSyanyang1 static int vce_v3_0_set_powergating_state(void *handle,
7435fc3aeebSyanyang1 					  enum amd_powergating_state state)
744aaa36a97SAlex Deucher {
745aaa36a97SAlex Deucher 	/* This doesn't actually powergate the VCE block.
746aaa36a97SAlex Deucher 	 * That's done in the dpm code via the SMC.  This
747aaa36a97SAlex Deucher 	 * just re-inits the block as necessary.  The actual
748aaa36a97SAlex Deucher 	 * gating still happens in the dpm code.  We should
749aaa36a97SAlex Deucher 	 * revisit this when there is a cleaner line between
750aaa36a97SAlex Deucher 	 * the smc and the hw blocks
751aaa36a97SAlex Deucher 	 */
7525fc3aeebSyanyang1 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7535fc3aeebSyanyang1 
754e3b04bc7SAlex Deucher 	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
755808a934fSAlex Deucher 		return 0;
756808a934fSAlex Deucher 
7575fc3aeebSyanyang1 	if (state == AMD_PG_STATE_GATE)
758aaa36a97SAlex Deucher 		/* XXX do we need a vce_v3_0_stop()? */
759aaa36a97SAlex Deucher 		return 0;
760aaa36a97SAlex Deucher 	else
761aaa36a97SAlex Deucher 		return vce_v3_0_start(adev);
762aaa36a97SAlex Deucher }
763aaa36a97SAlex Deucher 
/* IP block callback table wiring the VCE 3.0 implementations into the
 * common amdgpu IP block framework. */
const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.name = "vce_v3_0",
	.early_init = vce_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v3_0_sw_init,
	.sw_fini = vce_v3_0_sw_fini,
	.hw_init = vce_v3_0_hw_init,
	.hw_fini = vce_v3_0_hw_fini,
	.suspend = vce_v3_0_suspend,
	.resume = vce_v3_0_resume,
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.check_soft_reset = vce_v3_0_check_soft_reset,
	.pre_soft_reset = vce_v3_0_pre_soft_reset,
	.soft_reset = vce_v3_0_soft_reset,
	.post_soft_reset = vce_v3_0_post_soft_reset,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
};
783aaa36a97SAlex Deucher 
/* Ring callback table shared by both VCE rings; mixes VCE-v3-specific
 * rptr/wptr accessors with the generic amdgpu_vce_* helpers. */
static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
};
798aaa36a97SAlex Deucher 
799aaa36a97SAlex Deucher static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
800aaa36a97SAlex Deucher {
801aaa36a97SAlex Deucher 	adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs;
802aaa36a97SAlex Deucher 	adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs;
803aaa36a97SAlex Deucher }
804aaa36a97SAlex Deucher 
/* Interrupt source callbacks for the VCE trap interrupt. */
static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
	.set = vce_v3_0_set_interrupt_state,
	.process = vce_v3_0_process_interrupt,
};
809aaa36a97SAlex Deucher 
810aaa36a97SAlex Deucher static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
811aaa36a97SAlex Deucher {
812aaa36a97SAlex Deucher 	adev->vce.irq.num_types = 1;
813aaa36a97SAlex Deucher 	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
814aaa36a97SAlex Deucher };
815