/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "cikd.h"

#include "uvd/uvd_4_2_d.h"
#include "uvd/uvd_4_2_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "bif/bif_4_1_d.h"

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
static int uvd_v4_2_set_clockgating_state(void *handle,
				enum amd_clockgating_state state);
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode);

/**
 * uvd_v4_2_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v4_2_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v4_2_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

static int uvd_v4_2_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	adev->uvd.num_uvd_inst = 1;

	uvd_v4_2_set_ring_funcs(adev);
	uvd_v4_2_set_irq_funcs(adev);

	return 0;
}

static int uvd_v4_2_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
	if (r)
		return r;

	r = amdgpu_uvd_entity_init(adev);

	return r;
}

static int uvd_v4_2_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);
/**
 * uvd_v4_2_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v4_2_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int r;

	uvd_v4_2_enable_mgcg(adev, true);
	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

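	/*
	 * Program the semaphore timeout registers through ring packets
	 * rather than direct MMIO. Each PACKET0 below pairs a register
	 * offset with the value written next; 0xFFFFF looks like the
	 * maximum timeout count, but that is an assumption based on the
	 * register names, not documented here.
	 */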
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v4_2_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v4_2_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v4_2_stop(adev);

	ring->sched.ready = false;

	return 0;
}

static int uvd_v4_2_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v4_2_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v4_2_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v4_2_hw_init(adev);
}

/**
 * uvd_v4_2_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v4_2_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz;
	int i, j, r;
	u32 tmp;
	/* disable byte swapping */
	u32 lmi_swap_cntl = 0;
	u32 mp_swap_cntl = 0;

	/* set uvd busy */
	WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2));

	uvd_v4_2_set_dcm(adev, true);
	WREG32(mmUVD_CGC_GATE, 0);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
	/* initialize UVD memory controller */
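	/*
	 * 0x203108 is a magic LMI configuration value carried over from
	 * the original bring-up code; it presumably packs the write-clean
	 * timer and coherency enables, but the individual bit meanings
	 * are an assumption here.
	 */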
	WREG32(mmUVD_LMI_CTRL, 0x203108);

	tmp = RREG32(mmUVD_MPC_CNTL);
	WREG32(mmUVD_MPC_CNTL, tmp | 0x10);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	uvd_v4_2_mc_resume(adev);

	tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
	WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	mdelay(10);

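	/*
	 * Wait for the VCPU to report ready: poll UVD_STATUS for up to
	 * ~1 second, and if it never comes up, pulse the VCPU soft reset
	 * and try again, up to 10 times in total. Bit 1 of UVD_STATUS is
	 * taken here as the "VCPU running" flag, an assumption based on
	 * how the code uses it.
	 */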
	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));

	WREG32_P(mmUVD_STATUS, 0, ~(1<<2));

	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* program the 4GB memory segment for rptr and ring buffer */
	WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
				   (0x7 << 16) | (0x1 << 31));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0x0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	/* set the ring address */
	WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);

	/* Set ring buffer size */
	rb_bufsz = order_base_2(ring->ring_size);
	rb_bufsz = (0x1 << 8) | rb_bufsz;
	WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

	return 0;
}

/**
 * uvd_v4_2_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v4_2_stop(struct amdgpu_device *adev)
{
	uint32_t i, j;
	uint32_t status;

	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(1);
		}
		if (status & 2)
			break;
	}

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_LMI_STATUS);
			if (status & 0xf)
				break;
			mdelay(1);
		}
		if (status & 0xf)
			break;
	}

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_LMI_STATUS);
			if (status & 0x240)
				break;
			mdelay(1);
		}
		if (status & 0x240)
			break;
	}

	WREG32_P(0x3D49, 0, ~(1 << 2));

	WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32(mmUVD_STATUS, 0);

	uvd_v4_2_set_dcm(adev, false);
}

/**
 * uvd_v4_2_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address of the fence
 * @seq: sequence number to write
 * @flags: AMDGPU_FENCE_FLAG_* bits
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

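	/*
	 * First command: latch the sequence number in UVD_CONTEXT_ID and
	 * point GPCOM_VCPU_DATA0/1 at the fence address, then issue VCPU
	 * command 0 (fence write). Second command: issue VCPU command 2
	 * (trap) so the firmware raises the UVD interrupt. The command
	 * encodings are inferred from how these values are used across
	 * UVD generations, not from documentation here.
	 */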
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v4_2_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

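	/*
	 * Seed UVD_CONTEXT_ID with a known poison value over MMIO, then
	 * ask the ring to overwrite it with 0xDEADBEEF. If the value
	 * changes within the timeout, packets are flowing through the
	 * ring and being executed by the block.
	 */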
	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v4_2_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * @vmid: virtual memory ID (unused on UVD v4.2)
 * @ctx_switch: whether a context switch is needed (unused)
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
	amdgpu_ring_write(ring, ib->gpu_addr);
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

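	/*
	 * NOPs are emitted as PACKET0(mmUVD_NO_OP)/value pairs, i.e. two
	 * dwords each, so both the current write pointer and the
	 * requested count must be even.
	 */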
	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

/**
 * uvd_v4_2_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
{
	uint64_t addr;
	uint32_t size;

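	/*
	 * The VCPU sees its memory as three consecutive windows carved
	 * out of one buffer: the firmware image (CACHE0), the heap
	 * (CACHE1), and the stack plus per-session state (CACHE2).
	 * Offsets and sizes are programmed in 8-byte units (hence the
	 * >> 3 shifts); address bits above 27 go into the extension
	 * registers below.
	 */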
	/* program the VCPU memory controller bits 0-27 */
	addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = AMDGPU_UVD_HEAP_SIZE >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = (AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	/* bits 28-31 */
	addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF;
	WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

	/* bits 32-39 */
	addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF;
	WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

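	/*
	 * Medium-grain clock gating: when supported, set the low twelve
	 * bits of UVD_CGC_MEM_CTRL (presumably one enable per memory
	 * subdomain; the per-bit meaning is an assumption) and select
	 * the dynamic clock mode in UVD_CGC_CTRL. Disabling clears the
	 * same bits.
	 */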
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode)
{
	u32 tmp, tmp2;

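	/*
	 * Switch between dynamic (software-ramped) and static clock
	 * modes. In sw_mode the gater divider and ramp enables in
	 * UVD_CGC_CTRL2 take over; otherwise the 0x7ffff800 block of
	 * gating bits in UVD_CGC_CTRL is forced on. The exact meaning
	 * of that bit block is an assumption.
	 */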
	WREG32_FIELD(UVD_CGC_GATE, REGS, 0);

	tmp = RREG32(mmUVD_CGC_CTRL);
	tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
	tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
		(4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);

	if (sw_mode) {
		tmp &= ~0x7ffff800;
		tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
			UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
			(7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
	} else {
		tmp |= 0x7ffff800;
		tmp2 = 0;
	}

	WREG32(mmUVD_CGC_CTRL, tmp);
	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}

static bool uvd_v4_2_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v4_2_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v4_2_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v4_2_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
			~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v4_2_start(adev);
}

static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO */
	return 0;
}

static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.inst->ring);
	return 0;
}

static int uvd_v4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int uvd_v4_2_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

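	/*
	 * When gating, stop the block first; if the ASIC supports UVD
	 * power gating and dpm is not managing it, drive the power
	 * gating state machine (PGFSM) directly and give it time to
	 * settle. Ungating mirrors the sequence before restarting the
	 * block. The mdelay values look empirical.
	 */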
	if (state == AMD_PG_STATE_GATE) {
		uvd_v4_2_stop(adev);
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
			if (!(RREG32_SMC(ixCURRENT_PG_STATUS) &
				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) {
				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
							UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK |
							UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
				mdelay(20);
			}
		}
		return 0;
	} else {
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
			if (RREG32_SMC(ixCURRENT_PG_STATUS) &
				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
						UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK |
						UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
				mdelay(30);
			}
		}
		return uvd_v4_2_start(adev);
	}
}

static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
	.name = "uvd_v4_2",
	.early_init = uvd_v4_2_early_init,
	.late_init = NULL,
	.sw_init = uvd_v4_2_sw_init,
	.sw_fini = uvd_v4_2_sw_fini,
	.hw_init = uvd_v4_2_hw_init,
	.hw_fini = uvd_v4_2_hw_fini,
	.suspend = uvd_v4_2_suspend,
	.resume = uvd_v4_2_resume,
	.is_idle = uvd_v4_2_is_idle,
	.wait_for_idle = uvd_v4_2_wait_for_idle,
	.soft_reset = uvd_v4_2_soft_reset,
	.set_clockgating_state = uvd_v4_2_set_clockgating_state,
	.set_powergating_state = uvd_v4_2_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v4_2_ring_get_rptr,
	.get_wptr = uvd_v4_2_ring_get_wptr,
	.set_wptr = uvd_v4_2_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
	.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
	.emit_ib = uvd_v4_2_ring_emit_ib,
	.emit_fence = uvd_v4_2_ring_emit_fence,
	.test_ring = uvd_v4_2_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v4_2_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->ring.funcs = &uvd_v4_2_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
	.set = uvd_v4_2_set_interrupt_state,
	.process = uvd_v4_2_process_interrupt,
};

static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->irq.num_types = 1;
	adev->uvd.inst->irq.funcs = &uvd_v4_2_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 4,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v4_2_ip_funcs,
};
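
/*
 * This version struct is what the ASIC setup code consumes; on CIK
 * parts the UVD 4.2 block is registered during device init via
 * amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block) (see cik.c).
 */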