xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c (revision 9d4fa1a1)
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_pm.h"
#include "amdgpu_psp.h"
#include "mmsch_v2_0.h"

#include "vcn/vcn_2_0_0_offset.h"
#include "vcn/vcn_2_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

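/*
 * Register offsets as seen from the VCPU's internal address space. These
 * are embedded in decode ring packets and DPG indirect writes, as opposed
 * to the external SOC15 MMIO offsets the host uses for direct access.
 */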
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x1fd
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x503
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x504
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x505
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x53f
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x54a
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x1e1
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x5a6
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x5a7
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x1e2

static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_0_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state);
static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);

/**
 * vcn_v2_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v2_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vcn.num_vcn_inst = 1;
	if (amdgpu_sriov_vf(adev))
		adev->vcn.num_enc_rings = 1;
	else
		adev->vcn.num_enc_rings = 2;

	vcn_v2_0_set_dec_ring_funcs(adev);
	vcn_v2_0_set_enc_ring_funcs(adev);
	vcn_v2_0_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v2_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			      VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT,
			      &adev->vcn.inst->irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
				      i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
				      &adev->vcn.inst->irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		DRM_INFO("PSP loading VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	ring = &adev->vcn.inst->ring_dec;

	ring->use_doorbell = true;
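	/* the doorbell index is assigned in 64-bit slot units; VCN uses 32-bit doorbells, hence the shift */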
	ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;

	sprintf(ring->name, "vcn_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
	if (r)
		return r;

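	/* 'internal' offsets are embedded in ring packets; the 'external' SOC15 offsets are for direct MMIO access */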
	adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
	adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
	adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
	adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
	adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
	adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

	adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
	adev->vcn.inst->external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
	adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
	adev->vcn.inst->external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
	adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
	adev->vcn.inst->external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
	adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
	adev->vcn.inst->external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
	adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
	adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		ring->use_doorbell = true;
		if (!amdgpu_sriov_vf(adev))
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
		else
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
		sprintf(ring->name, "vcn_enc%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
		if (r)
			return r;
	}

	adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;

	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	return 0;
}

/**
 * vcn_v2_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v2_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	int i, r;

	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
					     ring->doorbell_index, 0);

	if (amdgpu_sriov_vf(adev))
		vcn_v2_0_start_sriov(adev);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v2_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	int i;

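	/* gate the block unconditionally under DPG; otherwise only if it is not yet gated and still reports activity in UVD_STATUS */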
	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
	    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
	      RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
		vcn_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);

	ring->sched.ready = false;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		ring->sched.ready = false;
	}

	return 0;
}

/**
 * vcn_v2_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v2_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v2_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v2_0_hw_init(adev);

	return r;
}

/**
 * vcn_v2_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	if (amdgpu_sriov_vf(adev))
		return;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
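		/* with PSP loading, the firmware lives in the TMR; the VCN BO starts directly with the stack window */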
		offset = 0;
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst->gpu_addr));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst->gpu_addr));
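		/* with direct loading, the firmware occupies the first 'size' bytes of the VCN BO */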
		offset = size;
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}

	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

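/*
 * DPG-mode counterpart of vcn_v2_0_mc_resume(): the same cache window
 * programming issued through the DPG register interface; with @indirect
 * set, the writes are staged in the DPG SRAM buffer and committed to the
 * hardware later via the PSP.
 */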
static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v2_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	if (amdgpu_sriov_vf(adev))
		return;

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__SCPU_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	/* turn on */
	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
		uint8_t sram_sel, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v2_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (amdgpu_sriov_vf(adev))
		return;

	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0, 0xFFFFF, ret);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFF, ret);
	}

	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS,
	 * UVDU_PWR_STATUS are 0 (power on) */

	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}

static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFF, ret);
	}
}

static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;

	vcn_v2_0_enable_static_power_gating(adev);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);

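	/* indirect programming: rewind the write cursor to the start of the DPG
	 * SRAM scratch buffer; the staged writes are handed to the PSP via
	 * psp_update_vcn_sram() below */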
	if (indirect)
		adev->vcn.inst->dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst->dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v2_0_clock_gating_dpg_mode(adev, 0, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v2_0_mc_resume_dpg_mode(adev, indirect);

	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* release VCPU reset to boot */
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_SOFT_RESET), 0, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_CTRL2),
		0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		psp_update_vcn_sram(adev, 0, adev->vcn.inst->dpg_sram_gpu_addr,
				    (uint32_t)((uintptr_t)adev->vcn.inst->dpg_sram_curr_addr -
					       (uintptr_t)adev->vcn.inst->dpg_sram_cpu_addr));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));
	/* program the RB_BASE for the ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	return 0;
}

static int vcn_v2_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);

	vcn_v2_0_disable_static_power_gating(adev);

	/* set uvd status busy */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* SW clock gating */
	vcn_v2_0_disable_clock_gating(adev);

	/* enable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
		UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* disable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
		~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* setup mmUVD_LMI_CTRL */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

	/* setup mmUVD_MPC_CNTL */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_MPC_CNTL, tmp);

	/* setup UVD_MPC_SET_MUXA0 */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	/* setup UVD_MPC_SET_MUXB0 */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	/* setup mmUVD_MPC_SET_MUX */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v2_0_mc_resume(adev);

	/* release VCPU reset to boot */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* enable LMI MC and UMC channels */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET, tmp);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

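	/* poll up to ~1 s for the VCPU to report ready (UVD_STATUS bit 1);
	 * on timeout, soft-reset the VCPU and retry, up to 10 attempts */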
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK,
		~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* clear the busy bit of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
		~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_VMID, 0);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* program the RB_BASE for the ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));

	ring = &adev->vcn.inst->ring_enc[0];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vcn.inst->ring_enc[1];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);

	return 0;
}

static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
{
	int ret_code = 0;
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

static int vcn_v2_0_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int r;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		r = vcn_v2_0_stop_dpg_mode(adev);
		if (r)
			return r;
		goto power_off;
	}

	/* wait for uvd idle */
	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
	if (r)
		return r;

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp, r);
	if (r)
		return r;

	/* stall UMC channel */
	tmp = RREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2);
	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	WREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2, tmp);

	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp, r);
	if (r)
		return r;

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
		~(UVD_VCPU_CNTL__CLK_EN_MASK));

	/* reset LMI UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	/* reset LMI */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	/* reset VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* clear status */
	WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);

	vcn_v2_0_enable_clock_gating(adev);
	vcn_v2_0_enable_static_power_gating(adev);

power_off:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state)
{
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;
			SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
					   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);

				/* Restore */
				ring = &adev->vcn.inst->ring_enc[0];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));

				ring = &adev->vcn.inst->ring_enc[1];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));

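				/* restore the dec ring write pointer that
				 * vcn_v2_0_dec_ring_set_wptr() mirrored into
				 * SCRATCH2, dropping the bit-31 marker */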
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
					   RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);

				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
					   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
					   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

static bool vcn_v2_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}

static int vcn_v2_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
		UVD_STATUS__IDLE, ret);

	return ret;
}

static int vcn_v2_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (enable) {
		/* wait for STATUS to clear */
		if (!vcn_v2_0_is_idle(handle))
			return -EBUSY;
		vcn_v2_0_enable_clock_gating(adev);
	} else {
		/* disable HW gating and enable SW gating */
		vcn_v2_0_disable_clock_gating(adev);
	}
	return 0;
}

/**
 * vcn_v2_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v2_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v2_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

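	/* under DPG, mirror the wptr into SCRATCH2 (bit 31 set as a marker)
	 * so vcn_v2_0_pause_dpg_mode() can restore it on unpause */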
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
			lower_32_bits(ring->wptr) | 0x80000000);

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

/**
 * vcn_v2_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
}

/**
 * vcn_v2_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1));
}

/**
 * vcn_v2_0_dec_ring_insert_nop - insert a nop command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a nop command to the ring.
 */
void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

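	/* a dec ring nop is two dwords (PACKET0 header plus payload), so both
	 * the ring position and the count must stay even */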
	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.nop, 0));
		amdgpu_ring_write(ring, 0);
	}
}

/**
 * vcn_v2_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @fence: fence to emit
 *
 * Write a fence and a trap command to the ring.
 */
void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.context_id, 0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1));

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));

	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1));
}

/**
 * vcn_v2_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
			       struct amdgpu_job *job,
			       struct amdgpu_ib *ib,
			       uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_vmid, 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_low, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_high, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_size, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				uint32_t val, uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
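	/* the command appears to take a register byte address, hence the
	 * dword offset scaled by four */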
	amdgpu_ring_write(ring, reg << 2);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.gp_scratch8, 0));
	amdgpu_ring_write(ring, mask);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));

	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1));
}

void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
				unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
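	/* the PTB address registers come in lo/hi pairs per VMID, hence the
	 * stride of two dwords */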
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}

void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
				uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, reg << 2);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));

	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1));
}

/**
 * vcn_v2_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}

/**
 * vcn_v2_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0]) {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v2_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0]) {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}

/**
 * vcn_v2_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @fence: fence to emit
 *
 * Write an enc fence and a trap command to the ring.
1560  */
1561 void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1562 				u64 seq, unsigned flags)
1563 {
1564 	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1565 
1566 	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
1567 	amdgpu_ring_write(ring, addr);
1568 	amdgpu_ring_write(ring, upper_32_bits(addr));
1569 	amdgpu_ring_write(ring, seq);
1570 	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
1571 }
1572 
1573 void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1574 {
1575 	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
1576 }
1577 
1578 /**
1579  * vcn_v2_0_enc_ring_emit_ib - enc execute indirect buffer
1580  *
1581  * @ring: amdgpu_ring pointer
 * @job: job to retrieve the VMID from
1582  * @ib: indirect buffer to execute
 * @flags: unused
1583  *
1584  * Write enc ring commands to execute the indirect buffer
1585  */
1586 void vcn_v2_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1587 			       struct amdgpu_job *job,
1588 			       struct amdgpu_ib *ib,
1589 			       uint32_t flags)
1590 {
1591 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1592 
1593 	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
1594 	amdgpu_ring_write(ring, vmid);
1595 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1596 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1597 	amdgpu_ring_write(ring, ib->length_dw);
1598 }
1599 
1600 void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1601 				uint32_t val, uint32_t mask)
1602 {
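	/* reg is a dword offset; the engine takes a byte address (reg << 2). */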
1603 	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
1604 	amdgpu_ring_write(ring, reg << 2);
1605 	amdgpu_ring_write(ring, mask);
1606 	amdgpu_ring_write(ring, val);
1607 }
1608 
1609 void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1610 				unsigned int vmid, uint64_t pd_addr)
1611 {
1612 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1613 
1614 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1615 
1616 	/* wait for reg writes */
1617 	vcn_v2_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
1618 					lower_32_bits(pd_addr), 0xffffffff);
1619 }
1620 
1621 void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
1622 {
1623 	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
1624 	amdgpu_ring_write(ring, reg << 2);
1625 	amdgpu_ring_write(ring, val);
1626 }
1627 
1628 static int vcn_v2_0_set_interrupt_state(struct amdgpu_device *adev,
1629 					struct amdgpu_irq_src *source,
1630 					unsigned type,
1631 					enum amdgpu_interrupt_state state)
1632 {
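	/* No per-source masking to program; leave interrupt delivery as-is. */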
1633 	return 0;
1634 }
1635 
1636 static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
1637 				      struct amdgpu_irq_src *source,
1638 				      struct amdgpu_iv_entry *entry)
1639 {
1640 	DRM_DEBUG("IH: VCN TRAP\n");
1641 
1642 	switch (entry->src_id) {
1643 	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
1644 		amdgpu_fence_process(&adev->vcn.inst->ring_dec);
1645 		break;
1646 	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
1647 		amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
1648 		break;
1649 	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
1650 		amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
1651 		break;
1652 	default:
1653 		DRM_ERROR("Unhandled interrupt: %d %d\n",
1654 			  entry->src_id, entry->src_data[0]);
1655 		break;
1656 	}
1657 
1658 	return 0;
1659 }
1660 
1661 int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
1662 {
1663 	struct amdgpu_device *adev = ring->adev;
1664 	uint32_t tmp = 0;
1665 	unsigned i;
1666 	int r;
1667 
1668 	if (amdgpu_sriov_vf(adev))
1669 		return 0;
1670 
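	/* Seed scratch9 with a token, then have the ring overwrite it;
	 * reading back 0xDEADBEEF proves command fetch and execution work.
	 */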
1671 	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
1672 	r = amdgpu_ring_alloc(ring, 4);
1673 	if (r)
1674 		return r;
1675 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
1676 	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
1677 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
1678 	amdgpu_ring_write(ring, 0xDEADBEEF);
1679 	amdgpu_ring_commit(ring);
1680 	for (i = 0; i < adev->usec_timeout; i++) {
1681 		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
1682 		if (tmp == 0xDEADBEEF)
1683 			break;
1684 		udelay(1);
1685 	}
1686 
1687 	if (i >= adev->usec_timeout)
1688 		r = -ETIMEDOUT;
1689 
1690 	return r;
1691 }
1692 
1694 static int vcn_v2_0_set_powergating_state(void *handle,
1695 					  enum amd_powergating_state state)
1696 {
1697 	/* This doesn't actually powergate the VCN block.
1698 	 * That's done in the dpm code via the SMC.  This
1699 	 * just re-inits the block as necessary.  The actual
1700 	 * gating still happens in the dpm code.  We should
1701 	 * revisit this when there is a cleaner line between
1702 	 * the smc and the hw blocks
1703 	 */
1704 	int ret;
1705 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1706 
1707 	if (amdgpu_sriov_vf(adev)) {
1708 		adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
1709 		return 0;
1710 	}
1711 
1712 	if (state == adev->vcn.cur_state)
1713 		return 0;
1714 
1715 	if (state == AMD_PG_STATE_GATE)
1716 		ret = vcn_v2_0_stop(adev);
1717 	else
1718 		ret = vcn_v2_0_start(adev);
1719 
1720 	if (!ret)
1721 		adev->vcn.cur_state = state;
1722 	return ret;
1723 }
1724 
1725 static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev,
1726 				struct amdgpu_mm_table *table)
1727 {
1728 	uint32_t data = 0, loop;
1729 	uint64_t addr = table->gpu_addr;
1730 	struct mmsch_v2_0_init_header *header;
1731 	uint32_t size;
1732 	int i;
1733 
1734 	header = (struct mmsch_v2_0_init_header *)table->cpu_addr;
1735 	size = header->header_size + header->vcn_table_size;
1736 
1737 	/* 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
1738 	 * of memory descriptor location
1739 	 */
1740 	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
1741 	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
1742 
1743 	/* 2, update vmid of descriptor */
1744 	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
1745 	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
1746 	/* use domain0 for MM scheduler */
1747 	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
1748 	WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);
1749 
1750 	/* 3, notify mmsch about the size of this descriptor */
1751 	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);
1752 
1753 	/* 4, set resp to zero */
1754 	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
1755 
1756 	adev->vcn.inst->ring_dec.wptr = 0;
1757 	adev->vcn.inst->ring_dec.wptr_old = 0;
1758 	vcn_v2_0_dec_ring_set_wptr(&adev->vcn.inst->ring_dec);
1759 
1760 	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
1761 		adev->vcn.inst->ring_enc[i].wptr = 0;
1762 		adev->vcn.inst->ring_enc[i].wptr_old = 0;
1763 		vcn_v2_0_enc_ring_set_wptr(&adev->vcn.inst->ring_enc[i]);
1764 	}
1765 
1766 	/* 5, kick off the initialization and wait until
1767 	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
1768 	 */
1769 	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
1770 
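	/* Poll the response mailbox (up to 1000 * 10us) until the ack bits
	 * (0x10000002) show up.
	 */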
1771 	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
1772 	loop = 1000;
1773 	while ((data & 0x10000002) != 0x10000002) {
1774 		udelay(10);
1775 		data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
1776 		loop--;
1777 		if (!loop)
1778 			break;
1779 	}
1780 
1781 	if (!loop) {
1782 		DRM_ERROR("failed to init MMSCH, "
1783 			"mmMMSCH_VF_MAILBOX_RESP = 0x%08x\n", data);
1784 		return -EBUSY;
1785 	}
1786 
1787 	return 0;
1788 }
1789 
1790 static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
1791 {
1792 	int r;
1793 	uint32_t tmp;
1794 	struct amdgpu_ring *ring;
1795 	uint32_t offset, size;
1796 	uint32_t table_size = 0;
1797 	struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
1798 	struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
1799 	struct mmsch_v2_0_cmd_direct_polling direct_poll = { {0} };
1800 	struct mmsch_v2_0_cmd_end end = { {0} };
1801 	struct mmsch_v2_0_init_header *header;
1802 	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
1803 	uint8_t i = 0;
1804 
1805 	header = (struct mmsch_v2_0_init_header *)init_table;
1806 	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
1807 	direct_rd_mod_wt.cmd_header.command_type =
1808 		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
1809 	direct_poll.cmd_header.command_type =
1810 		MMSCH_COMMAND__DIRECT_REG_POLLING;
1811 	end.cmd_header.command_type = MMSCH_COMMAND__END;
1812 
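	/* An empty header means the init table was never built; populate it
	 * once and reuse it on subsequent starts.
	 */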
1813 	if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) {
1814 		header->version = MMSCH_VERSION;
1815 		header->header_size = sizeof(struct mmsch_v2_0_init_header) >> 2;
1816 
1817 		header->vcn_table_offset = header->header_size;
1818 
1819 		init_table += header->vcn_table_offset;
1820 
1821 		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
1822 
1823 		MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
1824 			SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
1825 			0xFFFFFFFF, 0x00000004);
1826 
1827 		/* mc resume */
1828 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1829 			tmp = AMDGPU_UCODE_ID_VCN;
1830 			MMSCH_V2_0_INSERT_DIRECT_WT(
1831 				SOC15_REG_OFFSET(UVD, i,
1832 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1833 				adev->firmware.ucode[tmp].tmr_mc_addr_lo);
1834 			MMSCH_V2_0_INSERT_DIRECT_WT(
1835 				SOC15_REG_OFFSET(UVD, i,
1836 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1837 				adev->firmware.ucode[tmp].tmr_mc_addr_hi);
1838 			offset = 0;
1839 		} else {
1840 			MMSCH_V2_0_INSERT_DIRECT_WT(
1841 				SOC15_REG_OFFSET(UVD, i,
1842 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1843 				lower_32_bits(adev->vcn.inst->gpu_addr));
1844 			MMSCH_V2_0_INSERT_DIRECT_WT(
1845 				SOC15_REG_OFFSET(UVD, i,
1846 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1847 				upper_32_bits(adev->vcn.inst->gpu_addr));
1848 			offset = size;
1849 		}
1850 
1851 		MMSCH_V2_0_INSERT_DIRECT_WT(
1852 			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
1853 			0);
1854 		MMSCH_V2_0_INSERT_DIRECT_WT(
1855 			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
1856 			size);
1857 
1858 		MMSCH_V2_0_INSERT_DIRECT_WT(
1859 			SOC15_REG_OFFSET(UVD, i,
1860 				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
1861 			lower_32_bits(adev->vcn.inst->gpu_addr + offset));
1862 		MMSCH_V2_0_INSERT_DIRECT_WT(
1863 			SOC15_REG_OFFSET(UVD, i,
1864 				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
1865 			upper_32_bits(adev->vcn.inst->gpu_addr + offset));
1866 		MMSCH_V2_0_INSERT_DIRECT_WT(
1867 			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
1868 			0);
1869 		MMSCH_V2_0_INSERT_DIRECT_WT(
1870 			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
1871 			AMDGPU_VCN_STACK_SIZE);
1872 
1873 		MMSCH_V2_0_INSERT_DIRECT_WT(
1874 			SOC15_REG_OFFSET(UVD, i,
1875 				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
1876 			lower_32_bits(adev->vcn.inst->gpu_addr + offset +
1877 				AMDGPU_VCN_STACK_SIZE));
1878 		MMSCH_V2_0_INSERT_DIRECT_WT(
1879 			SOC15_REG_OFFSET(UVD, i,
1880 				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
1881 			upper_32_bits(adev->vcn.inst->gpu_addr + offset +
1882 				AMDGPU_VCN_STACK_SIZE));
1883 		MMSCH_V2_0_INSERT_DIRECT_WT(
1884 			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
1885 			0);
1886 		MMSCH_V2_0_INSERT_DIRECT_WT(
1887 			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
1888 			AMDGPU_VCN_CONTEXT_SIZE);
1889 
1890 		for (r = 0; r < adev->vcn.num_enc_rings; ++r) {
1891 			ring = &adev->vcn.inst->ring_enc[r];
1892 			ring->wptr = 0;
1893 			MMSCH_V2_0_INSERT_DIRECT_WT(
1894 				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
1895 				lower_32_bits(ring->gpu_addr));
1896 			MMSCH_V2_0_INSERT_DIRECT_WT(
1897 				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
1898 				upper_32_bits(ring->gpu_addr));
1899 			MMSCH_V2_0_INSERT_DIRECT_WT(
1900 				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
1901 				ring->ring_size / 4);
1902 		}
1903 
1904 		ring = &adev->vcn.inst->ring_dec;
1905 		ring->wptr = 0;
1906 		MMSCH_V2_0_INSERT_DIRECT_WT(
1907 			SOC15_REG_OFFSET(UVD, i,
1908 				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
1909 			lower_32_bits(ring->gpu_addr));
1910 		MMSCH_V2_0_INSERT_DIRECT_WT(
1911 			SOC15_REG_OFFSET(UVD, i,
1912 				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
1913 			upper_32_bits(ring->gpu_addr));
1914 		/* force RBC into idle state */
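		/* RB_BUFSZ takes log2 of the ring size; the NO_FETCH and
		 * NO_UPDATE bits keep the RBC from running yet.
		 */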
1915 		tmp = order_base_2(ring->ring_size);
1916 		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
1917 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1918 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1919 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1920 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1921 		MMSCH_V2_0_INSERT_DIRECT_WT(
1922 			SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
1923 
1924 		/* add end packet */
1925 		tmp = sizeof(struct mmsch_v2_0_cmd_end);
1926 		memcpy((void *)init_table, &end, tmp);
1927 		table_size += (tmp / 4);
1928 		header->vcn_table_size = table_size;
1930 	}
1931 	return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
1932 }
1933 
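/* IP-block callbacks the amdgpu core invokes for init/fini, suspend/resume,
 * idle checks and clock/power gating of the VCN 2.0 block.
 */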
1934 static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
1935 	.name = "vcn_v2_0",
1936 	.early_init = vcn_v2_0_early_init,
1937 	.late_init = NULL,
1938 	.sw_init = vcn_v2_0_sw_init,
1939 	.sw_fini = vcn_v2_0_sw_fini,
1940 	.hw_init = vcn_v2_0_hw_init,
1941 	.hw_fini = vcn_v2_0_hw_fini,
1942 	.suspend = vcn_v2_0_suspend,
1943 	.resume = vcn_v2_0_resume,
1944 	.is_idle = vcn_v2_0_is_idle,
1945 	.wait_for_idle = vcn_v2_0_wait_for_idle,
1946 	.check_soft_reset = NULL,
1947 	.pre_soft_reset = NULL,
1948 	.soft_reset = NULL,
1949 	.post_soft_reset = NULL,
1950 	.set_clockgating_state = vcn_v2_0_set_clockgating_state,
1951 	.set_powergating_state = vcn_v2_0_set_powergating_state,
1952 };
1953 
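/* Ring callbacks for the decode ring; emit_frame_size reserves the
 * worst-case dword count per submission for the VM flush and fence packets.
 */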
1954 static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
1955 	.type = AMDGPU_RING_TYPE_VCN_DEC,
1956 	.align_mask = 0xf,
1957 	.vmhub = AMDGPU_MMHUB_0,
1958 	.get_rptr = vcn_v2_0_dec_ring_get_rptr,
1959 	.get_wptr = vcn_v2_0_dec_ring_get_wptr,
1960 	.set_wptr = vcn_v2_0_dec_ring_set_wptr,
1961 	.emit_frame_size =
1962 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1963 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1964 		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
1965 		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
1966 		6,
1967 	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
1968 	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
1969 	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
1970 	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
1971 	.test_ring = vcn_v2_0_dec_ring_test_ring,
1972 	.test_ib = amdgpu_vcn_dec_ring_test_ib,
1973 	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
1974 	.insert_start = vcn_v2_0_dec_ring_insert_start,
1975 	.insert_end = vcn_v2_0_dec_ring_insert_end,
1976 	.pad_ib = amdgpu_ring_generic_pad_ib,
1977 	.begin_use = amdgpu_vcn_ring_begin_use,
1978 	.end_use = amdgpu_vcn_ring_end_use,
1979 	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
1980 	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
1981 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1982 };
1983 
1984 static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
1985 	.type = AMDGPU_RING_TYPE_VCN_ENC,
1986 	.align_mask = 0x3f,
1987 	.nop = VCN_ENC_CMD_NO_OP,
1988 	.vmhub = AMDGPU_MMHUB_0,
1989 	.get_rptr = vcn_v2_0_enc_ring_get_rptr,
1990 	.get_wptr = vcn_v2_0_enc_ring_get_wptr,
1991 	.set_wptr = vcn_v2_0_enc_ring_set_wptr,
1992 	.emit_frame_size =
1993 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1994 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1995 		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
1996 		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
1997 		1, /* vcn_v2_0_enc_ring_insert_end */
1998 	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
1999 	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
2000 	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
2001 	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
2002 	.test_ring = amdgpu_vcn_enc_ring_test_ring,
2003 	.test_ib = amdgpu_vcn_enc_ring_test_ib,
2004 	.insert_nop = amdgpu_ring_insert_nop,
2005 	.insert_end = vcn_v2_0_enc_ring_insert_end,
2006 	.pad_ib = amdgpu_ring_generic_pad_ib,
2007 	.begin_use = amdgpu_vcn_ring_begin_use,
2008 	.end_use = amdgpu_vcn_ring_end_use,
2009 	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
2010 	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
2011 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2012 };
2013 
2014 static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
2015 {
2016 	adev->vcn.inst->ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs;
2017 	DRM_INFO("VCN decode is enabled in VM mode\n");
2018 }
2019 
2020 static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev)
2021 {
2022 	int i;
2023 
2024 	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
2025 		adev->vcn.inst->ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;
2026 
2027 	DRM_INFO("VCN encode is enabled in VM mode\n");
2028 }
2029 
2030 static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
2031 	.set = vcn_v2_0_set_interrupt_state,
2032 	.process = vcn_v2_0_process_interrupt,
2033 };
2034 
2035 static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev)
2036 {
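	/* One interrupt type per enc ring plus one for the dec ring. */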
2037 	adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 1;
2038 	adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs;
2039 }
2040 
2041 const struct amdgpu_ip_block_version vcn_v2_0_ip_block =
2042 {
2043 	.type = AMD_IP_BLOCK_TYPE_VCN,
2044 	.major = 2,
2045 	.minor = 0,
2046 	.rev = 0,
2047 	.funcs = &vcn_v2_0_ip_funcs,
2048 };
2049