/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_pm.h"
#include "amdgpu_psp.h"
#include "mmsch_v2_0.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_2_0_0_offset.h"
#include "vcn/vcn_2_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#define VCN_VID_SOC_ADDRESS_2_0					0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0				0x48200

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x1fd
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x503
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x504
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x505
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x53f
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x54a
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x1e1
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x5a6
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x5a7
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x1e2

static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_0_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state);
static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);

/**
 * vcn_v2_0_early_init - set function pointers and load microcode
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v2_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		adev->vcn.num_enc_rings = 1;
	else
		adev->vcn.num_enc_rings = 2;

	vcn_v2_0_set_dec_ring_funcs(adev);
	vcn_v2_0_set_enc_ring_funcs(adev);
	vcn_v2_0_set_irq_funcs(adev);

	return amdgpu_vcn_early_init(adev);
}

/**
 * vcn_v2_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and do software initialization
 */
static int vcn_v2_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	volatile struct amdgpu_fw_shared *fw_shared;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			      VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT,
			      &adev->vcn.inst->irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
				      i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
				      &adev->vcn.inst->irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	ring = &adev->vcn.inst->ring_dec;

	ring->use_doorbell = true;
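	/* each doorbell entry is 64-bit wide; shift the index to get the 32-bit doorbell slot */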
	ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
	ring->vm_hub = AMDGPU_MMHUB0(0);

	sprintf(ring->name, "vcn_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
	adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
	adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
	adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
	adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
	adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

	adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
	adev->vcn.inst->external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
	adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
	adev->vcn.inst->external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
	adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
	adev->vcn.inst->external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
	adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
	adev->vcn.inst->external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
	adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
	adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);

		ring = &adev->vcn.inst->ring_enc[i];
		ring->use_doorbell = true;
		ring->vm_hub = AMDGPU_MMHUB0(0);
		if (!amdgpu_sriov_vf(adev))
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
		else
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
		sprintf(ring->name, "vcn_enc%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
				     hw_prio, NULL);
		if (r)
			return r;
	}

	adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;

	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
	fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);

	if (amdgpu_vcnfw_log)
		amdgpu_vcn_fwlog_init(adev->vcn.inst);

	return 0;
}

/**
 * vcn_v2_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_0_sw_fini(void *handle)
{
	int r, idx;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		fw_shared->present_flag_0 = 0;
		drm_dev_exit(idx);
	}

	amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v2_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	int i, r;

	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
					     ring->doorbell_index, 0);

	if (amdgpu_sriov_vf(adev))
		vcn_v2_0_start_sriov(adev);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	/* disable VCN decode for SR-IOV */
	if (amdgpu_sriov_vf(adev))
		ring->sched.ready = false;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v2_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

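	/*
	 * Power gate the block unless it is already gated with the VCPU idle;
	 * with DPG support the gating path is always taken.
	 */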
	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
	    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
	      RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
		vcn_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);

	return 0;
}

/**
 * vcn_v2_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v2_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v2_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v2_0_hw_init(adev);

	return r;
}

/**
 * vcn_v2_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	if (amdgpu_sriov_vf(adev))
		return;

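	/*
	 * When the PSP loads the firmware into the TMR, cache window 0 points
	 * there and 'offset' stays 0; otherwise the firmware image occupies
	 * the first 'size' bytes of the VCN BO and the stack/context windows
	 * are laid out right after it.
	 */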
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst->gpu_addr));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst->gpu_addr));
		offset = size;
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}

	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst->fw_shared.gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst->fw_shared.gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));

	WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst->fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst->fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v2_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	if (amdgpu_sriov_vf(adev))
		return;

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__SCPU_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	/* turn on */
	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
		uint8_t sram_sel, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v2_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (amdgpu_sriov_vf(adev))
		return;

	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (amdgpu_sriov_vf(adev))
		return;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0, 0xFFFFF);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFF);
	}

	/* poll UVD_PGFSM_STATUS to confirm that UVDM_PWR_STATUS and
	 * UVDU_PWR_STATUS are 0 (powered on) */

	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}

static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (amdgpu_sriov_vf(adev))
		return;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFF);
	}
}

static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;

	vcn_v2_0_enable_static_power_gating(adev);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);

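	/*
	 * In indirect mode the DPG register writes below are staged in the
	 * DPG scratch SRAM buffer and handed to the PSP in one batch once
	 * programming is complete (see psp_update_vcn_sram() further down).
	 */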
	if (indirect)
		adev->vcn.inst->dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst->dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v2_0_clock_gating_dpg_mode(adev, 0, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v2_0_mc_resume_dpg_mode(adev, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* release VCPU reset to boot */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_SOFT_RESET), 0, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_CTRL2),
		0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		psp_update_vcn_sram(adev, 0, adev->vcn.inst->dpg_sram_gpu_addr,
				    (uint32_t)((uintptr_t)adev->vcn.inst->dpg_sram_curr_addr -
					       (uintptr_t)adev->vcn.inst->dpg_sram_cpu_addr));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	return 0;
}

static int vcn_v2_0_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);

	vcn_v2_0_disable_static_power_gating(adev);

	/* set uvd status busy */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* SW clock gating */
	vcn_v2_0_disable_clock_gating(adev);

	/* enable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
		UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* disable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
		~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* setup mmUVD_LMI_CTRL */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

	/* setup mmUVD_MPC_CNTL */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_MPC_CNTL, tmp);

	/* setup UVD_MPC_SET_MUXA0 */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	/* setup UVD_MPC_SET_MUXB0 */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	/* setup mmUVD_MPC_SET_MUX */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v2_0_mc_resume(adev);

	/* release VCPU reset to boot */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* enable LMI MC and UMC channels */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET, tmp);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

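	/*
	 * Poll UVD_STATUS for the VCPU-ready flag (bit 1), retrying up to
	 * ten times and toggling VCPU soft reset between attempts.
	 */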
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK,
		~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* clear the busy bit of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
		~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_VMID, 0);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));
	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;

	fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
	ring = &adev->vcn.inst->ring_enc[0];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
	fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

	fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
	ring = &adev->vcn.inst->ring_enc[1];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
	fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

	return 0;
}

static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
{
	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
	uint32_t tmp;

	vcn_v2_0_pause_dpg_mode(adev, 0, &state);
	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

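	/* bit 31 of the decode wptr is reserved as a DPG flag (see
	 * vcn_v2_0_dec_ring_set_wptr); compare only the low 31 bits */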
	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

static int vcn_v2_0_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int r;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		r = vcn_v2_0_stop_dpg_mode(adev);
		if (r)
			return r;
		goto power_off;
	}

	/* wait for uvd idle */
	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
	if (r)
		return r;

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp);
	if (r)
		return r;

	/* stall UMC channel */
	tmp = RREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2);
	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	WREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2, tmp);

	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp);
	if (r)
		return r;

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
		~(UVD_VCPU_CNTL__CLK_EN_MASK));

	/* reset LMI UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	/* reset LMI */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	/* reset VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* clear status */
	WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);

	vcn_v2_0_enable_clock_gating(adev);
	vcn_v2_0_enable_static_power_gating(adev);

power_off:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state)
{
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
					   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Stall DPG before WPTR/RPTR reset */
				WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
					   UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
					   ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
				/* Restore */
				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst->ring_enc[0];
				ring->wptr = 0;
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

				fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst->ring_enc[1];
				ring->wptr = 0;
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

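				/*
				 * Restore the decode wptr from UVD_SCRATCH2, where
				 * vcn_v2_0_dec_ring_set_wptr mirrors it while DPG is
				 * active; mask off the bit-31 validity flag.
				 */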
				fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
					   RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
				fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
				/* Unstall DPG */
				WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
					   0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
					   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
					   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

static bool vcn_v2_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}

static int vcn_v2_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
		UVD_STATUS__IDLE);

	return ret;
}

static int vcn_v2_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (enable) {
		/* wait for STATUS to clear */
		if (!vcn_v2_0_is_idle(handle))
			return -EBUSY;
		vcn_v2_0_enable_clock_gating(adev);
	} else {
		/* disable HW gating and enable SW gating */
		vcn_v2_0_disable_clock_gating(adev);
	}
	return 0;
}

/**
 * vcn_v2_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v2_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v2_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

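	/*
	 * In DPG mode, mirror the wptr into UVD_SCRATCH2 (bit 31 set as a
	 * validity flag) so it can be restored after a pause/unpause cycle.
	 */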
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
			lower_32_bits(ring->wptr) | 0x80000000);

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

/**
 * vcn_v2_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

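	/*
	 * Decode ring commands are encoded as PACKET0 register writes:
	 * operands go to the GPCOM_VCPU_DATA0/1 registers, then the command
	 * code is written to GPCOM_VCPU_CMD.
	 */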
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
}

/**
 * vcn_v2_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1));
}

/**
 * vcn_v2_0_dec_ring_insert_nop - insert a nop command
 *
 * @ring: amdgpu_ring pointer
 * @count: the number of NOP packets to insert
 *
 * Write a nop command to the ring.
 */
void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

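	/* nops go out as two-dword pairs (PACKET0 header + payload) */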
	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.nop, 0));
		amdgpu_ring_write(ring, 0);
	}
}

/**
 * vcn_v2_0_dec_ring_emit_fence - emit a fence & trap command
1428  *
1429  * @ring: amdgpu_ring pointer
1430  * @addr: address
1431  * @seq: sequence number
1432  * @flags: fence related flags
1433  *
1434  * Write a fence and a trap command to the ring.
1435  */
1436 void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1437 				unsigned flags)
1438 {
1439 	struct amdgpu_device *adev = ring->adev;
1440 
1441 	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1442 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.context_id, 0));
1443 	amdgpu_ring_write(ring, seq);
1444 
1445 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
1446 	amdgpu_ring_write(ring, addr & 0xffffffff);
1447 
1448 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
1449 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
1450 
1451 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
1452 	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1));
1453 
1454 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
1455 	amdgpu_ring_write(ring, 0);
1456 
1457 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
1458 	amdgpu_ring_write(ring, 0);
1459 
1460 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
1461 
1462 	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1));
1463 }
1464 
1465 /**
1466  * vcn_v2_0_dec_ring_emit_ib - execute indirect buffer
1467  *
1468  * @ring: amdgpu_ring pointer
1469  * @job: job to retrieve vmid from
1470  * @ib: indirect buffer to execute
1471  * @flags: unused
1472  *
1473  * Write ring commands to execute the indirect buffer
1474  */
1475 void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
1476 			       struct amdgpu_job *job,
1477 			       struct amdgpu_ib *ib,
1478 			       uint32_t flags)
1479 {
1480 	struct amdgpu_device *adev = ring->adev;
1481 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1482 
1483 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_vmid, 0));
1484 	amdgpu_ring_write(ring, vmid);
1485 
1486 	amdgpu_ring_write(ring,	PACKET0(adev->vcn.internal.ib_bar_low, 0));
1487 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1488 	amdgpu_ring_write(ring,	PACKET0(adev->vcn.internal.ib_bar_high, 0));
1489 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1490 	amdgpu_ring_write(ring,	PACKET0(adev->vcn.internal.ib_size, 0));
1491 	amdgpu_ring_write(ring, ib->length_dw);
1492 }
1493 
1494 void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1495 				uint32_t val, uint32_t mask)
1496 {
1497 	struct amdgpu_device *adev = ring->adev;
1498 
1499 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
1500 	amdgpu_ring_write(ring, reg << 2);
1501 
1502 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
1503 	amdgpu_ring_write(ring, val);
1504 
1505 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.gp_scratch8, 0));
1506 	amdgpu_ring_write(ring, mask);
1507 
1508 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
1509 
1510 	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1));
1511 }
1512 
1513 void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
1514 				unsigned vmid, uint64_t pd_addr)
1515 {
1516 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
1517 	uint32_t data0, data1, mask;
1518 
1519 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1520 
1521 	/* wait for register write */
1522 	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
1523 	data1 = lower_32_bits(pd_addr);
1524 	mask = 0xffffffff;
1525 	vcn_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
1526 }
1527 
1528 void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
1529 				uint32_t reg, uint32_t val)
1530 {
1531 	struct amdgpu_device *adev = ring->adev;
1532 
1533 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
1534 	amdgpu_ring_write(ring, reg << 2);
1535 
1536 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
1537 	amdgpu_ring_write(ring, val);
1538 
1539 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
1540 	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1));
1542 }
1543 
1544 /**
1545  * vcn_v2_0_enc_ring_get_rptr - get enc read pointer
1546  *
1547  * @ring: amdgpu_ring pointer
1548  *
1549  * Returns the current hardware enc read pointer
1550  */
1551 static uint64_t vcn_v2_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
1552 {
1553 	struct amdgpu_device *adev = ring->adev;
1554 
1555 	if (ring == &adev->vcn.inst->ring_enc[0])
1556 		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
1557 	else
1558 		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
1559 }
1560 
1561 /**
1562  * vcn_v2_0_enc_ring_get_wptr - get enc write pointer
1563  *
1564  * @ring: amdgpu_ring pointer
1565  *
1566  * Returns the current hardware enc write pointer
1567  */
1568 static uint64_t vcn_v2_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
1569 {
1570 	struct amdgpu_device *adev = ring->adev;
1571 
1572 	if (ring->use_doorbell)
1573 		return *ring->wptr_cpu_addr;
1574 
1575 	if (ring == &adev->vcn.inst->ring_enc[0])
1576 		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
1577 	else
1578 		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
1583 }
1584 
1585 /**
1586  * vcn_v2_0_enc_ring_set_wptr - set enc write pointer
1587  *
1588  * @ring: amdgpu_ring pointer
1589  *
1590  * Commits the enc write pointer to the hardware
1591  */
1592 static void vcn_v2_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
1593 {
1594 	struct amdgpu_device *adev = ring->adev;
1595 
1596 	if (ring->use_doorbell) {
1597 		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1598 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1599 	} else if (ring == &adev->vcn.inst->ring_enc[0]) {
1600 		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1601 	} else {
1602 		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1603 	}
1611 }
1612 
1613 /**
1614  * vcn_v2_0_enc_ring_emit_fence - emit an enc fence & trap command
1615  *
1616  * @ring: amdgpu_ring pointer
1617  * @addr: address
1618  * @seq: sequence number
1619  * @flags: fence related flags
1620  *
1621  * Write a fence and a trap command to the enc ring.
1622  */
1623 void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1624 				u64 seq, unsigned flags)
1625 {
1626 	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1627 
1628 	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
1629 	amdgpu_ring_write(ring, addr);
1630 	amdgpu_ring_write(ring, upper_32_bits(addr));
1631 	amdgpu_ring_write(ring, seq);
1632 	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
1633 }
1634 
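/**
 * vcn_v2_0_enc_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to terminate the enc ring submission.
 */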
1635 void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1636 {
1637 	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
1638 }
1639 
1640 /**
1641  * vcn_v2_0_enc_ring_emit_ib - enc execute indirect buffer
1642  *
1643  * @ring: amdgpu_ring pointer
1644  * @job: job to retrieve vmid from
1645  * @ib: indirect buffer to execute
1646  * @flags: unused
1647  *
1648  * Write enc ring commands to execute the indirect buffer
1649  */
1650 void vcn_v2_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1651 			       struct amdgpu_job *job,
1652 			       struct amdgpu_ib *ib,
1653 			       uint32_t flags)
1654 {
1655 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1656 
1657 	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
1658 	amdgpu_ring_write(ring, vmid);
1659 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1660 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1661 	amdgpu_ring_write(ring, ib->length_dw);
1662 }
1663 
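/**
 * vcn_v2_0_enc_ring_emit_reg_wait - emit an enc register wait command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset (in dwords)
 * @val: value the masked register contents must match
 * @mask: mask to apply to the register before the compare
 *
 * Emit a REG_WAIT command so the enc engine polls @reg until its
 * masked contents match @val.
 */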
1664 void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1665 				uint32_t val, uint32_t mask)
1666 {
1667 	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
1668 	amdgpu_ring_write(ring, reg << 2);
1669 	amdgpu_ring_write(ring, mask);
1670 	amdgpu_ring_write(ring, val);
1671 }
1672 
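/**
 * vcn_v2_0_enc_ring_emit_vm_flush - emit an enc VM page table flush
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VM instance to flush
 * @pd_addr: page directory base address
 *
 * Flush the TLB for @vmid and then wait until the hub has latched the
 * new page directory address for that VMID.
 */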
1673 void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1674 				unsigned int vmid, uint64_t pd_addr)
1675 {
1676 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
1677 
1678 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1679 
1680 	/* wait for reg writes */
1681 	vcn_v2_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
1682 					vmid * hub->ctx_addr_distance,
1683 					lower_32_bits(pd_addr), 0xffffffff);
1684 }
1685 
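/**
 * vcn_v2_0_enc_ring_emit_wreg - emit an enc register write command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset (in dwords)
 * @val: value to write
 */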
1686 void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
1687 {
1688 	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
1689 	amdgpu_ring_write(ring, reg << 2);
1690 	amdgpu_ring_write(ring, val);
1691 }
1692 
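/*
 * Nothing to program here: this stub only exists to satisfy the
 * amdgpu_irq_src .set interface.
 */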
1693 static int vcn_v2_0_set_interrupt_state(struct amdgpu_device *adev,
1694 					struct amdgpu_irq_src *source,
1695 					unsigned type,
1696 					enum amdgpu_interrupt_state state)
1697 {
1698 	return 0;
1699 }
1700 
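/**
 * vcn_v2_0_process_interrupt - process a VCN interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source this entry belongs to
 * @entry: decoded interrupt vector entry
 *
 * Dispatch dec/enc trap interrupts to fence processing on the
 * matching ring; unknown source ids are logged and ignored.
 */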
1701 static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
1702 				      struct amdgpu_irq_src *source,
1703 				      struct amdgpu_iv_entry *entry)
1704 {
1705 	DRM_DEBUG("IH: VCN TRAP\n");
1706 
1707 	switch (entry->src_id) {
1708 	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
1709 		amdgpu_fence_process(&adev->vcn.inst->ring_dec);
1710 		break;
1711 	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
1712 		amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
1713 		break;
1714 	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
1715 		amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
1716 		break;
1717 	default:
1718 		DRM_ERROR("Unhandled interrupt: %d %d\n",
1719 			  entry->src_id, entry->src_data[0]);
1720 		break;
1721 	}
1722 
1723 	return 0;
1724 }
1725 
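/**
 * vcn_v2_0_dec_ring_test_ring - basic decode ring sanity check
 *
 * @ring: amdgpu_ring pointer
 *
 * Seed the scratch9 register with 0xCAFEDEAD, submit a write of
 * 0xDEADBEEF through the ring and poll until the value reads back,
 * returning -ETIMEDOUT if it never does.  Skipped under SR-IOV.
 */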
1726 int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
1727 {
1728 	struct amdgpu_device *adev = ring->adev;
1729 	uint32_t tmp = 0;
1730 	unsigned i;
1731 	int r;
1732 
1733 	if (amdgpu_sriov_vf(adev))
1734 		return 0;
1735 
1736 	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
1737 	r = amdgpu_ring_alloc(ring, 4);
1738 	if (r)
1739 		return r;
1740 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
1741 	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
1742 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
1743 	amdgpu_ring_write(ring, 0xDEADBEEF);
1744 	amdgpu_ring_commit(ring);
1745 	for (i = 0; i < adev->usec_timeout; i++) {
1746 		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
1747 		if (tmp == 0xDEADBEEF)
1748 			break;
1749 		udelay(1);
1750 	}
1751 
1752 	if (i >= adev->usec_timeout)
1753 		r = -ETIMEDOUT;
1754 
1755 	return r;
1756 }
1757 
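/**
 * vcn_v2_0_set_powergating_state - set VCN block powergating state
 *
 * @handle: amdgpu_device pointer
 * @state: power gating state requested
 *
 * Stop the block when gating and (re)start it when ungating, caching
 * the new state on success.  Under SR-IOV the state is only recorded.
 */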
1759 static int vcn_v2_0_set_powergating_state(void *handle,
1760 					  enum amd_powergating_state state)
1761 {
1762 	/* This doesn't actually powergate the VCN block.
1763 	 * That's done in the dpm code via the SMC.  This
1764 	 * just re-inits the block as necessary.  The actual
1765 	 * gating still happens in the dpm code.  We should
1766 	 * revisit this when there is a cleaner line between
1767 	 * the SMC and the hw blocks.
1768 	 */
1769 	int ret;
1770 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1771 
1772 	if (amdgpu_sriov_vf(adev)) {
1773 		adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
1774 		return 0;
1775 	}
1776 
1777 	if (state == adev->vcn.cur_state)
1778 		return 0;
1779 
1780 	if (state == AMD_PG_STATE_GATE)
1781 		ret = vcn_v2_0_stop(adev);
1782 	else
1783 		ret = vcn_v2_0_start(adev);
1784 
1785 	if (!ret)
1786 		adev->vcn.cur_state = state;
1787 	return ret;
1788 }
1789 
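/**
 * vcn_v2_0_start_mmsch - hand the init descriptor table to the MMSCH
 *
 * @adev: amdgpu_device pointer
 * @table: MM engine init descriptor table in GPU memory
 *
 * Program the table address, vmid and size, reset the ring write
 * pointers, then kick off initialization through the mailbox and poll
 * the response register until the MMSCH reports completion, returning
 * -EBUSY on timeout.
 */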
1790 static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev,
1791 				struct amdgpu_mm_table *table)
1792 {
1793 	uint32_t data = 0, loop;
1794 	uint64_t addr = table->gpu_addr;
1795 	struct mmsch_v2_0_init_header *header;
1796 	uint32_t size;
1797 	int i;
1798 
1799 	header = (struct mmsch_v2_0_init_header *)table->cpu_addr;
1800 	size = header->header_size + header->vcn_table_size;
1801 
1802 	/* 1, write the GPU mc addr of the memory descriptor location to
1803 	 * the mmMMSCH_VF_CTX_ADDR_LO/HI registers
1804 	 */
1805 	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
1806 	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
1807 
1808 	/* 2, update vmid of descriptor */
1809 	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
1810 	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
1811 	/* use domain0 for MM scheduler */
1812 	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
1813 	WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);
1814 
1815 	/* 3, notify mmsch about the size of this descriptor */
1816 	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);
1817 
1818 	/* 4, set resp to zero */
1819 	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
1820 
1821 	adev->vcn.inst->ring_dec.wptr = 0;
1822 	adev->vcn.inst->ring_dec.wptr_old = 0;
1823 	vcn_v2_0_dec_ring_set_wptr(&adev->vcn.inst->ring_dec);
1824 
1825 	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
1826 		adev->vcn.inst->ring_enc[i].wptr = 0;
1827 		adev->vcn.inst->ring_enc[i].wptr_old = 0;
1828 		vcn_v2_0_enc_ring_set_wptr(&adev->vcn.inst->ring_enc[i]);
1829 	}
1830 
1831 	/* 5, kick off the initialization and wait until
1832 	 * mmMMSCH_VF_MAILBOX_RESP becomes non-zero
1833 	 */
1834 	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
1835 
1836 	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
1837 	loop = 1000;
1838 	while ((data & 0x10000002) != 0x10000002) {
1839 		udelay(10);
1840 		data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
1841 		loop--;
1842 		if (!loop)
1843 			break;
1844 	}
1845 
1846 	if (!loop) {
1847 		DRM_ERROR("failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = 0x%08x\n", data);
1849 		return -EBUSY;
1850 	}
1851 
1852 	return 0;
1853 }
1854 
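/**
 * vcn_v2_0_start_sriov - start VCN under SR-IOV
 *
 * @adev: amdgpu_device pointer
 *
 * Build the MMSCH init table (firmware and cache locations plus dec/enc
 * ring setup, expressed as direct register writes) and submit it so the
 * host MMSCH programs the engine on the guest's behalf.
 */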
1855 static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
1856 {
1857 	int r;
1858 	uint32_t tmp;
1859 	struct amdgpu_ring *ring;
1860 	uint32_t offset, size;
1861 	uint32_t table_size = 0;
1862 	struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
1863 	struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
1864 	struct mmsch_v2_0_cmd_end end = { {0} };
1865 	struct mmsch_v2_0_init_header *header;
1866 	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
1867 	uint8_t i = 0;
1868 
1869 	header = (struct mmsch_v2_0_init_header *)init_table;
1870 	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
1871 	direct_rd_mod_wt.cmd_header.command_type =
1872 		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
1873 	end.cmd_header.command_type = MMSCH_COMMAND__END;
1874 
1875 	if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) {
1876 		header->version = MMSCH_VERSION;
1877 		header->header_size = sizeof(struct mmsch_v2_0_init_header) >> 2;
1878 
1879 		header->vcn_table_offset = header->header_size;
1880 
1881 		init_table += header->vcn_table_offset;
1882 
1883 		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
1884 
1885 		MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
1886 			SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
1887 			0xFFFFFFFF, 0x00000004);
1888 
1889 		/* mc resume */
1890 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1891 			MMSCH_V2_0_INSERT_DIRECT_WT(
1892 				SOC15_REG_OFFSET(UVD, i,
1893 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1894 				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo);
1895 			MMSCH_V2_0_INSERT_DIRECT_WT(
1896 				SOC15_REG_OFFSET(UVD, i,
1897 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1898 				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi);
1899 			offset = 0;
1900 		} else {
1901 			MMSCH_V2_0_INSERT_DIRECT_WT(
1902 				SOC15_REG_OFFSET(UVD, i,
1903 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1904 				lower_32_bits(adev->vcn.inst->gpu_addr));
1905 			MMSCH_V2_0_INSERT_DIRECT_WT(
1906 				SOC15_REG_OFFSET(UVD, i,
1907 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1908 				upper_32_bits(adev->vcn.inst->gpu_addr));
1909 			offset = size;
1910 		}
1911 
1912 		MMSCH_V2_0_INSERT_DIRECT_WT(
1913 			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
1914 			0);
1915 		MMSCH_V2_0_INSERT_DIRECT_WT(
1916 			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
1917 			size);
1918 
1919 		MMSCH_V2_0_INSERT_DIRECT_WT(
1920 			SOC15_REG_OFFSET(UVD, i,
1921 				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
1922 			lower_32_bits(adev->vcn.inst->gpu_addr + offset));
1923 		MMSCH_V2_0_INSERT_DIRECT_WT(
1924 			SOC15_REG_OFFSET(UVD, i,
1925 				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
1926 			upper_32_bits(adev->vcn.inst->gpu_addr + offset));
1927 		MMSCH_V2_0_INSERT_DIRECT_WT(
1928 			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
1929 			0);
1930 		MMSCH_V2_0_INSERT_DIRECT_WT(
1931 			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
1932 			AMDGPU_VCN_STACK_SIZE);
1933 
1934 		MMSCH_V2_0_INSERT_DIRECT_WT(
1935 			SOC15_REG_OFFSET(UVD, i,
1936 				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
1937 			lower_32_bits(adev->vcn.inst->gpu_addr + offset +
1938 				AMDGPU_VCN_STACK_SIZE));
1939 		MMSCH_V2_0_INSERT_DIRECT_WT(
1940 			SOC15_REG_OFFSET(UVD, i,
1941 				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
1942 			upper_32_bits(adev->vcn.inst->gpu_addr + offset +
1943 				AMDGPU_VCN_STACK_SIZE));
1944 		MMSCH_V2_0_INSERT_DIRECT_WT(
1945 			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
1946 			0);
1947 		MMSCH_V2_0_INSERT_DIRECT_WT(
1948 			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
1949 			AMDGPU_VCN_CONTEXT_SIZE);
1950 
1951 		for (r = 0; r < adev->vcn.num_enc_rings; ++r) {
1952 			ring = &adev->vcn.inst->ring_enc[r];
1953 			ring->wptr = 0;
1954 			MMSCH_V2_0_INSERT_DIRECT_WT(
1955 				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
1956 				lower_32_bits(ring->gpu_addr));
1957 			MMSCH_V2_0_INSERT_DIRECT_WT(
1958 				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
1959 				upper_32_bits(ring->gpu_addr));
1960 			MMSCH_V2_0_INSERT_DIRECT_WT(
1961 				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
1962 				ring->ring_size / 4);
1963 		}
1964 
1965 		ring = &adev->vcn.inst->ring_dec;
1966 		ring->wptr = 0;
1967 		MMSCH_V2_0_INSERT_DIRECT_WT(
1968 			SOC15_REG_OFFSET(UVD, i,
1969 				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
1970 			lower_32_bits(ring->gpu_addr));
1971 		MMSCH_V2_0_INSERT_DIRECT_WT(
1972 			SOC15_REG_OFFSET(UVD, i,
1973 				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
1974 			upper_32_bits(ring->gpu_addr));
1975 		/* force RBC into idle state */
1976 		tmp = order_base_2(ring->ring_size);
1977 		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
1978 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1979 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1980 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1981 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1982 		MMSCH_V2_0_INSERT_DIRECT_WT(
1983 			SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
1984 
1985 		/* add end packet */
1986 		tmp = sizeof(struct mmsch_v2_0_cmd_end);
1987 		memcpy((void *)init_table, &end, tmp);
1988 		table_size += (tmp / 4);
1989 		header->vcn_table_size = table_size;
1990 	}
1992 	return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
1993 }
1994 
1995 static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
1996 	.name = "vcn_v2_0",
1997 	.early_init = vcn_v2_0_early_init,
1998 	.late_init = NULL,
1999 	.sw_init = vcn_v2_0_sw_init,
2000 	.sw_fini = vcn_v2_0_sw_fini,
2001 	.hw_init = vcn_v2_0_hw_init,
2002 	.hw_fini = vcn_v2_0_hw_fini,
2003 	.suspend = vcn_v2_0_suspend,
2004 	.resume = vcn_v2_0_resume,
2005 	.is_idle = vcn_v2_0_is_idle,
2006 	.wait_for_idle = vcn_v2_0_wait_for_idle,
2007 	.check_soft_reset = NULL,
2008 	.pre_soft_reset = NULL,
2009 	.soft_reset = NULL,
2010 	.post_soft_reset = NULL,
2011 	.set_clockgating_state = vcn_v2_0_set_clockgating_state,
2012 	.set_powergating_state = vcn_v2_0_set_powergating_state,
2013 };
2014 
2015 static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
2016 	.type = AMDGPU_RING_TYPE_VCN_DEC,
2017 	.align_mask = 0xf,
2018 	.secure_submission_supported = true,
2019 	.get_rptr = vcn_v2_0_dec_ring_get_rptr,
2020 	.get_wptr = vcn_v2_0_dec_ring_get_wptr,
2021 	.set_wptr = vcn_v2_0_dec_ring_set_wptr,
2022 	.emit_frame_size =
2023 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
2024 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
2025 		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
2026 		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
2027 		6,
2028 	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
2029 	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
2030 	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
2031 	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
2032 	.test_ring = vcn_v2_0_dec_ring_test_ring,
2033 	.test_ib = amdgpu_vcn_dec_ring_test_ib,
2034 	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
2035 	.insert_start = vcn_v2_0_dec_ring_insert_start,
2036 	.insert_end = vcn_v2_0_dec_ring_insert_end,
2037 	.pad_ib = amdgpu_ring_generic_pad_ib,
2038 	.begin_use = amdgpu_vcn_ring_begin_use,
2039 	.end_use = amdgpu_vcn_ring_end_use,
2040 	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
2041 	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
2042 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2043 };
2044 
2045 static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
2046 	.type = AMDGPU_RING_TYPE_VCN_ENC,
2047 	.align_mask = 0x3f,
2048 	.nop = VCN_ENC_CMD_NO_OP,
2049 	.get_rptr = vcn_v2_0_enc_ring_get_rptr,
2050 	.get_wptr = vcn_v2_0_enc_ring_get_wptr,
2051 	.set_wptr = vcn_v2_0_enc_ring_set_wptr,
2052 	.emit_frame_size =
2053 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
2054 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
2055 		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
2056 		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
2057 		1, /* vcn_v2_0_enc_ring_insert_end */
2058 	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
2059 	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
2060 	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
2061 	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
2062 	.test_ring = amdgpu_vcn_enc_ring_test_ring,
2063 	.test_ib = amdgpu_vcn_enc_ring_test_ib,
2064 	.insert_nop = amdgpu_ring_insert_nop,
2065 	.insert_end = vcn_v2_0_enc_ring_insert_end,
2066 	.pad_ib = amdgpu_ring_generic_pad_ib,
2067 	.begin_use = amdgpu_vcn_ring_begin_use,
2068 	.end_use = amdgpu_vcn_ring_end_use,
2069 	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
2070 	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
2071 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2072 };
2073 
2074 static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
2075 {
2076 	adev->vcn.inst->ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs;
2077 	DRM_INFO("VCN decode is enabled in VM mode\n");
2078 }
2079 
2080 static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev)
2081 {
2082 	int i;
2083 
2084 	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
2085 		adev->vcn.inst->ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;
2086 
2087 	DRM_INFO("VCN encode is enabled in VM mode\n");
2088 }
2089 
2090 static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
2091 	.set = vcn_v2_0_set_interrupt_state,
2092 	.process = vcn_v2_0_process_interrupt,
2093 };
2094 
2095 static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev)
2096 {
2097 	adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 1;
2098 	adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs;
2099 }
2100 
2101 const struct amdgpu_ip_block_version vcn_v2_0_ip_block = {
2102 	.type = AMD_IP_BLOCK_TYPE_VCN,
2103 	.major = 2,
2104 	.minor = 0,
2105 	.rev = 0,
2106 	.funcs = &vcn_v2_0_ip_funcs,
2107 };
2109