xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c (revision fd1e77d9)
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v3_0.h"

#include "vcn/vcn_3_0_0_offset.h"
#include "vcn/vcn_3_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#include <drm/drm_drv.h>

#define VCN_VID_SOC_ADDRESS_2_0					0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0				0x48200

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c

#define VCN_INSTANCES_SIENNA_CICHLID				2
#define DEC_SW_RING_ENABLED					FALSE

#define RDECODE_MSG_CREATE					0x00000000
#define RDECODE_MESSAGE_CREATE					0x00000001

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

static int vcn_v3_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v3_0_set_powergating_state(void *handle,
			enum amd_powergating_state state);
static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
			int inst_idx, struct dpg_pause_state *new_state);

static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * vcn_v3_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

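	/*
	 * Under SR-IOV the VF always sees both Sienna Cichlid VCN instances
	 * with one encode ring each; harvesting is presumably handled on the
	 * host side, so the VF-local harvest config is cleared here.
	 */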
	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
		adev->vcn.harvest_config = 0;
		adev->vcn.num_enc_rings = 1;
	} else {
		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						 AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;

		if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 33))
			adev->vcn.num_enc_rings = 0;
		else
			adev->vcn.num_enc_rings = 2;
	}

	vcn_v3_0_set_dec_ring_funcs(adev);
	vcn_v3_0_set_enc_ring_funcs(adev);
	vcn_v3_0_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v3_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and do software initialization
 */
static int vcn_v3_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	int vcn_doorbell_index = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	/*
	 * Note: doorbell assignment is fixed for SRIOV multiple VCN engines
	 * Formula:
	 *   vcn_db_base  = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
	 *   dec_ring_i   = vcn_db_base + i * (adev->vcn.num_enc_rings + 1)
	 *   enc_ring_i,j = vcn_db_base + i * (adev->vcn.num_enc_rings + 1) + 1 + j
	 */
	if (amdgpu_sriov_vf(adev)) {
		vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1;
		/* get DWORD offset */
		vcn_doorbell_index = vcn_doorbell_index << 1;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.scratch9 = SOC15_REG_OFFSET(VCN, i, mmUVD_SCRATCH9);
		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.data0 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.data1 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.cmd = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.nop = SOC15_REG_OFFSET(VCN, i, mmUVD_NO_OP);

		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		atomic_set(&adev->vcn.inst[i].sched_score, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->use_doorbell = true;
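		/*
		 * Doorbell layout: under SR-IOV the fixed MMSCH formula above
		 * packs dec and enc ring doorbells contiguously per instance;
		 * on bare metal each instance gets its own window (8 * i) with
		 * the decode ring at slot 0 and encode ring j at slot 2 + j.
		 */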
		if (amdgpu_sriov_vf(adev)) {
			ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1);
		} else {
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
		}
		sprintf(ring->name, "vcn_dec_%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
				     AMDGPU_RING_PRIO_DEFAULT,
				     &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(j);

			/* VCN ENC TRAP */
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				j + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
			if (r)
				return r;

			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->use_doorbell = true;
			if (amdgpu_sriov_vf(adev)) {
				ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1) + 1 + j;
			} else {
				ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
			}
			sprintf(ring->name, "vcn_enc_%d.%d", i, j);
			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
					     hw_prio, &adev->vcn.inst[i].sched_score);
			if (r)
				return r;
		}

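		/* advertise the driver's ring/queue capabilities to the VCN firmware */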
		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SW_RING_FLAG) |
					     cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) |
					     cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB);
		fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED);
		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_VERSION_INFO_FLAG);
		if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 2))
			fw_shared->smu_interface_info.smu_interface_type = 2;
		else if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 1))
			fw_shared->smu_interface_info.smu_interface_type = 1;

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v3_0_pause_dpg_mode;

	return 0;
}


/**
 * vcn_v3_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Suspend VCN and free up software allocations
 */
static int vcn_v3_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, r, idx;

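	/* only touch the fw shared CPU mapping while the drm device is still attached */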
	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_fw_shared *fw_shared;

			if (adev->vcn.harvest_config & (1 << i))
				continue;
			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sw_ring.is_enabled = false;
		}

		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v3_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v3_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r;

	if (amdgpu_sriov_vf(adev)) {
		r = vcn_v3_0_start_sriov(adev);
		if (r)
			goto done;

		/* initialize VCN dec and enc ring buffers */
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_dec;
			if (amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, i)) {
				ring->sched.ready = false;
				ring->no_scheduler = true;
				dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
			} else {
				ring->wptr = 0;
				ring->wptr_old = 0;
				vcn_v3_0_dec_ring_set_wptr(ring);
				ring->sched.ready = true;
			}

			for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
				ring = &adev->vcn.inst[i].ring_enc[j];
				if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
					ring->sched.ready = false;
					ring->no_scheduler = true;
					dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
				} else {
					ring->wptr = 0;
					ring->wptr_old = 0;
					vcn_v3_0_enc_ring_set_wptr(ring);
					ring->sched.ready = true;
				}
			}
		}
	} else {
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_dec;

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						     ring->doorbell_index, i);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
				ring = &adev->vcn.inst[i].ring_enc[j];
				r = amdgpu_ring_test_helper(ring);
				if (r)
					goto done;
			}
		}
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v3_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark the rings as not ready anymore
 */
static int vcn_v3_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

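		/* on bare metal, gate the instance if DPG is in use or it is still busy */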
		if (!amdgpu_sriov_vf(adev)) {
			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
					(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
					 RREG32_SOC15(VCN, i, mmUVD_STATUS))) {
				vcn_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
			}
		}
	}

	return 0;
}

/**
 * vcn_v3_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v3_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v3_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v3_0_hw_init(adev);

	return r;
}

/**
 * vcn_v3_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;
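
	/*
	 * Cache window 0 holds the firmware image. When the PSP loads the
	 * firmware it lives in the TMR, so point the window at the TMR copy;
	 * otherwise use the driver-allocated BO and skip the firmware header
	 * via AMDGPU_UVD_FIRMWARE_OFFSET.
	 */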
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
}

static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0),
			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		UVD, inst_idx, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0, 0x3F3FFFFF);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, 0, 0x3F3FFFFF);
	}

	data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data);
}

static void vcn_v3_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIRL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDLM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDAB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDATD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNA_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNB_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, data, 0x3F3FFFFF);
	}
}

/**
 * vcn_v3_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Disable clock gating for VCN block
 */
static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	/* disable VCN CGC */
	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, inst, mmUVD_CGC_GATE, data);

	SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__IME_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK
		| UVD_SUVD_CGC_GATE__EFC_MASK
		| UVD_SUVD_CGC_GATE__SAOE_MASK
		| UVD_SUVD_CGC_GATE__SRE_AV1_MASK
		| UVD_SUVD_CGC_GATE__FBC_PCLK_MASK
		| UVD_SUVD_CGC_GATE__FBC_CCLK_MASK
		| UVD_SUVD_CGC_GATE__SCM_AV1_MASK
		| UVD_SUVD_CGC_GATE__SMPA_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2);
	data |= (UVD_SUVD_CGC_GATE2__MPBE0_MASK
		| UVD_SUVD_CGC_GATE2__MPBE1_MASK
		| UVD_SUVD_CGC_GATE2__SIT_AV1_MASK
		| UVD_SUVD_CGC_GATE2__SDB_AV1_MASK
		| UVD_SUVD_CGC_GATE2__MPC1_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__EFC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
		| UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}

static void vcn_v3_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
		uint8_t sram_sel, int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v3_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Enable clock gating for VCN block
 */
static void vcn_v3_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	/* enable VCN CGC */
	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__EFC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
		| UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}

static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;

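	/*
	 * Bring this instance up in dynamic power gating (DPG) mode. When
	 * 'indirect' is set, the register writes below are staged in the
	 * DPG scratch SRAM and handed to the PSP, instead of being applied
	 * directly through MMIO.
	 */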
	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v3_0_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUXB0),
		 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v3_0_mc_resume_dpg_mode(adev, inst_idx, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_CTRL2), 0, 0, indirect);

	/* unblock VCPU register access */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_RB_ARB_CTRL), 0, 0, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	/* add nop to work around the PSP size check */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	if (indirect)
		psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
			(uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
				(uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));

	ring = &adev->vcn.inst[inst_idx].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);

	/* set the write pointer delay */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	/* Reset FW shared memory RBC WPTR/RPTR */
	fw_shared->rb.rptr = 0;
	fw_shared->rb.wptr = lower_32_bits(ring->wptr);

	/* resetting done, fw can check RB ring */
	fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

	return 0;
}

static int vcn_v3_0_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int i, j, k, r;

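	/* power up the VCN block through dpm before programming any registers */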
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v3_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* disable VCN power gating */
		vcn_v3_0_disable_static_power_gating(adev, i);

		/* set VCN status busy */
		tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);

		/* disable SW clock gating */
		vcn_v3_0_disable_clock_gating(adev, i);

		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

		vcn_v3_0_mc_resume(adev, i);

		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, mmUVD_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU reset to boot */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

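		/*
		 * Wait for the VCPU to report that it booted (UVD_STATUS bit 1),
		 * retrying up to 10 times with a VCPU block reset in between.
		 */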
		for (j = 0; j < 10; ++j) {
			uint32_t status;

			for (k = 0; k < 100; ++k) {
				status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCN[%d] decode not responding, trying to reset the VCPU!!!\n", i);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("VCN[%d] decode not responding, giving up!!!\n", i);
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);

		WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0);
		ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));
		fw_shared->rb.wptr = lower_32_bits(ring->wptr);
		fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

		if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(3, 0, 33)) {
			fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
			ring = &adev->vcn.inst[i].ring_enc[0];
			WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
			WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
			WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
			fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

			fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
			ring = &adev->vcn.inst[i].ring_enc[1];
			WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
			WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
			WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
			fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
		}
	}

	return 0;
}

static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
{
	int i, j;
	struct amdgpu_ring *ring;
	uint64_t cache_addr;
	uint64_t rb_addr;
	uint64_t ctx_addr;
	uint32_t param, resp, expected;
	uint32_t offset, cache_size;
	uint32_t tmp, timeout;

	struct amdgpu_mm_table *table = &adev->virt.mm_table;
	uint32_t *table_loc;
	uint32_t table_size;
	uint32_t size, size_dw;

	struct mmsch_v3_0_cmd_direct_write
		direct_wt = { {0} };
	struct mmsch_v3_0_cmd_direct_read_modify_write
		direct_rd_mod_wt = { {0} };
	struct mmsch_v3_0_cmd_end end = { {0} };
	struct mmsch_v3_0_init_header header;

	direct_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type =
		MMSCH_COMMAND__END;

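	/*
	 * Build the MMSCH init table in the shared mm_table buffer: a header
	 * followed by per-instance lists of direct-write / read-modify-write
	 * register packets that the MMSCH firmware replays on the VF's behalf.
	 */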
1310 	header.version = MMSCH_VERSION;
1311 	header.total_size = sizeof(struct mmsch_v3_0_init_header) >> 2;
1312 	for (i = 0; i < AMDGPU_MAX_VCN_INSTANCES; i++) {
1313 		header.inst[i].init_status = 0;
1314 		header.inst[i].table_offset = 0;
1315 		header.inst[i].table_size = 0;
1316 	}
1317 
1318 	table_loc = (uint32_t *)table->cpu_addr;
1319 	table_loc += header.total_size;
1320 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1321 		if (adev->vcn.harvest_config & (1 << i))
1322 			continue;
1323 
1324 		table_size = 0;
1325 
1326 		MMSCH_V3_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i,
1327 			mmUVD_STATUS),
1328 			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
1329 
1330 		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
1331 
1332 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1333 			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1334 				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1335 				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
1336 			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1337 				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1338 				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
1339 			offset = 0;
1340 			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1341 				mmUVD_VCPU_CACHE_OFFSET0),
1342 				0);
1343 		} else {
1344 			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1345 				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1346 				lower_32_bits(adev->vcn.inst[i].gpu_addr));
1347 			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1348 				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1349 				upper_32_bits(adev->vcn.inst[i].gpu_addr));
1350 			offset = cache_size;
1351 			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1352 				mmUVD_VCPU_CACHE_OFFSET0),
1353 				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
1354 		}
1355 
1356 		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1357 			mmUVD_VCPU_CACHE_SIZE0),
1358 			cache_size);
1359 
1360 		cache_addr = adev->vcn.inst[i].gpu_addr + offset;
1361 		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1362 			mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
1363 			lower_32_bits(cache_addr));
1364 		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1365 			mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
1366 			upper_32_bits(cache_addr));
1367 		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1368 			mmUVD_VCPU_CACHE_OFFSET1),
1369 			0);
1370 		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1371 			mmUVD_VCPU_CACHE_SIZE1),
1372 			AMDGPU_VCN_STACK_SIZE);
1373 
1374 		cache_addr = adev->vcn.inst[i].gpu_addr + offset +
1375 			AMDGPU_VCN_STACK_SIZE;
1376 		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1377 			mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
1378 			lower_32_bits(cache_addr));
1379 		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1380 			mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
1381 			upper_32_bits(cache_addr));
1382 		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1383 			mmUVD_VCPU_CACHE_OFFSET2),
1384 			0);
1385 		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1386 			mmUVD_VCPU_CACHE_SIZE2),
1387 			AMDGPU_VCN_CONTEXT_SIZE);
1388 
1389 		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
1390 			ring = &adev->vcn.inst[i].ring_enc[j];
1391 			ring->wptr = 0;
1392 			rb_addr = ring->gpu_addr;
1393 			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1394 				mmUVD_RB_BASE_LO),
1395 				lower_32_bits(rb_addr));
1396 			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1397 				mmUVD_RB_BASE_HI),
1398 				upper_32_bits(rb_addr));
1399 			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1400 				mmUVD_RB_SIZE),
1401 				ring->ring_size / 4);
1402 		}
1403 
1404 		ring = &adev->vcn.inst[i].ring_dec;
1405 		ring->wptr = 0;
1406 		rb_addr = ring->gpu_addr;
1407 		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1408 			mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
1409 			lower_32_bits(rb_addr));
1410 		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1411 			mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
1412 			upper_32_bits(rb_addr));
1413 		/* force RBC into idle state */
1414 		tmp = order_base_2(ring->ring_size);
1415 		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
1416 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1417 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1418 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1419 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1420 		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1421 			mmUVD_RBC_RB_CNTL),
1422 			tmp);
1423 
1424 		/* add end packet */
1425 		MMSCH_V3_0_INSERT_END();
1426 
1427 		/* refine header */
1428 		header.inst[i].init_status = 0;
1429 		header.inst[i].table_offset = header.total_size;
1430 		header.inst[i].table_size = table_size;
1431 		header.total_size += table_size;
1432 	}
1433 
1434 	/* Update init table header in memory */
1435 	size = sizeof(struct mmsch_v3_0_init_header);
1436 	table_loc = (uint32_t *)table->cpu_addr;
1437 	memcpy((void *)table_loc, &header, size);
1438 
1439 	/* message MMSCH (in VCN[0]) to initialize this client
1440 	 * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
1441 	 * of memory descriptor location
1442 	 */
1443 	ctx_addr = table->gpu_addr;
1444 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
1445 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
1446 
1447 	/* 2, update vmid of descriptor */
1448 	tmp = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
1449 	tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
1450 	/* use domain0 for MM scheduler */
1451 	tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
1452 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, tmp);
1453 
1454 	/* 3. notify MMSCH of the size of this descriptor */
1455 	size = header.total_size;
1456 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);
1457 
1458 	/* 4. clear the mailbox response register */
1459 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
1460 
1461 	/* 5. kick off the initialization and wait until
1462 	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
1463 	 */
1464 	param = 0x10000001;
1465 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, param);
1466 	tmp = 0;
1467 	timeout = 1000;
1468 	resp = 0;
1469 	expected = param + 1;
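	/* MMSCH acknowledges the init request by writing the request value
	 * plus one (param + 1) back into the RESP mailbox; poll for it.
	 */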
1470 	while (resp != expected) {
1471 		resp = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
1472 		if (resp == expected)
1473 			break;
1474 
1475 		udelay(10);
1476 		tmp += 10;
1477 		if (tmp >= timeout) {
1478 			DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"
1479 				" waiting for mmMMSCH_VF_MAILBOX_RESP "
1480 				"(expected=0x%08x, readback=0x%08x)\n",
1481 				tmp, expected, resp);
1482 			return -EBUSY;
1483 		}
1484 	}
1485 
1486 	return 0;
1487 }
1488 
1489 static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
1490 {
1491 	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
1492 	uint32_t tmp;
1493 
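	/* Unpause DPG first so the firmware can quiesce before power-down. */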
1494 	vcn_v3_0_pause_dpg_mode(adev, inst_idx, &state);
1495 
1496 	/* Wait for power status to be 1 */
1497 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
1498 		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1499 
1500 	/* wait for read ptr to be equal to write ptr */
1501 	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
1502 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);
1503 
1504 	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
1505 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);
1506 
1507 	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
1508 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);
1509 
1510 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
1511 		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1512 
1513 	/* disable dynamic power gating mode */
1514 	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
1515 		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
1516 
1517 	return 0;
1518 }
1519 
1520 static int vcn_v3_0_stop(struct amdgpu_device *adev)
1521 {
1522 	uint32_t tmp;
1523 	int i, r = 0;
1524 
1525 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1526 		if (adev->vcn.harvest_config & (1 << i))
1527 			continue;
1528 
1529 		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1530 			r = vcn_v3_0_stop_dpg_mode(adev, i);
1531 			continue;
1532 		}
1533 
1534 		/* wait for vcn idle */
1535 		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
1536 		if (r)
1537 			return r;
1538 
1539 		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
1540 			UVD_LMI_STATUS__READ_CLEAN_MASK |
1541 			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
1542 			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
1543 		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
1544 		if (r)
1545 			return r;
1546 
1547 		/* disable LMI UMC channel */
1548 		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
1549 		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
1550 		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
1551 		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
1552 			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
1553 		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
1554 		if (r)
1555 			return r;
1556 
1557 		/* block VCPU register access */
1558 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
1559 			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
1560 			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
1561 
1562 		/* reset VCPU */
1563 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
1564 			UVD_VCPU_CNTL__BLK_RST_MASK,
1565 			~UVD_VCPU_CNTL__BLK_RST_MASK);
1566 
1567 		/* disable VCPU clock */
1568 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
1569 			~(UVD_VCPU_CNTL__CLK_EN_MASK));
1570 
1571 		/* apply soft reset */
1572 		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
1573 		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
1574 		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
1575 		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
1576 		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
1577 		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
1578 
1579 		/* clear status */
1580 		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
1581 
1582 		/* apply HW clock gating */
1583 		vcn_v3_0_enable_clock_gating(adev, i);
1584 
1585 		/* enable VCN power gating */
1586 		vcn_v3_0_enable_static_power_gating(adev, i);
1587 	}
1588 
1589 	if (adev->pm.dpm_enabled)
1590 		amdgpu_dpm_enable_uvd(adev, false);
1591 
1592 	return 0;
1593 }
1594 
1595 static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
1596 		   int inst_idx, struct dpg_pause_state *new_state)
1597 {
1598 	volatile struct amdgpu_fw_shared *fw_shared;
1599 	struct amdgpu_ring *ring;
1600 	uint32_t reg_data = 0;
1601 	int ret_code;
1602 
1603 	/* pause/unpause if state is changed */
1604 	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
1605 		DRM_DEBUG("dpg pause state changed %d -> %d\n",
1606 			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
1607 		reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
1608 			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1609 
1610 		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
1611 			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
1612 				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1613 
1614 			if (!ret_code) {
1615 				/* pause DPG */
1616 				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1617 				WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
1618 
1619 				/* wait for ACK */
1620 				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
1621 					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
1622 					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1623 
1624 				/* Stall DPG before WPTR/RPTR reset */
1625 				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
1626 					UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
1627 					~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
1628 
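				/* VCN 3.0.33 has no encode rings, so the encode
				 * queue restore below is skipped for it.
				 */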
1629 				if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(3, 0, 33)) {
1630 					/* Restore */
1631 					fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
1632 					fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
1633 					ring = &adev->vcn.inst[inst_idx].ring_enc[0];
1634 					ring->wptr = 0;
1635 					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
1636 					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1637 					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
1638 					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1639 					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1640 					fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
1641 
1642 					fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
1643 					ring = &adev->vcn.inst[inst_idx].ring_enc[1];
1644 					ring->wptr = 0;
1645 					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1646 					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1647 					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
1648 					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1649 					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1650 					fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
1651 
1652 					/* restore RPTR/WPTR with the pointers saved in FW shared memory */
1653 					WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, fw_shared->rb.rptr);
1654 					WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, fw_shared->rb.wptr);
1655 				}
1656 
1657 				/* Unstall DPG */
1658 				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
1659 					0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
1660 
1661 				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
1662 					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1663 			}
1664 		} else {
1665 			/* unpause dpg, no need to wait */
1666 			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1667 			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
1668 		}
1669 		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
1670 	}
1671 
1672 	return 0;
1673 }
1674 
1675 /**
1676  * vcn_v3_0_dec_ring_get_rptr - get read pointer
1677  *
1678  * @ring: amdgpu_ring pointer
1679  *
1680  * Returns the current hardware read pointer
1681  */
1682 static uint64_t vcn_v3_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
1683 {
1684 	struct amdgpu_device *adev = ring->adev;
1685 
1686 	return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
1687 }
1688 
1689 /**
1690  * vcn_v3_0_dec_ring_get_wptr - get write pointer
1691  *
1692  * @ring: amdgpu_ring pointer
1693  *
1694  * Returns the current hardware write pointer
1695  */
1696 static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
1697 {
1698 	struct amdgpu_device *adev = ring->adev;
1699 
1700 	if (ring->use_doorbell)
1701 		return *ring->wptr_cpu_addr;
1702 	else
1703 		return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
1704 }
1705 
1706 /**
1707  * vcn_v3_0_dec_ring_set_wptr - set write pointer
1708  *
1709  * @ring: amdgpu_ring pointer
1710  *
1711  * Commits the write pointer to the hardware
1712  */
1713 static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
1714 {
1715 	struct amdgpu_device *adev = ring->adev;
1716 	volatile struct amdgpu_fw_shared *fw_shared;
1717 
1718 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1719 		/* Whenever RBC_RB_WPTR is updated, save the wptr in fw_shared rb.wptr and in SCRATCH2 */
1720 		fw_shared = adev->vcn.inst[ring->me].fw_shared.cpu_addr;
1721 		fw_shared->rb.wptr = lower_32_bits(ring->wptr);
1722 		WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
1723 			lower_32_bits(ring->wptr));
1724 	}
1725 
1726 	if (ring->use_doorbell) {
1727 		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1728 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1729 	} else {
1730 		WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
1731 	}
1732 }
1733 
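/* The software decode ring is a plain dword stream: each packet is a
 * VCN_DEC_SW_CMD_* opcode followed by its operands, as emitted below.
 */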
1734 void vcn_v3_0_dec_sw_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1735       u64 seq, uint32_t flags)
1736 {
1737 	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1738 
1739 	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_FENCE);
1740 	amdgpu_ring_write(ring, lower_32_bits(addr));
1741 	amdgpu_ring_write(ring, upper_32_bits(addr));
1742 	amdgpu_ring_write(ring, lower_32_bits(seq));
1743 	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_TRAP);
1744 }
1745 
1746 void vcn_v3_0_dec_sw_ring_insert_end(struct amdgpu_ring *ring)
1747 {
1748 	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
1749 }
1750 
1751 void vcn_v3_0_dec_sw_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
1752         struct amdgpu_ib *ib, uint32_t flags)
1753 {
1754 	uint32_t vmid = AMDGPU_JOB_GET_VMID(job);
1755 
1756 	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_IB);
1757 	amdgpu_ring_write(ring, vmid);
1758 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1759 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1760 	amdgpu_ring_write(ring, ib->length_dw);
1761 }
1762 
1763 void vcn_v3_0_dec_sw_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1764         uint32_t val, uint32_t mask)
1765 {
1766 	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_REG_WAIT);
1767 	amdgpu_ring_write(ring, reg << 2);
1768 	amdgpu_ring_write(ring, mask);
1769 	amdgpu_ring_write(ring, val);
1770 }
1771 
1772 void vcn_v3_0_dec_sw_ring_emit_vm_flush(struct amdgpu_ring *ring,
1773         uint32_t vmid, uint64_t pd_addr)
1774 {
1775 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1776 	uint32_t data0, data1, mask;
1777 
1778 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1779 
1780 	/* wait for register write */
1781 	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
1782 	data1 = lower_32_bits(pd_addr);
1783 	mask = 0xffffffff;
1784 	vcn_v3_0_dec_sw_ring_emit_reg_wait(ring, data0, data1, mask);
1785 }
1786 
1787 void vcn_v3_0_dec_sw_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
1788       uint32_t val)
1789 {
1790 	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_REG_WRITE);
1791 	amdgpu_ring_write(ring, reg << 2);
1792 	amdgpu_ring_write(ring, val);
1793 }
1794 
1795 static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
1796 	.type = AMDGPU_RING_TYPE_VCN_DEC,
1797 	.align_mask = 0x3f,
1798 	.nop = VCN_DEC_SW_CMD_NO_OP,
1799 	.secure_submission_supported = true,
1800 	.vmhub = AMDGPU_MMHUB_0,
1801 	.get_rptr = vcn_v3_0_dec_ring_get_rptr,
1802 	.get_wptr = vcn_v3_0_dec_ring_get_wptr,
1803 	.set_wptr = vcn_v3_0_dec_ring_set_wptr,
1804 	.emit_frame_size =
1805 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1806 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1807 		4 + /* vcn_v3_0_dec_sw_ring_emit_vm_flush */
1808 		5 + 5 + /* vcn_v3_0_dec_sw_ring_emit_fence x2 vm fence */
1809 		1, /* vcn_v3_0_dec_sw_ring_insert_end */
1810 	.emit_ib_size = 5, /* vcn_v3_0_dec_sw_ring_emit_ib */
1811 	.emit_ib = vcn_v3_0_dec_sw_ring_emit_ib,
1812 	.emit_fence = vcn_v3_0_dec_sw_ring_emit_fence,
1813 	.emit_vm_flush = vcn_v3_0_dec_sw_ring_emit_vm_flush,
1814 	.test_ring = amdgpu_vcn_dec_sw_ring_test_ring,
1815 	.test_ib = NULL, /* amdgpu_vcn_dec_sw_ring_test_ib */
1816 	.insert_nop = amdgpu_ring_insert_nop,
1817 	.insert_end = vcn_v3_0_dec_sw_ring_insert_end,
1818 	.pad_ib = amdgpu_ring_generic_pad_ib,
1819 	.begin_use = amdgpu_vcn_ring_begin_use,
1820 	.end_use = amdgpu_vcn_ring_end_use,
1821 	.emit_wreg = vcn_v3_0_dec_sw_ring_emit_wreg,
1822 	.emit_reg_wait = vcn_v3_0_dec_sw_ring_emit_reg_wait,
1823 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1824 };
1825 
1826 static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
1827 				struct amdgpu_job *job)
1828 {
1829 	struct drm_gpu_scheduler **scheds;
1830 
1831 	/* The create msg must be in the first IB submitted */
1832 	if (atomic_read(&job->base.entity->fence_seq))
1833 		return -EINVAL;
1834 
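	/* Restrict the entity to a single decode scheduler so every IB of
	 * this context runs on the same (first) VCN instance.
	 */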
1835 	scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
1836 		[AMDGPU_RING_PRIO_DEFAULT].sched;
1837 	drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
1838 	return 0;
1839 }
1840 
1841 static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
1842 			    uint64_t addr)
1843 {
1844 	struct ttm_operation_ctx ctx = { false, false };
1845 	struct amdgpu_bo_va_mapping *map;
1846 	uint32_t *msg, num_buffers;
1847 	struct amdgpu_bo *bo;
1848 	uint64_t start, end;
1849 	unsigned int i;
1850 	void *ptr;
1851 	int r;
1852 
1853 	addr &= AMDGPU_GMC_HOLE_MASK;
1854 	r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
1855 	if (r) {
1856 		DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
1857 		return r;
1858 	}
1859 
1860 	start = map->start * AMDGPU_GPU_PAGE_SIZE;
1861 	end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
1862 	if (addr & 0x7) {
1863 		DRM_ERROR("VCN messages must be 8 byte aligned!\n");
1864 		return -EINVAL;
1865 	}
1866 
1867 	bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
1868 	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
1869 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1870 	if (r) {
1871 		DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
1872 		return r;
1873 	}
1874 
1875 	r = amdgpu_bo_kmap(bo, &ptr);
1876 	if (r) {
1877 		DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
1878 		return r;
1879 	}
1880 
1881 	msg = ptr + addr - start;
1882 
1883 	/* Check length */
1884 	if (msg[1] > end - addr) {
1885 		r = -EINVAL;
1886 		goto out;
1887 	}
1888 
1889 	if (msg[3] != RDECODE_MSG_CREATE)
1890 		goto out;
1891 
1892 	num_buffers = msg[2];
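	/* Buffer descriptors start at msg[6]; each entry is four dwords. */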
1893 	for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
1894 		uint32_t offset, size, *create;
1895 
1896 		if (msg[0] != RDECODE_MESSAGE_CREATE)
1897 			continue;
1898 
1899 		offset = msg[1];
1900 		size = msg[2];
1901 
1902 		if (offset + size > end) {
1903 			r = -EINVAL;
1904 			goto out;
1905 		}
1906 
1907 		create = ptr + addr + offset - start;
1908 
1909 		/* H264, HEVC and VP9 can run on any instance */
1910 		if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
1911 			continue;
1912 
1913 		r = vcn_v3_0_limit_sched(p, job);
1914 		if (r)
1915 			goto out;
1916 	}
1917 
1918 out:
1919 	amdgpu_bo_kunmap(bo);
1920 	return r;
1921 }
1922 
1923 static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
1924 					   struct amdgpu_job *job,
1925 					   struct amdgpu_ib *ib)
1926 {
1927 	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
1928 	uint32_t msg_lo = 0, msg_hi = 0;
1929 	unsigned int i;
1930 	int r;
1931 
1932 	/* The first instance can decode anything */
1933 	if (!ring->me)
1934 		return 0;
1935 
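	/* Scan the IB for the DATA0/DATA1/CMD register writes that carry the
	 * 64-bit decode message address, then validate that message.
	 */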
1936 	for (i = 0; i < ib->length_dw; i += 2) {
1937 		uint32_t reg = amdgpu_ib_get_value(ib, i);
1938 		uint32_t val = amdgpu_ib_get_value(ib, i + 1);
1939 
1940 		if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
1941 			msg_lo = val;
1942 		} else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
1943 			msg_hi = val;
1944 		} else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
1945 			   val == 0) {
1946 			r = vcn_v3_0_dec_msg(p, job,
1947 					     ((u64)msg_hi) << 32 | msg_lo);
1948 			if (r)
1949 				return r;
1950 		}
1951 	}
1952 	return 0;
1953 }
1954 
1955 static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
1956 	.type = AMDGPU_RING_TYPE_VCN_DEC,
1957 	.align_mask = 0xf,
1958 	.secure_submission_supported = true,
1959 	.vmhub = AMDGPU_MMHUB_0,
1960 	.get_rptr = vcn_v3_0_dec_ring_get_rptr,
1961 	.get_wptr = vcn_v3_0_dec_ring_get_wptr,
1962 	.set_wptr = vcn_v3_0_dec_ring_set_wptr,
1963 	.patch_cs_in_place = vcn_v3_0_ring_patch_cs_in_place,
1964 	.emit_frame_size =
1965 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1966 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1967 		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
1968 		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
1969 		6,
1970 	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
1971 	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
1972 	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
1973 	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
1974 	.test_ring = vcn_v2_0_dec_ring_test_ring,
1975 	.test_ib = amdgpu_vcn_dec_ring_test_ib,
1976 	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
1977 	.insert_start = vcn_v2_0_dec_ring_insert_start,
1978 	.insert_end = vcn_v2_0_dec_ring_insert_end,
1979 	.pad_ib = amdgpu_ring_generic_pad_ib,
1980 	.begin_use = amdgpu_vcn_ring_begin_use,
1981 	.end_use = amdgpu_vcn_ring_end_use,
1982 	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
1983 	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
1984 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1985 };
1986 
1987 /**
1988  * vcn_v3_0_enc_ring_get_rptr - get enc read pointer
1989  *
1990  * @ring: amdgpu_ring pointer
1991  *
1992  * Returns the current hardware enc read pointer
1993  */
1994 static uint64_t vcn_v3_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
1995 {
1996 	struct amdgpu_device *adev = ring->adev;
1997 
1998 	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
1999 		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
2000 	else
2001 		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
2002 }
2003 
2004 /**
2005  * vcn_v3_0_enc_ring_get_wptr - get enc write pointer
2006  *
2007  * @ring: amdgpu_ring pointer
2008  *
2009  * Returns the current hardware enc write pointer
2010  */
2011 static uint64_t vcn_v3_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
2012 {
2013 	struct amdgpu_device *adev = ring->adev;
2014 
2015 	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
2016 		if (ring->use_doorbell)
2017 			return *ring->wptr_cpu_addr;
2018 		else
2019 			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
2020 	} else {
2021 		if (ring->use_doorbell)
2022 			return *ring->wptr_cpu_addr;
2023 		else
2024 			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
2025 	}
2026 }
2027 
2028 /**
2029  * vcn_v3_0_enc_ring_set_wptr - set enc write pointer
2030  *
2031  * @ring: amdgpu_ring pointer
2032  *
2033  * Commits the enc write pointer to the hardware
2034  */
2035 static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
2036 {
2037 	struct amdgpu_device *adev = ring->adev;
2038 
2039 	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
2040 		if (ring->use_doorbell) {
2041 			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
2042 			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
2043 		} else {
2044 			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
2045 		}
2046 	} else {
2047 		if (ring->use_doorbell) {
2048 			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
2049 			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
2050 		} else {
2051 			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
2052 		}
2053 	}
2054 }
2055 
2056 static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = {
2057 	.type = AMDGPU_RING_TYPE_VCN_ENC,
2058 	.align_mask = 0x3f,
2059 	.nop = VCN_ENC_CMD_NO_OP,
2060 	.vmhub = AMDGPU_MMHUB_0,
2061 	.get_rptr = vcn_v3_0_enc_ring_get_rptr,
2062 	.get_wptr = vcn_v3_0_enc_ring_get_wptr,
2063 	.set_wptr = vcn_v3_0_enc_ring_set_wptr,
2064 	.emit_frame_size =
2065 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
2066 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
2067 		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
2068 		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
2069 		1, /* vcn_v2_0_enc_ring_insert_end */
2070 	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
2071 	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
2072 	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
2073 	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
2074 	.test_ring = amdgpu_vcn_enc_ring_test_ring,
2075 	.test_ib = amdgpu_vcn_enc_ring_test_ib,
2076 	.insert_nop = amdgpu_ring_insert_nop,
2077 	.insert_end = vcn_v2_0_enc_ring_insert_end,
2078 	.pad_ib = amdgpu_ring_generic_pad_ib,
2079 	.begin_use = amdgpu_vcn_ring_begin_use,
2080 	.end_use = amdgpu_vcn_ring_end_use,
2081 	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
2082 	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
2083 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2084 };
2085 
2086 static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
2087 {
2088 	int i;
2089 
2090 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2091 		if (adev->vcn.harvest_config & (1 << i))
2092 			continue;
2093 
2094 		if (!DEC_SW_RING_ENABLED)
2095 			adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_ring_vm_funcs;
2096 		else
2097 			adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_sw_ring_vm_funcs;
2098 		adev->vcn.inst[i].ring_dec.me = i;
2099 		DRM_INFO("VCN(%d) decode%s is enabled in VM mode\n", i,
2100 			  DEC_SW_RING_ENABLED ? "(Software Ring)" : "");
2101 	}
2102 }
2103 
2104 static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
2105 {
2106 	int i, j;
2107 
2108 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2109 		if (adev->vcn.harvest_config & (1 << i))
2110 			continue;
2111 
2112 		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
2113 			adev->vcn.inst[i].ring_enc[j].funcs = &vcn_v3_0_enc_ring_vm_funcs;
2114 			adev->vcn.inst[i].ring_enc[j].me = i;
2115 		}
2116 		if (adev->vcn.num_enc_rings > 0)
2117 			DRM_INFO("VCN(%d) encode is enabled in VM mode\n", i);
2118 	}
2119 }
2120 
2121 static bool vcn_v3_0_is_idle(void *handle)
2122 {
2123 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2124 	int i, ret = 1;
2125 
2126 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2127 		if (adev->vcn.harvest_config & (1 << i))
2128 			continue;
2129 
2130 		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
2131 	}
2132 
2133 	return ret;
2134 }
2135 
2136 static int vcn_v3_0_wait_for_idle(void *handle)
2137 {
2138 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2139 	int i, ret = 0;
2140 
2141 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2142 		if (adev->vcn.harvest_config & (1 << i))
2143 			continue;
2144 
2145 		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
2146 			UVD_STATUS__IDLE);
2147 		if (ret)
2148 			return ret;
2149 	}
2150 
2151 	return ret;
2152 }
2153 
2154 static int vcn_v3_0_set_clockgating_state(void *handle,
2155 					  enum amd_clockgating_state state)
2156 {
2157 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2158 	bool enable = (state == AMD_CG_STATE_GATE);
2159 	int i;
2160 
2161 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2162 		if (adev->vcn.harvest_config & (1 << i))
2163 			continue;
2164 
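		/* Clock gating may only be enabled while the instance is idle. */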
2165 		if (enable) {
2166 			if (RREG32_SOC15(VCN, i, mmUVD_STATUS) != UVD_STATUS__IDLE)
2167 				return -EBUSY;
2168 			vcn_v3_0_enable_clock_gating(adev, i);
2169 		} else {
2170 			vcn_v3_0_disable_clock_gating(adev, i);
2171 		}
2172 	}
2173 
2174 	return 0;
2175 }
2176 
2177 static int vcn_v3_0_set_powergating_state(void *handle,
2178 					  enum amd_powergating_state state)
2179 {
2180 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2181 	int ret;
2182 
2183 	/* For SRIOV, the guest must not control VCN power-gating:
2184 	 * the MMSCH firmware owns power-gating and clock-gating, so the
2185 	 * guest should avoid touching CGC and PG registers.
2186 	 */
2187 	if (amdgpu_sriov_vf(adev)) {
2188 		adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
2189 		return 0;
2190 	}
2191 
2192 	if (state == adev->vcn.cur_state)
2193 		return 0;
2194 
2195 	if (state == AMD_PG_STATE_GATE)
2196 		ret = vcn_v3_0_stop(adev);
2197 	else
2198 		ret = vcn_v3_0_start(adev);
2199 
2200 	if (!ret)
2201 		adev->vcn.cur_state = state;
2202 
2203 	return ret;
2204 }
2205 
2206 static int vcn_v3_0_set_interrupt_state(struct amdgpu_device *adev,
2207 					struct amdgpu_irq_src *source,
2208 					unsigned type,
2209 					enum amdgpu_interrupt_state state)
2210 {
2211 	return 0;
2212 }
2213 
2214 static int vcn_v3_0_process_interrupt(struct amdgpu_device *adev,
2215 				      struct amdgpu_irq_src *source,
2216 				      struct amdgpu_iv_entry *entry)
2217 {
2218 	uint32_t ip_instance;
2219 
2220 	switch (entry->client_id) {
2221 	case SOC15_IH_CLIENTID_VCN:
2222 		ip_instance = 0;
2223 		break;
2224 	case SOC15_IH_CLIENTID_VCN1:
2225 		ip_instance = 1;
2226 		break;
2227 	default:
2228 		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
2229 		return 0;
2230 	}
2231 
2232 	DRM_DEBUG("IH: VCN TRAP\n");
2233 
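	/* Route the trap to the fence handler of the ring that raised it. */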
2234 	switch (entry->src_id) {
2235 	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
2236 		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
2237 		break;
2238 	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
2239 		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
2240 		break;
2241 	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
2242 		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
2243 		break;
2244 	default:
2245 		DRM_ERROR("Unhandled interrupt: %d %d\n",
2246 			  entry->src_id, entry->src_data[0]);
2247 		break;
2248 	}
2249 
2250 	return 0;
2251 }
2252 
2253 static const struct amdgpu_irq_src_funcs vcn_v3_0_irq_funcs = {
2254 	.set = vcn_v3_0_set_interrupt_state,
2255 	.process = vcn_v3_0_process_interrupt,
2256 };
2257 
2258 static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
2259 {
2260 	int i;
2261 
2262 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2263 		if (adev->vcn.harvest_config & (1 << i))
2264 			continue;
2265 
2266 		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
2267 		adev->vcn.inst[i].irq.funcs = &vcn_v3_0_irq_funcs;
2268 	}
2269 }
2270 
2271 static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
2272 	.name = "vcn_v3_0",
2273 	.early_init = vcn_v3_0_early_init,
2274 	.late_init = NULL,
2275 	.sw_init = vcn_v3_0_sw_init,
2276 	.sw_fini = vcn_v3_0_sw_fini,
2277 	.hw_init = vcn_v3_0_hw_init,
2278 	.hw_fini = vcn_v3_0_hw_fini,
2279 	.suspend = vcn_v3_0_suspend,
2280 	.resume = vcn_v3_0_resume,
2281 	.is_idle = vcn_v3_0_is_idle,
2282 	.wait_for_idle = vcn_v3_0_wait_for_idle,
2283 	.check_soft_reset = NULL,
2284 	.pre_soft_reset = NULL,
2285 	.soft_reset = NULL,
2286 	.post_soft_reset = NULL,
2287 	.set_clockgating_state = vcn_v3_0_set_clockgating_state,
2288 	.set_powergating_state = vcn_v3_0_set_powergating_state,
2289 };
2290 
2291 const struct amdgpu_ip_block_version vcn_v3_0_ip_block =
2292 {
2293 	.type = AMD_IP_BLOCK_TYPE_VCN,
2294 	.major = 3,
2295 	.minor = 0,
2296 	.rev = 0,
2297 	.funcs = &vcn_v3_0_ip_funcs,
2298 };
2299