1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/firmware.h>
25 #include <drm/drm_drv.h>
26 
27 #include "amdgpu.h"
28 #include "amdgpu_vcn.h"
29 #include "amdgpu_pm.h"
30 #include "soc15.h"
31 #include "soc15d.h"
32 #include "vcn_v2_0.h"
33 #include "mmsch_v1_0.h"
34 #include "vcn_v2_5.h"
35 
36 #include "vcn/vcn_2_5_offset.h"
37 #include "vcn/vcn_2_5_sh_mask.h"
38 #include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
39 
40 #define VCN_VID_SOC_ADDRESS_2_0					0x1fa00
41 #define VCN1_VID_SOC_ADDRESS_3_0				0x48200
42 
43 #define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
44 #define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
45 #define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
46 #define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
47 #define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
48 #define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
49 #define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d
50 
51 #define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
52 #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
53 #define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x3b5
54 #define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c
55 
56 #define VCN25_MAX_HW_INSTANCES_ARCTURUS			2
57 
58 static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
59 static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
60 static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
61 static int vcn_v2_5_set_powergating_state(void *handle,
62 				enum amd_powergating_state state);
63 static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
64 				int inst_idx, struct dpg_pause_state *new_state);
65 static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
66 static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);
67 
68 static int amdgpu_ih_clientid_vcns[] = {
69 	SOC15_IH_CLIENTID_VCN,
70 	SOC15_IH_CLIENTID_VCN1
71 };
72 
73 /**
74  * vcn_v2_5_early_init - set function pointers and load microcode
75  *
76  * @handle: amdgpu_device pointer
77  *
78  * Set ring and irq function pointers
79  * Load microcode from filesystem
80  */
81 static int vcn_v2_5_early_init(void *handle)
82 {
83 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
84 
85 	if (amdgpu_sriov_vf(adev)) {
86 		adev->vcn.num_vcn_inst = 2;
87 		adev->vcn.harvest_config = 0;
88 		adev->vcn.num_enc_rings = 1;
89 	} else {
90 		u32 harvest;
91 		int i;
92 
93 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
94 			harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
95 			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
96 				adev->vcn.harvest_config |= 1 << i;
97 		}
98 		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
99 					AMDGPU_VCN_HARVEST_VCN1))
100 			/* both instances are harvested, disable the block */
101 			return -ENOENT;
102 
103 		adev->vcn.num_enc_rings = 2;
104 	}
105 
106 	vcn_v2_5_set_dec_ring_funcs(adev);
107 	vcn_v2_5_set_enc_ring_funcs(adev);
108 	vcn_v2_5_set_irq_funcs(adev);
109 	vcn_v2_5_set_ras_funcs(adev);
110 
111 	return amdgpu_vcn_early_init(adev);
112 }
113 
114 /**
115  * vcn_v2_5_sw_init - sw init for VCN block
116  *
117  * @handle: amdgpu_device pointer
118  *
119  * Load firmware and sw initialization
120  */
121 static int vcn_v2_5_sw_init(void *handle)
122 {
123 	struct amdgpu_ring *ring;
124 	int i, j, r;
125 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
126 
127 	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
128 		if (adev->vcn.harvest_config & (1 << j))
129 			continue;
130 		/* VCN DEC TRAP */
131 		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
132 				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
133 		if (r)
134 			return r;
135 
136 		/* VCN ENC TRAP */
137 		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
138 			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
139 				i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
140 			if (r)
141 				return r;
142 		}
143 
144 		/* VCN POISON TRAP */
145 		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
146 			VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].irq);
147 		if (r)
148 			return r;
149 	}
150 
151 	r = amdgpu_vcn_sw_init(adev);
152 	if (r)
153 		return r;
154 
155 	amdgpu_vcn_setup_ucode(adev);
156 
157 	r = amdgpu_vcn_resume(adev);
158 	if (r)
159 		return r;
160 
161 	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
162 		volatile struct amdgpu_fw_shared *fw_shared;
163 
164 		if (adev->vcn.harvest_config & (1 << j))
165 			continue;
166 		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
167 		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
168 		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
169 		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
170 		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
171 		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
172 
173 		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
174 		adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
175 		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
176 		adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
177 		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
178 		adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
179 		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
180 		adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
181 		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
182 		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);
183 
184 		ring = &adev->vcn.inst[j].ring_dec;
185 		ring->use_doorbell = true;
186 
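		/* instances are spaced 2 doorbells apart under SR-IOV, 8 apart on bare metal */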
187 		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
188 				(amdgpu_sriov_vf(adev) ? 2*j : 8*j);
189 
190 		if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
191 			ring->vm_hub = AMDGPU_MMHUB_1;
192 		else
193 			ring->vm_hub = AMDGPU_MMHUB_0;
194 
195 		sprintf(ring->name, "vcn_dec_%d", j);
196 		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
197 				     0, AMDGPU_RING_PRIO_DEFAULT, NULL);
198 		if (r)
199 			return r;
200 
201 		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
202 			enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
203 
204 			ring = &adev->vcn.inst[j].ring_enc[i];
205 			ring->use_doorbell = true;
206 
207 			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
208 					(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));
209 
210 			if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
211 				ring->vm_hub = AMDGPU_MMHUB_1;
212 			else
213 				ring->vm_hub = AMDGPU_MMHUB_0;
214 
215 			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
216 			r = amdgpu_ring_init(adev, ring, 512,
217 					     &adev->vcn.inst[j].irq, 0,
218 					     hw_prio, NULL);
219 			if (r)
220 				return r;
221 		}
222 
223 		fw_shared = adev->vcn.inst[j].fw_shared.cpu_addr;
224 		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
225 
226 		if (amdgpu_vcnfw_log)
227 			amdgpu_vcn_fwlog_init(&adev->vcn.inst[j]);
228 	}
229 
230 	if (amdgpu_sriov_vf(adev)) {
231 		r = amdgpu_virt_alloc_mm_table(adev);
232 		if (r)
233 			return r;
234 	}
235 
236 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
237 		adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
238 
239 	r = amdgpu_vcn_ras_sw_init(adev);
240 	if (r)
241 		return r;
242 
243 	return 0;
244 }
245 
246 /**
247  * vcn_v2_5_sw_fini - sw fini for VCN block
248  *
249  * @handle: amdgpu_device pointer
250  *
251  * VCN suspend and free up sw allocation
252  */
253 static int vcn_v2_5_sw_fini(void *handle)
254 {
255 	int i, r, idx;
256 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
257 	volatile struct amdgpu_fw_shared *fw_shared;
258 
259 	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
260 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
261 			if (adev->vcn.harvest_config & (1 << i))
262 				continue;
263 			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
264 			fw_shared->present_flag_0 = 0;
265 		}
266 		drm_dev_exit(idx);
267 	}
268 
269 
270 	if (amdgpu_sriov_vf(adev))
271 		amdgpu_virt_free_mm_table(adev);
272 
273 	r = amdgpu_vcn_suspend(adev);
274 	if (r)
275 		return r;
276 
277 	r = amdgpu_vcn_sw_fini(adev);
278 
279 	return r;
280 }
281 
282 /**
283  * vcn_v2_5_hw_init - start and test VCN block
284  *
285  * @handle: amdgpu_device pointer
286  *
287  * Initialize the hardware, boot up the VCPU and do some testing
288  */
289 static int vcn_v2_5_hw_init(void *handle)
290 {
291 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
292 	struct amdgpu_ring *ring;
293 	int i, j, r = 0;
294 
295 	if (amdgpu_sriov_vf(adev))
296 		r = vcn_v2_5_sriov_start(adev);
297 
298 	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
299 		if (adev->vcn.harvest_config & (1 << j))
300 			continue;
301 
302 		if (amdgpu_sriov_vf(adev)) {
303 			adev->vcn.inst[j].ring_enc[0].sched.ready = true;
304 			adev->vcn.inst[j].ring_enc[1].sched.ready = false;
305 			adev->vcn.inst[j].ring_enc[2].sched.ready = false;
306 			adev->vcn.inst[j].ring_dec.sched.ready = true;
307 		} else {
308 
309 			ring = &adev->vcn.inst[j].ring_dec;
310 
311 			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
312 						     ring->doorbell_index, j);
313 
314 			r = amdgpu_ring_test_helper(ring);
315 			if (r)
316 				goto done;
317 
318 			for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
319 				ring = &adev->vcn.inst[j].ring_enc[i];
320 				r = amdgpu_ring_test_helper(ring);
321 				if (r)
322 					goto done;
323 			}
324 		}
325 	}
326 
327 done:
328 	if (!r)
329 		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
330 			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");
331 
332 	return r;
333 }
334 
335 /**
336  * vcn_v2_5_hw_fini - stop the hardware block
337  *
338  * @handle: amdgpu_device pointer
339  *
340  * Stop the VCN block, mark ring as not ready any more
341  */
342 static int vcn_v2_5_hw_fini(void *handle)
343 {
344 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
345 	int i;
346 
347 	cancel_delayed_work_sync(&adev->vcn.idle_work);
348 
349 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
350 		if (adev->vcn.harvest_config & (1 << i))
351 			continue;
352 
353 		if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
354 		    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
355 		     RREG32_SOC15(VCN, i, mmUVD_STATUS)))
356 			vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
357 	}
358 
359 	return 0;
360 }
361 
362 /**
363  * vcn_v2_5_suspend - suspend VCN block
364  *
365  * @handle: amdgpu_device pointer
366  *
367  * HW fini and suspend VCN block
368  */
369 static int vcn_v2_5_suspend(void *handle)
370 {
371 	int r;
372 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
373 
374 	r = vcn_v2_5_hw_fini(adev);
375 	if (r)
376 		return r;
377 
378 	r = amdgpu_vcn_suspend(adev);
379 
380 	return r;
381 }
382 
383 /**
384  * vcn_v2_5_resume - resume VCN block
385  *
386  * @handle: amdgpu_device pointer
387  *
388  * Resume firmware and hw init VCN block
389  */
390 static int vcn_v2_5_resume(void *handle)
391 {
392 	int r;
393 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
394 
395 	r = amdgpu_vcn_resume(adev);
396 	if (r)
397 		return r;
398 
399 	r = vcn_v2_5_hw_init(adev);
400 
401 	return r;
402 }
403 
404 /**
405  * vcn_v2_5_mc_resume - memory controller programming
406  *
407  * @adev: amdgpu_device pointer
408  *
409  * Let the VCN memory controller know its offsets
410  */
411 static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
412 {
413 	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
414 	uint32_t offset;
415 	int i;
416 
417 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
418 		if (adev->vcn.harvest_config & (1 << i))
419 			continue;
420 		/* cache window 0: fw */
421 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
422 			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
423 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
424 			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
425 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
426 			WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
427 			offset = 0;
428 		} else {
429 			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
430 				lower_32_bits(adev->vcn.inst[i].gpu_addr));
431 			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
432 				upper_32_bits(adev->vcn.inst[i].gpu_addr));
433 			offset = size;
434 			WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
435 				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
436 		}
437 		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);
438 
439 		/* cache window 1: stack */
440 		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
441 			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
442 		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
443 			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
444 		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
445 		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
446 
447 		/* cache window 2: context */
448 		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
449 			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
450 		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
451 			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
452 		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
453 		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
454 
455 		/* non-cache window */
456 		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
457 			lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
458 		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
459 			upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
460 		WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
461 		WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
462 			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
463 	}
464 }
465 
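/**
 * vcn_v2_5_mc_resume_dpg_mode - memory controller programming in DPG mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: VCN hardware instance index
 * @indirect: program through the DPG indirect SRAM when true
 *
 * Program the firmware, stack, context and non-cache windows for a single
 * instance using the DPG (dynamic power gating) register path.
 */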
466 static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
467 {
468 	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
469 	uint32_t offset;
470 
471 	/* cache window 0: fw */
472 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
473 		if (!indirect) {
474 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
475 				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
476 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
477 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
478 				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
479 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
480 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
481 				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
482 		} else {
483 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
484 				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
485 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
486 				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
487 			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
488 				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
489 		}
490 		offset = 0;
491 	} else {
492 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
493 			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
494 			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
495 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
496 			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
497 			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
498 		offset = size;
499 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
500 			VCN, 0, mmUVD_VCPU_CACHE_OFFSET0),
501 			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
502 	}
503 
504 	if (!indirect)
505 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
506 			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
507 	else
508 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
509 			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
510 
511 	/* cache window 1: stack */
512 	if (!indirect) {
513 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
514 			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
515 			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
516 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
517 			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
518 			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
519 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
520 			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
521 	} else {
522 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
523 			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
524 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
525 			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
526 		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
527 			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
528 	}
529 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
530 		VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
531 
532 	/* cache window 2: context */
533 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
534 		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
535 		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
536 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
537 		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
538 		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
539 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
540 		VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
541 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
542 		VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
543 
544 	/* non-cache window */
545 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
546 		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
547 		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
548 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
549 		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
550 		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
551 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
552 		VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
553 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
554 		VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0),
555 		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
556 
557 	/* VCN global tiling registers */
558 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
559 		VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
560 }
561 
562 /**
563  * vcn_v2_5_disable_clock_gating - disable VCN clock gating
564  *
565  * @adev: amdgpu_device pointer
566  *
567  * Disable clock gating for VCN block
568  */
569 static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
570 {
571 	uint32_t data;
572 	int i;
573 
574 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
575 		if (adev->vcn.harvest_config & (1 << i))
576 			continue;
577 		/* UVD disable CGC */
578 		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
579 		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
580 			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
581 		else
582 			data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
583 		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
584 		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
585 		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
586 
587 		data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
588 		data &= ~(UVD_CGC_GATE__SYS_MASK
589 			| UVD_CGC_GATE__UDEC_MASK
590 			| UVD_CGC_GATE__MPEG2_MASK
591 			| UVD_CGC_GATE__REGS_MASK
592 			| UVD_CGC_GATE__RBC_MASK
593 			| UVD_CGC_GATE__LMI_MC_MASK
594 			| UVD_CGC_GATE__LMI_UMC_MASK
595 			| UVD_CGC_GATE__IDCT_MASK
596 			| UVD_CGC_GATE__MPRD_MASK
597 			| UVD_CGC_GATE__MPC_MASK
598 			| UVD_CGC_GATE__LBSI_MASK
599 			| UVD_CGC_GATE__LRBBM_MASK
600 			| UVD_CGC_GATE__UDEC_RE_MASK
601 			| UVD_CGC_GATE__UDEC_CM_MASK
602 			| UVD_CGC_GATE__UDEC_IT_MASK
603 			| UVD_CGC_GATE__UDEC_DB_MASK
604 			| UVD_CGC_GATE__UDEC_MP_MASK
605 			| UVD_CGC_GATE__WCB_MASK
606 			| UVD_CGC_GATE__VCPU_MASK
607 			| UVD_CGC_GATE__MMSCH_MASK);
608 
609 		WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);
610 
611 		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0,  0xFFFFFFFF);
612 
613 		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
614 		data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
615 			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
616 			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
617 			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
618 			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
619 			| UVD_CGC_CTRL__SYS_MODE_MASK
620 			| UVD_CGC_CTRL__UDEC_MODE_MASK
621 			| UVD_CGC_CTRL__MPEG2_MODE_MASK
622 			| UVD_CGC_CTRL__REGS_MODE_MASK
623 			| UVD_CGC_CTRL__RBC_MODE_MASK
624 			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
625 			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
626 			| UVD_CGC_CTRL__IDCT_MODE_MASK
627 			| UVD_CGC_CTRL__MPRD_MODE_MASK
628 			| UVD_CGC_CTRL__MPC_MODE_MASK
629 			| UVD_CGC_CTRL__LBSI_MODE_MASK
630 			| UVD_CGC_CTRL__LRBBM_MODE_MASK
631 			| UVD_CGC_CTRL__WCB_MODE_MASK
632 			| UVD_CGC_CTRL__VCPU_MODE_MASK
633 			| UVD_CGC_CTRL__MMSCH_MODE_MASK);
634 		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
635 
636 		/* turn on SUVD clocks */
637 		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
638 		data |= (UVD_SUVD_CGC_GATE__SRE_MASK
639 			| UVD_SUVD_CGC_GATE__SIT_MASK
640 			| UVD_SUVD_CGC_GATE__SMP_MASK
641 			| UVD_SUVD_CGC_GATE__SCM_MASK
642 			| UVD_SUVD_CGC_GATE__SDB_MASK
643 			| UVD_SUVD_CGC_GATE__SRE_H264_MASK
644 			| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
645 			| UVD_SUVD_CGC_GATE__SIT_H264_MASK
646 			| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
647 			| UVD_SUVD_CGC_GATE__SCM_H264_MASK
648 			| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
649 			| UVD_SUVD_CGC_GATE__SDB_H264_MASK
650 			| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
651 			| UVD_SUVD_CGC_GATE__SCLR_MASK
652 			| UVD_SUVD_CGC_GATE__UVD_SC_MASK
653 			| UVD_SUVD_CGC_GATE__ENT_MASK
654 			| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
655 			| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
656 			| UVD_SUVD_CGC_GATE__SITE_MASK
657 			| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
658 			| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
659 			| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
660 			| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
661 			| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
662 		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);
663 
664 		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
665 		data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
666 			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
667 			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
668 			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
669 			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
670 			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
671 			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
672 			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
673 			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
674 			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
675 		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
676 	}
677 }
678 
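/**
 * vcn_v2_5_clock_gating_dpg_mode - enable sw clock gating in DPG mode
 *
 * @adev: amdgpu_device pointer
 * @sram_sel: sram select passed through to the DPG register write
 * @inst_idx: VCN hardware instance index
 * @indirect: program through the DPG indirect SRAM when true
 *
 * Configure UVD_CGC_CTRL, UVD_CGC_GATE and the SUVD equivalents through
 * the DPG register path before the VCPU is started.
 */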
679 static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
680 		uint8_t sram_sel, int inst_idx, uint8_t indirect)
681 {
682 	uint32_t reg_data = 0;
683 
684 	/* enable sw clock gating control */
685 	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
686 		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
687 	else
688 		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
689 	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
690 	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
691 	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
692 		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
693 		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
694 		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
695 		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
696 		 UVD_CGC_CTRL__SYS_MODE_MASK |
697 		 UVD_CGC_CTRL__UDEC_MODE_MASK |
698 		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
699 		 UVD_CGC_CTRL__REGS_MODE_MASK |
700 		 UVD_CGC_CTRL__RBC_MODE_MASK |
701 		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
702 		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
703 		 UVD_CGC_CTRL__IDCT_MODE_MASK |
704 		 UVD_CGC_CTRL__MPRD_MODE_MASK |
705 		 UVD_CGC_CTRL__MPC_MODE_MASK |
706 		 UVD_CGC_CTRL__LBSI_MODE_MASK |
707 		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
708 		 UVD_CGC_CTRL__WCB_MODE_MASK |
709 		 UVD_CGC_CTRL__VCPU_MODE_MASK |
710 		 UVD_CGC_CTRL__MMSCH_MODE_MASK);
711 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
712 		VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
713 
714 	/* turn off clock gating */
715 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
716 		VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
717 
718 	/* turn on SUVD clock gating */
719 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
720 		VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
721 
722 	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
723 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
724 		VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
725 }
726 
727 /**
728  * vcn_v2_5_enable_clock_gating - enable VCN clock gating
729  *
730  * @adev: amdgpu_device pointer
731  *
732  * Enable clock gating for VCN block
733  */
734 static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
735 {
736 	uint32_t data = 0;
737 	int i;
738 
739 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
740 		if (adev->vcn.harvest_config & (1 << i))
741 			continue;
742 		/* enable UVD CGC */
743 		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
744 		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
745 			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
746 		else
747 			data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
748 		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
749 		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
750 		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
751 
752 		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
753 		data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
754 			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
755 			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
756 			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
757 			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
758 			| UVD_CGC_CTRL__SYS_MODE_MASK
759 			| UVD_CGC_CTRL__UDEC_MODE_MASK
760 			| UVD_CGC_CTRL__MPEG2_MODE_MASK
761 			| UVD_CGC_CTRL__REGS_MODE_MASK
762 			| UVD_CGC_CTRL__RBC_MODE_MASK
763 			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
764 			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
765 			| UVD_CGC_CTRL__IDCT_MODE_MASK
766 			| UVD_CGC_CTRL__MPRD_MODE_MASK
767 			| UVD_CGC_CTRL__MPC_MODE_MASK
768 			| UVD_CGC_CTRL__LBSI_MODE_MASK
769 			| UVD_CGC_CTRL__LRBBM_MODE_MASK
770 			| UVD_CGC_CTRL__WCB_MODE_MASK
771 			| UVD_CGC_CTRL__VCPU_MODE_MASK);
772 		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
773 
774 		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
775 		data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
776 			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
777 			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
778 			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
779 			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
780 			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
781 			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
782 			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
783 			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
784 			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
785 		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
786 	}
787 }
788 
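/**
 * vcn_v2_6_enable_ras - enable VCN RAS error reporting
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: VCN hardware instance index
 * @indirect: program through the DPG indirect SRAM when true
 *
 * Enable RAS interrupt reporting for the VCPU/VCODEC block.  Only applies
 * to VCN 2.6.0; other revisions return immediately.
 */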
789 static void vcn_v2_6_enable_ras(struct amdgpu_device *adev, int inst_idx,
790 				bool indirect)
791 {
792 	uint32_t tmp;
793 
794 	if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(2, 6, 0))
795 		return;
796 
797 	tmp = VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK |
798 	      VCN_RAS_CNTL__VCPU_VCODEC_IH_EN_MASK |
799 	      VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN_MASK |
800 	      VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN_MASK;
801 	WREG32_SOC15_DPG_MODE(inst_idx,
802 			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmVCN_RAS_CNTL),
803 			      tmp, 0, indirect);
804 
805 	tmp = UVD_VCPU_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
806 	WREG32_SOC15_DPG_MODE(inst_idx,
807 			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmUVD_VCPU_INT_EN),
808 			      tmp, 0, indirect);
809 
810 	tmp = UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
811 	WREG32_SOC15_DPG_MODE(inst_idx,
812 			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmUVD_SYS_INT_EN),
813 			      tmp, 0, indirect);
814 }
815 
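/**
 * vcn_v2_5_start_dpg_mode - start a VCN instance in DPG mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: VCN hardware instance index
 * @indirect: program through the DPG indirect SRAM when true
 *
 * Bring up one instance with dynamic power gating enabled: program the
 * power status, clock gating, LMI/MPC setup and memory controller windows
 * (optionally batched through the PSP when indirect), then initialize the
 * decode ring.
 */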
816 static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
817 {
818 	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
819 	struct amdgpu_ring *ring;
820 	uint32_t rb_bufsz, tmp;
821 
822 	/* disable register anti-hang mechanism */
823 	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
824 		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
825 	/* enable dynamic power gating mode */
826 	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
827 	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
828 	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
829 	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);
830 
831 	if (indirect)
832 		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
833 
834 	/* enable clock gating */
835 	vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
836 
837 	/* enable VCPU clock */
838 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
839 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
840 	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
841 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
842 		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
843 
844 	/* disable master interrupt */
845 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
846 		VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
847 
848 	/* setup mmUVD_LMI_CTRL */
849 	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
850 		UVD_LMI_CTRL__REQ_MODE_MASK |
851 		UVD_LMI_CTRL__CRC_RESET_MASK |
852 		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
853 		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
854 		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
855 		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
856 		0x00100000L);
857 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
858 		VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
859 
860 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
861 		VCN, 0, mmUVD_MPC_CNTL),
862 		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
863 
864 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
865 		VCN, 0, mmUVD_MPC_SET_MUXA0),
866 		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
867 		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
868 		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
869 		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
870 
871 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
872 		VCN, 0, mmUVD_MPC_SET_MUXB0),
873 		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
874 		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
875 		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
876 		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
877 
878 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
879 		VCN, 0, mmUVD_MPC_SET_MUX),
880 		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
881 		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
882 		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
883 
884 	vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
885 
886 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
887 		VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
888 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
889 		VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
890 
891 	/* enable LMI MC and UMC channels */
892 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
893 		VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
894 
895 	vcn_v2_6_enable_ras(adev, inst_idx, indirect);
896 
897 	/* unblock VCPU register access */
898 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
899 		VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
900 
901 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
902 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
903 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
904 		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
905 
906 	/* enable master interrupt */
907 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
908 		VCN, 0, mmUVD_MASTINT_EN),
909 		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
910 
911 	if (indirect)
912 		psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
913 				    (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
914 					       (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));
915 
916 	ring = &adev->vcn.inst[inst_idx].ring_dec;
917 	/* force RBC into idle state */
918 	rb_bufsz = order_base_2(ring->ring_size);
919 	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
920 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
921 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
922 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
923 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
924 	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
925 
926 	/* Stall DPG before WPTR/RPTR reset */
927 	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
928 		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
929 		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
930 	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
931 
932 	/* set the write pointer delay */
933 	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
934 
935 	/* set the wb address */
936 	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
937 		(upper_32_bits(ring->gpu_addr) >> 2));
938 
939 	/* program the RB_BASE for ring buffer */
940 	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
941 		lower_32_bits(ring->gpu_addr));
942 	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
943 		upper_32_bits(ring->gpu_addr));
944 
945 	/* Initialize the ring buffer's read and write pointers */
946 	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);
947 
948 	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);
949 
950 	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
951 	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
952 		lower_32_bits(ring->wptr));
953 
954 	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
955 	/* Unstall DPG */
956 	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
957 		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
958 
959 	return 0;
960 }
961 
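/**
 * vcn_v2_5_start - start the VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Power up and start every non-harvested VCN instance.  Uses the DPG path
 * when AMD_PG_SUPPORT_VCN_DPG is set, otherwise programs the VCPU, memory
 * controller and decode/encode rings directly and waits for the VCPU to
 * come up.
 */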
962 static int vcn_v2_5_start(struct amdgpu_device *adev)
963 {
964 	struct amdgpu_ring *ring;
965 	uint32_t rb_bufsz, tmp;
966 	int i, j, k, r;
967 
968 	if (adev->pm.dpm_enabled)
969 		amdgpu_dpm_enable_uvd(adev, true);
970 
971 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
972 		if (adev->vcn.harvest_config & (1 << i))
973 			continue;
974 		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
975 			r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
976 			continue;
977 		}
978 
979 		/* disable register anti-hang mechanism */
980 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
981 			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
982 
983 		/* set uvd status busy */
984 		tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
985 		WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
986 	}
987 
988 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
989 		return 0;
990 
991 	/* SW clock gating */
992 	vcn_v2_5_disable_clock_gating(adev);
993 
994 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
995 		if (adev->vcn.harvest_config & (1 << i))
996 			continue;
997 		/* enable VCPU clock */
998 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
999 			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
1000 
1001 		/* disable master interrupt */
1002 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
1003 			~UVD_MASTINT_EN__VCPU_EN_MASK);
1004 
1005 		/* setup mmUVD_LMI_CTRL */
1006 		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
1007 		tmp &= ~0xff;
1008 		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8|
1009 			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK	|
1010 			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
1011 			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
1012 			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
1013 
1014 		/* setup mmUVD_MPC_CNTL */
1015 		tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
1016 		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
1017 		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
1018 		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
1019 
1020 		/* setup UVD_MPC_SET_MUXA0 */
1021 		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
1022 			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
1023 			(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
1024 			(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
1025 			(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
1026 
1027 		/* setup UVD_MPC_SET_MUXB0 */
1028 		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
1029 			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
1030 			(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
1031 			(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
1032 			(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
1033 
1034 		/* setup mmUVD_MPC_SET_MUX */
1035 		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
1036 			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
1037 			(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
1038 			(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
1039 	}
1040 
1041 	vcn_v2_5_mc_resume(adev);
1042 
1043 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1044 		volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
1045 		if (adev->vcn.harvest_config & (1 << i))
1046 			continue;
1047 		/* VCN global tiling registers */
1048 		WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
1049 			adev->gfx.config.gb_addr_config);
1050 		WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
1051 			adev->gfx.config.gb_addr_config);
1052 
1053 		/* enable LMI MC and UMC channels */
1054 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
1055 			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1056 
1057 		/* unblock VCPU register access */
1058 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
1059 			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
1060 
1061 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
1062 			~UVD_VCPU_CNTL__BLK_RST_MASK);
1063 
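		/* poll UVD_STATUS for the VCPU to come up, resetting the block between retries */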
1064 		for (k = 0; k < 10; ++k) {
1065 			uint32_t status;
1066 
1067 			for (j = 0; j < 100; ++j) {
1068 				status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
1069 				if (status & 2)
1070 					break;
1071 				if (amdgpu_emu_mode == 1)
1072 					msleep(500);
1073 				else
1074 					mdelay(10);
1075 			}
1076 			r = 0;
1077 			if (status & 2)
1078 				break;
1079 
1080 			DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
1081 			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
1082 				UVD_VCPU_CNTL__BLK_RST_MASK,
1083 				~UVD_VCPU_CNTL__BLK_RST_MASK);
1084 			mdelay(10);
1085 			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
1086 				~UVD_VCPU_CNTL__BLK_RST_MASK);
1087 
1088 			mdelay(10);
1089 			r = -1;
1090 		}
1091 
1092 		if (r) {
1093 			DRM_ERROR("VCN decode not responding, giving up!!!\n");
1094 			return r;
1095 		}
1096 
1097 		/* enable master interrupt */
1098 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
1099 			UVD_MASTINT_EN__VCPU_EN_MASK,
1100 			~UVD_MASTINT_EN__VCPU_EN_MASK);
1101 
1102 		/* clear the busy bit of VCN_STATUS */
1103 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
1104 			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
1105 
1106 		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
1107 
1108 		ring = &adev->vcn.inst[i].ring_dec;
1109 		/* force RBC into idle state */
1110 		rb_bufsz = order_base_2(ring->ring_size);
1111 		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1112 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1113 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1114 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1115 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1116 		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
1117 
1118 		fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
1119 		/* program the RB_BASE for ring buffer */
1120 		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
1121 			lower_32_bits(ring->gpu_addr));
1122 		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1123 			upper_32_bits(ring->gpu_addr));
1124 
1125 		/* Initialize the ring buffer's read and write pointers */
1126 		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
1127 
1128 		ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
1129 		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
1130 				lower_32_bits(ring->wptr));
1131 		fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
1132 
1133 		fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
1134 		ring = &adev->vcn.inst[i].ring_enc[0];
1135 		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1136 		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1137 		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
1138 		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1139 		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
1140 		fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
1141 
1142 		fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
1143 		ring = &adev->vcn.inst[i].ring_enc[1];
1144 		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1145 		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1146 		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1147 		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1148 		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
1149 		fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
1150 	}
1151 
1152 	return 0;
1153 }
1154 
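/**
 * vcn_v2_5_mmsch_start - hand the init table over to the MMSCH
 *
 * @adev: amdgpu_device pointer
 * @table: mm table describing the per-engine init descriptors
 *
 * Write the descriptor address, VMID and size to the MMSCH mailbox
 * registers, kick off initialization and poll the response register.
 */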
1155 static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
1156 				struct amdgpu_mm_table *table)
1157 {
1158 	uint32_t data = 0, loop = 0, size = 0;
1159 	uint64_t addr = table->gpu_addr;
1160 	struct mmsch_v1_1_init_header *header = NULL;
1161 
1162 	header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
1163 	size = header->total_size;
1164 
1165 	/*
1166 	 * 1, write to mmMMSCH_VF_CTX_ADDR_LO/HI registers with the GPU mc addr
1167 	 * of the memory descriptor location
1168 	 */
1169 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
1170 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
1171 
1172 	/* 2, update vmid of descriptor */
1173 	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
1174 	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
1175 	/* use domain0 for MM scheduler */
1176 	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
1177 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data);
1178 
1179 	/* 3, notify mmsch about the size of this descriptor */
1180 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);
1181 
1182 	/* 4, set resp to zero */
1183 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
1184 
1185 	/*
1186 	 * 5, kick off the initialization and wait until
1187 	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
1188 	 */
1189 	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
1190 
1191 	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
1192 	loop = 10;
1193 	while ((data & 0x10000002) != 0x10000002) {
1194 		udelay(100);
1195 		data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
1196 		loop--;
1197 		if (!loop)
1198 			break;
1199 	}
1200 
1201 	if (!loop) {
1202 		dev_err(adev->dev,
1203 			"failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
1204 			data);
1205 		return -EBUSY;
1206 	}
1207 
1208 	return 0;
1209 }
1210 
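/**
 * vcn_v2_5_sriov_start - start VCN under SR-IOV
 *
 * @adev: amdgpu_device pointer
 *
 * Build the MMSCH v1.1 init table with the memory controller and ring
 * programming for each instance, then let the MMSCH apply it on behalf of
 * the virtual function.
 */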
1211 static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
1212 {
1213 	struct amdgpu_ring *ring;
1214 	uint32_t offset, size, tmp, i, rb_bufsz;
1215 	uint32_t table_size = 0;
1216 	struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
1217 	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
1218 	struct mmsch_v1_0_cmd_end end = { { 0 } };
1219 	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
1220 	struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;
1221 
1222 	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
1223 	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
1224 	end.cmd_header.command_type = MMSCH_COMMAND__END;
1225 
1226 	header->version = MMSCH_VERSION;
1227 	header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
1228 	init_table += header->total_size;
1229 
1230 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1231 		header->eng[i].table_offset = header->total_size;
1232 		header->eng[i].init_status = 0;
1233 		header->eng[i].table_size = 0;
1234 
1235 		table_size = 0;
1236 
1237 		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
1238 			SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
1239 			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
1240 
1241 		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
1242 		/* mc resume */
1243 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1244 			MMSCH_V1_0_INSERT_DIRECT_WT(
1245 				SOC15_REG_OFFSET(VCN, i,
1246 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1247 				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
1248 			MMSCH_V1_0_INSERT_DIRECT_WT(
1249 				SOC15_REG_OFFSET(VCN, i,
1250 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1251 				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
1252 			offset = 0;
1253 			MMSCH_V1_0_INSERT_DIRECT_WT(
1254 				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
1255 		} else {
1256 			MMSCH_V1_0_INSERT_DIRECT_WT(
1257 				SOC15_REG_OFFSET(VCN, i,
1258 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1259 				lower_32_bits(adev->vcn.inst[i].gpu_addr));
1260 			MMSCH_V1_0_INSERT_DIRECT_WT(
1261 				SOC15_REG_OFFSET(VCN, i,
1262 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1263 				upper_32_bits(adev->vcn.inst[i].gpu_addr));
1264 			offset = size;
1265 			MMSCH_V1_0_INSERT_DIRECT_WT(
1266 				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0),
1267 				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
1268 		}
1269 
1270 		MMSCH_V1_0_INSERT_DIRECT_WT(
1271 			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0),
1272 			size);
1273 		MMSCH_V1_0_INSERT_DIRECT_WT(
1274 			SOC15_REG_OFFSET(VCN, i,
1275 				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
1276 			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
1277 		MMSCH_V1_0_INSERT_DIRECT_WT(
1278 			SOC15_REG_OFFSET(VCN, i,
1279 				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
1280 			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
1281 		MMSCH_V1_0_INSERT_DIRECT_WT(
1282 			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1),
1283 			0);
1284 		MMSCH_V1_0_INSERT_DIRECT_WT(
1285 			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1),
1286 			AMDGPU_VCN_STACK_SIZE);
1287 		MMSCH_V1_0_INSERT_DIRECT_WT(
1288 			SOC15_REG_OFFSET(VCN, i,
1289 				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
1290 			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
1291 				AMDGPU_VCN_STACK_SIZE));
1292 		MMSCH_V1_0_INSERT_DIRECT_WT(
1293 			SOC15_REG_OFFSET(VCN, i,
1294 				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
1295 			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
1296 				AMDGPU_VCN_STACK_SIZE));
1297 		MMSCH_V1_0_INSERT_DIRECT_WT(
1298 			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2),
1299 			0);
1300 		MMSCH_V1_0_INSERT_DIRECT_WT(
1301 			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2),
1302 			AMDGPU_VCN_CONTEXT_SIZE);
1303 
1304 		ring = &adev->vcn.inst[i].ring_enc[0];
1305 		ring->wptr = 0;
1306 
1307 		MMSCH_V1_0_INSERT_DIRECT_WT(
1308 			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO),
1309 			lower_32_bits(ring->gpu_addr));
1310 		MMSCH_V1_0_INSERT_DIRECT_WT(
1311 			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI),
1312 			upper_32_bits(ring->gpu_addr));
1313 		MMSCH_V1_0_INSERT_DIRECT_WT(
1314 			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE),
1315 			ring->ring_size / 4);
1316 
1317 		ring = &adev->vcn.inst[i].ring_dec;
1318 		ring->wptr = 0;
1319 		MMSCH_V1_0_INSERT_DIRECT_WT(
1320 			SOC15_REG_OFFSET(VCN, i,
1321 				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
1322 			lower_32_bits(ring->gpu_addr));
1323 		MMSCH_V1_0_INSERT_DIRECT_WT(
1324 			SOC15_REG_OFFSET(VCN, i,
1325 				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
1326 			upper_32_bits(ring->gpu_addr));
1327 
1328 		/* force RBC into idle state */
1329 		rb_bufsz = order_base_2(ring->ring_size);
1330 		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1331 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1332 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1333 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1334 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1335 		MMSCH_V1_0_INSERT_DIRECT_WT(
1336 			SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp);
1337 
1338 		/* add end packet */
1339 		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
1340 		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
1341 		init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;
1342 
1343 		/* refine header */
1344 		header->eng[i].table_size = table_size;
1345 		header->total_size += table_size;
1346 	}
1347 
1348 	return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
1349 }
1350 
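/**
 * vcn_v2_5_stop_dpg_mode - stop a VCN instance running in DPG mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: VCN hardware instance index
 *
 * Wait for the power status and ring pointers to settle, then disable
 * dynamic power gating for the instance.
 */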
1351 static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
1352 {
1353 	uint32_t tmp;
1354 
1355 	/* Wait for power status to be 1 */
1356 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
1357 		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1358 
1359 	/* wait for read ptr to be equal to write ptr */
1360 	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
1361 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);
1362 
1363 	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
1364 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);
1365 
1366 	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
1367 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);
1368 
1369 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
1370 		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1371 
1372 	/* disable dynamic power gating mode */
1373 	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
1374 			~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
1375 
1376 	return 0;
1377 }
1378 
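/**
 * vcn_v2_5_stop - stop the VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop every non-harvested instance: wait for idle, block the LMI UMC
 * channel and VCPU register access, reset and clock-gate the VCPU, and
 * re-enable the register anti-hang mechanism.
 */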
1379 static int vcn_v2_5_stop(struct amdgpu_device *adev)
1380 {
1381 	uint32_t tmp;
1382 	int i, r = 0;
1383 
1384 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1385 		if (adev->vcn.harvest_config & (1 << i))
1386 			continue;
1387 		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1388 			r = vcn_v2_5_stop_dpg_mode(adev, i);
1389 			continue;
1390 		}
1391 
1392 		/* wait for vcn idle */
1393 		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
1394 		if (r)
1395 			return r;
1396 
1397 		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
1398 			UVD_LMI_STATUS__READ_CLEAN_MASK |
1399 			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
1400 			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
1401 		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
1402 		if (r)
1403 			return r;
1404 
1405 		/* block LMI UMC channel */
1406 		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
1407 		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
1408 		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
1409 
1410 		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
1411 			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
1412 		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
1413 		if (r)
1414 			return r;
1415 
1416 		/* block VCPU register access */
1417 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
1418 			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
1419 			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
1420 
1421 		/* reset VCPU */
1422 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
1423 			UVD_VCPU_CNTL__BLK_RST_MASK,
1424 			~UVD_VCPU_CNTL__BLK_RST_MASK);
1425 
1426 		/* disable VCPU clock */
1427 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
1428 			~(UVD_VCPU_CNTL__CLK_EN_MASK));
1429 
1430 		/* clear status */
1431 		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
1432 
1433 		vcn_v2_5_enable_clock_gating(adev);
1434 
1435 		/* enable register anti-hang mechanism */
1436 		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
1437 			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
1438 			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1439 	}
1440 
1441 	if (adev->pm.dpm_enabled)
1442 		amdgpu_dpm_enable_uvd(adev, false);
1443 
1444 	return 0;
1445 }
1446 
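/**
 * vcn_v2_5_pause_dpg_mode - pause or unpause DPG mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: VCN hardware instance index
 * @new_state: requested pause state
 *
 * If the firmware-based pause state changed, request the pause through
 * UVD_DPG_PAUSE, wait for the acknowledge and reprogram the encode rings,
 * or clear the pause request on unpause.
 */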
1447 static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
1448 				int inst_idx, struct dpg_pause_state *new_state)
1449 {
1450 	struct amdgpu_ring *ring;
1451 	uint32_t reg_data = 0;
1452 	int ret_code = 0;
1453 
1454 	/* pause/unpause if state is changed */
1455 	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
1456 		DRM_DEBUG("dpg pause state changed %d -> %d",
1457 			adev->vcn.inst[inst_idx].pause_state.fw_based,	new_state->fw_based);
1458 		reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
1459 			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1460 
1461 		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
1462 			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
1463 				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1464 
1465 			if (!ret_code) {
1466 				volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
1467 
1468 				/* pause DPG */
1469 				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1470 				WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
1471 
1472 				/* wait for ACK */
1473 				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
1474 					   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
1475 					   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1476 
1477 				/* Stall DPG before WPTR/RPTR reset */
1478 				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
1479 					   UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
1480 					   ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
1481 
1482 				/* Restore */
1483 				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
1484 				ring = &adev->vcn.inst[inst_idx].ring_enc[0];
1485 				ring->wptr = 0;
1486 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
1487 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1488 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
1489 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1490 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1491 				fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
1492 
1493 				fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
1494 				ring = &adev->vcn.inst[inst_idx].ring_enc[1];
1495 				ring->wptr = 0;
1496 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1497 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1498 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
1499 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1500 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1501 				fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
1502 
1503 				/* Unstall DPG */
1504 				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
1505 					   0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
1506 
1507 				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
1508 					   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1509 			}
1510 		} else {
1511 			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1512 			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
1513 			SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
1514 				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1515 		}
1516 		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
1517 	}
1518 
1519 	return 0;
1520 }
1521 
1522 /**
1523  * vcn_v2_5_dec_ring_get_rptr - get read pointer
1524  *
1525  * @ring: amdgpu_ring pointer
1526  *
1527  * Returns the current hardware read pointer
1528  */
1529 static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
1530 {
1531 	struct amdgpu_device *adev = ring->adev;
1532 
1533 	return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
1534 }
1535 
1536 /**
1537  * vcn_v2_5_dec_ring_get_wptr - get write pointer
1538  *
1539  * @ring: amdgpu_ring pointer
1540  *
1541  * Returns the current hardware write pointer
1542  */
1543 static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
1544 {
1545 	struct amdgpu_device *adev = ring->adev;
1546 
1547 	if (ring->use_doorbell)
1548 		return *ring->wptr_cpu_addr;
1549 	else
1550 		return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
1551 }
1552 
1553 /**
1554  * vcn_v2_5_dec_ring_set_wptr - set write pointer
1555  *
1556  * @ring: amdgpu_ring pointer
1557  *
1558  * Commits the write pointer to the hardware
1559  */
1560 static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
1561 {
1562 	struct amdgpu_device *adev = ring->adev;
1563 
1564 	if (ring->use_doorbell) {
1565 		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1566 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1567 	} else {
1568 		WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
1569 	}
1570 }
1571 
1572 static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
1573 	.type = AMDGPU_RING_TYPE_VCN_DEC,
1574 	.align_mask = 0xf,
1575 	.secure_submission_supported = true,
1576 	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
1577 	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
1578 	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
1579 	.emit_frame_size =
1580 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1581 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1582 		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
1583 		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
1584 		6,
1585 	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
1586 	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
1587 	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
1588 	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
1589 	.test_ring = vcn_v2_0_dec_ring_test_ring,
1590 	.test_ib = amdgpu_vcn_dec_ring_test_ib,
1591 	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
1592 	.insert_start = vcn_v2_0_dec_ring_insert_start,
1593 	.insert_end = vcn_v2_0_dec_ring_insert_end,
1594 	.pad_ib = amdgpu_ring_generic_pad_ib,
1595 	.begin_use = amdgpu_vcn_ring_begin_use,
1596 	.end_use = amdgpu_vcn_ring_end_use,
1597 	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
1598 	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
1599 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1600 };
1601 
1602 /**
1603  * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
1604  *
1605  * @ring: amdgpu_ring pointer
1606  *
1607  * Returns the current hardware enc read pointer
1608  */
1609 static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
1610 {
1611 	struct amdgpu_device *adev = ring->adev;
1612 
1613 	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
1614 		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
1615 	else
1616 		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
1617 }
1618 
1619 /**
1620  * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
1621  *
1622  * @ring: amdgpu_ring pointer
1623  *
1624  * Returns the current hardware enc write pointer
1625  */
1626 static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
1627 {
1628 	struct amdgpu_device *adev = ring->adev;
1629 
1630 	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
1631 		if (ring->use_doorbell)
1632 			return *ring->wptr_cpu_addr;
1633 		else
1634 			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
1635 	} else {
1636 		if (ring->use_doorbell)
1637 			return *ring->wptr_cpu_addr;
1638 		else
1639 			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
1640 	}
1641 }
1642 
1643 /**
1644  * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
1645  *
1646  * @ring: amdgpu_ring pointer
1647  *
1648  * Commits the enc write pointer to the hardware
1649  */
1650 static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
1651 {
1652 	struct amdgpu_device *adev = ring->adev;
1653 
1654 	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
1655 		if (ring->use_doorbell) {
1656 			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1657 			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1658 		} else {
1659 			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1660 		}
1661 	} else {
1662 		if (ring->use_doorbell) {
1663 			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1664 			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1665 		} else {
1666 			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1667 		}
1668 	}
1669 }
1670 
1671 static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
1672 	.type = AMDGPU_RING_TYPE_VCN_ENC,
1673 	.align_mask = 0x3f,
1674 	.nop = VCN_ENC_CMD_NO_OP,
1675 	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
1676 	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
1677 	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
1678 	.emit_frame_size =
1679 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1680 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1681 		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
1682 		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
1683 		1, /* vcn_v2_0_enc_ring_insert_end */
1684 	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
1685 	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
1686 	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
1687 	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
1688 	.test_ring = amdgpu_vcn_enc_ring_test_ring,
1689 	.test_ib = amdgpu_vcn_enc_ring_test_ib,
1690 	.insert_nop = amdgpu_ring_insert_nop,
1691 	.insert_end = vcn_v2_0_enc_ring_insert_end,
1692 	.pad_ib = amdgpu_ring_generic_pad_ib,
1693 	.begin_use = amdgpu_vcn_ring_begin_use,
1694 	.end_use = amdgpu_vcn_ring_end_use,
1695 	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
1696 	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
1697 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1698 };
1699 
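/**
 * vcn_v2_5_set_dec_ring_funcs - set dec ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set the decode ring function pointers for every non-harvested
 * VCN instance
 */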
1700 static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
1701 {
1702 	int i;
1703 
1704 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1705 		if (adev->vcn.harvest_config & (1 << i))
1706 			continue;
1707 		adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
1708 		adev->vcn.inst[i].ring_dec.me = i;
1709 		DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
1710 	}
1711 }
1712 
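/**
 * vcn_v2_5_set_enc_ring_funcs - set enc ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set the encode ring function pointers for every non-harvested
 * VCN instance
 */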
1713 static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
1714 {
1715 	int i, j;
1716 
1717 	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
1718 		if (adev->vcn.harvest_config & (1 << j))
1719 			continue;
1720 		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
1721 			adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
1722 			adev->vcn.inst[j].ring_enc[i].me = j;
1723 		}
1724 		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
1725 	}
1726 }
1727 
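/**
 * vcn_v2_5_is_idle - check VCN block idle status
 *
 * @handle: amdgpu_device pointer
 *
 * Returns true when every non-harvested VCN instance reports
 * UVD_STATUS__IDLE
 */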
1728 static bool vcn_v2_5_is_idle(void *handle)
1729 {
1730 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1731 	int i, ret = 1;
1732 
1733 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1734 		if (adev->vcn.harvest_config & (1 << i))
1735 			continue;
1736 		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
1737 	}
1738 
1739 	return ret;
1740 }
1741 
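/**
 * vcn_v2_5_wait_for_idle - wait for VCN block to become idle
 *
 * @handle: amdgpu_device pointer
 *
 * Poll each non-harvested VCN instance until it reports idle or the
 * wait times out
 */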
1742 static int vcn_v2_5_wait_for_idle(void *handle)
1743 {
1744 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1745 	int i, ret = 0;
1746 
1747 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1748 		if (adev->vcn.harvest_config & (1 << i))
1749 			continue;
1750 		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
1751 			UVD_STATUS__IDLE);
1752 		if (ret)
1753 			return ret;
1754 	}
1755 
1756 	return ret;
1757 }
1758 
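/**
 * vcn_v2_5_set_clockgating_state - set VCN clock gating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state
 *
 * Enable or disable VCN clock gating; gating is refused with -EBUSY
 * when the block is not idle
 */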
1759 static int vcn_v2_5_set_clockgating_state(void *handle,
1760 					  enum amd_clockgating_state state)
1761 {
1762 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1763 	bool enable = (state == AMD_CG_STATE_GATE);
1764 
1765 	if (amdgpu_sriov_vf(adev))
1766 		return 0;
1767 
1768 	if (enable) {
1769 		if (!vcn_v2_5_is_idle(handle))
1770 			return -EBUSY;
1771 		vcn_v2_5_enable_clock_gating(adev);
1772 	} else {
1773 		vcn_v2_5_disable_clock_gating(adev);
1774 	}
1775 
1776 	return 0;
1777 }
1778 
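/**
 * vcn_v2_5_set_powergating_state - set VCN power gating state
 *
 * @handle: amdgpu_device pointer
 * @state: power gating state
 *
 * Gate or ungate VCN power by stopping or starting the VCN block
 */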
1779 static int vcn_v2_5_set_powergating_state(void *handle,
1780 					  enum amd_powergating_state state)
1781 {
1782 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1783 	int ret;
1784 
1785 	if (amdgpu_sriov_vf(adev))
1786 		return 0;
1787 
1788 	if (state == adev->vcn.cur_state)
1789 		return 0;
1790 
1791 	if (state == AMD_PG_STATE_GATE)
1792 		ret = vcn_v2_5_stop(adev);
1793 	else
1794 		ret = vcn_v2_5_start(adev);
1795 
1796 	if (!ret)
1797 		adev->vcn.cur_state = state;
1798 
1799 	return ret;
1800 }
1801 
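/**
 * vcn_v2_5_set_interrupt_state - set VCN interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Stub for the required irq .set callback; nothing needs to be
 * programmed for VCN interrupt sources
 */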
1802 static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
1803 					struct amdgpu_irq_src *source,
1804 					unsigned int type,
1805 					enum amdgpu_interrupt_state state)
1806 {
1807 	return 0;
1808 }
1809 
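/**
 * vcn_v2_5_process_interrupt - process VCN interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Route the interrupt to the fence handler of the matching ring, or
 * to the RAS poison handler, based on the client and source id
 */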
1810 static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
1811 				      struct amdgpu_irq_src *source,
1812 				      struct amdgpu_iv_entry *entry)
1813 {
1814 	uint32_t ip_instance;
1815 
1816 	switch (entry->client_id) {
1817 	case SOC15_IH_CLIENTID_VCN:
1818 		ip_instance = 0;
1819 		break;
1820 	case SOC15_IH_CLIENTID_VCN1:
1821 		ip_instance = 1;
1822 		break;
1823 	default:
1824 		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
1825 		return 0;
1826 	}
1827 
1828 	DRM_DEBUG("IH: VCN TRAP\n");
1829 
1830 	switch (entry->src_id) {
1831 	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
1832 		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
1833 		break;
1834 	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
1835 		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
1836 		break;
1837 	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
1838 		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
1839 		break;
1840 	case VCN_2_6__SRCID_UVD_POISON:
1841 		amdgpu_vcn_process_poison_irq(adev, source, entry);
1842 		break;
1843 	default:
1844 		DRM_ERROR("Unhandled interrupt: %d %d\n",
1845 			  entry->src_id, entry->src_data[0]);
1846 		break;
1847 	}
1848 
1849 	return 0;
1850 }
1851 
1852 static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
1853 	.set = vcn_v2_5_set_interrupt_state,
1854 	.process = vcn_v2_5_process_interrupt,
1855 };
1856 
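/**
 * vcn_v2_5_set_irq_funcs - set VCN interrupt functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set the interrupt function pointers and the number of interrupt
 * types for every non-harvested VCN instance
 */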
1857 static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
1858 {
1859 	int i;
1860 
1861 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1862 		if (adev->vcn.harvest_config & (1 << i))
1863 			continue;
1864 		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
1865 		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
1866 	}
1867 }
1868 
1869 static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
1870 	.name = "vcn_v2_5",
1871 	.early_init = vcn_v2_5_early_init,
1872 	.late_init = NULL,
1873 	.sw_init = vcn_v2_5_sw_init,
1874 	.sw_fini = vcn_v2_5_sw_fini,
1875 	.hw_init = vcn_v2_5_hw_init,
1876 	.hw_fini = vcn_v2_5_hw_fini,
1877 	.suspend = vcn_v2_5_suspend,
1878 	.resume = vcn_v2_5_resume,
1879 	.is_idle = vcn_v2_5_is_idle,
1880 	.wait_for_idle = vcn_v2_5_wait_for_idle,
1881 	.check_soft_reset = NULL,
1882 	.pre_soft_reset = NULL,
1883 	.soft_reset = NULL,
1884 	.post_soft_reset = NULL,
1885 	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
1886 	.set_powergating_state = vcn_v2_5_set_powergating_state,
1887 };
1888 
1889 static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
1890 	.name = "vcn_v2_6",
1891 	.early_init = vcn_v2_5_early_init,
1892 	.late_init = NULL,
1893 	.sw_init = vcn_v2_5_sw_init,
1894 	.sw_fini = vcn_v2_5_sw_fini,
1895 	.hw_init = vcn_v2_5_hw_init,
1896 	.hw_fini = vcn_v2_5_hw_fini,
1897 	.suspend = vcn_v2_5_suspend,
1898 	.resume = vcn_v2_5_resume,
1899 	.is_idle = vcn_v2_5_is_idle,
1900 	.wait_for_idle = vcn_v2_5_wait_for_idle,
1901 	.check_soft_reset = NULL,
1902 	.pre_soft_reset = NULL,
1903 	.soft_reset = NULL,
1904 	.post_soft_reset = NULL,
1905 	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
1906 	.set_powergating_state = vcn_v2_5_set_powergating_state,
1907 };
1908 
1909 const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
1910 {
1911 	.type = AMD_IP_BLOCK_TYPE_VCN,
1912 	.major = 2,
1913 	.minor = 5,
1914 	.rev = 0,
1915 	.funcs = &vcn_v2_5_ip_funcs,
1916 };
1917 
1918 const struct amdgpu_ip_block_version vcn_v2_6_ip_block =
1919 {
1920 	.type = AMD_IP_BLOCK_TYPE_VCN,
1921 	.major = 2,
1922 	.minor = 6,
1923 	.rev = 0,
1924 	.funcs = &vcn_v2_6_ip_funcs,
1925 };
1926 
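/**
 * vcn_v2_6_query_poison_by_instance - query RAS poison status of one instance
 *
 * @adev: amdgpu_device pointer
 * @instance: VCN instance index
 * @sub_block: VCN sub block to query
 *
 * Returns a non-zero value when the given sub block of the instance
 * reports poison, 0 otherwise
 */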
1927 static uint32_t vcn_v2_6_query_poison_by_instance(struct amdgpu_device *adev,
1928 			uint32_t instance, uint32_t sub_block)
1929 {
1930 	uint32_t poison_stat = 0, reg_value = 0;
1931 
1932 	switch (sub_block) {
1933 	case AMDGPU_VCN_V2_6_VCPU_VCODEC:
1934 		reg_value = RREG32_SOC15(VCN, instance, mmUVD_RAS_VCPU_VCODEC_STATUS);
1935 		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
1936 		break;
1937 	default:
1938 		break;
1939 	}
1940 
1941 	if (poison_stat)
1942 		dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
1943 			instance, sub_block);
1944 
1945 	return poison_stat;
1946 }
1947 
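/**
 * vcn_v2_6_query_poison_status - query VCN RAS poison status
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true if any sub block of any VCN instance reports poison
 */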
1948 static bool vcn_v2_6_query_poison_status(struct amdgpu_device *adev)
1949 {
1950 	uint32_t inst, sub;
1951 	uint32_t poison_stat = 0;
1952 
1953 	for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
1954 		for (sub = 0; sub < AMDGPU_VCN_V2_6_MAX_SUB_BLOCK; sub++)
1955 			poison_stat +=
1956 				vcn_v2_6_query_poison_by_instance(adev, inst, sub);
1957 
1958 	return !!poison_stat;
1959 }
1960 
1961 const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = {
1962 	.query_poison_status = vcn_v2_6_query_poison_status,
1963 };
1964 
1965 static struct amdgpu_vcn_ras vcn_v2_6_ras = {
1966 	.ras_block = {
1967 		.hw_ops = &vcn_v2_6_ras_hw_ops,
1968 	},
1969 };
1970 
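/**
 * vcn_v2_5_set_ras_funcs - set VCN RAS functions
 *
 * @adev: amdgpu_device pointer
 *
 * Hook up the RAS block; only VCN 2.6.0 registers RAS support here
 */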
1971 static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev)
1972 {
1973 	switch (adev->ip_versions[VCN_HWIP][0]) {
1974 	case IP_VERSION(2, 6, 0):
1975 		adev->vcn.ras = &vcn_v2_6_ras;
1976 		break;
1977 	default:
1978 		break;
1979 	}
1980 }
1981