/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v1_0.h"
#include "vcn_v2_5.h"

#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c

#define VCN25_MAX_HW_INSTANCES_ARCTURUS			2

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_5_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state);
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

/**
 * vcn_v2_5_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v2_5_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.num_vcn_inst = 2;
		adev->vcn.harvest_config = 0;
		adev->vcn.num_enc_rings = 1;
	} else {
		u32 harvest;
		int i;

		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
				adev->vcn.harvest_config |= 1 << i;
		}
		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
					AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;

		adev->vcn.num_enc_rings = 2;
	}

	vcn_v2_5_set_dec_ring_funcs(adev);
	vcn_v2_5_set_enc_ring_funcs(adev);
	vcn_v2_5_set_irq_funcs(adev);
	vcn_v2_5_set_ras_funcs(adev);

	return 0;
}

/**
 * vcn_v2_5_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and perform software initialization
 */
static int vcn_v2_5_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
		if (r)
			return r;

		/* VCN ENC TRAP */
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		volatile struct amdgpu_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << j))
			continue;
		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);

		ring = &adev->vcn.inst[j].ring_dec;
		ring->use_doorbell = true;

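		/*
		 * Doorbell layout, as implied by the arithmetic below: under
		 * SR-IOV each instance is packed into a pair of doorbells
		 * (dec plus one enc ring), while on bare metal the instances
		 * are spaced 8 doorbells apart (dec at offset 0, enc ring i
		 * at offset 2 + i).
		 */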
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				(amdgpu_sriov_vf(adev) ? 2*j : 8*j);
		sprintf(ring->name, "vcn_dec_%d", j);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
				     0, AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);

			ring = &adev->vcn.inst[j].ring_enc[i];
			ring->use_doorbell = true;

			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));

			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->vcn.inst[j].irq, 0,
					     hw_prio, NULL);
			if (r)
				return r;
		}

		fw_shared = adev->vcn.inst[j].fw_shared.cpu_addr;
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[j]);
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;

	return 0;
}

/**
 * vcn_v2_5_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Suspend VCN and free up software allocations
 */
static int vcn_v2_5_sw_fini(void *handle)
{
	int i, r, idx;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	volatile struct amdgpu_fw_shared *fw_shared;

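	/*
	 * Clear the firmware-shared present flag only while the DRM device
	 * is still attached: drm_dev_enter() fails once the device has been
	 * unplugged, which guards this CPU access against device removal.
	 */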
	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
		}
		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v2_5_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_5_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r = 0;

	if (amdgpu_sriov_vf(adev))
		r = vcn_v2_5_sriov_start(adev);

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (amdgpu_sriov_vf(adev)) {
			adev->vcn.inst[j].ring_enc[0].sched.ready = true;
			adev->vcn.inst[j].ring_enc[1].sched.ready = false;
			adev->vcn.inst[j].ring_enc[2].sched.ready = false;
			adev->vcn.inst[j].ring_dec.sched.ready = true;
		} else {
			ring = &adev->vcn.inst[j].ring_dec;

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						     ring->doorbell_index, j);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
				ring = &adev->vcn.inst[j].ring_enc[i];
				r = amdgpu_ring_test_helper(ring);
				if (r)
					goto done;
			}
		}
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v2_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block and mark the rings as no longer ready
 */
static int vcn_v2_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
		    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
		     RREG32_SOC15(VCN, i, mmUVD_STATUS)))
			vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
	}

	return 0;
}

/**
 * vcn_v2_5_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_5_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v2_5_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v2_5_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_5_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v2_5_hw_init(adev);

	return r;
}

/**
 * vcn_v2_5_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;
	int i;

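	/*
	 * Layout note: the three cache windows sit back to back in each
	 * instance's firmware BO. Window 0 holds the firmware image (or
	 * points at the PSP-managed TMR when the PSP loads the ucode),
	 * window 1 holds the stack, window 2 holds the context, and the
	 * non-cached window maps the firmware-shared buffer.
	 */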
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* cache window 0: fw */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
			WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);

		/* cache window 1: stack */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

		/* cache window 2: context */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

		/* non-cache window */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
	}
}

static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v2_5_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* UVD disable CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
		data &= ~(UVD_CGC_GATE__SYS_MASK
			| UVD_CGC_GATE__UDEC_MASK
			| UVD_CGC_GATE__MPEG2_MASK
			| UVD_CGC_GATE__REGS_MASK
			| UVD_CGC_GATE__RBC_MASK
			| UVD_CGC_GATE__LMI_MC_MASK
			| UVD_CGC_GATE__LMI_UMC_MASK
			| UVD_CGC_GATE__IDCT_MASK
			| UVD_CGC_GATE__MPRD_MASK
			| UVD_CGC_GATE__MPC_MASK
			| UVD_CGC_GATE__LBSI_MASK
			| UVD_CGC_GATE__LRBBM_MASK
			| UVD_CGC_GATE__UDEC_RE_MASK
			| UVD_CGC_GATE__UDEC_CM_MASK
			| UVD_CGC_GATE__UDEC_IT_MASK
			| UVD_CGC_GATE__UDEC_DB_MASK
			| UVD_CGC_GATE__UDEC_MP_MASK
			| UVD_CGC_GATE__WCB_MASK
			| UVD_CGC_GATE__VCPU_MASK
			| UVD_CGC_GATE__MMSCH_MASK);

		WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);

		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK
			| UVD_CGC_CTRL__MMSCH_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		/* turn on */
		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
		data |= (UVD_SUVD_CGC_GATE__SRE_MASK
			| UVD_SUVD_CGC_GATE__SIT_MASK
			| UVD_SUVD_CGC_GATE__SMP_MASK
			| UVD_SUVD_CGC_GATE__SCM_MASK
			| UVD_SUVD_CGC_GATE__SDB_MASK
			| UVD_SUVD_CGC_GATE__SRE_H264_MASK
			| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SIT_H264_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCM_H264_MASK
			| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SDB_H264_MASK
			| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCLR_MASK
			| UVD_SUVD_CGC_GATE__UVD_SC_MASK
			| UVD_SUVD_CGC_GATE__ENT_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
			| UVD_SUVD_CGC_GATE__SITE_MASK
			| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
			| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
			| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
			| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
			| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
		uint8_t sram_sel, int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v2_5_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable UVD CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

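	/*
	 * When "indirect" is set, the WREG32_SOC15_DPG_MODE() calls below do
	 * not hit MMIO directly; they are staged into the DPG SRAM buffer
	 * and committed in one batch via psp_update_vcn_sram() further down.
	 */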
	/* enable clock gating */
	vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);

	/* unblock VCPU register access */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
				    (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
					       (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));

	ring = &adev->vcn.inst[inst_idx].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;

	/* set the write pointer delay */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

	return 0;
}

static int vcn_v2_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* disable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

		/* set uvd status busy */
		tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return 0;

	/* SW clock gating */
	vcn_v2_5_disable_clock_gating(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
		tmp &= ~0xff;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8 |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
	}

	vcn_v2_5_mc_resume(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
		WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

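		/*
		 * Wait for the VCPU to report ready (UVD_STATUS bit 1),
		 * retrying up to 10 times with a VCPU block reset in between
		 * before giving up.
		 */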
		for (k = 0; k < 10; ++k) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
				if (status & 2)
					break;
				if (amdgpu_emu_mode == 1)
					msleep(500);
				else
					mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("VCN decode not responding, giving up!!!\n");
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);

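		/*
		 * The FW_QUEUE_RING_RESET flags bracket the reprogramming of
		 * each queue; the firmware is expected to ignore the stale
		 * ring state while the corresponding flag is set.
		 */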
		fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
				lower_32_bits(ring->wptr));
		fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;

		fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
		fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

		fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
		ring = &adev->vcn.inst[i].ring_enc[1];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
		fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
	}

	return 0;
}

static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop = 0, size = 0;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_1_init_header *header = NULL;

	header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
	size = header->total_size;

	/*
	 * 1, write to mmMMSCH_VF_CTX_ADDR_LO/HI registers with the GPU mc
	 * addr of the memory descriptor location
	 */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	/*
	 * 5, kick off the initialization and wait until
	 * mmMMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
	loop = 10;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(100);
		data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev,
			"failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
			data);
		return -EBUSY;
	}

	return 0;
}

static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp, i, rb_bufsz;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
	struct mmsch_v1_0_cmd_end end = { { 0 } };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	header->version = MMSCH_VERSION;
	header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
	init_table += header->total_size;

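	/*
	 * Build one table section per instance out of direct register
	 * writes; the MMSCH replays them on the VF's behalf, since the VF
	 * cannot program these registers directly.
	 */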
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		header->eng[i].table_offset = header->total_size;
		header->eng[i].init_status = 0;
		header->eng[i].table_size = 0;

		table_size = 0;

		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
		/* mc resume */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
			offset = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
		} else {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0),
			size);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->wptr = 0;

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI),
			upper_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE),
			ring->ring_size / 4);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->wptr = 0;
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(ring->gpu_addr));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp);

		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;

		/* refine header */
		header->eng[i].table_size = table_size;
		header->total_size += table_size;
	}

	return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
}

static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

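	/*
	 * Drain the rings: waiting for each read pointer to catch up with
	 * the corresponding write pointer ensures no work is left in flight
	 * before dynamic power gating is disabled below.
	 */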
	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

static int vcn_v2_5_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v2_5_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);

		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* clear status */
		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

		vcn_v2_5_enable_clock_gating(adev);

		/* enable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state)
{
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code = 0;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d\n",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;

				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
					   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Stall DPG before WPTR/RPTR reset */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					   UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
					   ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				/* Restore */
				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst[inst_idx].ring_enc[0];
				ring->wptr = 0;
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

				fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst[inst_idx].ring_enc[1];
				ring->wptr = 0;
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

				/* Unstall DPG */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					   0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
					   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
			SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

1468 /**
1469  * vcn_v2_5_dec_ring_get_rptr - get read pointer
1470  *
1471  * @ring: amdgpu_ring pointer
1472  *
1473  * Returns the current hardware read pointer
1474  */
1475 static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
1476 {
1477 	struct amdgpu_device *adev = ring->adev;
1478 
1479 	return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
1480 }
1481 
1482 /**
1483  * vcn_v2_5_dec_ring_get_wptr - get write pointer
1484  *
1485  * @ring: amdgpu_ring pointer
1486  *
1487  * Returns the current hardware write pointer
1488  */
1489 static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
1490 {
1491 	struct amdgpu_device *adev = ring->adev;
1492 
1493 	if (ring->use_doorbell)
1494 		return adev->wb.wb[ring->wptr_offs];
1495 	else
1496 		return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
1497 }
1498 
1499 /**
1500  * vcn_v2_5_dec_ring_set_wptr - set write pointer
1501  *
1502  * @ring: amdgpu_ring pointer
1503  *
1504  * Commits the write pointer to the hardware
1505  */
1506 static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
1507 {
1508 	struct amdgpu_device *adev = ring->adev;
1509 
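	/*
	 * Publish the new wptr through the writeback slot first so it is
	 * visible in memory, then ring the doorbell; without doorbells,
	 * fall back to a direct MMIO write of UVD_RBC_RB_WPTR.
	 */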
1510 	if (ring->use_doorbell) {
1511 		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
1512 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1513 	} else {
1514 		WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
1515 	}
1516 }
1517 
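/*
 * Decode ring function tables.  VCN 2.5 (Arcturus) and VCN 2.6 (Aldebaran)
 * share the VCN 2.0 packet helpers; the only difference between the two
 * tables is which MMHUB the ring's VM operations go through (MMHUB 1 on
 * Arcturus, MMHUB 0 on Aldebaran).  The core ring code drives these
 * callbacks roughly as follows (illustrative sketch only, not the exact
 * amdgpu_ring submission path):
 *
 *	wptr = ring->funcs->get_wptr(ring);	// snapshot current position
 *	ring->funcs->emit_ib(ring, ...);	// queue the command buffer
 *	ring->funcs->set_wptr(ring);		// commit via doorbell/MMIO
 */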
1518 static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
1519 	.type = AMDGPU_RING_TYPE_VCN_DEC,
1520 	.align_mask = 0xf,
1521 	.secure_submission_supported = true,
1522 	.vmhub = AMDGPU_MMHUB_1,
1523 	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
1524 	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
1525 	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
1526 	.emit_frame_size =
1527 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1528 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1529 		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
1530 		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
1531 		6,
1532 	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
1533 	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
1534 	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
1535 	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
1536 	.test_ring = vcn_v2_0_dec_ring_test_ring,
1537 	.test_ib = amdgpu_vcn_dec_ring_test_ib,
1538 	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
1539 	.insert_start = vcn_v2_0_dec_ring_insert_start,
1540 	.insert_end = vcn_v2_0_dec_ring_insert_end,
1541 	.pad_ib = amdgpu_ring_generic_pad_ib,
1542 	.begin_use = amdgpu_vcn_ring_begin_use,
1543 	.end_use = amdgpu_vcn_ring_end_use,
1544 	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
1545 	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
1546 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1547 };
1548 
1549 static const struct amdgpu_ring_funcs vcn_v2_6_dec_ring_vm_funcs = {
1550 	.type = AMDGPU_RING_TYPE_VCN_DEC,
1551 	.align_mask = 0xf,
1552 	.secure_submission_supported = true,
1553 	.vmhub = AMDGPU_MMHUB_0,
1554 	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
1555 	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
1556 	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
1557 	.emit_frame_size =
1558 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1559 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1560 		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
1561 		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
1562 		6,
1563 	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
1564 	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
1565 	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
1566 	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
1567 	.test_ring = vcn_v2_0_dec_ring_test_ring,
1568 	.test_ib = amdgpu_vcn_dec_ring_test_ib,
1569 	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
1570 	.insert_start = vcn_v2_0_dec_ring_insert_start,
1571 	.insert_end = vcn_v2_0_dec_ring_insert_end,
1572 	.pad_ib = amdgpu_ring_generic_pad_ib,
1573 	.begin_use = amdgpu_vcn_ring_begin_use,
1574 	.end_use = amdgpu_vcn_ring_end_use,
1575 	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
1576 	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
1577 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1578 };
1579 
1580 /**
1581  * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
1582  *
1583  * @ring: amdgpu_ring pointer
1584  *
1585  * Returns the current hardware enc read pointer
1586  */
1587 static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
1588 {
1589 	struct amdgpu_device *adev = ring->adev;
1590 
1591 	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
1592 		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
1593 	else
1594 		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
1595 }
1596 
1597 /**
1598  * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
1599  *
1600  * @ring: amdgpu_ring pointer
1601  *
1602  * Returns the current hardware enc write pointer
1603  */
1604 static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
1605 {
1606 	struct amdgpu_device *adev = ring->adev;
1607 
1608 	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
1609 		if (ring->use_doorbell)
1610 			return adev->wb.wb[ring->wptr_offs];
1611 		else
1612 			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
1613 	} else {
1614 		if (ring->use_doorbell)
1615 			return adev->wb.wb[ring->wptr_offs];
1616 		else
1617 			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
1618 	}
1619 }
1620 
1621 /**
1622  * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
1623  *
1624  * @ring: amdgpu_ring pointer
1625  *
1626  * Commits the enc write pointer to the hardware
1627  */
1628 static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
1629 {
1630 	struct amdgpu_device *adev = ring->adev;
1631 
1632 	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
1633 		if (ring->use_doorbell) {
1634 			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
1635 			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1636 		} else {
1637 			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1638 		}
1639 	} else {
1640 		if (ring->use_doorbell) {
1641 			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
1642 			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1643 		} else {
1644 			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1645 		}
1646 	}
1647 }
1648 
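/*
 * Encode ring function tables; as with the decode tables, the v2.5 and
 * v2.6 variants below differ only in the MMHUB used for VM operations.
 */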
1649 static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
1650 	.type = AMDGPU_RING_TYPE_VCN_ENC,
1651 	.align_mask = 0x3f,
1652 	.nop = VCN_ENC_CMD_NO_OP,
1653 	.vmhub = AMDGPU_MMHUB_1,
1654 	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
1655 	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
1656 	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
1657 	.emit_frame_size =
1658 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1659 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1660 		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
1661 		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
1662 		1, /* vcn_v2_0_enc_ring_insert_end */
1663 	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
1664 	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
1665 	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
1666 	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
1667 	.test_ring = amdgpu_vcn_enc_ring_test_ring,
1668 	.test_ib = amdgpu_vcn_enc_ring_test_ib,
1669 	.insert_nop = amdgpu_ring_insert_nop,
1670 	.insert_end = vcn_v2_0_enc_ring_insert_end,
1671 	.pad_ib = amdgpu_ring_generic_pad_ib,
1672 	.begin_use = amdgpu_vcn_ring_begin_use,
1673 	.end_use = amdgpu_vcn_ring_end_use,
1674 	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
1675 	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
1676 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1677 };
1678 
1679 static const struct amdgpu_ring_funcs vcn_v2_6_enc_ring_vm_funcs = {
1680 	.type = AMDGPU_RING_TYPE_VCN_ENC,
1681 	.align_mask = 0x3f,
1682 	.nop = VCN_ENC_CMD_NO_OP,
1683 	.vmhub = AMDGPU_MMHUB_0,
1684 	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
1685 	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
1686 	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
1687 	.emit_frame_size =
1688 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1689 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1690 		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
1691 		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
1692 		1, /* vcn_v2_0_enc_ring_insert_end */
1693 	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
1694 	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
1695 	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
1696 	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
1697 	.test_ring = amdgpu_vcn_enc_ring_test_ring,
1698 	.test_ib = amdgpu_vcn_enc_ring_test_ib,
1699 	.insert_nop = amdgpu_ring_insert_nop,
1700 	.insert_end = vcn_v2_0_enc_ring_insert_end,
1701 	.pad_ib = amdgpu_ring_generic_pad_ib,
1702 	.begin_use = amdgpu_vcn_ring_begin_use,
1703 	.end_use = amdgpu_vcn_ring_end_use,
1704 	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
1705 	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
1706 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1707 };
1708 
1709 static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
1710 {
1711 	int i;
1712 
1713 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1714 		if (adev->vcn.harvest_config & (1 << i))
1715 			continue;
1716 		if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
1717 			adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
1718 		else /* CHIP_ALDEBARAN */
1719 			adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_6_dec_ring_vm_funcs;
1720 		adev->vcn.inst[i].ring_dec.me = i;
1721 		DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
1722 	}
1723 }
1724 
1725 static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
1726 {
1727 	int i, j;
1728 
1729 	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
1730 		if (adev->vcn.harvest_config & (1 << j))
1731 			continue;
1732 		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
1733 			if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
1734 				adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
1735 			else /* CHIP_ALDEBARAN */
1736 				adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_6_enc_ring_vm_funcs;
1737 			adev->vcn.inst[j].ring_enc[i].me = j;
1738 		}
1739 		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
1740 	}
1741 }
1742 
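/* The block is idle only when every un-harvested instance reports UVD_STATUS__IDLE. */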
1743 static bool vcn_v2_5_is_idle(void *handle)
1744 {
1745 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1746 	int i, ret = 1;
1747 
1748 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1749 		if (adev->vcn.harvest_config & (1 << i))
1750 			continue;
1751 		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
1752 	}
1753 
1754 	return ret;
1755 }
1756 
1757 static int vcn_v2_5_wait_for_idle(void *handle)
1758 {
1759 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1760 	int i, ret = 0;
1761 
1762 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1763 		if (adev->vcn.harvest_config & (1 << i))
1764 			continue;
1765 		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
1766 			UVD_STATUS__IDLE);
1767 		if (ret)
1768 			return ret;
1769 	}
1770 
1771 	return ret;
1772 }
1773 
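/* Clock gating may only be enabled while the engine is idle; a busy engine returns -EBUSY. */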
1774 static int vcn_v2_5_set_clockgating_state(void *handle,
1775 					  enum amd_clockgating_state state)
1776 {
1777 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1778 	bool enable = (state == AMD_CG_STATE_GATE);
1779 
1780 	if (amdgpu_sriov_vf(adev))
1781 		return 0;
1782 
1783 	if (enable) {
1784 		if (!vcn_v2_5_is_idle(handle))
1785 			return -EBUSY;
1786 		vcn_v2_5_enable_clock_gating(adev);
1787 	} else {
1788 		vcn_v2_5_disable_clock_gating(adev);
1789 	}
1790 
1791 	return 0;
1792 }
1793 
1794 static int vcn_v2_5_set_powergating_state(void *handle,
1795 					  enum amd_powergating_state state)
1796 {
1797 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1798 	int ret;
1799 
1800 	if (amdgpu_sriov_vf(adev))
1801 		return 0;
1802 
1803 	if (state == adev->vcn.cur_state)
1804 		return 0;
1805 
1806 	if (state == AMD_PG_STATE_GATE)
1807 		ret = vcn_v2_5_stop(adev);
1808 	else
1809 		ret = vcn_v2_5_start(adev);
1810 
1811 	if (!ret)
1812 		adev->vcn.cur_state = state;
1813 
1814 	return ret;
1815 }
1816 
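/*
 * Nothing is programmed for VCN interrupt state changes here; the sources
 * stay enabled, so this handler is intentionally a no-op.
 */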
1817 static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
1818 					struct amdgpu_irq_src *source,
1819 					unsigned int type,
1820 					enum amdgpu_interrupt_state state)
1821 {
1822 	return 0;
1823 }
1824 
1825 static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
1826 				      struct amdgpu_irq_src *source,
1827 				      struct amdgpu_iv_entry *entry)
1828 {
1829 	uint32_t ip_instance;
1830 
1831 	switch (entry->client_id) {
1832 	case SOC15_IH_CLIENTID_VCN:
1833 		ip_instance = 0;
1834 		break;
1835 	case SOC15_IH_CLIENTID_VCN1:
1836 		ip_instance = 1;
1837 		break;
1838 	default:
1839 		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
1840 		return 0;
1841 	}
1842 
1843 	DRM_DEBUG("IH: VCN TRAP\n");
1844 
1845 	switch (entry->src_id) {
1846 	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
1847 		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
1848 		break;
1849 	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
1850 		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
1851 		break;
1852 	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
1853 		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
1854 		break;
1855 	default:
1856 		DRM_ERROR("Unhandled interrupt: %d %d\n",
1857 			  entry->src_id, entry->src_data[0]);
1858 		break;
1859 	}
1860 
1861 	return 0;
1862 }
1863 
1864 static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
1865 	.set = vcn_v2_5_set_interrupt_state,
1866 	.process = vcn_v2_5_process_interrupt,
1867 };
1868 
1869 static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
1870 {
1871 	int i;
1872 
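	/* One interrupt type per encode ring, plus one for the decode ring. */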
1873 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1874 		if (adev->vcn.harvest_config & (1 << i))
1875 			continue;
1876 		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
1877 		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
1878 	}
1879 }
1880 
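/*
 * The v2.5 and v2.6 IP function tables are identical apart from the name;
 * Aldebaran reuses every VCN 2.5 callback.
 */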
1881 static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
1882 	.name = "vcn_v2_5",
1883 	.early_init = vcn_v2_5_early_init,
1884 	.late_init = NULL,
1885 	.sw_init = vcn_v2_5_sw_init,
1886 	.sw_fini = vcn_v2_5_sw_fini,
1887 	.hw_init = vcn_v2_5_hw_init,
1888 	.hw_fini = vcn_v2_5_hw_fini,
1889 	.suspend = vcn_v2_5_suspend,
1890 	.resume = vcn_v2_5_resume,
1891 	.is_idle = vcn_v2_5_is_idle,
1892 	.wait_for_idle = vcn_v2_5_wait_for_idle,
1893 	.check_soft_reset = NULL,
1894 	.pre_soft_reset = NULL,
1895 	.soft_reset = NULL,
1896 	.post_soft_reset = NULL,
1897 	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
1898 	.set_powergating_state = vcn_v2_5_set_powergating_state,
1899 };
1900 
1901 static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
1902         .name = "vcn_v2_6",
1903         .early_init = vcn_v2_5_early_init,
1904         .late_init = NULL,
1905         .sw_init = vcn_v2_5_sw_init,
1906         .sw_fini = vcn_v2_5_sw_fini,
1907         .hw_init = vcn_v2_5_hw_init,
1908         .hw_fini = vcn_v2_5_hw_fini,
1909         .suspend = vcn_v2_5_suspend,
1910         .resume = vcn_v2_5_resume,
1911         .is_idle = vcn_v2_5_is_idle,
1912         .wait_for_idle = vcn_v2_5_wait_for_idle,
1913         .check_soft_reset = NULL,
1914         .pre_soft_reset = NULL,
1915         .soft_reset = NULL,
1916         .post_soft_reset = NULL,
1917         .set_clockgating_state = vcn_v2_5_set_clockgating_state,
1918         .set_powergating_state = vcn_v2_5_set_powergating_state,
1919 };
1920 
1921 const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
1922 {
1923 	.type = AMD_IP_BLOCK_TYPE_VCN,
1924 	.major = 2,
1925 	.minor = 5,
1926 	.rev = 0,
1927 	.funcs = &vcn_v2_5_ip_funcs,
1928 };
1929 
1930 const struct amdgpu_ip_block_version vcn_v2_6_ip_block =
1931 {
1932 	.type = AMD_IP_BLOCK_TYPE_VCN,
1933 	.major = 2,
1934 	.minor = 6,
1935 	.rev = 0,
1936 	.funcs = &vcn_v2_6_ip_funcs,
1937 };
1938 
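/*
 * RAS poison query: read the per-instance VCPU/VCODEC RAS status register
 * and report whether its POISONED_PF field is set.
 */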
1939 static uint32_t vcn_v2_6_query_poison_by_instance(struct amdgpu_device *adev,
1940 			uint32_t instance, uint32_t sub_block)
1941 {
1942 	uint32_t poison_stat = 0, reg_value = 0;
1943 
1944 	switch (sub_block) {
1945 	case AMDGPU_VCN_V2_6_VCPU_VCODEC:
1946 		reg_value = RREG32_SOC15(VCN, instance, mmUVD_RAS_VCPU_VCODEC_STATUS);
1947 		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
1948 		break;
1949 	default:
1950 		break;
1951 	}
1952 
1953 	if (poison_stat)
1954 		dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
1955 			instance, sub_block);
1956 
1957 	return poison_stat;
1958 }
1959 
1960 static bool vcn_v2_6_query_poison_status(struct amdgpu_device *adev)
1961 {
1962 	uint32_t inst, sub;
1963 	uint32_t poison_stat = 0;
1964 
1965 	for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
1966 		for (sub = 0; sub < AMDGPU_VCN_V2_6_MAX_SUB_BLOCK; sub++)
1967 			poison_stat +=
1968 			vcn_v2_6_query_poison_by_instance(adev, inst, sub);
1969 
1970 	return !!poison_stat;
1971 }
1972 
1973 const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = {
1974 	.query_poison_status = vcn_v2_6_query_poison_status,
1975 };
1976 
1977 static struct amdgpu_vcn_ras vcn_v2_6_ras = {
1978 	.ras_block = {
1979 		.hw_ops = &vcn_v2_6_ras_hw_ops,
1980 	},
1981 };
1982 
1983 static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev)
1984 {
1985 	switch (adev->ip_versions[VCN_HWIP][0]) {
1986 	case IP_VERSION(2, 6, 0):
1987 		adev->vcn.ras = &vcn_v2_6_ras;
1988 		break;
1989 	default:
1990 		break;
1991 	}
1992 
1993 	if (adev->vcn.ras) {
1994 		amdgpu_ras_register_ras_block(adev, &adev->vcn.ras->ras_block);
1995 
1996 		strcpy(adev->vcn.ras->ras_block.ras_comm.name, "vcn");
1997 		adev->vcn.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
1998 		adev->vcn.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
1999 		adev->vcn.ras_if = &adev->vcn.ras->ras_block.ras_comm;
2000 
2001 		/* If no IP-specific ras_late_init was provided, fall back to the default ras_late_init */
2002 		if (!adev->vcn.ras->ras_block.ras_late_init)
2003 			adev->vcn.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
2004 	}
2005 }
2006