/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_4_0_3_offset.h"
#include "vcn/vcn_4_0_3_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"

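/*
 * The shared DPG register-access helpers still refer to the legacy mmUVD_*
 * names, so alias them to this IP version's reg* offsets.
 */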
#define mmUVD_DPG_LMA_CTL		regUVD_DPG_LMA_CTL
#define mmUVD_DPG_LMA_CTL_BASE_IDX	regUVD_DPG_LMA_CTL_BASE_IDX
#define mmUVD_DPG_LMA_DATA		regUVD_DPG_LMA_DATA
#define mmUVD_DPG_LMA_DATA_BASE_IDX	regUVD_DPG_LMA_DATA_BASE_IDX

#define VCN_VID_SOC_ADDRESS_2_0		0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0	0x48300

static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v4_0_3_set_powergating_state(void *handle,
		enum amd_powergating_state state);
static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev,
		int inst_idx, struct dpg_pause_state *new_state);
static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * vcn_v4_0_3_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v4_0_3_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* re-use enc ring as unified ring */
	adev->vcn.num_enc_rings = 1;

	vcn_v4_0_3_set_unified_ring_funcs(adev);
	vcn_v4_0_3_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v4_0_3_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v4_0_3_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, r;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;
	/* VCN UNIFIED TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_vcn4_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->use_doorbell = true;
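		/*
		 * doorbell_index.vcn values are in 64-bit units, hence the
		 * shift by one to get the 32-bit doorbell slot; instances are
		 * spaced apart so each one owns its own doorbell range.
		 */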
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * i;
		ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
		sprintf(ring->name, "vcn_unified_%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
				     AMDGPU_RING_PRIO_DEFAULT,
				     &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
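		/* advertise the single unified queue to firmware via shared memory */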
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
		fw_shared->sq.is_enabled = cpu_to_le32(true);

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;

	return 0;
}

/**
 * vcn_v4_0_3_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v4_0_3_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, r, idx;

	if (drm_dev_enter(&adev->ddev, &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_vcn4_fw_shared *fw_shared;

			if (adev->vcn.harvest_config & (1 << i))
				continue;
			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sq.is_enabled = cpu_to_le32(false);
		}
		drm_dev_exit(idx);
	}

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v4_0_3_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v4_0_3_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ring = &adev->vcn.inst[i].ring_enc[0];

		if (ring->use_doorbell) {
			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
					(adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * i,
					adev->vcn.inst[i].aid_id);

			WREG32_SOC15(VCN, ring->me, regVCN_RB1_DB_CTRL,
				ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
				VCN_RB1_DB_CTRL__EN_MASK);
		}

		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;
	}

done:
	if (!r)
		DRM_DEV_INFO(adev->dev, "VCN decode initialized successfully (under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v4_0_3_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v4_0_3_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (adev->vcn.cur_state != AMD_PG_STATE_GATE)
		vcn_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);

	return 0;
}

/**
 * vcn_v4_0_3_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v4_0_3_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = vcn_v4_0_3_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v4_0_3_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v4_0_3_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v4_0_3_hw_init(adev);

	return r;
}

/**
 * vcn_v4_0_3_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr));
		WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset));
	WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset));
	WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
}

/**
 * vcn_v4_0_3_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets with dpg mode
 */
static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
					inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
					inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v4_0_3_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 *
 * Disable clock gating for VCN block
 */
static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t data;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* VCN disable CGC */
	data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL);
	data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, inst_idx, regUVD_CGC_GATE, data);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_CGC_GATE, 0, 0xFFFFFFFF);

	data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v4_0_3_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
 *
 * @adev: amdgpu_device pointer
 * @sram_sel: sram select
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Disable clock gating for VCN block with dpg mode
 */
static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
				int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* enable sw clock gating control */
	reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v4_0_3_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 *
 * Enable clock gating for VCN block
 */
static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t data;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* enable VCN CGC */
	data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL);
	data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK);
	WREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v4_0_3_start_dpg_mode - VCN start with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared =
						adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp);

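	/*
	 * When programming indirectly, the register writes below are staged
	 * in the DPG scratch buffer and handed to the PSP in one shot later.
	 */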
	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
				(uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* disable clock gating */
	vcn_v4_0_3_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect);

	/* setup regUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_SET_MUXB0),
		 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v4_0_3_mc_resume_dpg_mode(adev, inst_idx, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable LMI MC and UMC channels */
	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

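	/* flush the staged indirect register writes to the DPG SRAM via PSP */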
	if (indirect)
		psp_update_vcn_sram(adev, 0, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
			(uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
				(uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));

	ring = &adev->vcn.inst[inst_idx].ring_enc[0];

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI,
		upper_32_bits(ring->gpu_addr));

	WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t));

	/* resetting ring, fw should not check RB ring */
	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0);
	ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	/* resetting done, fw can check RB ring */
	fw_shared->sq.queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

	return 0;
}

/**
 * vcn_v4_0_3_start - VCN start
 *
 * @adev: amdgpu_device pointer
 *
 * Start VCN block
 */
static int vcn_v4_0_3_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v4_0_3_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* set VCN status busy */
		tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);

		/* SW clock gating */
		vcn_v4_0_3_disable_clock_gating(adev, i);

		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

		/* setup regUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
		WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup regUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup UVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

		vcn_v4_0_3_mc_resume(adev, i);

		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, regUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
		WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU reset to boot */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		for (j = 0; j < 10; ++j) {
			uint32_t status;

			for (k = 0; k < 100; ++k) {
				status = RREG32_SOC15(VCN, i, regUVD_STATUS);
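				/* UVD_STATUS bit 1 is set once the VCPU firmware reports ready */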
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_DEV_ERROR(adev->dev,
				"VCN decode not responding, trying to reset the VCPU!!!\n");
			WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_DEV_ERROR(adev->dev, "VCN decode not responding, giving up!!!\n");
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		ring = &adev->vcn.inst[i].ring_enc[0];
		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI,
			upper_32_bits(ring->gpu_addr));

		WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t));

		/* resetting ring, fw should not check RB ring */
		tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
		tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
		WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
		WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);

		tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
		tmp |= VCN_RB_ENABLE__RB_EN_MASK;
		WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);

		ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
		fw_shared->sq.queue_mode &=
			cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF));

	}
	return 0;
}

/**
 * vcn_v4_0_3_stop_dpg_mode - VCN stop with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 *
 * Stop VCN block with dpg mode
 */
static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
	return 0;
}

/**
 * vcn_v4_0_3_stop - VCN stop
 *
 * @adev: amdgpu_device pointer
 *
 * Stop VCN block
 */
static int vcn_v4_0_3_stop(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			vcn_v4_0_3_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
		if (r)
			goto Done;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
		if (r)
			goto Done;

		/* stall UMC channel */
		tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
		if (r)
			goto Done;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* put VCPU into reset */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* reset LMI UMC/LMI/VCPU */
		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

		/* clear VCN status */
		WREG32_SOC15(VCN, i, regUVD_STATUS, 0);

		/* apply HW clock gating */
		vcn_v4_0_3_enable_clock_gating(adev, i);
	}
Done:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

/**
 * vcn_v4_0_3_pause_dpg_mode - VCN pause with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @new_state: pause state
 *
 * Pause dpg mode for VCN block
 */
static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
				struct dpg_pause_state *new_state)
{
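	/* currently a no-op; no DPG pause programming is done for this IP version */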

	return 0;
}

/**
 * vcn_v4_0_3_unified_ring_get_rptr - get unified read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified read pointer
 */
static uint64_t vcn_v4_0_3_unified_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR);
}

/**
 * vcn_v4_0_3_unified_ring_get_wptr - get unified write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified write pointer
 */
static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR);
}

/**
 * vcn_v4_0_3_unified_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v4_0_3_unified_ring_get_rptr,
	.get_wptr = vcn_v4_0_3_unified_ring_get_wptr,
	.set_wptr = vcn_v4_0_3_unified_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_unified_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v4_0_3_set_unified_ring_funcs - set unified ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set unified ring functions
 */
static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_3_unified_ring_vm_funcs;
		adev->vcn.inst[i].ring_enc[0].me = i;
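		/* derive which AID (die) this instance belongs to */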
		adev->vcn.inst[i].aid_id = i / adev->vcn.num_inst_per_aid;
	}
	DRM_DEV_INFO(adev->dev, "VCN decode is enabled in VM mode\n");
}

/**
 * vcn_v4_0_3_is_idle - check VCN block is idle
 *
 * @handle: amdgpu_device pointer
 *
 * Check whether VCN block is idle
 */
static bool vcn_v4_0_3_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

/**
 * vcn_v4_0_3_wait_for_idle - wait for VCN block idle
 *
 * @handle: amdgpu_device pointer
 *
 * Wait for VCN block idle
 */
static int vcn_v4_0_3_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE,
				UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * vcn_v4_0_3_set_clockgating_state - set VCN block clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state
 *
 * Set VCN block clockgating state
 */
static int vcn_v4_0_3_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (enable) {
			if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
				return -EBUSY;
			vcn_v4_0_3_enable_clock_gating(adev, i);
		} else {
			vcn_v4_0_3_disable_clock_gating(adev, i);
		}
	}
	return 0;
}

/**
 * vcn_v4_0_3_set_powergating_state - set VCN block powergating state
 *
 * @handle: amdgpu_device pointer
 * @state: power gating state
 *
 * Set VCN block powergating state
 */
static int vcn_v4_0_3_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v4_0_3_stop(adev);
	else
		ret = vcn_v4_0_3_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

/**
 * vcn_v4_0_3_set_interrupt_state - set VCN block interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @type: interrupt types
 * @state: interrupt states
 *
 * Set VCN block interrupt state
 */
static int vcn_v4_0_3_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned int type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

/**
 * vcn_v4_0_3_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @entry: interrupt entry from clients and sources
 *
 * Process VCN block interrupt
 */
static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t i;

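	/* translate the IH node id into the physical VCN instance index */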
	i = node_id_to_phys_map[entry->node_id];

	DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[i].ring_enc[0]);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v4_0_3_irq_funcs = {
	.set = vcn_v4_0_3_set_interrupt_state,
	.process = vcn_v4_0_3_process_interrupt,
};

/**
 * vcn_v4_0_3_set_irq_funcs - set VCN block interrupt irq functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set VCN block interrupt irq functions
 */
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

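		/* all instances share the first instance's irq source; count one type per instance */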
		adev->vcn.inst->irq.num_types++;
	}
	adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
}

static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
	.name = "vcn_v4_0_3",
	.early_init = vcn_v4_0_3_early_init,
	.late_init = NULL,
	.sw_init = vcn_v4_0_3_sw_init,
	.sw_fini = vcn_v4_0_3_sw_fini,
	.hw_init = vcn_v4_0_3_hw_init,
	.hw_fini = vcn_v4_0_3_hw_fini,
	.suspend = vcn_v4_0_3_suspend,
	.resume = vcn_v4_0_3_resume,
	.is_idle = vcn_v4_0_3_is_idle,
	.wait_for_idle = vcn_v4_0_3_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
	.set_powergating_state = vcn_v4_0_3_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 4,
	.minor = 0,
	.rev = 3,
	.funcs = &vcn_v4_0_3_ip_funcs,
};