xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/nv.c (revision 19b438592238b3b40c3f945bb5f9c4ca971c0c45)
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 #include <linux/slab.h>
25 #include <linux/module.h>
26 #include <linux/pci.h>
27 
28 #include <drm/amdgpu_drm.h>
29 
30 #include "amdgpu.h"
31 #include "amdgpu_atombios.h"
32 #include "amdgpu_ih.h"
33 #include "amdgpu_uvd.h"
34 #include "amdgpu_vce.h"
35 #include "amdgpu_ucode.h"
36 #include "amdgpu_psp.h"
37 #include "atom.h"
38 #include "amd_pcie.h"
39 
40 #include "gc/gc_10_1_0_offset.h"
41 #include "gc/gc_10_1_0_sh_mask.h"
42 #include "mp/mp_11_0_offset.h"
43 
44 #include "soc15.h"
45 #include "soc15_common.h"
46 #include "gmc_v10_0.h"
47 #include "gfxhub_v2_0.h"
48 #include "mmhub_v2_0.h"
49 #include "nbio_v2_3.h"
50 #include "nbio_v7_2.h"
51 #include "hdp_v5_0.h"
52 #include "nv.h"
53 #include "navi10_ih.h"
54 #include "gfx_v10_0.h"
55 #include "sdma_v5_0.h"
56 #include "sdma_v5_2.h"
57 #include "vcn_v2_0.h"
58 #include "jpeg_v2_0.h"
59 #include "vcn_v3_0.h"
60 #include "jpeg_v3_0.h"
61 #include "dce_virtual.h"
62 #include "mes_v10_1.h"
63 #include "mxgpu_nv.h"
64 #include "smuio_v11_0.h"
65 #include "smuio_v11_0_6.h"
66 
67 static const struct amd_ip_funcs nv_common_ip_funcs;
68 
/* Navi: encode capability table returned by nv_query_video_codecs()
 * (H.264/AVC and HEVC, both capped at 4096x2304).
 */
static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
};

/* Wrapper pairing the encode table with its element count. */
static const struct amdgpu_video_codecs nv_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(nv_video_codecs_encode_array),
	.codec_array = nv_video_codecs_encode_array,
};
93 
/* Navi1x: decode capability table returned by nv_query_video_codecs().
 * HEVC and VP9 support up to 8192x4352; the legacy codecs top out at
 * 4096x4096.
 */
static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 4,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 8192 * 4352,
		.max_level = 186,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 8192 * 4352,
		.max_level = 0,
	},
};

/* Wrapper pairing the Navi1x decode table with its element count. */
static const struct amdgpu_video_codecs nv_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(nv_video_codecs_decode_array),
	.codec_array = nv_video_codecs_decode_array,
};
153 
/* Sienna Cichlid (also reused for the other VCN3-based chips in
 * nv_query_video_codecs()): decode table. Identical to the Navi1x
 * table except it additionally advertises AV1 decode.
 */
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 4,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 8192 * 4352,
		.max_level = 186,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 8192 * 4352,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 8192 * 4352,
		.max_level = 0,
	},
};

/* Wrapper pairing the Sienna Cichlid decode table with its count. */
static const struct amdgpu_video_codecs sc_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(sc_video_codecs_decode_array),
	.codec_array = sc_video_codecs_decode_array,
};
220 
/* SRIOV Sienna Cichlid, not const since data is controlled by host.
 * These start out as copies of the bare-metal tables; the host may
 * rewrite the entries to restrict what the VF advertises.
 */
static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
};

/* SRIOV decode table; same contents as sc_video_codecs_decode_array
 * but deliberately non-const (see comment above).
 */
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 4,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 8192 * 4352,
		.max_level = 186,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 8192 * 4352,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 8192 * 4352,
		.max_level = 0,
	},
};

/* SRIOV encode wrapper; non-const for the same reason as the arrays. */
static struct amdgpu_video_codecs sriov_sc_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
	.codec_array = sriov_sc_video_codecs_encode_array,
};

/* SRIOV decode wrapper; non-const for the same reason as the arrays. */
static struct amdgpu_video_codecs sriov_sc_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array),
	.codec_array = sriov_sc_video_codecs_decode_array,
};
311 
312 static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
313 				 const struct amdgpu_video_codecs **codecs)
314 {
315 	switch (adev->asic_type) {
316 	case CHIP_SIENNA_CICHLID:
317 		if (amdgpu_sriov_vf(adev)) {
318 			if (encode)
319 				*codecs = &sriov_sc_video_codecs_encode;
320 			else
321 				*codecs = &sriov_sc_video_codecs_decode;
322 		} else {
323 			if (encode)
324 				*codecs = &nv_video_codecs_encode;
325 			else
326 				*codecs = &sc_video_codecs_decode;
327 		}
328 		return 0;
329 	case CHIP_NAVY_FLOUNDER:
330 	case CHIP_DIMGREY_CAVEFISH:
331 	case CHIP_VANGOGH:
332 	case CHIP_YELLOW_CARP:
333 		if (encode)
334 			*codecs = &nv_video_codecs_encode;
335 		else
336 			*codecs = &sc_video_codecs_decode;
337 		return 0;
338 	case CHIP_NAVI10:
339 	case CHIP_NAVI14:
340 	case CHIP_NAVI12:
341 		if (encode)
342 			*codecs = &nv_video_codecs_encode;
343 		else
344 			*codecs = &nv_video_codecs_decode;
345 		return 0;
346 	default:
347 		return -EINVAL;
348 	}
349 }
350 
351 /*
352  * Indirect registers accessor
353  */
354 static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
355 {
356 	unsigned long address, data;
357 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
358 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
359 
360 	return amdgpu_device_indirect_rreg(adev, address, data, reg);
361 }
362 
363 static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
364 {
365 	unsigned long address, data;
366 
367 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
368 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
369 
370 	amdgpu_device_indirect_wreg(adev, address, data, reg, v);
371 }
372 
373 static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
374 {
375 	unsigned long address, data;
376 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
377 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
378 
379 	return amdgpu_device_indirect_rreg64(adev, address, data, reg);
380 }
381 
/* Read a PCIE port register through the port index/data pair under
 * pcie_idx_lock. The write of reg * 4 presumably converts a dword
 * index into the byte offset the index register expects (the write
 * path below does the same) -- confirm against the NBIO spec. The
 * read-back of the index register ensures the index write has been
 * posted before the data register is read.
 */
static u32 nv_pcie_port_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;
	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}
396 
397 static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
398 {
399 	unsigned long address, data;
400 
401 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
402 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
403 
404 	amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
405 }
406 
/* Write a PCIE port register through the port index/data pair under
 * pcie_idx_lock. Both the index and the data writes are followed by a
 * read-back so each write is posted before the sequence continues.
 */
static void nv_pcie_port_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
421 
/* Read a DIDT register indirectly through the GC DIDT_IND_INDEX /
 * DIDT_IND_DATA pair, serialized by didt_idx_lock.
 */
static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}
436 
/* Write a DIDT register indirectly through the GC DIDT_IND_INDEX /
 * DIDT_IND_DATA pair, serialized by didt_idx_lock.
 */
static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
449 
/* Return the memory size as reported by the NBIO block. */
static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

/* Return the reference clock: the SPLL reference frequency cached in
 * the device's clock info.
 */
static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}
459 
460 
461 void nv_grbm_select(struct amdgpu_device *adev,
462 		     u32 me, u32 pipe, u32 queue, u32 vmid)
463 {
464 	u32 grbm_gfx_cntl = 0;
465 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
466 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
467 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
468 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
469 
470 	WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
471 }
472 
/* Stub: VGA state switching is not implemented for this family. */
static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

/* Stub: reading the VBIOS from a disabled adapter is not implemented;
 * always reports failure so callers fall back to other methods.
 */
static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
483 
/* Copy @length_bytes of VBIOS from the ROM, one dword at a time,
 * through the SMUIO rom index/data registers. Returns false for a
 * NULL/empty buffer or on APUs (whose vbios lives in the sbios image).
 *
 * NOTE(review): length is rounded UP to a dword count, so the loop may
 * store up to 3 bytes past @length_bytes -- callers appear expected to
 * provide a dword-aligned buffer; confirm at the call sites.
 */
static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;
	u32 rom_index_offset, rom_data_offset;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	rom_index_offset =
		adev->smuio.funcs->get_rom_index_offset(adev);
	rom_data_offset =
		adev->smuio.funcs->get_rom_data_offset(adev);

	/* set rom index to 0 */
	WREG32(rom_index_offset, 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(rom_data_offset);

	return true;
}
515 
/* Whitelist of registers userspace may read through nv_read_register();
 * anything not listed here is rejected with -EINVAL.
 */
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};
537 
538 static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
539 					 u32 sh_num, u32 reg_offset)
540 {
541 	uint32_t val;
542 
543 	mutex_lock(&adev->grbm_idx_mutex);
544 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
545 		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
546 
547 	val = RREG32(reg_offset);
548 
549 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
550 		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
551 	mutex_unlock(&adev->grbm_idx_mutex);
552 	return val;
553 }
554 
555 static uint32_t nv_get_register_value(struct amdgpu_device *adev,
556 				      bool indexed, u32 se_num,
557 				      u32 sh_num, u32 reg_offset)
558 {
559 	if (indexed) {
560 		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
561 	} else {
562 		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
563 			return adev->gfx.config.gb_addr_config;
564 		return RREG32(reg_offset);
565 	}
566 }
567 
568 static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
569 			    u32 sh_num, u32 reg_offset, u32 *value)
570 {
571 	uint32_t i;
572 	struct soc15_allowed_register_entry  *en;
573 
574 	*value = 0;
575 	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
576 		en = &nv_allowed_read_registers[i];
577 		if ((i == 7 && (adev->sdma.num_instances == 1)) || /* some asics don't have SDMA1 */
578 		    reg_offset !=
579 		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
580 			continue;
581 
582 		*value = nv_get_register_value(adev,
583 					       nv_allowed_read_registers[i].grbm_indexed,
584 					       se_num, sh_num, reg_offset);
585 		return 0;
586 	}
587 	return -EINVAL;
588 }
589 
/* Perform a mode2 (SMU-driven) reset. PCI config state is cached
 * before the reset and restored afterwards; the function then polls
 * the NBIO memsize register until it stops reading back all-ones,
 * which indicates the ASIC has come out of reset. Returns the result
 * of amdgpu_dpm_mode2_reset().
 */
static int nv_asic_mode2_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	/* flag the engine as hung in the VBIOS scratch registers for the
	 * duration of the reset */
	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	ret = amdgpu_dpm_mode2_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode2 reset failed\n");

	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		/* memsize reads back 0xffffffff while still in reset */
		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
621 
/* Choose the reset method for this ASIC. A method explicitly requested
 * via the amdgpu_reset_method module parameter is honored when it is
 * one of the supported kinds; any other non-default request is warned
 * about and AUTO selection is used instead.
 */
static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
	    amdgpu_reset_method == AMD_RESET_METHOD_PCI)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
				  amdgpu_reset_method);

	/* Per-ASIC defaults. */
	switch (adev->asic_type) {
	case CHIP_VANGOGH:
	case CHIP_YELLOW_CARP:
		return AMD_RESET_METHOD_MODE2;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
		return AMD_RESET_METHOD_MODE1;
	default:
		/* BACO when the DPM code reports support, else mode1. */
		if (amdgpu_dpm_is_baco_supported(adev))
			return AMD_RESET_METHOD_BACO;
		else
			return AMD_RESET_METHOD_MODE1;
	}
}
651 
/* Reset the ASIC using the method chosen by nv_asic_reset_method().
 * Returns 0 on success or a negative error code from the underlying
 * reset implementation.
 */
static int nv_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;

	switch (nv_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_PCI:
		dev_info(adev->dev, "PCI reset\n");
		ret = amdgpu_device_pci_reset(adev);
		break;
	case AMD_RESET_METHOD_BACO:
		dev_info(adev->dev, "BACO reset\n");
		ret = amdgpu_dpm_baco_reset(adev);
		break;
	case AMD_RESET_METHOD_MODE2:
		dev_info(adev->dev, "MODE2 reset\n");
		ret = nv_asic_mode2_reset(adev);
		break;
	default:
		/* AMD_RESET_METHOD_MODE1 (and any future fallthrough) */
		dev_info(adev->dev, "MODE1 reset\n");
		ret = amdgpu_device_mode1_reset(adev);
		break;
	}

	return ret;
}
677 
/* Stub: UVD clock programming is not implemented; reports success. */
static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

/* Stub: VCE clock programming is not implemented; reports success. */
static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}
689 
690 static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
691 {
692 	if (pci_is_root_bus(adev->pdev->bus))
693 		return;
694 
695 	if (amdgpu_pcie_gen2 == 0)
696 		return;
697 
698 	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
699 					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
700 		return;
701 
702 	/* todo */
703 }
704 
705 static void nv_program_aspm(struct amdgpu_device *adev)
706 {
707 	if (!amdgpu_aspm)
708 		return;
709 
710 	if (!(adev->flags & AMD_IS_APU) &&
711 	    (adev->nbio.funcs->program_aspm))
712 		adev->nbio.funcs->program_aspm(adev);
713 
714 }
715 
/* Enable or disable both the doorbell aperture and the doorbell
 * self-ring aperture through the NBIO callbacks.
 */
static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
722 
/* Common IP block descriptor (v1.0) registered first for every ASIC
 * handled by nv_set_ip_blocks().
 */
static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};
731 
732 static bool nv_is_headless_sku(struct pci_dev *pdev)
733 {
734 	if ((pdev->device == 0x731E &&
735 	    (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
736 	    (pdev->device == 0x7340 && pdev->revision == 0xC9)  ||
737 	    (pdev->device == 0x7360 && pdev->revision == 0xC7))
738 		return true;
739 	return false;
740 }
741 
742 static int nv_reg_base_init(struct amdgpu_device *adev)
743 {
744 	int r;
745 
746 	if (amdgpu_discovery) {
747 		r = amdgpu_discovery_reg_base_init(adev);
748 		if (r) {
749 			DRM_WARN("failed to init reg base from ip discovery table, "
750 					"fallback to legacy init method\n");
751 			goto legacy_init;
752 		}
753 
754 		amdgpu_discovery_harvest_ip(adev);
755 		if (nv_is_headless_sku(adev->pdev)) {
756 			adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
757 			adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
758 		}
759 
760 		return 0;
761 	}
762 
763 legacy_init:
764 	switch (adev->asic_type) {
765 	case CHIP_NAVI10:
766 		navi10_reg_base_init(adev);
767 		break;
768 	case CHIP_NAVI14:
769 		navi14_reg_base_init(adev);
770 		break;
771 	case CHIP_NAVI12:
772 		navi12_reg_base_init(adev);
773 		break;
774 	case CHIP_SIENNA_CICHLID:
775 	case CHIP_NAVY_FLOUNDER:
776 		sienna_cichlid_reg_base_init(adev);
777 		break;
778 	case CHIP_VANGOGH:
779 		vangogh_reg_base_init(adev);
780 		break;
781 	case CHIP_DIMGREY_CAVEFISH:
782 		dimgrey_cavefish_reg_base_init(adev);
783 		break;
784 	case CHIP_BEIGE_GOBY:
785 		beige_goby_reg_base_init(adev);
786 		break;
787 	case CHIP_YELLOW_CARP:
788 		yellow_carp_reg_base_init(adev);
789 		break;
790 	default:
791 		return -EINVAL;
792 	}
793 
794 	return 0;
795 }
796 
/* Install the NV SRIOV virtualization callbacks. */
void nv_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_nv_virt_ops;
}
801 
802 int nv_set_ip_blocks(struct amdgpu_device *adev)
803 {
804 	int r;
805 
806 	if (adev->flags & AMD_IS_APU) {
807 		adev->nbio.funcs = &nbio_v7_2_funcs;
808 		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
809 	} else {
810 		adev->nbio.funcs = &nbio_v2_3_funcs;
811 		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
812 	}
813 	adev->hdp.funcs = &hdp_v5_0_funcs;
814 
815 	if (adev->asic_type >= CHIP_SIENNA_CICHLID)
816 		adev->smuio.funcs = &smuio_v11_0_6_funcs;
817 	else
818 		adev->smuio.funcs = &smuio_v11_0_funcs;
819 
820 	if (adev->asic_type == CHIP_SIENNA_CICHLID)
821 		adev->gmc.xgmi.supported = true;
822 
823 	/* Set IP register base before any HW register access */
824 	r = nv_reg_base_init(adev);
825 	if (r)
826 		return r;
827 
828 	switch (adev->asic_type) {
829 	case CHIP_NAVI10:
830 	case CHIP_NAVI14:
831 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
832 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
833 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
834 		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
835 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
836 		    !amdgpu_sriov_vf(adev))
837 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
838 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
839 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
840 #if defined(CONFIG_DRM_AMD_DC)
841 		else if (amdgpu_device_has_dc_support(adev))
842 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
843 #endif
844 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
845 		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
846 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
847 		    !amdgpu_sriov_vf(adev))
848 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
849 		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
850 		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
851 		if (adev->enable_mes)
852 			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
853 		break;
854 	case CHIP_NAVI12:
855 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
856 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
857 		if (!amdgpu_sriov_vf(adev)) {
858 			amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
859 			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
860 		} else {
861 			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
862 			amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
863 		}
864 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
865 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
866 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
867 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
868 #if defined(CONFIG_DRM_AMD_DC)
869 		else if (amdgpu_device_has_dc_support(adev))
870 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
871 #endif
872 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
873 		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
874 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
875 		    !amdgpu_sriov_vf(adev))
876 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
877 		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
878 		if (!amdgpu_sriov_vf(adev))
879 			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
880 		break;
881 	case CHIP_SIENNA_CICHLID:
882 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
883 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
884 		if (!amdgpu_sriov_vf(adev)) {
885 			amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
886 			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
887 				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
888 		} else {
889 			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
890 				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
891 			amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
892 		}
893 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
894 		    is_support_sw_smu(adev))
895 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
896 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
897 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
898 #if defined(CONFIG_DRM_AMD_DC)
899 		else if (amdgpu_device_has_dc_support(adev))
900 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
901 #endif
902 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
903 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
904 		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
905 		if (!amdgpu_sriov_vf(adev))
906 			amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
907 		if (adev->enable_mes)
908 			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
909 		break;
910 	case CHIP_NAVY_FLOUNDER:
911 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
912 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
913 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
914 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
915 			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
916 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
917 		    is_support_sw_smu(adev))
918 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
919 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
920 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
921 #if defined(CONFIG_DRM_AMD_DC)
922 		else if (amdgpu_device_has_dc_support(adev))
923 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
924 #endif
925 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
926 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
927 		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
928 		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
929 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
930 		    is_support_sw_smu(adev))
931 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
932 		break;
933 	case CHIP_VANGOGH:
934 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
935 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
936 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
937 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
938 			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
939 		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
940 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
941 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
942 #if defined(CONFIG_DRM_AMD_DC)
943 		else if (amdgpu_device_has_dc_support(adev))
944 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
945 #endif
946 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
947 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
948 		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
949 		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
950 		break;
951 	case CHIP_DIMGREY_CAVEFISH:
952 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
953 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
954 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
955 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
956 			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
957 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
958 		    is_support_sw_smu(adev))
959 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
960 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
961 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
962 #if defined(CONFIG_DRM_AMD_DC)
963                 else if (amdgpu_device_has_dc_support(adev))
964                         amdgpu_device_ip_block_add(adev, &dm_ip_block);
965 #endif
966 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
967 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
968 		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
969 		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
970 		break;
971 	case CHIP_BEIGE_GOBY:
972 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
973 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
974 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
975 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
976 			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
977 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
978 		    is_support_sw_smu(adev))
979 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
980 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
981 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
982 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
983 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
984 #if defined(CONFIG_DRM_AMD_DC)
985 		else if (amdgpu_device_has_dc_support(adev))
986 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
987 #endif
988 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
989 		    is_support_sw_smu(adev))
990 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
991 		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
992 		break;
993 	case CHIP_YELLOW_CARP:
994 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
995 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
996 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
997 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
998 			amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
999 		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
1000 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
1001 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1002 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
1003 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
1004 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
1005 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1006 #if defined(CONFIG_DRM_AMD_DC)
1007 		else if (amdgpu_device_has_dc_support(adev))
1008 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
1009 #endif
1010 		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
1011 		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
1012 		break;
1013 	default:
1014 		return -EINVAL;
1015 	}
1016 
1017 	return 0;
1018 }
1019 
1020 static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
1021 {
1022 	return adev->nbio.funcs->get_rev_id(adev);
1023 }
1024 
static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	/* NV parts always take the full-reset path. */
	return true;
}
1029 
1030 static bool nv_need_reset_on_init(struct amdgpu_device *adev)
1031 {
1032 	u32 sol_reg;
1033 
1034 	if (adev->flags & AMD_IS_APU)
1035 		return false;
1036 
1037 	/* Check sOS sign of life register to confirm sys driver and sOS
1038 	 * are already been loaded.
1039 	 */
1040 	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
1041 	if (sol_reg)
1042 		return true;
1043 
1044 	return false;
1045 }
1046 
static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{
	/*
	 * TODO: dummy implementation backing the pcie_replay_count
	 * sysfs interface; always reports zero for now.
	 */
	return 0;
}
1056 
/* Populate the device doorbell index table with the NAVI10 doorbell layout. */
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	/* KIQ and compute (MEC) ring doorbells */
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	/* user queue doorbell range */
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	/* graphics rings and the MES scheduler ring */
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.mes_ring = AMDGPU_NAVI10_DOORBELL_MES_RING;
	/* SDMA engines */
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.sdma_engine[2] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE2;
	adev->doorbell_index.sdma_engine[3] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE3;
	/* interrupt handler and VCN ring pairs */
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	/* boundaries of the non-command-processor doorbell range */
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	/* doorbell slots reserved per SDMA engine — assumed; confirm against SDMA setup */
	adev->doorbell_index.sdma_doorbell_range = 20;
}
1088 
static void nv_pre_asic_init(struct amdgpu_device *adev)
{
	/* No pre-ASIC-init work is required on NV. */
}
1092 
1093 static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
1094 				       bool enter)
1095 {
1096 	if (enter)
1097 		amdgpu_gfx_rlc_enter_safe_mode(adev);
1098 	else
1099 		amdgpu_gfx_rlc_exit_safe_mode(adev);
1100 
1101 	if (adev->gfx.funcs->update_perfmon_mgcg)
1102 		adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);
1103 
1104 	if (!(adev->flags & AMD_IS_APU) &&
1105 	    (adev->nbio.funcs->enable_aspm))
1106 		adev->nbio.funcs->enable_aspm(adev, !enter);
1107 
1108 	return 0;
1109 }
1110 
/* ASIC-level callbacks shared by all NV (gfx10-family) parts. */
static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &amdgpu_dpm_is_baco_supported,
	.pre_asic_init = &nv_pre_asic_init,
	.update_umd_stable_pstate = &nv_update_umd_stable_pstate,
	.query_video_codecs = &nv_query_video_codecs,
};
1132 
/*
 * nv_common_early_init - install register accessors and asic callbacks,
 * then set the per-chip clockgating (cg_flags) / powergating (pg_flags)
 * feature masks and the external revision id.
 *
 * Returns 0 on success, -EINVAL for an unsupported asic_type.
 */
static int nv_common_early_init(void *handle)
{
/* HDP remap window sits at the top of the first 512KB of MMIO space */
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	/* no SMC register access on NV */
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;
	adev->pcie_rreg64 = &nv_pcie_rreg64;
	adev->pcie_wreg64 = &nv_pcie_wreg64;
	adev->pciep_rreg = &nv_pcie_port_rreg;
	adev->pciep_wreg = &nv_pcie_port_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	/* per-chip CG/PG feature masks and external revision offsets */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		/* guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0,
		 * as a consequence, the rev_id and external_rev_id are wrong.
		 * workaround it by hardcoding rev_id to 0 (default value).
		 */
		if (amdgpu_sriov_vf(adev))
			adev->rev_id = 0;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	case CHIP_SIENNA_CICHLID:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		if (amdgpu_sriov_vf(adev)) {
			/* hypervisor control CG and PG enablement */
			adev->cg_flags = 0;
			adev->pg_flags = 0;
		}
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_NAVY_FLOUNDER:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;

	case CHIP_VANGOGH:
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		if (adev->apu_flags & AMD_APU_IS_VANGOGH)
			adev->external_rev_id = adev->rev_id + 0x01;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_BEIGE_GOBY:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x46;
		break;
	case CHIP_YELLOW_CARP:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		adev->external_rev_id = adev->rev_id + 0x01;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	/* a harvested VCN also takes its powergating flags away */
	if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
		adev->pg_flags &= ~(AMD_PG_SUPPORT_VCN |
				    AMD_PG_SUPPORT_VCN_DPG |
				    AMD_PG_SUPPORT_JPEG);

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
1380 
1381 static int nv_common_late_init(void *handle)
1382 {
1383 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1384 
1385 	if (amdgpu_sriov_vf(adev)) {
1386 		xgpu_nv_mailbox_get_irq(adev);
1387 		amdgpu_virt_update_sriov_video_codec(adev,
1388 				sriov_sc_video_codecs_encode_array, ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
1389 				sriov_sc_video_codecs_decode_array, ARRAY_SIZE(sriov_sc_video_codecs_decode_array));
1390 	}
1391 
1392 	return 0;
1393 }
1394 
static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Bare metal has no software state to set up here. */
	if (!amdgpu_sriov_vf(adev))
		return 0;

	/* SR-IOV guests register the host mailbox interrupt source. */
	xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}
1404 
static int nv_common_sw_fini(void *handle)
{
	/* The common block keeps no software state to tear down. */
	return 0;
}
1409 
/*
 * Bring up the shared hardware: PCIe link settings, NBIO registers,
 * the HDP register remap and the doorbell aperture. The sequence below
 * is order-dependent.
 */
static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * so that they can be exposed to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}
1431 
/* Tear down the common hardware state set up by nv_common_hw_init(). */
static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}
1441 
static int nv_common_suspend(void *handle)
{
	/* Suspending the common block is the same as a hw_fini. */
	return nv_common_hw_fini(handle);
}
1448 
static int nv_common_resume(void *handle)
{
	/* Resuming the common block is the same as a hw_init. */
	return nv_common_hw_init(handle);
}
1455 
static bool nv_common_is_idle(void *handle)
{
	/* The common block tracks no busy state; report idle. */
	return true;
}
1460 
static int nv_common_wait_for_idle(void *handle)
{
	/* Always idle, so there is nothing to wait for. */
	return 0;
}
1465 
static int nv_common_soft_reset(void *handle)
{
	/* No soft-reset support in the common block; report success. */
	return 0;
}
1470 
1471 static int nv_common_set_clockgating_state(void *handle,
1472 					   enum amd_clockgating_state state)
1473 {
1474 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1475 
1476 	if (amdgpu_sriov_vf(adev))
1477 		return 0;
1478 
1479 	switch (adev->asic_type) {
1480 	case CHIP_NAVI10:
1481 	case CHIP_NAVI14:
1482 	case CHIP_NAVI12:
1483 	case CHIP_SIENNA_CICHLID:
1484 	case CHIP_NAVY_FLOUNDER:
1485 	case CHIP_DIMGREY_CAVEFISH:
1486 	case CHIP_BEIGE_GOBY:
1487 		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1488 				state == AMD_CG_STATE_GATE);
1489 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1490 				state == AMD_CG_STATE_GATE);
1491 		adev->hdp.funcs->update_clock_gating(adev,
1492 				state == AMD_CG_STATE_GATE);
1493 		adev->smuio.funcs->update_rom_clock_gating(adev,
1494 				state == AMD_CG_STATE_GATE);
1495 		break;
1496 	default:
1497 		break;
1498 	}
1499 	return 0;
1500 }
1501 
1502 static int nv_common_set_powergating_state(void *handle,
1503 					   enum amd_powergating_state state)
1504 {
1505 	/* TODO */
1506 	return 0;
1507 }
1508 
1509 static void nv_common_get_clockgating_state(void *handle, u32 *flags)
1510 {
1511 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1512 
1513 	if (amdgpu_sriov_vf(adev))
1514 		*flags = 0;
1515 
1516 	adev->nbio.funcs->get_clockgating_state(adev, flags);
1517 
1518 	adev->hdp.funcs->get_clock_gating_state(adev, flags);
1519 
1520 	adev->smuio.funcs->get_clock_gating_state(adev, flags);
1521 
1522 	return;
1523 }
1524 
/* IP-block callback table for the NV "common" block. */
static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};
1542