xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/nv.c (revision 7e60e389)
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "smuio/smuio_11_0_0_offset.h"
#include "mp/mp_11_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v7_2.h"
#include "hdp_v5_0.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"

static const struct amd_ip_funcs nv_common_ip_funcs;

/*
 * Indirect register accessors
 */
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg(adev, address, data, reg);
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg(adev, address, data, reg, v);
}

static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg64(adev, address, data, reg);
}

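/*
 * The PCIE port accessors below program the index/data pair directly
 * under pcie_idx_lock instead of going through the
 * amdgpu_device_indirect_*() helpers.  The index register is programmed
 * with a byte offset (hence reg * 4), and the dummy read-back after each
 * write flushes the posted write before the next access.
 */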
static u32 nv_pcie_port_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
}

static void nv_pcie_port_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

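/*
 * DIDT registers sit behind the GC DIDT_IND_INDEX/DIDT_IND_DATA
 * indirect pair; accesses are serialized by didt_idx_lock.
 */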
static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, reg);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, reg);
	WREG32(data, v);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

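/*
 * Select which ME/pipe/queue/VMID the GRBM-banked GFX registers decode
 * to.  Callers are expected to serialize selections themselves,
 * typically by holding adev->srbm_mutex around the select/access window.
 */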
void nv_grbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

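/*
 * Read the discrete-GPU VBIOS image through the SMUIO ROM_INDEX/ROM_DATA
 * pair: the index is reset to 0 once and ROM_DATA is then read dword by
 * dword, the hardware advancing the index on each data read.  APUs embed
 * their VBIOS in the system BIOS image instead, so they return false here.
 */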
static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}

static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};

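/*
 * Read a GRBM-banked register for a specific SE/SH instance (or the
 * broadcast value when both se_num and sh_num are 0xffffffff),
 * restoring broadcast mode before returning.
 */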
static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

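/*
 * Whitelisted register read backing the userspace register query
 * (AMDGPU_INFO read-MMR); only offsets listed in
 * nv_allowed_read_registers[] are honored.
 */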
static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		/* entry 7 is mmSDMA1_STATUS_REG; some asics don't have SDMA1 */
		if ((i == 7 && (adev->sdma.num_instances == 1)) ||
		    reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

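/*
 * Full-chip (mode1) reset.  Bus mastering is disabled and the PCI config
 * space cached before the reset is triggered through either the SMU or
 * the PSP; afterwards the config space is restored and the memsize
 * register is polled until the ASIC responds again.
 */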
static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
		dev_info(adev->dev, "GPU smu mode1 reset\n");
		ret = amdgpu_dpm_mode1_reset(adev);
	} else {
		dev_info(adev->dev, "GPU psp mode1 reset\n");
		ret = psp_gpu_reset(adev);
	}

	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");
	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

static int nv_asic_mode2_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	ret = amdgpu_dpm_mode2_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode2 reset failed\n");

	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

static bool nv_asic_supports_baco(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	return smu_baco_is_support(smu);
}

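/*
 * Pick the reset mechanism: an explicit amdgpu_reset_method module
 * parameter wins; otherwise fall back to the per-ASIC default, using
 * BACO when the SMU reports support for it.
 */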
static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	switch (adev->asic_type) {
	case CHIP_VANGOGH:
		return AMD_RESET_METHOD_MODE2;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
		return AMD_RESET_METHOD_MODE1;
	default:
		if (smu_baco_is_support(smu))
			return AMD_RESET_METHOD_BACO;
		else
			return AMD_RESET_METHOD_MODE1;
	}
}

static int nv_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	/* skip reset on vangogh for now */
	if (adev->asic_type == CHIP_VANGOGH)
		return 0;

	switch (nv_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_BACO:
		dev_info(adev->dev, "BACO reset\n");

		ret = smu_baco_enter(smu);
		if (ret)
			return ret;
		ret = smu_baco_exit(smu);
		if (ret)
			return ret;
		break;
	case AMD_RESET_METHOD_MODE2:
		dev_info(adev->dev, "MODE2 reset\n");
		ret = nv_asic_mode2_reset(adev);
		break;
	default:
		dev_info(adev->dev, "MODE1 reset\n");
		ret = nv_asic_mode1_reset(adev);
		break;
	}

	return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};

static int nv_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_discovery) {
		r = amdgpu_discovery_reg_base_init(adev);
		if (r) {
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "fallback to legacy init method\n");
			goto legacy_init;
		}

		return 0;
	}

legacy_init:
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	case CHIP_NAVI14:
		navi14_reg_base_init(adev);
		break;
	case CHIP_NAVI12:
		navi12_reg_base_init(adev);
		break;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		sienna_cichlid_reg_base_init(adev);
		break;
	case CHIP_VANGOGH:
		vangogh_reg_base_init(adev);
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dimgrey_cavefish_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

void nv_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_nv_virt_ops;
}

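/*
 * These device/revision IDs appear to be the headless (no display, no
 * video decode) Navi10/Navi14 SKUs; nv_set_ip_blocks() skips the VCN
 * block for them below.
 */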
static bool nv_is_headless_sku(struct pci_dev *pdev)
{
	if ((pdev->device == 0x731E &&
	    (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
	    (pdev->device == 0x7340 && pdev->revision == 0xC9))
		return true;
	return false;
}

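/*
 * Register the per-ASIC IP blocks.  Order matters here: blocks are
 * brought up in the order they are added, so common/GMC/IH/PSP come
 * first and the multimedia blocks last.
 */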
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	if (adev->flags & AMD_IS_APU) {
		adev->nbio.funcs = &nbio_v7_2_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
	} else {
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
	}
	adev->hdp.funcs = &hdp_v5_0_funcs;

	if (adev->asic_type == CHIP_SIENNA_CICHLID)
		adev->gmc.xgmi.supported = true;

	/* Set IP register base before any HW register access */
	r = nv_reg_base_init(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (!nv_is_headless_sku(adev->pdev))
			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI12:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	case CHIP_SIENNA_CICHLID:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);

		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVY_FLOUNDER:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case CHIP_VANGOGH:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		break;
	case CHIP_DIMGREY_CAVEFISH:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign-of-life register to confirm that the sys
	 * driver and sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{
	/* TODO: dummy implementation for the pcie_replay_count sysfs
	 * interface
	 */
	return 0;
}

static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.mes_ring = AMDGPU_NAVI10_DOORBELL_MES_RING;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.sdma_engine[2] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE2;
	adev->doorbell_index.sdma_engine[3] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE3;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static void nv_pre_asic_init(struct amdgpu_device *adev)
{
}

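/*
 * Enter/leave the UMD "stable pstate" profiling mode: on entry, RLC safe
 * mode is entered and perfmon medium-grain clock gating is disabled so
 * that performance counters read back stable values; both are restored
 * on exit.
 */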
static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
				       bool enter)
{
	if (enter)
		amdgpu_gfx_rlc_enter_safe_mode(adev);
	else
		amdgpu_gfx_rlc_exit_safe_mode(adev);

	if (adev->gfx.funcs->update_perfmon_mgcg)
		adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);

	/*
	 * The ASPM function is not fully enabled and verified on
	 * Navi yet. Temporarily skip this until ASPM is enabled.
	 */
#if 0
	if (adev->nbio.funcs->enable_aspm)
		adev->nbio.funcs->enable_aspm(adev, !enter);
#endif

	return 0;
}

static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &nv_asic_supports_baco,
	.pre_asic_init = &nv_pre_asic_init,
	.update_umd_stable_pstate = &nv_update_umd_stable_pstate,
};

static int nv_common_early_init(void *handle)
{
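/*
 * Page-sized hole just below 0x80000 in the register BAR; the HDP flush
 * registers are remapped here (see remap_hdp_registers() called from
 * nv_common_hw_init()) so they can be exposed to user space.
 */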
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;
	adev->pcie_rreg64 = &nv_pcie_rreg64;
	adev->pcie_wreg64 = &nv_pcie_wreg64;
	adev->pciep_rreg = &nv_pcie_port_rreg;
	adev->pciep_wreg = &nv_pcie_port_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		/* A guest VM reads 0xffffffff from RCC_DEV0_EPF0_STRAP0,
		 * so the rev_id and external_rev_id would be wrong.
		 * Work around it by hardcoding rev_id to 0 (the default value).
		 */
		if (amdgpu_sriov_vf(adev))
			adev->rev_id = 0;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	case CHIP_SIENNA_CICHLID:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		if (amdgpu_sriov_vf(adev)) {
			/* the hypervisor controls CG and PG enablement */
			adev->cg_flags = 0;
			adev->pg_flags = 0;
		}
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_NAVY_FLOUNDER:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	case CHIP_VANGOGH:
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		if (adev->apu_flags & AMD_APU_IS_VANGOGH)
			adev->external_rev_id = adev->rev_id + 0x01;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int nv_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_get_irq(adev);

	return 0;
}

static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}

static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * in order to expose those registers to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}

static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}

static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}

static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	adev->hdp.funcs->get_clock_gating_state(adev, flags);
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};
1177