/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "mp/mp_11_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v7_2.h"
#include "hdp_v5_0.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"

static const struct amd_ip_funcs nv_common_ip_funcs;

/*
 * Indirect register accessors
 */
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg(adev, address, data, reg);
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg(adev, address, data, reg, v);
}

static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg64(adev, address, data, reg);
}

static u32 nv_pcie_port_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
}

static void nv_pcie_port_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
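
/*
 * These PCIe accessors all follow the classic index/data pair pattern: the
 * register offset is written to the NBIO-provided index register and the
 * value is then read or written through the matching data register.  The
 * port accessors read the index register back after writing it so that the
 * index write is posted before the data register is touched.
 */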

static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

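/*
 * nv_grbm_select() programs GRBM_GFX_CNTL so that subsequent GRBM register
 * accesses are routed to the given micro engine (ME), pipe, queue and VMID.
 * Callers are expected to serialize the select/access/deselect sequence,
 * typically under adev->srbm_mutex, e.g.:
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	nv_grbm_select(adev, me, pipe, queue, 0);
 *	... per-queue register accesses ...
 *	nv_grbm_select(adev, 0, 0, 0, 0);
 *	mutex_unlock(&adev->srbm_mutex);
 */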
void nv_grbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

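/*
 * Read the VBIOS image out of the on-board ROM through the SMUIO ROM
 * index/data pair, one dword at a time.  Only discrete parts carry a
 * dedicated image; on APUs the VBIOS is embedded in the system BIOS, so
 * the read is refused there.
 */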
static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;
	u32 rom_index_offset, rom_data_offset;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* The APU VBIOS image is part of the system BIOS image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	rom_index_offset =
		adev->smuio.funcs->get_rom_index_offset(adev);
	rom_data_offset =
		adev->smuio.funcs->get_rom_data_offset(adev);

	/* set rom index to 0 */
	WREG32(rom_index_offset, 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(rom_data_offset);

	return true;
}

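/*
 * Allow-list of registers that userspace may read via the
 * AMDGPU_INFO_READ_MMR_REG query; nv_read_register() below rejects
 * anything that is not in this table.
 */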
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};

static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		if ((i == 7 && (adev->sdma.num_instances == 1)) || /* some asics don't have SDMA1 */
		    reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

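/*
 * Mode1 reset resets the whole ASIC.  Bus mastering is disabled and the
 * PCI config space cached before the reset is requested from the SMU (when
 * supported) or the PSP; afterwards the config space is restored and the
 * memory size register is polled until the ASIC is back.
 */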
static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
		dev_info(adev->dev, "GPU smu mode1 reset\n");
		ret = amdgpu_dpm_mode1_reset(adev);
	} else {
		dev_info(adev->dev, "GPU psp mode1 reset\n");
		ret = psp_gpu_reset(adev);
	}

	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");
	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

static int nv_asic_mode2_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	ret = amdgpu_dpm_mode2_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode2 reset failed\n");

	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

static bool nv_asic_supports_baco(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	return smu_baco_is_support(smu);
}

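/*
 * The reset method may be forced through the amdgpu_reset_method module
 * parameter; otherwise it is chosen per ASIC, falling back to BACO
 * whenever the SMU reports support for it.
 */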
static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
	    amdgpu_reset_method == AMD_RESET_METHOD_PCI)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	switch (adev->asic_type) {
	case CHIP_VANGOGH:
		return AMD_RESET_METHOD_MODE2;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
		return AMD_RESET_METHOD_MODE1;
	default:
		if (smu_baco_is_support(smu))
			return AMD_RESET_METHOD_BACO;
		else
			return AMD_RESET_METHOD_MODE1;
	}
}

static int nv_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	/* skip reset on vangogh for now */
	if (adev->asic_type == CHIP_VANGOGH)
		return 0;

	switch (nv_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_PCI:
		dev_info(adev->dev, "PCI reset\n");
		ret = amdgpu_device_pci_reset(adev);
		break;
	case AMD_RESET_METHOD_BACO:
		dev_info(adev->dev, "BACO reset\n");

		ret = smu_baco_enter(smu);
		if (ret)
			return ret;
		ret = smu_baco_exit(smu);
		if (ret)
			return ret;
		break;
	case AMD_RESET_METHOD_MODE2:
		dev_info(adev->dev, "MODE2 reset\n");
		ret = nv_asic_mode2_reset(adev);
		break;
	default:
		dev_info(adev->dev, "MODE1 reset\n");
		ret = nv_asic_mode1_reset(adev);
		break;
	}

	return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm != 1)
		return;

	if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
	    !(adev->flags & AMD_IS_APU) &&
	    (adev->nbio.funcs->program_aspm))
		adev->nbio.funcs->program_aspm(adev);
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};

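/*
 * Register bases are normally taken from the IP discovery table carried in
 * the VBIOS.  If that fails, or discovery was disabled with the
 * amdgpu_discovery module parameter, the hardcoded per-ASIC tables are
 * used instead.
 */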
static int nv_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_discovery) {
		r = amdgpu_discovery_reg_base_init(adev);
		if (r) {
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "fallback to legacy init method\n");
			goto legacy_init;
		}

		return 0;
	}

legacy_init:
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	case CHIP_NAVI14:
		navi14_reg_base_init(adev);
		break;
	case CHIP_NAVI12:
		navi12_reg_base_init(adev);
		break;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		sienna_cichlid_reg_base_init(adev);
		break;
	case CHIP_VANGOGH:
		vangogh_reg_base_init(adev);
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dimgrey_cavefish_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

void nv_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_nv_virt_ops;
}

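/*
 * These device/revision IDs belong to headless SKUs; nv_set_ip_blocks()
 * skips the VCN IP block for them.
 */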
static bool nv_is_headless_sku(struct pci_dev *pdev)
{
	if ((pdev->device == 0x731E &&
	    (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
	    (pdev->device == 0x7340 && pdev->revision == 0xC9) ||
	    (pdev->device == 0x7360 && pdev->revision == 0xC7))
		return true;
	return false;
}

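/*
 * Register the IP blocks for the detected ASIC.  The order in which blocks
 * are added matters: they are initialized in that order, so common, GMC
 * and IH always come first, followed by PSP/SMU and the engine blocks.
 */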
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	if (adev->flags & AMD_IS_APU) {
		adev->nbio.funcs = &nbio_v7_2_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
	} else {
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
	}
	adev->hdp.funcs = &hdp_v5_0_funcs;

	if (adev->asic_type >= CHIP_SIENNA_CICHLID)
		adev->smuio.funcs = &smuio_v11_0_6_funcs;
	else
		adev->smuio.funcs = &smuio_v11_0_funcs;

	if (adev->asic_type == CHIP_SIENNA_CICHLID)
		adev->gmc.xgmi.supported = true;

	/* Set IP register base before any HW register access */
	r = nv_reg_base_init(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (!nv_is_headless_sku(adev->pdev))
			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI12:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (!nv_is_headless_sku(adev->pdev))
			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	case CHIP_SIENNA_CICHLID:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);

		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVY_FLOUNDER:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case CHIP_VANGOGH:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		break;
	case CHIP_DIMGREY_CAVEFISH:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign-of-life register to confirm that the sys driver
	 * and sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{
	/* TODO: dummy implementation for the pcie_replay_count sysfs
	 * interface
	 */
	return 0;
}

static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.mes_ring = AMDGPU_NAVI10_DOORBELL_MES_RING;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.sdma_engine[2] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE2;
	adev->doorbell_index.sdma_engine[3] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE3;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static void nv_pre_asic_init(struct amdgpu_device *adev)
{
}

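/*
 * While a stable pstate profile is held for profiling, RLC safe mode is
 * entered and perfmon clock gating (and, where available, ASPM) is
 * disabled so that measurements stay consistent; everything is restored
 * when the profile is released.
 */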
static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
				       bool enter)
{
	if (enter)
		amdgpu_gfx_rlc_enter_safe_mode(adev);
	else
		amdgpu_gfx_rlc_exit_safe_mode(adev);

	if (adev->gfx.funcs->update_perfmon_mgcg)
		adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);

	/*
	 * The ASPM function is not fully enabled and verified on
	 * Navi yet. Temporarily skip this until ASPM is enabled.
	 */
	if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
	    !(adev->flags & AMD_IS_APU) &&
	    (adev->nbio.funcs->enable_aspm))
		adev->nbio.funcs->enable_aspm(adev, !enter);

	return 0;
}

static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &nv_asic_supports_baco,
	.pre_asic_init = &nv_pre_asic_init,
	.update_umd_stable_pstate = &nv_update_umd_stable_pstate,
};

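/*
 * MMIO_REG_HOLE_OFFSET below points at a page-sized hole near the top of
 * the first 512KB of the register BAR; nv_common_hw_init() remaps the HDP
 * flush registers there so they can be exposed to process space.
 */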
static int nv_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;
	adev->pcie_rreg64 = &nv_pcie_rreg64;
	adev->pcie_wreg64 = &nv_pcie_wreg64;
	adev->pciep_rreg = &nv_pcie_port_rreg;
	adev->pciep_wreg = &nv_pcie_port_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		/* The guest VM gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0;
		 * as a consequence, the rev_id and external_rev_id are wrong.
		 * Work around this by hardcoding rev_id to 0 (default value).
		 */
		if (amdgpu_sriov_vf(adev))
			adev->rev_id = 0;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	case CHIP_SIENNA_CICHLID:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		if (amdgpu_sriov_vf(adev)) {
			/* the hypervisor controls CG and PG enablement */
			adev->cg_flags = 0;
			adev->pg_flags = 0;
		}
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_NAVY_FLOUNDER:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	case CHIP_VANGOGH:
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		if (adev->apu_flags & AMD_APU_IS_VANGOGH)
			adev->external_rev_id = adev->rev_id + 0x01;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int nv_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_get_irq(adev);

	return 0;
}

static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}

static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * for the purpose of exposing those registers
	 * to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}

static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}

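/*
 * Common clock gating only covers the NBIO, HDP and SMUIO blocks here;
 * the remaining IPs handle their own gating through their respective
 * set_clockgating_state callbacks.
 */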
static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->smuio.funcs->update_rom_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}

static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	adev->hdp.funcs->get_clock_gating_state(adev, flags);

	adev->smuio.funcs->get_clock_gating_state(adev, flags);
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};