/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "smuio/smuio_11_0_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "vcn_v2_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"

static const struct amd_ip_funcs nv_common_ip_funcs;

/*
 * Indirect registers accessor
 */
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}
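
/*
 * nv_grbm_select - route subsequent GFX register accesses to the given
 * micro engine (me), pipe, queue and VMID by programming GRBM_GFX_CNTL.
 * Callers typically hold adev->srbm_mutex around a non-default selection
 * and restore the default (all zeros) selection when done.
 */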
void nv_grbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}

static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
#if 0	/* TODO: will set it when SDMA header is available */
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
#endif
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};

static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		if (reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

#if 0
static void nv_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = nbio_v2_3_get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}
}
#endif

static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
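
/*
 * Pick the ASIC reset mechanism: BACO through the SMU when it is supported
 * and we are not running as an SR-IOV guest, otherwise fall back to the
 * PSP-driven mode1 reset.
 */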
static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (!amdgpu_sriov_vf(adev) && smu_baco_is_support(smu))
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}

static int nv_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	/* FIXME: it doesn't work since vega10 */
#if 0
	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	nv_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
#endif

	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		if (!adev->in_suspend)
			amdgpu_inc_vram_lost(adev);
		ret = smu_baco_reset(smu);
	} else {
		if (!adev->in_suspend)
			amdgpu_inc_vram_lost(adev);
		ret = nv_asic_mode1_reset(adev);
	}

	return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};

static int nv_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_discovery) {
		r = amdgpu_discovery_reg_base_init(adev);
		if (r) {
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "fallback to legacy init method\n");
			goto legacy_init;
		}

		return 0;
	}

legacy_init:
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	case CHIP_NAVI14:
		navi14_reg_base_init(adev);
		break;
	case CHIP_NAVI12:
		navi12_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
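
/*
 * Register the IP blocks for this ASIC with the core driver in
 * initialization order.  Note that the SMU block is added either right
 * after PSP (PSP front-door firmware loading) or after SDMA (direct
 * firmware loading), and the MES block is only added when MES is enabled.
 */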
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	/* Set IP register base before any HW register access */
	r = nv_reg_base_init(adev);
	if (r)
		return r;

	adev->nbio.funcs = &nbio_v2_3_funcs;
	adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;

	adev->nbio.funcs->detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_nv_virt_ops;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI12:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio.funcs->hdp_flush(adev, ring);
}

static void nv_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static void nv_get_pcie_usage(struct amdgpu_device *adev,
			      uint64_t *count0,
			      uint64_t *count1)
{
	/* TODO */
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
#if 0
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign of life register to confirm that the sys
	 * driver and sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;
#endif
	/* TODO: re-enable it when mode1 reset is functional */
	return false;
}

static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{
	/* TODO
	 * dummy implementation for the pcie_replay_count sysfs interface
	 */
	return 0;
}
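
/*
 * Program the fixed Navi1x doorbell index layout used by KIQ, the MEC
 * compute rings, the GFX rings, SDMA, the IH ring and VCN.
 */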
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.flush_hdp = &nv_flush_hdp,
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.get_pcie_usage = &nv_get_pcie_usage,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
};
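
/*
 * Early init: hook up the register accessors and asic_funcs, and select
 * the clock/power gating flags and external revision id for the detected
 * ASIC.  The rmmio_remap offset programmed here is the MMIO hole that
 * remap_hdp_registers() later points at the HDP flush registers so they
 * can be exposed to user space (see nv_common_hw_init()).
 */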
static int nv_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int nv_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_get_irq(adev);

	return 0;
}

static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}

static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * in order to expose those registers to user space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}

static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}
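
/*
 * HDP 5.0 provides light sleep (LS), deep sleep (DS) and shutdown (SD)
 * power modes for its IPH and RC memories, and only one of them may be
 * enabled at a time.  The mode cannot be switched dynamically, so the
 * IPH/RC clocks are forced on and all gating is disabled before the
 * requested mode is programmed.
 */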
static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before switching the clock/power mode, force the IPH & RC
	 * clocks on.
	 */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support dynamic power mode switching, so
	 * disable clock and power gating before making any change.
	 */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* only one clock gating mode (LS/DS/SD) can be enabled */
	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_LS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_LS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_DS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_SD_EN, enable);
		/* RC should not use shutdown mode, fall back to DS */
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	}

	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* restore IPH & RC clock override after clock/power mode changing */
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}

static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			(HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		nv_update_hdp_mem_power_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		nv_update_hdp_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	default:
		break;
	}
	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}

static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;

	return;
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};