/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "smuio/smuio_11_0_0_offset.h"
#include "mp/mp_11_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"

static const struct amd_ip_funcs nv_common_ip_funcs;

/*
 * Indirect registers accessor
 */
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;
	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

/* Select the ME/PIPE/QUEUE/VMID that subsequent GRBM register accesses target */
void nv_grbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}

static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};

static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		if (reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

static bool nv_asic_supports_baco(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (smu_baco_is_support(smu))
		return true;
	else
		return false;
}

static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (!amdgpu_sriov_vf(adev) && smu_baco_is_support(smu))
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}

static int nv_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		ret = smu_baco_enter(smu);
		if (ret)
			return ret;
		ret = smu_baco_exit(smu);
		if (ret)
			return ret;
	} else {
		ret = nv_asic_mode1_reset(adev);
	}

	return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};

static int nv_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_discovery) {
		r = amdgpu_discovery_reg_base_init(adev);
		if (r) {
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "fallback to legacy init method\n");
			goto legacy_init;
		}

		return 0;
	}

legacy_init:
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	case CHIP_NAVI14:
		navi14_reg_base_init(adev);
		break;
	case CHIP_NAVI12:
		navi12_reg_base_init(adev);
		break;
	case CHIP_SIENNA_CICHLID:
		sienna_cichlid_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	adev->nbio.funcs = &nbio_v2_3_funcs;
	adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;

	if (amdgpu_sriov_vf(adev)) {
		adev->virt.ops = &xgpu_nv_virt_ops;
		/* try to send GPU_INIT_DATA request to host */
		amdgpu_virt_request_init_data(adev);
	}

	/* Set IP register base before any HW register access */
	r = nv_reg_base_init(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI12:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	case CHIP_SIENNA_CICHLID:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio.funcs->hdp_flush(adev, ring);
}

static void nv_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{
	/* TODO: dummy implementation for the pcie_replay_count sysfs interface */
	return 0;
}

static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp =
		AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.flush_hdp = &nv_flush_hdp,
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &nv_asic_supports_baco,
};

static int nv_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		/* guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0,
		 * as a consequence, the rev_id and external_rev_id are wrong.
		 * work around this by hardcoding rev_id to 0 (default value).
		 */
		if (amdgpu_sriov_vf(adev))
			adev->rev_id = 0;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	case CHIP_SIENNA_CICHLID:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int nv_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_get_irq(adev);

	return 0;
}

static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}

static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * for the purpose of exposing those registers
	 * to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}

static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}

static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before doing the clock/power mode switch,
	 * force the IPH & RC clocks on
	 */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support dynamic power mode switch,
	 * disable clock and power gating before making any change
	 */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* only one clock gating mode (LS/DS/SD) can be enabled */
	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_LS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_LS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_DS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_SD_EN, enable);
		/* RC should not use shut down mode, fall back to DS */
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	}

	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* restore IPH & RC clock override after the clock/power mode change */
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}

static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			(HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_mem_power_gating(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}

static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;

	return;
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};