/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "vcn_v2_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"

static const struct amd_ip_funcs nv_common_ip_funcs;

/*
 * Indirect registers accessor
 */
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;
	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
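
/*
 * Indirect DIDT registers accessor, routed through the GC
 * DIDT_IND_INDEX/DIDT_IND_DATA register pair.
 */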
static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}


void nv_grbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	/* TODO: will implement it when SMU header is available */
	return false;
}

static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
#if 0	/* TODO: will set it when SDMA header is available */
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
#endif
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};

static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}
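
/*
 * Read a register on behalf of the caller, restricted to the allow-list
 * above; GRBM-indexed entries go through the SE/SH selection path.
 */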
static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		if (reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

#if 0
static void nv_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = nbio_v2_3_get_memsize(adev);
		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

}
#endif

static int nv_asic_reset(struct amdgpu_device *adev)
{

	/* FIXME: it doesn't work since vega10 */
#if 0
	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	nv_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
#endif

	return 0;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{

	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};
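
/*
 * Register the IP blocks for the ASIC. The per-IP register bases must be
 * initialized before any hardware register access; the SMU block is added
 * either before GFX (PSP firmware loading) or after SDMA (direct loading).
 */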
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	adev->nbio_funcs = &nbio_v2_3_funcs;

	adev->nbio_funcs->detect_hw_virt(adev);

	switch (adev->asic_type) {
	case CHIP_NAVI10:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_rev_id(adev);
}

static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio_funcs->hdp_flush(adev, ring);
}

static void nv_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static void nv_get_pcie_usage(struct amdgpu_device *adev,
			      uint64_t *count0,
			      uint64_t *count1)
{
	/* TODO */
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
#if 0
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;
#endif
	/* TODO: re-enable it when mode1 reset is functional */
	return false;
}
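
/* Navi10 doorbell index assignment (AMDGPU_NAVI10_DOORBELL_* layout) */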
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.flush_hdp = &nv_flush_hdp,
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.get_pcie_usage = &nv_get_pcie_usage,
	.need_reset_on_init = &nv_need_reset_on_init,
};

static int nv_common_early_init(void *handle)
{
	bool psp_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
		psp_enabled = true;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

static int nv_common_late_init(void *handle)
{
	return 0;
}

static int nv_common_sw_init(void *handle)
{
	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}

static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio_funcs->init_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}

static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}
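
/*
 * HDP 5.0 has no dynamic power mode switch: force the IPH/RC memory clocks
 * on, disable every power mode, program exactly one of LS/DS/SD, then
 * restore the clock overrides.
 */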
static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Force the IPH & RC clocks on before switching the
	 * clock/power mode */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support dynamic power mode switching;
	 * disable clock and power gating before making any change */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* only one clock gating mode (LS/DS/SD) can be enabled */
	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_LS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_LS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_DS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_SD_EN, enable);
		/* RC should not use shut-down mode, fall back to DS */
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	}

	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* restore the IPH & RC clock overrides after changing the clock/power mode */
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}
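
/*
 * HDP medium grain clock gating: clearing the soft overrides lets the HDP
 * clocks gate dynamically; setting them forces the clocks on.
 */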
static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			  (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			   HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			   HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			   HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			   HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			   HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		nv_update_hdp_mem_power_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		nv_update_hdp_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	default:
		break;
	}
	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}

static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio_funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;

	return;
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};