/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
#include "nbio/nbio_7_0_smn.h"
#include "mp/mp_9_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_5.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>

#define mmMP0_MISC_CGTT_CTRL0				0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX			0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL			0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX		0

/* for Vega20 register name change */
#define mmHDP_MEM_POWER_CTRL				0x00d4
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK	0x00000001L
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK	0x00000002L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK	0x00010000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK	0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX			0

/*
 * Indirect register accessors
 */
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
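
/*
 * Usage sketch (illustrative only): the PCIE indirect space is reached by
 * writing the register offset into the NBIO index register and then
 * accessing the data register, which is why both helpers serialize on
 * pcie_idx_lock and read the index back to post the write:
 *
 *	u32 v = adev->pcie_rreg(adev, smn_reg);
 *	adev->pcie_wreg(adev, smn_reg, v);
 *
 * Callers normally go through the RREG32_PCIE()/WREG32_PCIE() wrappers
 * rather than invoking these hooks directly.
 */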

static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
	return r;
}

static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}

static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_memsize(adev);
}

static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

void soc15_grbm_select(struct amdgpu_device *adev,
		       u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}
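
/*
 * Typical (illustrative) caller pattern for soc15_grbm_select(): the GFX
 * code selects a specific me/pipe/queue/vmid, programs the per-queue
 * registers, then restores the broadcast selection, all under
 * adev->srbm_mutex:
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	soc15_grbm_select(adev, me, pipe, queue, 0);
 *	...program mmCP_HQD_* registers for that queue...
 *	soc15_grbm_select(adev, 0, 0, 0, 0);
 *	mutex_unlock(&adev->srbm_mutex);
 */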

static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}

static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};

static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
			return adev->gfx.config.db_debug2;
		return RREG32(reg_offset);
	}
}

static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
		en = &soc15_allowed_read_registers[i];
		if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
					+ en->reg_offset))
			continue;

		*value = soc15_get_register_value(adev,
				soc15_allowed_read_registers[i].grbm_indexed,
				se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}
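
/*
 * Example (illustrative): register reads requested from user space are
 * funneled through the whitelist above. A broadcast read of GRBM_STATUS
 * would look like:
 *
 *	u32 val;
 *
 *	if (!soc15_read_register(adev, 0xffffffff, 0xffffffff,
 *				 SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS), &val))
 *		...val now holds GRBM_STATUS...
 *
 * se_num/sh_num of 0xffffffff keep the default all-SE/all-SH selection.
 */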

/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void soc15_program_register_sequence(struct amdgpu_device *adev,
				     const struct soc15_reg_golden *regs,
				     const u32 array_size)
{
	const struct soc15_reg_golden *entry;
	u32 tmp, reg;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

		if (entry->and_mask == 0xffffffff) {
			tmp = entry->or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~(entry->and_mask);
			tmp |= (entry->or_mask & entry->and_mask);
		}

		if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
			WREG32_RLC(reg, tmp);
		else
			WREG32(reg, tmp);
	}
}
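
/*
 * Example (illustrative mask values): golden settings are declared as
 * (and_mask, or_mask) pairs with SOC15_REG_GOLDEN_VALUE() and applied in
 * one pass:
 *
 *	static const struct soc15_reg_golden golden_settings[] = {
 *		SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2,
 *				       0xf00fffff, 0x00000400),
 *	};
 *
 *	soc15_program_register_sequence(adev, golden_settings,
 *					ARRAY_SIZE(golden_settings));
 *
 * An and_mask of 0xffffffff overwrites the whole register with or_mask,
 * skipping the read-modify-write.
 */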

static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio_funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
		*cap = false;
		return -ENOENT;
	}

	return pp_funcs->get_asic_baco_capability(pp_handle, cap);
}

static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->get_asic_baco_state ||
	    !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	/* enter BACO state */
	if (pp_funcs->set_asic_baco_state(pp_handle, 1))
		return -EIO;

	/* exit BACO state */
	if (pp_funcs->set_asic_baco_state(pp_handle, 0))
		return -EIO;

	dev_info(adev->dev, "GPU BACO reset\n");

	adev->in_baco_reset = 1;

	return 0;
}

static int soc15_asic_reset(struct amdgpu_device *adev)
{
	int ret;
	bool baco_reset;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
		soc15_asic_get_baco_capability(adev, &baco_reset);
		break;
	case CHIP_VEGA20:
		if (adev->psp.sos_fw_version >= 0x80067)
			soc15_asic_get_baco_capability(adev, &baco_reset);
		else
			baco_reset = false;
		if (baco_reset) {
			struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
			struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

			if (hive || (ras && ras->supported))
				baco_reset = false;
		}
		break;
	default:
		baco_reset = false;
		break;
	}

	if (baco_reset)
		ret = soc15_asic_baco_reset(adev);
	else
		ret = soc15_asic_mode1_reset(adev);

	return ret;
}
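
/*
 * Reset selection above, in short: BACO (Bus Active, Chip Off) is used
 * when the SMU reports support for it -- on Vega20 only with a recent
 * enough PSP sOS, and never when the GPU sits in an XGMI hive or has RAS
 * enabled -- otherwise the driver falls back to a PSP mode1 reset.
 */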

/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	return 0;
}*/

static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}

static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void soc15_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};

static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_rev_id(adev);
}

int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
		adev->gmc.xgmi.supported = true;

	if (adev->flags & AMD_IS_APU)
		adev->nbio_funcs = &nbio_v7_0_funcs;
	else if (adev->asic_type == CHIP_VEGA20 ||
		 adev->asic_type == CHIP_ARCTURUS)
		adev->nbio_funcs = &nbio_v7_4_funcs;
	else
		adev->nbio_funcs = &nbio_v6_1_funcs;

	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
		adev->df_funcs = &df_v3_6_funcs;
	else
		adev->df_funcs = &df_v1_7_funcs;

	adev->rev_id = soc15_get_rev_id(adev);
	adev->nbio_funcs->detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_ai_virt_ops;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		/* For Vega10 SR-IOV, PSP needs to be initialized before IH */
		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
		}
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			if (is_support_sw_smu(adev))
				amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
			else
				amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		}
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
			amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
		}
		break;
	case CHIP_RAVEN:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	case CHIP_ARCTURUS:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
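
/*
 * Note on ordering: amdgpu_device_ip_block_add() appends to a list that
 * the common device-init code walks front to back, so the add order above
 * is also the hw_init order (teardown then runs in reverse).
 */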

static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio_funcs->hdp_flush(adev, ring);
}

static void soc15_invalidate_hdp(struct amdgpu_device *adev,
				 struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	else
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}

static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
	/* change this when we implement soft reset */
	return true;
}

static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				 uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	/* Pre-VG20, Reg 104 is # of posted requests sent. On VG20 it's 108 */
	if (adev->asic_type == CHIP_VEGA20)
		perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK,
					EVENT1_SEL, 108);
	else
		perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK,
					EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}
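
/*
 * Illustrative consumer of the counters above: the pcie_bw sysfs file
 * samples them over the 1 s window and, together with the PCIE maximum
 * payload size, lets userspace estimate bus traffic, roughly:
 *
 *	soc15_get_pcie_usage(adev, &count0, &count1);
 *	bytes ~= (count0 + count1) * mps;
 */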

static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	/* Just return false for soc15 GPUs. Reset does not seem to
	 * be necessary.
	 */
	if (!amdgpu_passthrough(adev))
		return false;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e. the number of replays */
	return (nak_r + nak_g);
}

static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega10_doorbell_index_init,
	.get_pcie_usage = &soc15_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
};

static const struct amdgpu_asic_funcs vega20_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega20_doorbell_index_init,
	.get_pcie_usage = &soc15_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
};
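
/*
 * The two function tables above differ only in .init_doorbell_index:
 * Vega20 (also reused for Arcturus below) has a different doorbell
 * layout than the other SOC15 parts.
 */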

static int soc15_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;

	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_VEGA12:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_VEGA20:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_RAVEN:
		adev->asic_funcs = &soc15_asic_funcs;
		if (adev->rev_id >= 0x8)
			adev->external_rev_id = adev->rev_id + 0x79;
		else if (adev->pdev->device == 0x15d8)
			adev->external_rev_id = adev->rev_id + 0x41;
		else if (adev->rev_id == 1)
			adev->external_rev_id = adev->rev_id + 0x20;
		else
			adev->external_rev_id = adev->rev_id + 0x01;

		if (adev->rev_id >= 0x8) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		} else if (adev->pdev->device == 0x15d8) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
				AMD_PG_SUPPORT_MMHUB |
				AMD_PG_SUPPORT_VCN |
				AMD_PG_SUPPORT_VCN_DPG;
		} else {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_RLC_LS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_MGCG |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_MGCG |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_DRM_MGCG |
				AMD_CG_SUPPORT_DRM_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		}

		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_RLC_SMU_HS;
		break;
	case CHIP_ARCTURUS:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	return 0;
}

static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	adev->df_funcs->sw_init(adev);

	return 0;
}

static int soc15_common_sw_fini(void *handle)
{
	return 0;
}

static void soc15_doorbell_range_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ring *ring;

	/* Two reasons to skip these ranges:
	 * 1. The host driver may have programmed them already.
	 * 2. To avoid register programming violations under SR-IOV.
	 */
	if (!amdgpu_virt_support_skip_setting(adev)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			ring = &adev->sdma.instance[i].ring;
			adev->nbio_funcs->sdma_doorbell_range(adev, i,
				ring->use_doorbell, ring->doorbell_index,
				adev->doorbell_index.sdma_doorbell_range);
		}
	}

	adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
					    adev->irq.ih.doorbell_index);
}

static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio_funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * so that they can be exposed to process space
	 */
	if (adev->nbio_funcs->remap_hdp_registers)
		adev->nbio_funcs->remap_hdp_registers(adev);

	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);
	/* HW doorbell routing policy: doorbell writes not in the
	 * SDMA/IH/MM/ACV ranges are routed to CP, so the SDMA/IH/MM/ACV
	 * doorbell ranges must be initialized before CP IP block init
	 * and its ring test.
	 */
	soc15_doorbell_range_init(adev);

	return 0;
}

static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	return 0;
}

static int soc15_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_fini(adev);
}

static int soc15_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_init(adev);
}

static bool soc15_common_is_idle(void *handle)
{
	return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc15_common_soft_reset(void *handle)
{
	return 0;
}

static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	if (adev->asic_type == CHIP_VEGA20) {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
		else
			data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
				  HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
				  HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
				  HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);

		if (def != data)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
	} else {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
		else
			data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

		if (def != data)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
	}
}

static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
		data &= ~(0x01000000 |
			  0x02000000 |
			  0x04000000 |
			  0x08000000 |
			  0x10000000 |
			  0x20000000 |
			  0x40000000 |
			  0x80000000);
	else
		data |= (0x01000000 |
			 0x02000000 |
			 0x04000000 |
			 0x08000000 |
			 0x10000000 |
			 0x20000000 |
			 0x40000000 |
			 0x80000000);

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}

static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}

static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
}
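
/*
 * All of the clock-gating helpers above follow the same def/data idiom:
 * read the register once, compute the desired value, and write it back
 * only if it actually changed, which avoids redundant writes on these
 * slow register paths.
 */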

static int soc15_common_set_clockgating_state(void *handle,
					      enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		adev->df_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	case CHIP_RAVEN:
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	default:
		break;
	}
	return 0;
}

static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio_funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_DRM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
	if (!(data & 0x01000000))
		*flags |= AMD_CG_SUPPORT_DRM_MGCG;

	/* AMD_CG_SUPPORT_DRM_LS */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
	if (data & 0x1)
		*flags |= AMD_CG_SUPPORT_DRM_LS;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;

	adev->df_funcs->get_clockgating_state(adev, flags);
}

static int soc15_common_set_powergating_state(void *handle,
					      enum amd_powergating_state state)
{
	/* todo */
	return 0;
}

const struct amd_ip_funcs soc15_common_ip_funcs = {
	.name = "soc15_common",
	.early_init = soc15_common_early_init,
	.late_init = soc15_common_late_init,
	.sw_init = soc15_common_sw_init,
	.sw_fini = soc15_common_sw_fini,
	.hw_init = soc15_common_hw_init,
	.hw_fini = soc15_common_hw_fini,
	.suspend = soc15_common_suspend,
	.resume = soc15_common_resume,
	.is_idle = soc15_common_is_idle,
	.wait_for_idle = soc15_common_wait_for_idle,
	.soft_reset = soc15_common_soft_reset,
	.set_clockgating_state = soc15_common_set_clockgating_state,
	.set_powergating_state = soc15_common_set_powergating_state,
	.get_clockgating_state = soc15_common_get_clockgating_state,
};