/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
#include "nbio/nbio_7_0_smn.h"
#include "mp/mp_9_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "navi10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "smuio_v11_0.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>

#define mmMP0_MISC_CGTT_CTRL0			0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX		0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL		0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX	0

/*
 * Indirect register accessors
 */
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg(adev, address, data, reg);
}

static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg(adev, address, data, reg, v);
}
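/*
 * Note on the 64-bit variants below (a sketch, based on the
 * amdgpu_device_indirect_*reg64() helpers): a 64-bit access is issued as
 * two 32-bit index/data cycles, the low dword at @reg and the high dword
 * at @reg + 4, both performed inside a single pcie_idx_lock section.
 */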
static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg64(adev, address, data, reg);
}

static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
}

static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
	return r;
}

static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}

static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;

	if (adev->asic_type == CHIP_RENOIR)
		return 10000;
	if (adev->asic_type == CHIP_RAVEN)
		return reference_clock / 4;

	return reference_clock;
}

void soc15_grbm_select(struct amdgpu_device *adev,
		       u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}

static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
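/*
 * Assumption worth noting for soc15_read_bios_from_rom() below: the SMUIO
 * ROM index/data pair auto-increments, i.e. once the index register is
 * reset to 0, every read of the data register returns the next dword of
 * the VBIOS image, which is why the loop only ever reads rom_data_offset.
 */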
static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;
	uint32_t rom_index_offset;
	uint32_t rom_data_offset;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	rom_index_offset =
		adev->smuio.funcs->get_rom_index_offset(adev);
	rom_data_offset =
		adev->smuio.funcs->get_rom_data_offset(adev);

	/* set rom index to 0 */
	WREG32(rom_index_offset, 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(rom_data_offset);

	return true;
}

static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};

static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
			return adev->gfx.config.db_debug2;
		return RREG32(reg_offset);
	}
}

static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
		en = &soc15_allowed_read_registers[i];
		if (adev->reg_offset[en->hwip][en->inst] &&
		    reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
				   + en->reg_offset))
			continue;

		*value = soc15_get_register_value(adev,
						  soc15_allowed_read_registers[i].grbm_indexed,
						  se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}
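/*
 * Worked example for the masking rule in the helper below (values are
 * hypothetical): with and_mask = 0x0000ff00 and or_mask = 0x00001200,
 * tmp = (RREG32(reg) & ~0x0000ff00) | 0x00001200, i.e. bits outside the
 * field are preserved and the field itself becomes 0x12. An and_mask of
 * 0xffffffff short-circuits to a direct write of or_mask.
 */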
/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void soc15_program_register_sequence(struct amdgpu_device *adev,
				     const struct soc15_reg_golden *regs,
				     const u32 array_size)
{
	const struct soc15_reg_golden *entry;
	u32 tmp, reg;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

		if (entry->and_mask == 0xffffffff) {
			tmp = entry->or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~(entry->and_mask);
			tmp |= (entry->or_mask & entry->and_mask);
		}

		if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
			WREG32_RLC(reg, tmp);
		else
			WREG32(reg, tmp);
	}
}

static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
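/*
 * BACO ("Bus Active, Chip Off") powers the chip down while the PCIe
 * interface stays alive, giving a reset path that does not drop the bus
 * link; the doorbell-interrupt toggling below only applies when RAS is
 * supported, to keep the NBIF from getting stuck during RAS recovery.
 */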
static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/* avoid the NBIF getting stuck when doing RAS recovery in BACO reset */
	if (ras && ras->supported)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	ret = amdgpu_dpm_baco_reset(adev);
	if (ret)
		return ret;

	/* re-enable doorbell interrupt after BACO exit */
	if (ras && ras->supported)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	return 0;
}

static enum amd_reset_method
soc15_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset = false;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
	    amdgpu_reset_method == AMD_RESET_METHOD_PCI)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		return AMD_RESET_METHOD_MODE2;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_ARCTURUS:
		baco_reset = amdgpu_dpm_is_baco_supported(adev);
		break;
	case CHIP_VEGA20:
		if (adev->psp.sos_fw_version >= 0x80067)
			baco_reset = amdgpu_dpm_is_baco_supported(adev);

		/*
		 * 1. PMFW version > 0x284300: all cases use baco
		 * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
		 */
		if ((ras && ras->supported) && adev->pm.fw_version <= 0x283400)
			baco_reset = false;
		break;
	default:
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}

static int soc15_asic_reset(struct amdgpu_device *adev)
{
	/* original raven doesn't have full asic reset */
	if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
	    !(adev->apu_flags & AMD_APU_IS_RAVEN2))
		return 0;

	switch (soc15_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_PCI:
		dev_info(adev->dev, "PCI reset\n");
		return amdgpu_device_pci_reset(adev);
	case AMD_RESET_METHOD_BACO:
		dev_info(adev->dev, "BACO reset\n");
		return soc15_asic_baco_reset(adev);
	case AMD_RESET_METHOD_MODE2:
		dev_info(adev->dev, "MODE2 reset\n");
		return amdgpu_dpm_mode2_reset(adev);
	default:
		dev_info(adev->dev, "MODE1 reset\n");
		return soc15_asic_mode1_reset(adev);
	}
}

static bool soc15_supports_baco(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_ARCTURUS:
		return amdgpu_dpm_is_baco_supported(adev);
	case CHIP_VEGA20:
		if (adev->psp.sos_fw_version >= 0x80067)
			return amdgpu_dpm_is_baco_supported(adev);
		return false;
	default:
		return false;
	}
}

/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	return 0;
}*/

static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}

static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}
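/*
 * ASPM (Active State Power Management) is the PCIe link power-saving
 * mechanism. Like the gen3 setup above, it is gated on a module parameter
 * (amdgpu_aspm) and is still a stub on SOC15.
 */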
static void soc15_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};

static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}
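/*
 * The reg base helpers populate adev->reg_offset[hwip][instance][segment],
 * the per-IP base tables that SOC15_REG_OFFSET() and the register macros
 * above depend on, either from hardcoded per-ASIC tables or, on Renoir,
 * from the IP discovery table.
 */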
static void soc15_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		break;
	case CHIP_RENOIR:
		/* It's safe to do ip discovery here for Renoir,
		 * it doesn't support SRIOV. */
		if (amdgpu_discovery) {
			r = amdgpu_discovery_reg_base_init(adev);
			if (r == 0)
				break;
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "fallback to legacy init method\n");
		}
		vega10_reg_base_init(adev);
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		break;
	default:
		DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type);
		break;
	}
}

void soc15_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_ai_virt_ops;

	/* init soc15 reg base early enough so we can
	 * request full access for sriov before
	 * set_ip_blocks. */
	soc15_reg_base_init(adev);
}
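/*
 * Ordering note for the function below: amdgpu_device_ip_block_add()
 * appends to adev->ip_blocks and the blocks are brought up in that order,
 * so each case deliberately sequences common -> GMC -> IH/PSP -> GFX ->
 * SDMA -> SMU -> display -> multimedia, with the SR-IOV paths moving PSP
 * ahead of IH.
 */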
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	/* for bare metal case */
	if (!amdgpu_sriov_vf(adev))
		soc15_reg_base_init(adev);

	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
		adev->gmc.xgmi.supported = true;

	if (adev->flags & AMD_IS_APU) {
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
	} else if (adev->asic_type == CHIP_VEGA20 ||
		   adev->asic_type == CHIP_ARCTURUS) {
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
	} else {
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
	}
	adev->hdp.funcs = &hdp_v4_0_funcs;

	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
		adev->df.funcs = &df_v3_6_funcs;
	else
		adev->df.funcs = &df_v1_7_funcs;

	if (adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_ARCTURUS)
		adev->smuio.funcs = &smuio_v11_0_funcs;
	else
		adev->smuio.funcs = &smuio_v9_0_funcs;

	adev->rev_id = soc15_get_rev_id(adev);

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		/* For Vega10 SR-IOV, PSP needs to be initialized before IH */
		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
			if (adev->asic_type == CHIP_VEGA20)
				amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
			else
				amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		} else {
			if (adev->asic_type == CHIP_VEGA20)
				amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
			else
				amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
		}
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (is_support_sw_smu(adev)) {
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		}
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
			amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
		}
		break;
	case CHIP_RAVEN:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	case CHIP_ARCTURUS:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		}

		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);

		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
		}
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
		break;
	case CHIP_RENOIR:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
	/* change this when we implement soft reset */
	return true;
}
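/*
 * The two PCIE usage helpers below sample the TX perf counters over a
 * fixed one second window (the msleep() between the start and shadow-load
 * writes) and widen the 32-bit counts with their COUNTER*_UPPER overflow
 * fields into 64-bit results.
 */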
static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				 uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	/* Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}

static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				  uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	/* Reg 108 is # of posted requests sent on VG20 */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
				EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
				EVENT1_SEL, 108);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
}
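/*
 * Rationale for the check below: in device pass-through the guest can
 * reboot without the GPU losing power, so a live sOS sign-of-life means
 * the ASIC still carries state from a previous owner and should be reset
 * before it is used again.
 */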
static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	/* Just return false for soc15 GPUs. Reset does not seem to
	 * be necessary.
	 */
	if (!amdgpu_passthrough(adev))
		return false;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e the number of replays */
	return (nak_r + nak_g);
}

static void soc15_pre_asic_init(struct amdgpu_device *adev)
{
	gmc_v9_0_restore_registers(adev);
}

static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega10_doorbell_index_init,
	.get_pcie_usage = &soc15_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
	.supports_baco = &soc15_supports_baco,
	.pre_asic_init = &soc15_pre_asic_init,
};

static const struct amdgpu_asic_funcs vega20_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega20_doorbell_index_init,
	.get_pcie_usage = &vega20_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
	.supports_baco = &soc15_supports_baco,
	.pre_asic_init = &soc15_pre_asic_init,
};
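/*
 * early_init mostly fills in policy: cg_flags/pg_flags advertise which
 * clock- and power-gating features the individual IP blocks may enable,
 * and external_rev_id is the revision value exposed to userspace.
 */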
static int soc15_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->pcie_rreg64 = &soc15_pcie_rreg64;
	adev->pcie_wreg64 = &soc15_pcie_wreg64;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;

	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_VEGA12:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_VEGA20:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_RAVEN:
		adev->asic_funcs = &soc15_asic_funcs;
		if (adev->pdev->device == 0x15dd)
			adev->apu_flags |= AMD_APU_IS_RAVEN;
		if (adev->pdev->device == 0x15d8)
			adev->apu_flags |= AMD_APU_IS_PICASSO;
		if (adev->rev_id >= 0x8)
			adev->apu_flags |= AMD_APU_IS_RAVEN2;

		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			adev->external_rev_id = adev->rev_id + 0x79;
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			adev->external_rev_id = adev->rev_id + 0x41;
		else if (adev->rev_id == 1)
			adev->external_rev_id = adev->rev_id + 0x20;
		else
			adev->external_rev_id = adev->rev_id + 0x01;

		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		} else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
				AMD_PG_SUPPORT_MMHUB |
				AMD_PG_SUPPORT_VCN;
		} else {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_RLC_LS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_MGCG |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_MGCG |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_DRM_MGCG |
				AMD_CG_SUPPORT_DRM_LS |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		}
		break;
	case CHIP_ARCTURUS:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	case CHIP_RENOIR:
		adev->asic_funcs = &soc15_asic_funcs;
		if ((adev->pdev->device == 0x1636) ||
		    (adev->pdev->device == 0x164c))
			adev->apu_flags |= AMD_APU_IS_RENOIR;
		else
			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;

		if (adev->apu_flags & AMD_APU_IS_RENOIR)
			adev->external_rev_id = adev->rev_id + 0x91;
		else
			adev->external_rev_id = adev->rev_id + 0xa1;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_DF_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
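/*
 * late_init runs once every IP block has finished hardware init; here it
 * hooks the SR-IOV mailbox interrupt and does the NBIO/HDP RAS
 * housekeeping that depends on the RAS infrastructure being up.
 */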
static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = 0;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	if (adev->hdp.funcs->reset_ras_error_count)
		adev->hdp.funcs->reset_ras_error_count(adev);

	if (adev->nbio.funcs->ras_late_init)
		r = adev->nbio.funcs->ras_late_init(adev);

	return r;
}

static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	adev->df.funcs->sw_init(adev);

	return 0;
}

static int soc15_common_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_nbio_ras_fini(adev);
	adev->df.funcs->sw_fini(adev);
	return 0;
}
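/*
 * Doorbell ranges tell the NBIO which pages of the doorbell BAR belong to
 * which engine; anything outside the ranges programmed here falls through
 * to CP (see the routing note in soc15_common_hw_init() below).
 */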
static void soc15_doorbell_range_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ring *ring;

	/* sdma/ih doorbell ranges are programmed by the hypervisor */
	if (!amdgpu_sriov_vf(adev)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			ring = &adev->sdma.instance[i].ring;
			adev->nbio.funcs->sdma_doorbell_range(adev, i,
				ring->use_doorbell, ring->doorbell_index,
				adev->doorbell_index.sdma_doorbell_range);
		}

		adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
						    adev->irq.ih.doorbell_index);
	}
}

static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * to expose those registers to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);

	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);
	/* HW doorbell routing policy: doorbell writes not in the
	 * SDMA/IH/MM/ACV ranges will be routed to CP, so we need to
	 * init the SDMA/IH/MM/ACV doorbell ranges prior to the CP ip
	 * block init and ring test.
	 */
	soc15_doorbell_range_init(adev);

	return 0;
}

static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	if (adev->nbio.ras_if &&
	    amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
		if (adev->nbio.funcs->init_ras_controller_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
		if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
	}

	return 0;
}

static int soc15_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_fini(adev);
}

static int soc15_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_init(adev);
}

static bool soc15_common_is_idle(void *handle)
{
	return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc15_common_soft_reset(void *handle)
{
	return 0;
}
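/*
 * Assumption: the eight high bits of MP0_MISC_CGTT_CTRL0 toggled below act
 * as per-client soft-override bits, so MGCG is enabled by clearing them
 * (letting the clocks gate) and disabled by setting them (forcing the
 * clocks on).
 */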
static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
		data &= ~(0x01000000 |
			  0x02000000 |
			  0x04000000 |
			  0x08000000 |
			  0x10000000 |
			  0x20000000 |
			  0x40000000 |
			  0x80000000);
	else
		data |= (0x01000000 |
			 0x02000000 |
			 0x04000000 |
			 0x08000000 |
			 0x10000000 |
			 0x20000000 |
			 0x40000000 |
			 0x80000000);

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}

static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}

static int soc15_common_set_clockgating_state(void *handle,
					      enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->smuio.funcs->update_rom_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->df.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_ARCTURUS:
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	adev->hdp.funcs->get_clock_gating_state(adev, flags);

	/* AMD_CG_SUPPORT_DRM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
	if (!(data & 0x01000000))
		*flags |= AMD_CG_SUPPORT_DRM_MGCG;

	/* AMD_CG_SUPPORT_DRM_LS */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
	if (data & 0x1)
		*flags |= AMD_CG_SUPPORT_DRM_LS;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	adev->smuio.funcs->get_clock_gating_state(adev, flags);

	adev->df.funcs->get_clockgating_state(adev, flags);
}

static int soc15_common_set_powergating_state(void *handle,
					      enum amd_powergating_state state)
{
	/* todo */
	return 0;
}
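/*
 * This vtable backs the vega10_common_ip_block defined near the top of the
 * file; amdgpu's IP dispatch walks adev->ip_blocks and calls these hooks.
 */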
const struct amd_ip_funcs soc15_common_ip_funcs = {
	.name = "soc15_common",
	.early_init = soc15_common_early_init,
	.late_init = soc15_common_late_init,
	.sw_init = soc15_common_sw_init,
	.sw_fini = soc15_common_sw_fini,
	.hw_init = soc15_common_hw_init,
	.hw_fini = soc15_common_hw_fini,
	.suspend = soc15_common_suspend,
	.resume = soc15_common_resume,
	.is_idle = soc15_common_is_idle,
	.wait_for_idle = soc15_common_wait_for_idle,
	.soft_reset = soc15_common_soft_reset,
	.set_clockgating_state = soc15_common_set_clockgating_state,
	.set_powergating_state = soc15_common_set_powergating_state,
	.get_clockgating_state = soc15_common_get_clockgating_state,
};