/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
#include "nbio/nbio_7_0_smn.h"
#include "mp/mp_9_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "navi10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "smuio_v11_0.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>

#define mmMP0_MISC_CGTT_CTRL0                   0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX          0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL             0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX    0

/*
 * Indirect register accessors
 */
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
    unsigned long address, data;

    address = adev->nbio.funcs->get_pcie_index_offset(adev);
    data = adev->nbio.funcs->get_pcie_data_offset(adev);

    return amdgpu_device_indirect_rreg(adev, address, data, reg);
}

static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
    unsigned long address, data;

    address = adev->nbio.funcs->get_pcie_index_offset(adev);
    data = adev->nbio.funcs->get_pcie_data_offset(adev);

    amdgpu_device_indirect_wreg(adev, address, data, reg, v);
}

static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
    unsigned long address, data;

    address = adev->nbio.funcs->get_pcie_index_offset(adev);
    data = adev->nbio.funcs->get_pcie_data_offset(adev);

    return amdgpu_device_indirect_rreg64(adev, address, data, reg);
}

static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
    unsigned long address, data;

    address = adev->nbio.funcs->get_pcie_index_offset(adev);
    data = adev->nbio.funcs->get_pcie_data_offset(adev);

    amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
}
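
/*
 * The accessors below go through an index/data register pair. Each pair
 * has a dedicated spinlock so that the index write and the following
 * data access are atomic with respect to other users of the same pair.
 */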

static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
    unsigned long flags, address, data;
    u32 r;

    address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
    data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

    spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
    WREG32(address, ((reg) & 0x1ff));
    r = RREG32(data);
    spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
    return r;
}

static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
    unsigned long flags, address, data;

    address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
    data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

    spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
    WREG32(address, ((reg) & 0x1ff));
    WREG32(data, (v));
    spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
    unsigned long flags, address, data;
    u32 r;

    address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
    data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

    spin_lock_irqsave(&adev->didt_idx_lock, flags);
    WREG32(address, (reg));
    r = RREG32(data);
    spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
    return r;
}

static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
    unsigned long flags, address, data;

    address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
    data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

    spin_lock_irqsave(&adev->didt_idx_lock, flags);
    WREG32(address, (reg));
    WREG32(data, (v));
    spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
    unsigned long flags;
    u32 r;

    spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
    WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
    r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
    spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
    return r;
}

static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
    unsigned long flags;

    spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
    WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
    WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
    spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
    unsigned long flags;
    u32 r;

    spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
    WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
    r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
    spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
    return r;
}

static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
    unsigned long flags;

    spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
    WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
    WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
    spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}

static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
    return adev->nbio.funcs->get_memsize(adev);
}

static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
    u32 reference_clock = adev->clock.spll.reference_freq;

    if (adev->asic_type == CHIP_RAVEN)
        return reference_clock / 4;

    return reference_clock;
}
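
/*
 * Select the GRBM target (micro engine/pipe/queue/VMID) for subsequent
 * GC register accesses. Callers are expected to serialize GRBM bank
 * selection, typically by holding adev->srbm_mutex, and to restore the
 * default selection (all zeros) when done.
 */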

void soc15_grbm_select(struct amdgpu_device *adev,
                       u32 me, u32 pipe, u32 queue, u32 vmid)
{
    u32 grbm_gfx_cntl = 0;

    grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
    grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
    grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
    grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

    WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}

static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
    /* todo */
}

static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
    /* todo */
    return false;
}

static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
                                     u8 *bios, u32 length_bytes)
{
    u32 *dw_ptr;
    u32 i, length_dw;
    uint32_t rom_index_offset;
    uint32_t rom_data_offset;

    if (bios == NULL)
        return false;
    if (length_bytes == 0)
        return false;
    /* APU vbios image is part of sbios image */
    if (adev->flags & AMD_IS_APU)
        return false;

    dw_ptr = (u32 *)bios;
    length_dw = ALIGN(length_bytes, 4) / 4;

    rom_index_offset = adev->smuio.funcs->get_rom_index_offset(adev);
    rom_data_offset = adev->smuio.funcs->get_rom_data_offset(adev);

    /* set rom index to 0 */
    WREG32(rom_index_offset, 0);
    /* read out the rom data */
    for (i = 0; i < length_dw; i++)
        dw_ptr[i] = RREG32(rom_data_offset);

    return true;
}

static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
    { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
    { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
    { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
    { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
    { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
    { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
    { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
    { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
    { SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
    { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
    { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
    { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
    { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
    { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
    { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
    { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
    { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
    { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
    { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
    { SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};
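
/*
 * Read a GC register through the GRBM index. A se_num or sh_num of
 * 0xffffffff requests broadcast; when both are 0xffffffff the default
 * (broadcast) GRBM selection is left in place and no reselection is
 * needed. E.g. soc15_read_indexed_register(adev, 0, 0, reg) reads the
 * SE0/SH0 instance of reg.
 */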

static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
                                            u32 sh_num, u32 reg_offset)
{
    uint32_t val;

    mutex_lock(&adev->grbm_idx_mutex);
    if (se_num != 0xffffffff || sh_num != 0xffffffff)
        amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

    val = RREG32(reg_offset);

    if (se_num != 0xffffffff || sh_num != 0xffffffff)
        amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
    mutex_unlock(&adev->grbm_idx_mutex);
    return val;
}

static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
                                         bool indexed, u32 se_num,
                                         u32 sh_num, u32 reg_offset)
{
    if (indexed) {
        return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
    } else {
        if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
            return adev->gfx.config.gb_addr_config;
        else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
            return adev->gfx.config.db_debug2;
        return RREG32(reg_offset);
    }
}

static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
                               u32 sh_num, u32 reg_offset, u32 *value)
{
    uint32_t i;
    struct soc15_allowed_register_entry *en;

    *value = 0;
    for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
        en = &soc15_allowed_read_registers[i];
        if (adev->reg_offset[en->hwip][en->inst] &&
            reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
                           + en->reg_offset))
            continue;

        *value = soc15_get_register_value(adev,
                soc15_allowed_read_registers[i].grbm_indexed,
                se_num, sh_num, reg_offset);
        return 0;
    }
    return -EINVAL;
}

/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void soc15_program_register_sequence(struct amdgpu_device *adev,
                                     const struct soc15_reg_golden *regs,
                                     const u32 array_size)
{
    const struct soc15_reg_golden *entry;
    u32 tmp, reg;
    int i;

    for (i = 0; i < array_size; ++i) {
        entry = &regs[i];
        reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

        if (entry->and_mask == 0xffffffff) {
            tmp = entry->or_mask;
        } else {
            tmp = RREG32(reg);
            tmp &= ~(entry->and_mask);
            tmp |= (entry->or_mask & entry->and_mask);
        }

        if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
            reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
            reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
            reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
            WREG32_RLC(reg, tmp);
        else
            WREG32(reg, tmp);
    }
}

static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
{
    u32 i;
    int ret = 0;

    amdgpu_atombios_scratch_regs_engine_hung(adev, true);

    dev_info(adev->dev, "GPU mode1 reset\n");

    /* disable BM */
    pci_clear_master(adev->pdev);

    amdgpu_device_cache_pci_state(adev->pdev);

    ret = psp_gpu_reset(adev);
    if (ret)
        dev_err(adev->dev, "GPU mode1 reset failed\n");

    amdgpu_device_load_pci_state(adev->pdev);

    /* wait for asic to come out of reset */
    for (i = 0; i < adev->usec_timeout; i++) {
        u32 memsize = adev->nbio.funcs->get_memsize(adev);

        if (memsize != 0xffffffff)
            break;
        udelay(1);
    }

    amdgpu_atombios_scratch_regs_engine_hung(adev, false);

    return ret;
}
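
/*
 * BACO (Bus Active, Chip Off) reset keeps the PCIe link up while the
 * rest of the chip is powered down. Doorbell interrupts are toggled
 * off around the reset so the NBIF does not get stuck when RAS
 * recovery triggers a BACO entry.
 */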

static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
    struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
    int ret = 0;

    /* avoid NBIF getting stuck during RAS recovery in BACO reset */
    if (ras && ras->supported)
        adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

    ret = amdgpu_dpm_baco_reset(adev);
    if (ret)
        return ret;

    /* re-enable doorbell interrupt after BACO exit */
    if (ras && ras->supported)
        adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

    return 0;
}

static enum amd_reset_method
soc15_asic_reset_method(struct amdgpu_device *adev)
{
    bool baco_reset = false;
    struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

    if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
        amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
        amdgpu_reset_method == AMD_RESET_METHOD_BACO)
        return amdgpu_reset_method;

    if (amdgpu_reset_method != -1)
        dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
                 amdgpu_reset_method);

    switch (adev->asic_type) {
    case CHIP_RAVEN:
    case CHIP_RENOIR:
        return AMD_RESET_METHOD_MODE2;
    case CHIP_VEGA10:
    case CHIP_VEGA12:
    case CHIP_ARCTURUS:
        baco_reset = amdgpu_dpm_is_baco_supported(adev);
        break;
    case CHIP_VEGA20:
        if (adev->psp.sos_fw_version >= 0x80067)
            baco_reset = amdgpu_dpm_is_baco_supported(adev);

        /*
         * 1. PMFW version > 0x284300: all cases use baco
         * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
         */
        if ((ras && ras->supported) && adev->pm.fw_version <= 0x283400)
            baco_reset = false;
        break;
    default:
        break;
    }

    if (baco_reset)
        return AMD_RESET_METHOD_BACO;
    else
        return AMD_RESET_METHOD_MODE1;
}
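
/*
 * Note: the auto-selection above can be overridden with the
 * amdgpu.reset_method module parameter (-1 = auto, the default; see
 * the parameter description in amdgpu_drv.c for the per-method values).
 */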

static int soc15_asic_reset(struct amdgpu_device *adev)
{
    /* original raven doesn't have full asic reset */
    if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
        !(adev->apu_flags & AMD_APU_IS_RAVEN2))
        return 0;

    switch (soc15_asic_reset_method(adev)) {
    case AMD_RESET_METHOD_BACO:
        dev_info(adev->dev, "BACO reset\n");
        return soc15_asic_baco_reset(adev);
    case AMD_RESET_METHOD_MODE2:
        dev_info(adev->dev, "MODE2 reset\n");
        return amdgpu_dpm_mode2_reset(adev);
    default:
        dev_info(adev->dev, "MODE1 reset\n");
        return soc15_asic_mode1_reset(adev);
    }
}

static bool soc15_supports_baco(struct amdgpu_device *adev)
{
    switch (adev->asic_type) {
    case CHIP_VEGA10:
    case CHIP_VEGA12:
    case CHIP_ARCTURUS:
        return amdgpu_dpm_is_baco_supported(adev);
    case CHIP_VEGA20:
        if (adev->psp.sos_fw_version >= 0x80067)
            return amdgpu_dpm_is_baco_supported(adev);
        return false;
    default:
        return false;
    }
}

/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
                  u32 cntl_reg, u32 status_reg)
{
    return 0;
}*/

static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
    /*int r;

    r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
    if (r)
        return r;

    r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
    */
    return 0;
}

static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
    /* todo */

    return 0;
}

static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
    if (pci_is_root_bus(adev->pdev->bus))
        return;

    if (amdgpu_pcie_gen2 == 0)
        return;

    if (adev->flags & AMD_IS_APU)
        return;

    if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
        return;

    /* todo */
}

static void soc15_program_aspm(struct amdgpu_device *adev)
{
    if (amdgpu_aspm == 0)
        return;

    /* todo */
}

static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
                                           bool enable)
{
    adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
    adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
    .type = AMD_IP_BLOCK_TYPE_COMMON,
    .major = 2,
    .minor = 0,
    .rev = 0,
    .funcs = &soc15_common_ip_funcs,
};

static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
    return adev->nbio.funcs->get_rev_id(adev);
}

static void soc15_reg_base_init(struct amdgpu_device *adev)
{
    int r;

    /* Set IP register base before any HW register access */
    switch (adev->asic_type) {
    case CHIP_VEGA10:
    case CHIP_VEGA12:
    case CHIP_RAVEN:
        vega10_reg_base_init(adev);
        break;
    case CHIP_RENOIR:
        /* It's safe to do IP discovery here for Renoir,
         * it doesn't support SR-IOV.
         */
        if (amdgpu_discovery) {
            r = amdgpu_discovery_reg_base_init(adev);
            if (r == 0)
                break;
            DRM_WARN("failed to init reg base from ip discovery table, "
                     "fallback to legacy init method\n");
        }
        vega10_reg_base_init(adev);
        break;
    case CHIP_VEGA20:
        vega20_reg_base_init(adev);
        break;
    case CHIP_ARCTURUS:
        arct_reg_base_init(adev);
        break;
    default:
        DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type);
        break;
    }
}

void soc15_set_virt_ops(struct amdgpu_device *adev)
{
    adev->virt.ops = &xgpu_ai_virt_ops;

    /* init soc15 reg base early enough so we can
     * request full access for SR-IOV before
     * set_ip_blocks.
     */
    soc15_reg_base_init(adev);
}
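
/*
 * Build the IP block list for the detected ASIC. Ordering matters:
 * common and GMC blocks come first, IH and PSP are swapped under
 * SR-IOV so that PSP can come up before the interrupt handler, and
 * the display and media blocks are appended last.
 */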

int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
    /* for bare metal case */
    if (!amdgpu_sriov_vf(adev))
        soc15_reg_base_init(adev);

    if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
        adev->gmc.xgmi.supported = true;

    if (adev->flags & AMD_IS_APU) {
        adev->nbio.funcs = &nbio_v7_0_funcs;
        adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
    } else if (adev->asic_type == CHIP_VEGA20 ||
               adev->asic_type == CHIP_ARCTURUS) {
        adev->nbio.funcs = &nbio_v7_4_funcs;
        adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
    } else {
        adev->nbio.funcs = &nbio_v6_1_funcs;
        adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
    }
    adev->hdp.funcs = &hdp_v4_0_funcs;

    if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
        adev->df.funcs = &df_v3_6_funcs;
    else
        adev->df.funcs = &df_v1_7_funcs;

    if (adev->asic_type == CHIP_VEGA20 ||
        adev->asic_type == CHIP_ARCTURUS)
        adev->smuio.funcs = &smuio_v11_0_funcs;
    else
        adev->smuio.funcs = &smuio_v9_0_funcs;

    adev->rev_id = soc15_get_rev_id(adev);

    switch (adev->asic_type) {
    case CHIP_VEGA10:
    case CHIP_VEGA12:
    case CHIP_VEGA20:
        amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
        amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

        /* For Vega10 SR-IOV, PSP needs to be initialized before IH */
        if (amdgpu_sriov_vf(adev)) {
            if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
                if (adev->asic_type == CHIP_VEGA20)
                    amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
                else
                    amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
            }
            if (adev->asic_type == CHIP_VEGA20)
                amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
            else
                amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
        } else {
            if (adev->asic_type == CHIP_VEGA20)
                amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
            else
                amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
            if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
                if (adev->asic_type == CHIP_VEGA20)
                    amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
                else
                    amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
            }
        }
        amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
        amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
        if (is_support_sw_smu(adev)) {
            if (!amdgpu_sriov_vf(adev))
                amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
        } else {
            amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
        }
        if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
            amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
        else if (amdgpu_device_has_dc_support(adev))
            amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
        if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
            amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
            amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
        }
        break;
    case CHIP_RAVEN:
        amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
        amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
        amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
        if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
            amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
        amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
        amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
        amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
        if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
            amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
        else if (amdgpu_device_has_dc_support(adev))
            amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
        amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
        break;
    case CHIP_ARCTURUS:
        amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
        amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

        if (amdgpu_sriov_vf(adev)) {
            if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
                amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
            amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
        } else {
            amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
            if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
                amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
        }

        if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
            amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
        amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
        amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);

        if (amdgpu_sriov_vf(adev)) {
            if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
                amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
        } else {
            amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
        }
        if (!amdgpu_sriov_vf(adev))
            amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
        break;
    case CHIP_RENOIR:
        amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
        amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
        amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
        if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
            amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
        amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
        amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
        amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
        if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
            amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
        else if (amdgpu_device_has_dc_support(adev))
            amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
        amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
        amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
        break;
    default:
        return -EINVAL;
    }

    return 0;
}

static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
    /* change this when we implement soft reset */
    return true;
}
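
/*
 * Sample the PCIe TX-clock performance counters for one second and
 * return the two event counts (received messages and posted requests
 * sent). Consumed by e.g. the pcie_bw sysfs interface to estimate
 * link usage.
 */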

static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
                                 uint64_t *count1)
{
    uint32_t perfctr = 0;
    uint64_t cnt0_of, cnt1_of;
    int tmp;

    /* This reports 0 on APUs, so return to avoid writing/reading registers
     * that may or may not be different from their GPU counterparts
     */
    if (adev->flags & AMD_IS_APU)
        return;

    /* Set the 2 events that we wish to watch, defined above */
    /* Reg 40 is # received msgs */
    /* Reg 104 is # of posted requests sent */
    perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
    perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

    /* Write to enable desired perf counters */
    WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
    /* Zero out and enable the perf counters
     * Write 0x5:
     * Bit 0 = Start all counters(1)
     * Bit 2 = Global counter reset enable(1)
     */
    WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

    msleep(1000);

    /* Load the shadow and disable the perf counters
     * Write 0x2:
     * Bit 0 = Stop counters(0)
     * Bit 1 = Load the shadow counters(1)
     */
    WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

    /* Read register values to get any >32bit overflow */
    tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
    cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
    cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

    /* Get the values and add the overflow */
    *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
    *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}
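
/*
 * Vega20 variant of the above: the same measurement scheme, but the
 * "posted requests sent" event is index 108 and the TXCLK3 counter
 * block is used instead of TXCLK.
 */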

static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
                                  uint64_t *count1)
{
    uint32_t perfctr = 0;
    uint64_t cnt0_of, cnt1_of;
    int tmp;

    /* This reports 0 on APUs, so return to avoid writing/reading registers
     * that may or may not be different from their GPU counterparts
     */
    if (adev->flags & AMD_IS_APU)
        return;

    /* Set the 2 events that we wish to watch, defined above */
    /* Reg 40 is # received msgs */
    /* Reg 108 is # of posted requests sent on VG20 */
    perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
                            EVENT0_SEL, 40);
    perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
                            EVENT1_SEL, 108);

    /* Write to enable desired perf counters */
    WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
    /* Zero out and enable the perf counters
     * Write 0x5:
     * Bit 0 = Start all counters(1)
     * Bit 2 = Global counter reset enable(1)
     */
    WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

    msleep(1000);

    /* Load the shadow and disable the perf counters
     * Write 0x2:
     * Bit 0 = Stop counters(0)
     * Bit 1 = Load the shadow counters(1)
     */
    WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

    /* Read register values to get any >32bit overflow */
    tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
    cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
    cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);

    /* Get the values and add the overflow */
    *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
    *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
}
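
/*
 * Under virtualization passthrough the GPU may reach the guest with
 * PSP firmware already running from a previous owner; a nonzero
 * sign-of-life scratch value then tells us a reset is needed before
 * reinitialization.
 */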

static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
    u32 sol_reg;

    /* Just return false for soc15 GPUs. Reset does not seem to
     * be necessary.
     */
    if (!amdgpu_passthrough(adev))
        return false;

    if (adev->flags & AMD_IS_APU)
        return false;

    /* Check sOS sign of life register to confirm sys driver and sOS
     * have already been loaded.
     */
    sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
    if (sol_reg)
        return true;

    return false;
}

static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
{
    uint64_t nak_r, nak_g;

    /* Get the number of NAKs received and generated */
    nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
    nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);

    /* Add the total number of NAKs, i.e. the number of replays */
    return (nak_r + nak_g);
}

static void soc15_pre_asic_init(struct amdgpu_device *adev)
{
    gmc_v9_0_restore_registers(adev);
}

static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
    .read_disabled_bios = &soc15_read_disabled_bios,
    .read_bios_from_rom = &soc15_read_bios_from_rom,
    .read_register = &soc15_read_register,
    .reset = &soc15_asic_reset,
    .reset_method = &soc15_asic_reset_method,
    .set_vga_state = &soc15_vga_set_state,
    .get_xclk = &soc15_get_xclk,
    .set_uvd_clocks = &soc15_set_uvd_clocks,
    .set_vce_clocks = &soc15_set_vce_clocks,
    .get_config_memsize = &soc15_get_config_memsize,
    .need_full_reset = &soc15_need_full_reset,
    .init_doorbell_index = &vega10_doorbell_index_init,
    .get_pcie_usage = &soc15_get_pcie_usage,
    .need_reset_on_init = &soc15_need_reset_on_init,
    .get_pcie_replay_count = &soc15_get_pcie_replay_count,
    .supports_baco = &soc15_supports_baco,
    .pre_asic_init = &soc15_pre_asic_init,
};

static const struct amdgpu_asic_funcs vega20_asic_funcs =
{
    .read_disabled_bios = &soc15_read_disabled_bios,
    .read_bios_from_rom = &soc15_read_bios_from_rom,
    .read_register = &soc15_read_register,
    .reset = &soc15_asic_reset,
    .reset_method = &soc15_asic_reset_method,
    .set_vga_state = &soc15_vga_set_state,
    .get_xclk = &soc15_get_xclk,
    .set_uvd_clocks = &soc15_set_uvd_clocks,
    .set_vce_clocks = &soc15_set_vce_clocks,
    .get_config_memsize = &soc15_get_config_memsize,
    .need_full_reset = &soc15_need_full_reset,
    .init_doorbell_index = &vega20_doorbell_index_init,
    .get_pcie_usage = &vega20_get_pcie_usage,
    .need_reset_on_init = &soc15_need_reset_on_init,
    .get_pcie_replay_count = &soc15_get_pcie_replay_count,
    .supports_baco = &soc15_supports_baco,
    .pre_asic_init = &soc15_pre_asic_init,
};
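
/*
 * Early init: wire up the register accessor callbacks, then set the
 * per-ASIC clock/power gating support flags and external revision id.
 */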

static int soc15_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
    adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
    adev->smc_rreg = NULL;
    adev->smc_wreg = NULL;
    adev->pcie_rreg = &soc15_pcie_rreg;
    adev->pcie_wreg = &soc15_pcie_wreg;
    adev->pcie_rreg64 = &soc15_pcie_rreg64;
    adev->pcie_wreg64 = &soc15_pcie_wreg64;
    adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
    adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
    adev->didt_rreg = &soc15_didt_rreg;
    adev->didt_wreg = &soc15_didt_wreg;
    adev->gc_cac_rreg = &soc15_gc_cac_rreg;
    adev->gc_cac_wreg = &soc15_gc_cac_wreg;
    adev->se_cac_rreg = &soc15_se_cac_rreg;
    adev->se_cac_wreg = &soc15_se_cac_wreg;

    adev->external_rev_id = 0xFF;
    switch (adev->asic_type) {
    case CHIP_VEGA10:
        adev->asic_funcs = &soc15_asic_funcs;
        adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
            AMD_CG_SUPPORT_GFX_MGLS |
            AMD_CG_SUPPORT_GFX_RLC_LS |
            AMD_CG_SUPPORT_GFX_CP_LS |
            AMD_CG_SUPPORT_GFX_3D_CGCG |
            AMD_CG_SUPPORT_GFX_3D_CGLS |
            AMD_CG_SUPPORT_GFX_CGCG |
            AMD_CG_SUPPORT_GFX_CGLS |
            AMD_CG_SUPPORT_BIF_MGCG |
            AMD_CG_SUPPORT_BIF_LS |
            AMD_CG_SUPPORT_HDP_LS |
            AMD_CG_SUPPORT_DRM_MGCG |
            AMD_CG_SUPPORT_DRM_LS |
            AMD_CG_SUPPORT_ROM_MGCG |
            AMD_CG_SUPPORT_DF_MGCG |
            AMD_CG_SUPPORT_SDMA_MGCG |
            AMD_CG_SUPPORT_SDMA_LS |
            AMD_CG_SUPPORT_MC_MGCG |
            AMD_CG_SUPPORT_MC_LS;
        adev->pg_flags = 0;
        adev->external_rev_id = 0x1;
        break;
    case CHIP_VEGA12:
        adev->asic_funcs = &soc15_asic_funcs;
        adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
            AMD_CG_SUPPORT_GFX_MGLS |
            AMD_CG_SUPPORT_GFX_CGCG |
            AMD_CG_SUPPORT_GFX_CGLS |
            AMD_CG_SUPPORT_GFX_3D_CGCG |
            AMD_CG_SUPPORT_GFX_3D_CGLS |
            AMD_CG_SUPPORT_GFX_CP_LS |
            AMD_CG_SUPPORT_MC_LS |
            AMD_CG_SUPPORT_MC_MGCG |
            AMD_CG_SUPPORT_SDMA_MGCG |
            AMD_CG_SUPPORT_SDMA_LS |
            AMD_CG_SUPPORT_BIF_MGCG |
            AMD_CG_SUPPORT_BIF_LS |
            AMD_CG_SUPPORT_HDP_MGCG |
            AMD_CG_SUPPORT_HDP_LS |
            AMD_CG_SUPPORT_ROM_MGCG |
            AMD_CG_SUPPORT_VCE_MGCG |
            AMD_CG_SUPPORT_UVD_MGCG;
        adev->pg_flags = 0;
        adev->external_rev_id = adev->rev_id + 0x14;
        break;
    case CHIP_VEGA20:
        adev->asic_funcs = &vega20_asic_funcs;
        adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
            AMD_CG_SUPPORT_GFX_MGLS |
            AMD_CG_SUPPORT_GFX_CGCG |
            AMD_CG_SUPPORT_GFX_CGLS |
            AMD_CG_SUPPORT_GFX_3D_CGCG |
            AMD_CG_SUPPORT_GFX_3D_CGLS |
            AMD_CG_SUPPORT_GFX_CP_LS |
            AMD_CG_SUPPORT_MC_LS |
            AMD_CG_SUPPORT_MC_MGCG |
            AMD_CG_SUPPORT_SDMA_MGCG |
            AMD_CG_SUPPORT_SDMA_LS |
            AMD_CG_SUPPORT_BIF_MGCG |
            AMD_CG_SUPPORT_BIF_LS |
            AMD_CG_SUPPORT_HDP_MGCG |
            AMD_CG_SUPPORT_HDP_LS |
            AMD_CG_SUPPORT_ROM_MGCG |
            AMD_CG_SUPPORT_VCE_MGCG |
            AMD_CG_SUPPORT_UVD_MGCG;
        adev->pg_flags = 0;
        adev->external_rev_id = adev->rev_id + 0x28;
        break;
    case CHIP_RAVEN:
        adev->asic_funcs = &soc15_asic_funcs;
        if (adev->pdev->device == 0x15dd)
            adev->apu_flags |= AMD_APU_IS_RAVEN;
        if (adev->pdev->device == 0x15d8)
            adev->apu_flags |= AMD_APU_IS_PICASSO;
        if (adev->rev_id >= 0x8)
            adev->apu_flags |= AMD_APU_IS_RAVEN2;

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
            adev->external_rev_id = adev->rev_id + 0x79;
        else if (adev->apu_flags & AMD_APU_IS_PICASSO)
            adev->external_rev_id = adev->rev_id + 0x41;
        else if (adev->rev_id == 1)
            adev->external_rev_id = adev->rev_id + 0x20;
        else
            adev->external_rev_id = adev->rev_id + 0x01;

        if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
            adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                AMD_CG_SUPPORT_GFX_MGLS |
                AMD_CG_SUPPORT_GFX_CP_LS |
                AMD_CG_SUPPORT_GFX_3D_CGCG |
                AMD_CG_SUPPORT_GFX_3D_CGLS |
                AMD_CG_SUPPORT_GFX_CGCG |
                AMD_CG_SUPPORT_GFX_CGLS |
                AMD_CG_SUPPORT_BIF_LS |
                AMD_CG_SUPPORT_HDP_LS |
                AMD_CG_SUPPORT_MC_MGCG |
                AMD_CG_SUPPORT_MC_LS |
                AMD_CG_SUPPORT_SDMA_MGCG |
                AMD_CG_SUPPORT_SDMA_LS |
                AMD_CG_SUPPORT_VCN_MGCG;

            adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
        } else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
            adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                AMD_CG_SUPPORT_GFX_MGLS |
                AMD_CG_SUPPORT_GFX_CP_LS |
                AMD_CG_SUPPORT_GFX_3D_CGCG |
                AMD_CG_SUPPORT_GFX_3D_CGLS |
                AMD_CG_SUPPORT_GFX_CGCG |
                AMD_CG_SUPPORT_GFX_CGLS |
                AMD_CG_SUPPORT_BIF_LS |
                AMD_CG_SUPPORT_HDP_LS |
                AMD_CG_SUPPORT_MC_MGCG |
                AMD_CG_SUPPORT_MC_LS |
                AMD_CG_SUPPORT_SDMA_MGCG |
                AMD_CG_SUPPORT_SDMA_LS;

            adev->pg_flags = AMD_PG_SUPPORT_SDMA |
                AMD_PG_SUPPORT_MMHUB |
                AMD_PG_SUPPORT_VCN;
        } else {
            adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                AMD_CG_SUPPORT_GFX_MGLS |
                AMD_CG_SUPPORT_GFX_RLC_LS |
                AMD_CG_SUPPORT_GFX_CP_LS |
                AMD_CG_SUPPORT_GFX_3D_CGCG |
                AMD_CG_SUPPORT_GFX_3D_CGLS |
                AMD_CG_SUPPORT_GFX_CGCG |
                AMD_CG_SUPPORT_GFX_CGLS |
                AMD_CG_SUPPORT_BIF_MGCG |
                AMD_CG_SUPPORT_BIF_LS |
                AMD_CG_SUPPORT_HDP_MGCG |
                AMD_CG_SUPPORT_HDP_LS |
                AMD_CG_SUPPORT_DRM_MGCG |
                AMD_CG_SUPPORT_DRM_LS |
                AMD_CG_SUPPORT_MC_MGCG |
                AMD_CG_SUPPORT_MC_LS |
                AMD_CG_SUPPORT_SDMA_MGCG |
                AMD_CG_SUPPORT_SDMA_LS |
                AMD_CG_SUPPORT_VCN_MGCG;

            adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
        }
        break;
    case CHIP_ARCTURUS:
        adev->asic_funcs = &vega20_asic_funcs;
        adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
            AMD_CG_SUPPORT_GFX_MGLS |
            AMD_CG_SUPPORT_GFX_CGCG |
            AMD_CG_SUPPORT_GFX_CGLS |
            AMD_CG_SUPPORT_GFX_CP_LS |
            AMD_CG_SUPPORT_HDP_MGCG |
            AMD_CG_SUPPORT_HDP_LS |
            AMD_CG_SUPPORT_SDMA_MGCG |
            AMD_CG_SUPPORT_SDMA_LS |
            AMD_CG_SUPPORT_MC_MGCG |
            AMD_CG_SUPPORT_MC_LS |
            AMD_CG_SUPPORT_IH_CG |
            AMD_CG_SUPPORT_VCN_MGCG |
            AMD_CG_SUPPORT_JPEG_MGCG;
        adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
        adev->external_rev_id = adev->rev_id + 0x32;
        break;
    case CHIP_RENOIR:
        adev->asic_funcs = &soc15_asic_funcs;
        if ((adev->pdev->device == 0x1636) ||
            (adev->pdev->device == 0x164c))
            adev->apu_flags |= AMD_APU_IS_RENOIR;
        else
            adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;

        if (adev->apu_flags & AMD_APU_IS_RENOIR)
            adev->external_rev_id = adev->rev_id + 0x91;
        else
            adev->external_rev_id = adev->rev_id + 0xa1;
        adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
            AMD_CG_SUPPORT_GFX_MGLS |
            AMD_CG_SUPPORT_GFX_3D_CGCG |
            AMD_CG_SUPPORT_GFX_3D_CGLS |
            AMD_CG_SUPPORT_GFX_CGCG |
            AMD_CG_SUPPORT_GFX_CGLS |
            AMD_CG_SUPPORT_GFX_CP_LS |
            AMD_CG_SUPPORT_MC_MGCG |
            AMD_CG_SUPPORT_MC_LS |
            AMD_CG_SUPPORT_SDMA_MGCG |
            AMD_CG_SUPPORT_SDMA_LS |
            AMD_CG_SUPPORT_BIF_LS |
            AMD_CG_SUPPORT_HDP_LS |
            AMD_CG_SUPPORT_VCN_MGCG |
            AMD_CG_SUPPORT_JPEG_MGCG |
            AMD_CG_SUPPORT_IH_CG |
            AMD_CG_SUPPORT_ATHUB_LS |
            AMD_CG_SUPPORT_ATHUB_MGCG |
            AMD_CG_SUPPORT_DF_MGCG;
        adev->pg_flags = AMD_PG_SUPPORT_SDMA |
            AMD_PG_SUPPORT_VCN |
            AMD_PG_SUPPORT_JPEG |
            AMD_PG_SUPPORT_VCN_DPG;
        break;
    default:
        /* FIXME: not supported yet */
        return -EINVAL;
    }

    if (amdgpu_sriov_vf(adev)) {
        amdgpu_virt_init_setting(adev);
        xgpu_ai_mailbox_set_irq_funcs(adev);
    }

    return 0;
}
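
/*
 * Late init: request the SR-IOV mailbox interrupt, clear any stale HDP
 * RAS error counts, and run the deferred NBIO RAS setup.
 */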

static int soc15_common_late_init(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    int r = 0;

    if (amdgpu_sriov_vf(adev))
        xgpu_ai_mailbox_get_irq(adev);

    if (adev->hdp.funcs->reset_ras_error_count)
        adev->hdp.funcs->reset_ras_error_count(adev);

    if (adev->nbio.funcs->ras_late_init)
        r = adev->nbio.funcs->ras_late_init(adev);

    return r;
}

static int soc15_common_sw_init(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    if (amdgpu_sriov_vf(adev))
        xgpu_ai_mailbox_add_irq_id(adev);

    adev->df.funcs->sw_init(adev);

    return 0;
}

static int soc15_common_sw_fini(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    amdgpu_nbio_ras_fini(adev);
    adev->df.funcs->sw_fini(adev);
    return 0;
}
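
/*
 * Program the SDMA and IH doorbell ranges in NBIO. Under SR-IOV the
 * hypervisor owns these ranges, so the programming is skipped there.
 */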

static void soc15_doorbell_range_init(struct amdgpu_device *adev)
{
    int i;
    struct amdgpu_ring *ring;

    /* SDMA/IH doorbell ranges are programmed by the hypervisor */
    if (!amdgpu_sriov_vf(adev)) {
        for (i = 0; i < adev->sdma.num_instances; i++) {
            ring = &adev->sdma.instance[i].ring;
            adev->nbio.funcs->sdma_doorbell_range(adev, i,
                ring->use_doorbell, ring->doorbell_index,
                adev->doorbell_index.sdma_doorbell_range);
        }

        adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
                                            adev->irq.ih.doorbell_index);
    }
}

static int soc15_common_hw_init(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    /* enable pcie gen2/3 link */
    soc15_pcie_gen3_enable(adev);
    /* enable aspm */
    soc15_program_aspm(adev);
    /* setup nbio registers */
    adev->nbio.funcs->init_registers(adev);
    /* remap HDP registers to a hole in mmio space,
     * so those registers can be exposed to process space
     */
    if (adev->nbio.funcs->remap_hdp_registers)
        adev->nbio.funcs->remap_hdp_registers(adev);

    /* enable the doorbell aperture */
    soc15_enable_doorbell_aperture(adev, true);
    /* HW doorbell routing policy: doorbell writes not in the
     * SDMA/IH/MM/ACV ranges are routed to CP, so we need to init
     * the SDMA/IH/MM/ACV doorbell ranges prior to CP IP block init
     * and ring test.
     */
    soc15_doorbell_range_init(adev);

    return 0;
}

static int soc15_common_hw_fini(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    /* disable the doorbell aperture */
    soc15_enable_doorbell_aperture(adev, false);
    if (amdgpu_sriov_vf(adev))
        xgpu_ai_mailbox_put_irq(adev);

    if (adev->nbio.ras_if &&
        amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
        if (adev->nbio.funcs->init_ras_controller_interrupt)
            amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
        if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
            amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
    }

    return 0;
}

static int soc15_common_suspend(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    return soc15_common_hw_fini(adev);
}

static int soc15_common_resume(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    return soc15_common_hw_init(adev);
}

static bool soc15_common_is_idle(void *handle)
{
    return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
    return 0;
}

static int soc15_common_soft_reset(void *handle)
{
    return 0;
}
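
/*
 * Bits 24-31 of MP0_MISC_CGTT_CTRL0 are likely the clock-gating
 * soft-override bits for the DRM block: clearing them (below) enables
 * medium grain clock gating, setting them forces the clocks on.
 */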

static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
    uint32_t def, data;

    def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

    if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
        data &= ~(0x01000000 |
                  0x02000000 |
                  0x04000000 |
                  0x08000000 |
                  0x10000000 |
                  0x20000000 |
                  0x40000000 |
                  0x80000000);
    else
        data |= (0x01000000 |
                 0x02000000 |
                 0x04000000 |
                 0x08000000 |
                 0x10000000 |
                 0x20000000 |
                 0x40000000 |
                 0x80000000);

    if (def != data)
        WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}

static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
    uint32_t def, data;

    def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

    if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
        data |= 1;
    else
        data &= ~1;

    if (def != data)
        WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}

static int soc15_common_set_clockgating_state(void *handle,
                                              enum amd_clockgating_state state)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    if (amdgpu_sriov_vf(adev))
        return 0;

    switch (adev->asic_type) {
    case CHIP_VEGA10:
    case CHIP_VEGA12:
    case CHIP_VEGA20:
        adev->nbio.funcs->update_medium_grain_clock_gating(adev,
                state == AMD_CG_STATE_GATE);
        adev->nbio.funcs->update_medium_grain_light_sleep(adev,
                state == AMD_CG_STATE_GATE);
        adev->hdp.funcs->update_clock_gating(adev,
                state == AMD_CG_STATE_GATE);
        soc15_update_drm_clock_gating(adev,
                state == AMD_CG_STATE_GATE);
        soc15_update_drm_light_sleep(adev,
                state == AMD_CG_STATE_GATE);
        adev->smuio.funcs->update_rom_clock_gating(adev,
                state == AMD_CG_STATE_GATE);
        adev->df.funcs->update_medium_grain_clock_gating(adev,
                state == AMD_CG_STATE_GATE);
        break;
    case CHIP_RAVEN:
    case CHIP_RENOIR:
        adev->nbio.funcs->update_medium_grain_clock_gating(adev,
                state == AMD_CG_STATE_GATE);
        adev->nbio.funcs->update_medium_grain_light_sleep(adev,
                state == AMD_CG_STATE_GATE);
        adev->hdp.funcs->update_clock_gating(adev,
                state == AMD_CG_STATE_GATE);
        soc15_update_drm_clock_gating(adev,
                state == AMD_CG_STATE_GATE);
        soc15_update_drm_light_sleep(adev,
                state == AMD_CG_STATE_GATE);
        break;
    case CHIP_ARCTURUS:
        adev->hdp.funcs->update_clock_gating(adev,
                state == AMD_CG_STATE_GATE);
        break;
    default:
        break;
    }
    return 0;
}

static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    int data;

    if (amdgpu_sriov_vf(adev))
        *flags = 0;

    adev->nbio.funcs->get_clockgating_state(adev, flags);

    adev->hdp.funcs->get_clock_gating_state(adev, flags);

    /* AMD_CG_SUPPORT_DRM_MGCG */
    data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
    if (!(data & 0x01000000))
        *flags |= AMD_CG_SUPPORT_DRM_MGCG;

    /* AMD_CG_SUPPORT_DRM_LS */
    data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
    if (data & 0x1)
        *flags |= AMD_CG_SUPPORT_DRM_LS;

    /* AMD_CG_SUPPORT_ROM_MGCG */
    adev->smuio.funcs->get_clock_gating_state(adev, flags);

    adev->df.funcs->get_clockgating_state(adev, flags);
}

static int soc15_common_set_powergating_state(void *handle,
                                              enum amd_powergating_state state)
{
    /* todo */
    return 0;
}

const struct amd_ip_funcs soc15_common_ip_funcs = {
    .name = "soc15_common",
    .early_init = soc15_common_early_init,
    .late_init = soc15_common_late_init,
    .sw_init = soc15_common_sw_init,
    .sw_fini = soc15_common_sw_fini,
    .hw_init = soc15_common_hw_init,
    .hw_fini = soc15_common_hw_fini,
    .suspend = soc15_common_suspend,
    .resume = soc15_common_resume,
    .is_idle = soc15_common_is_idle,
    .wait_for_idle = soc15_common_wait_for_idle,
    .soft_reset = soc15_common_soft_reset,
    .set_clockgating_state = soc15_common_set_clockgating_state,
    .set_powergating_state = soc15_common_set_powergating_state,
    .get_clockgating_state = soc15_common_get_clockgating_state,
};