/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
#include "nbio/nbio_7_0_smn.h"
#include "mp/mp_9_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>

#define mmMP0_MISC_CGTT_CTRL0				0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX			0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL			0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX		0

/* for Vega20 register name change */
#define mmHDP_MEM_POWER_CTRL				0x00d4
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK	0x00000001L
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK	0x00000002L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK	0x00010000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK	0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX			0

/*
 * Indirect register accessors
 */
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address); /* read back to post the index write */
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}
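
/*
 * The write path mirrors the read path: program the index register, read it
 * back so the index write is posted, then write (and read back) the data
 * register before dropping the lock. The read-backs are what make this safe
 * against posted writes; presumably any read from the same aperture would do.
 */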
static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
	return r;
}

static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}
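
/*
 * CONFIG_MEMSIZE is read through NBIO; besides reporting the on-board memory
 * size it doubles as a liveness check, since it reads back as 0xffffffff
 * while the ASIC is still in reset (see soc15_asic_mode1_reset() below).
 */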
static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_memsize(adev);
}

static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

void soc15_grbm_select(struct amdgpu_device *adev,
		       u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}

static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}

static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};

static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		/* return the copies cached at init time rather than
		 * touching the GC block again
		 */
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
			return adev->gfx.config.db_debug2;
		return RREG32(reg_offset);
	}
}
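
/*
 * Register reads requested from userspace are only honoured when the offset
 * matches an entry in the allowlist above; everything else fails with
 * -EINVAL, so arbitrary MMIO cannot be probed from process context.
 */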
static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
		en = &soc15_allowed_read_registers[i];
		if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
					+ en->reg_offset))
			continue;

		*value = soc15_get_register_value(adev,
				soc15_allowed_read_registers[i].grbm_indexed,
				se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void soc15_program_register_sequence(struct amdgpu_device *adev,
				     const struct soc15_reg_golden *regs,
				     const u32 array_size)
{
	const struct soc15_reg_golden *entry;
	u32 tmp, reg;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

		if (entry->and_mask == 0xffffffff) {
			tmp = entry->or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~(entry->and_mask);
			tmp |= (entry->or_mask & entry->and_mask);
		}

		if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
			WREG32_RLC(reg, tmp);
		else
			WREG32(reg, tmp);
	}
}

static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio_funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
		*cap = false;
		return -ENOENT;
	}

	return pp_funcs->get_asic_baco_capability(pp_handle, cap);
}

static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->get_asic_baco_state ||
	    !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	/* enter BACO state */
	if (pp_funcs->set_asic_baco_state(pp_handle, 1))
		return -EIO;

	/* exit BACO state */
	if (pp_funcs->set_asic_baco_state(pp_handle, 0))
		return -EIO;

	dev_info(adev->dev, "GPU BACO reset\n");

	adev->in_baco_reset = 1;

	return 0;
}
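
/*
 * Full-ASIC reset comes in two flavours: BACO ("Bus Active, Chip Off"),
 * which power-cycles the chip through the SMU while the PCIe link stays
 * trained, and mode1 reset, which goes through the PSP. The dispatcher below
 * prefers BACO where the platform advertises it and falls back to mode1.
 */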
static int soc15_asic_reset(struct amdgpu_device *adev)
{
	int ret;
	bool baco_reset;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
		soc15_asic_get_baco_capability(adev, &baco_reset);
		break;
	case CHIP_VEGA20:
		if (adev->psp.sos_fw_version >= 0x80067)
			soc15_asic_get_baco_capability(adev, &baco_reset);
		else
			baco_reset = false;
		if (baco_reset) {
			struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
			struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

			if (hive || (ras && ras->supported))
				baco_reset = false;
		}
		break;
	default:
		baco_reset = false;
		break;
	}

	if (baco_reset)
		ret = soc15_asic_baco_reset(adev);
	else
		ret = soc15_asic_mode1_reset(adev);

	return ret;
}

/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	return 0;
}*/

static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}

static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void soc15_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};

static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_rev_id(adev);
}

int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	if (adev->asic_type == CHIP_VEGA20)
		adev->gmc.xgmi.supported = true;

	if (adev->flags & AMD_IS_APU)
		adev->nbio_funcs = &nbio_v7_0_funcs;
	else if (adev->asic_type == CHIP_VEGA20)
		adev->nbio_funcs = &nbio_v7_4_funcs;
	else
		adev->nbio_funcs = &nbio_v6_1_funcs;

	if (adev->asic_type == CHIP_VEGA20)
		adev->df_funcs = &df_v3_6_funcs;
	else
		adev->df_funcs = &df_v1_7_funcs;
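
	/* rev_id is read through NBIO, so nbio_funcs must be set up first */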
	adev->rev_id = soc15_get_rev_id(adev);
	adev->nbio_funcs->detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_ai_virt_ops;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		/* For Vega10 SR-IOV, PSP needs to be initialized before IH */
		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
		}
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			if (is_support_sw_smu(adev))
				amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
			else
				amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		}
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
		if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
			amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
		}
		break;
	case CHIP_RAVEN:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
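		/* Raven uses VCN for media, which replaces the separate
		 * UVD/VCE blocks found on the Vega parts above.
		 */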
		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio_funcs->hdp_flush(adev, ring);
}

static void soc15_invalidate_hdp(struct amdgpu_device *adev,
				 struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	else
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}

static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
	/* change this when we implement soft reset */
	return true;
}

static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				 uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	uint32_t tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the two events we want to watch:
	 * event 40 counts received messages, event 104 counts posted
	 * requests sent
	 */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}

static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	/* Just return false for soc15 GPUs.  Reset does not seem to
	 * be necessary.
	 */
	if (!amdgpu_passthrough(adev))
		return false;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign-of-life register to confirm that the sys
	 * driver and sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}
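
/*
 * Every NAK on the PCIe link forces the transmitter to replay TLPs, so the
 * sum of NAKs received and NAKs generated gives the total number of replays
 * the link has performed.
 */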
static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e. the number of replays */
	return (nak_r + nak_g);
}

static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega10_doorbell_index_init,
	.get_pcie_usage = &soc15_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
};

static const struct amdgpu_asic_funcs vega20_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega20_doorbell_index_init,
	.get_pcie_usage = &soc15_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
};

static int soc15_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;

	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_VEGA12:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_VEGA20:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_RAVEN:
		adev->asic_funcs = &soc15_asic_funcs;
		if (adev->rev_id >= 0x8)
			adev->external_rev_id = adev->rev_id + 0x79; /* Raven2 */
		else if (adev->pdev->device == 0x15d8)
			adev->external_rev_id = adev->rev_id + 0x41; /* Picasso */
		else if (adev->rev_id == 1)
			adev->external_rev_id = adev->rev_id + 0x20;
		else
			adev->external_rev_id = adev->rev_id + 0x01;

		if (adev->rev_id >= 0x8) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		} else if (adev->pdev->device == 0x15d8) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
				AMD_PG_SUPPORT_MMHUB |
				AMD_PG_SUPPORT_VCN |
				AMD_PG_SUPPORT_VCN_DPG;
		} else {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_RLC_LS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_MGCG |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_MGCG |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_DRM_MGCG |
				AMD_CG_SUPPORT_DRM_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		}

		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_RLC_SMU_HS;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	return 0;
}

static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	adev->df_funcs->sw_init(adev);

	return 0;
}

static int soc15_common_sw_fini(void *handle)
{
	return 0;
}

static void soc15_doorbell_range_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ring *ring;

	/* SDMA doorbell range is programmed by the host driver under
	 * SR-IOV, so skip it there, both because it is already done and
	 * to avoid register programming violations.
	 */
	if (!amdgpu_virt_support_skip_setting(adev)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			ring = &adev->sdma.instance[i].ring;
			adev->nbio_funcs->sdma_doorbell_range(adev, i,
				ring->use_doorbell, ring->doorbell_index,
				adev->doorbell_index.sdma_doorbell_range);
		}
	}

	adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
					    adev->irq.ih.doorbell_index);
}

static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio_funcs->init_registers(adev);
	/* remap HDP registers to a hole in MMIO space so they can be
	 * exposed to process address space
	 */
	if (adev->nbio_funcs->remap_hdp_registers)
		adev->nbio_funcs->remap_hdp_registers(adev);

	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);
	/* HW doorbell routing policy: doorbell writes that do not fall in
	 * the SDMA/IH/MM/ACV ranges are routed to CP, so those ranges must
	 * be initialized before CP IP block init and ring test.
	 */
	soc15_doorbell_range_init(adev);

	return 0;
}
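
/*
 * hw_fini undoes what hw_init set up; the suspend and resume callbacks
 * below simply reuse this hw_fini/hw_init pair rather than carrying their
 * own teardown and bring-up paths.
 */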
static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	return 0;
}

static int soc15_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_fini(adev);
}

static int soc15_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_init(adev);
}

static bool soc15_common_is_idle(void *handle)
{
	return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc15_common_soft_reset(void *handle)
{
	return 0;
}

static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	if (adev->asic_type == CHIP_VEGA20) {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
		else
			data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
				  HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
				  HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
				  HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);

		if (def != data)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
	} else {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
		else
			data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

		if (def != data)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
	}
}

static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

	/* No sh_mask header exists for this MP0 register (see the local
	 * #define at the top of the file), hence the raw masks; bits 24-31
	 * appear to be per-client clock-gating override bits.
	 */
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
		data &= ~(0x01000000 |
			  0x02000000 |
			  0x04000000 |
			  0x08000000 |
			  0x10000000 |
			  0x20000000 |
			  0x40000000 |
			  0x80000000);
	else
		data |= (0x01000000 |
			 0x02000000 |
			 0x04000000 |
			 0x08000000 |
			 0x10000000 |
			 0x20000000 |
			 0x40000000 |
			 0x80000000);

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}

static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}

static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
}
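
/*
 * All of the update_* helpers above share the same read-modify-write shape:
 * snapshot the register into both def and data, toggle the bits, and only
 * write back when something actually changed, which keeps the common no-op
 * path free of register writes.
 */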

static int soc15_common_set_clockgating_state(void *handle,
					      enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->df_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_RAVEN:
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}
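
/*
 * Unlike set_clockgating_state, which acts on the requested state, the
 * flags reported here are derived from live register reads, so they
 * reflect what the hardware is actually doing.
 */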
static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio_funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_DRM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
	if (!(data & 0x01000000))
		*flags |= AMD_CG_SUPPORT_DRM_MGCG;

	/* AMD_CG_SUPPORT_DRM_LS */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
	if (data & 0x1)
		*flags |= AMD_CG_SUPPORT_DRM_LS;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;

	adev->df_funcs->get_clockgating_state(adev, flags);
}

static int soc15_common_set_powergating_state(void *handle,
					      enum amd_powergating_state state)
{
	/* todo */
	return 0;
}

const struct amd_ip_funcs soc15_common_ip_funcs = {
	.name = "soc15_common",
	.early_init = soc15_common_early_init,
	.late_init = soc15_common_late_init,
	.sw_init = soc15_common_sw_init,
	.sw_fini = soc15_common_sw_fini,
	.hw_init = soc15_common_hw_init,
	.hw_fini = soc15_common_hw_fini,
	.suspend = soc15_common_suspend,
	.resume = soc15_common_resume,
	.is_idle = soc15_common_is_idle,
	.wait_for_idle = soc15_common_wait_for_idle,
	.soft_reset = soc15_common_soft_reset,
	.set_clockgating_state = soc15_common_set_clockgating_state,
	.set_powergating_state = soc15_common_set_powergating_state,
	.get_clockgating_state = soc15_common_get_clockgating_state,
};