/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>
#include <linux/slab.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"

/*
 * Indirect registers accessor
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	r = RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	WREG32_NO_KIQ(mmPCIE_DATA, v);
	(void)RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
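/*
 * All of the accessors above follow the same index/data idiom: write the
 * register offset to an INDEX register, then read or write the matching
 * DATA register, with a spinlock held so the two accesses stay paired.
 * A minimal sketch of the idiom (illustrative only; mmFOO_INDEX and
 * mmFOO_DATA are placeholder names, not real registers):
 *
 *	spin_lock_irqsave(&lock, flags);
 *	WREG32(mmFOO_INDEX, reg);
 *	val = RREG32(mmFOO_DATA);
 *	spin_unlock_irqrestore(&lock, flags);
 */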
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}
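/*
 * Each golden-register table above is a flat array of (offset, and_mask,
 * or_value) triples.  amdgpu_device_program_register_sequence() applies
 * each triple roughly as a masked read-modify-write (a sketch, not the
 * exact common-code implementation):
 *
 *	if (and_mask == 0xffffffff)
 *		tmp = or_value;
 *	else
 *		tmp = (RREG32(reg) & ~and_mask) | (or_value & and_mask);
 *	WREG32(reg, tmp);
 */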
/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances. Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;

	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}
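/*
 * Illustrative usage (not part of this file): callers such as the
 * gfx/compute code select an instance, program the instanced registers,
 * then restore the default instance, all under adev->srbm_mutex:
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	vi_srbm_select(adev, me, pipe, queue, vmid);
 *	... program per-instance registers ...
 *	vi_srbm_select(adev, 0, 0, 0, 0);
 *	mutex_unlock(&adev->srbm_mutex);
 */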
static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = 0;

	if (adev->asic_type == CHIP_TONGA ||
	    adev->asic_type == CHIP_FIJI) {
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		/* bit0: 0 means pf and 1 means vf */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
		/* bit31: 0 means disable IOV and 1 means enable */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
	}

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}
static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS},
	{mmGRBM_STATUS2},
	{mmGRBM_STATUS_SE0},
	{mmGRBM_STATUS_SE1},
	{mmGRBM_STATUS_SE2},
	{mmGRBM_STATUS_SE3},
	{mmSRBM_STATUS},
	{mmSRBM_STATUS2},
	{mmSRBM_STATUS3},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STAT},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPF_STATUS},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmCP_CPC_STATUS},
	{mmGB_ADDR_CONFIG},
	{mmMC_ARB_RAMCFG},
	{mmGB_TILE_MODE0},
	{mmGB_TILE_MODE1},
	{mmGB_TILE_MODE2},
	{mmGB_TILE_MODE3},
	{mmGB_TILE_MODE4},
	{mmGB_TILE_MODE5},
	{mmGB_TILE_MODE6},
	{mmGB_TILE_MODE7},
	{mmGB_TILE_MODE8},
	{mmGB_TILE_MODE9},
	{mmGB_TILE_MODE10},
	{mmGB_TILE_MODE11},
	{mmGB_TILE_MODE12},
	{mmGB_TILE_MODE13},
	{mmGB_TILE_MODE14},
	{mmGB_TILE_MODE15},
	{mmGB_TILE_MODE16},
	{mmGB_TILE_MODE17},
	{mmGB_TILE_MODE18},
	{mmGB_TILE_MODE19},
	{mmGB_TILE_MODE20},
	{mmGB_TILE_MODE21},
	{mmGB_TILE_MODE22},
	{mmGB_TILE_MODE23},
	{mmGB_TILE_MODE24},
	{mmGB_TILE_MODE25},
	{mmGB_TILE_MODE26},
	{mmGB_TILE_MODE27},
	{mmGB_TILE_MODE28},
	{mmGB_TILE_MODE29},
	{mmGB_TILE_MODE30},
	{mmGB_TILE_MODE31},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};

static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		bool indexed = vi_allowed_read_registers[i].grbm_indexed;

		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
					       reg_offset);
		return 0;
	}
	return -EINVAL;
}
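/*
 * vi_read_register() backs userspace register queries (such as the
 * AMDGPU_INFO_READ_MMR_REG ioctl): only offsets present in
 * vi_allowed_read_registers[] may be read.  A hypothetical caller
 * (sketch only):
 *
 *	u32 val;
 *
 *	if (!vi_read_register(adev, 0xffffffff, 0xffffffff,
 *			      mmGRBM_STATUS, &val))
 *		...;	val now holds GRBM_STATUS, read without SE/SH banking
 */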
static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_device_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}

int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
		*cap = false;
		return -ENOENT;
	}

	return pp_funcs->get_asic_baco_capability(pp_handle, cap);
}

int smu7_asic_baco_reset(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->get_asic_baco_state ||
	    !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	/* enter BACO state */
	if (pp_funcs->set_asic_baco_state(pp_handle, 1))
		return -EIO;

	/* exit BACO state */
	if (pp_funcs->set_asic_baco_state(pp_handle, 0))
		return -EIO;

	dev_info(adev->dev, "GPU BACO reset\n");

	return 0;
}
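/*
 * BACO (Bus Active, Chip Off) reset: entering and then immediately
 * leaving BACO power-cycles the chip while the PCIe link stays trained,
 * which is why smu7_asic_baco_reset() above simply drives the BACO state
 * to 1 and back to 0 through the powerplay callbacks.
 */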
/**
 * vi_asic_pci_config_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Use PCI Config method to reset the GPU.
 *
 * Returns 0 for success.
 */
static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

static enum amd_reset_method
vi_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset;

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		smu7_asic_get_baco_capability(adev, &baco_reset);
		break;
	default:
		baco_reset = false;
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_LEGACY;
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
		r = smu7_asic_baco_reset(adev);
	else
		r = vi_asic_pci_config_reset(adev);

	return r;
}

static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);

	if (adev->flags & AMD_IS_APU)
		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
	else
		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
			 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		tmp = RREG32_SMC(status_reg);
		if (adev->flags & AMD_IS_APU) {
			if (tmp & 0x10000)
				break;
		} else {
			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
				break;
		}
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;
	return 0;
}

#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
#define ixGNB_CLK1_STATUS   0xD822010C
#define ixGNB_CLK2_DFS_CNTL 0xD8220110
#define ixGNB_CLK2_STATUS   0xD822012C
#define ixGNB_CLK3_DFS_CNTL 0xD8220130
#define ixGNB_CLK3_STATUS   0xD822014C

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	if (adev->flags & AMD_IS_APU) {
		r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
		if (r)
			return r;
	} else {
		r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
		if (r)
			return r;
	}

	return 0;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;
	u32 reg_ctrl;
	u32 reg_status;
	u32 status_mask;
	u32 reg_mask;

	if (adev->flags & AMD_IS_APU) {
		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
		reg_status = ixGNB_CLK3_STATUS;
		status_mask = 0x00010000;
		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	} else {
		reg_ctrl = ixCG_ECLK_CNTL;
		reg_status = ixCG_ECLK_STATUS;
		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	}

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(reg_ctrl);
	tmp &= ~reg_mask;
	tmp |= dividers.post_divider;
	WREG32_SMC(reg_ctrl, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
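/*
 * vi_set_uvd_clocks() and vi_set_vce_clocks() share one shape: ask the
 * VBIOS (via atombios) for divider settings that realize the target
 * clock, program the post divider into the control register, then poll
 * the status register until the divider change is acknowledged, giving
 * up after 100 * 10ms.  APUs use the GNB_CLK* registers defined above
 * instead of the CG_*CLK CNTL/STATUS pairs, with a fixed 0x10000 status
 * bit.
 */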
static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS	0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT	9
#define ATI_REV_ID_FUSE_MACRO__MASK	0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
	}
}

static void vi_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_DEBUG0, 1);
		RREG32(mmHDP_DEBUG0);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
	}
}

static bool vi_need_full_reset(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* CZ has hang issues with full reset at the moment */
		return false;
	case CHIP_FIJI:
	case CHIP_TONGA:
		/* XXX: soft reset should work on fiji and tonga */
		return true;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
	default:
		/* change this when we support soft reset */
		return true;
	}
}

static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
			      uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}

static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e the number of replays */
	return (nak_r + nak_g);
}

static bool vi_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 clock_cntl, pc;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* check if the SMC is already running */
	clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	pc = RREG32_SMC(ixSMC_PC_C);
	if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
	    (0x20100 <= pc))
		return true;

	return false;
}
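/*
 * The two counters returned by vi_get_pcie_usage() track received
 * messages (event 40) and posted requests sent (event 104) over a fixed
 * one-second sampling window.  Consumers (for example the pcie_bw sysfs
 * file, assuming the current amdgpu_pm behavior) combine these counts
 * with the link's maximum payload size to estimate PCIe bandwidth.
 */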
static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.reset_method = &vi_asic_reset_method,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
	.flush_hdp = &vi_flush_hdp,
	.invalidate_hdp = &vi_invalidate_hdp,
	.need_full_reset = &vi_need_full_reset,
	.init_doorbell_index = &legacy_doorbell_index_init,
	.get_pcie_usage = &vi_get_pcie_usage,
	.need_reset_on_init = &vi_need_reset_on_init,
	.get_pcie_replay_count = &vi_get_pcie_replay_count,
};

#define CZ_REV_BRISTOL(rev) \
	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))

static int vi_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_VEGAM:
		adev->cg_flags = 0;
		/*AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;*/
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x6E;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}
static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (temp != data)
		WREG32(0x157a, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}
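/*
 * All of the vi_update_*() helpers above share one read-modify-write
 * pattern: read the current value, set or clear the gating bits only
 * when the feature is both requested (enable) and supported
 * (adev->cg_flags), and write the register back only if the value
 * actually changed, keeping the common no-op path free of register
 * writes.
 */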
static int vi_common_set_clockgating_state_by_smu(void *handle,
						  enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_MC,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_SDMA,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_HDP,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_DRM,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_ROM,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	return 0;
}
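/*
 * Each block above follows the same recipe: pp_support_state advertises
 * which gating types (LS and/or CG) the ASIC supports for that block,
 * pp_state carries the requested state, and an UNGATE request forces
 * pp_state to 0 so the SMU turns gating off while the supported mask is
 * preserved.  Both are packed into a single SMU message with
 * PP_CG_MSG_ID() and dispatched via amdgpu_dpm_set_clockgating_by_smu().
 */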
static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
						       state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
						       state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		vi_update_drm_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		vi_common_set_clockgating_state_by_smu(adev, state);
		break;
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void vi_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	/* in early init stage, vbios code won't work */
	vi_detect_hw_virtualization(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_vi_virt_ops;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}
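/*
 * Note on ordering: amdgpu initializes IP blocks in the order they are
 * added above, so the common block comes first, followed by the memory
 * controller (GMC) and interrupt handler (IH) that the GFX/SDMA engines
 * depend on, with display and multimedia (UVD/VCE, and ACP on APUs) last.
 */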
void legacy_doorbell_index_init(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
}