/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/slab.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#include "amdgpu_powerplay.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"

/*
 * Indirect register accessors
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
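
/*
 * Note on the accessors above and below: each hardware block exposes an
 * INDEX/DATA register pair.  The register offset is written to INDEX and
 * the payload is then transferred through DATA, so the two MMIO accesses
 * must not be interleaved with another user of the same pair -- hence the
 * per-pair spinlocks.  The (void)RREG32() read-backs force the posted
 * INDEX/DATA writes to reach the device before the lock is dropped.
 */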
static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	r = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	WREG32(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
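
/*
 * Golden register settings.  Each table below is a flat list of
 * {register, AND mask, value} triples consumed by
 * amdgpu_device_program_register_sequence(), which (roughly) clears the
 * masked bits of the current contents and ORs in the value; a mask of
 * 0xffffffff writes the value verbatim.
 */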

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}
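
/*
 * Only Tonga and Fiji expose an IOV identification register in the BIF.
 * When that register is absent or reads back zero, a generic
 * is_virtual_machine() check is the only remaining hint that we are
 * running under a hypervisor in passthrough mode.
 */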
static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = 0;

	if (adev->asic_type == CHIP_TONGA ||
	    adev->asic_type == CHIP_FIJI) {
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		/* bit0: 0 means pf and 1 means vf */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
		/* bit31: 0 means disable IOV and 1 means enable */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
	}

	if (reg == 0) {
		/* passthrough mode is mutually exclusive with SR-IOV mode */
		if (is_virtual_machine())
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS},
	{mmGRBM_STATUS2},
	{mmGRBM_STATUS_SE0},
	{mmGRBM_STATUS_SE1},
	{mmGRBM_STATUS_SE2},
	{mmGRBM_STATUS_SE3},
	{mmSRBM_STATUS},
	{mmSRBM_STATUS2},
	{mmSRBM_STATUS3},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STAT},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPF_STATUS},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmCP_CPC_STATUS},
	{mmGB_ADDR_CONFIG},
	{mmMC_ARB_RAMCFG},
	{mmGB_TILE_MODE0},
	{mmGB_TILE_MODE1},
	{mmGB_TILE_MODE2},
	{mmGB_TILE_MODE3},
	{mmGB_TILE_MODE4},
	{mmGB_TILE_MODE5},
	{mmGB_TILE_MODE6},
	{mmGB_TILE_MODE7},
	{mmGB_TILE_MODE8},
	{mmGB_TILE_MODE9},
	{mmGB_TILE_MODE10},
	{mmGB_TILE_MODE11},
	{mmGB_TILE_MODE12},
	{mmGB_TILE_MODE13},
	{mmGB_TILE_MODE14},
	{mmGB_TILE_MODE15},
	{mmGB_TILE_MODE16},
	{mmGB_TILE_MODE17},
	{mmGB_TILE_MODE18},
	{mmGB_TILE_MODE19},
	{mmGB_TILE_MODE20},
	{mmGB_TILE_MODE21},
	{mmGB_TILE_MODE22},
	{mmGB_TILE_MODE23},
	{mmGB_TILE_MODE24},
	{mmGB_TILE_MODE25},
	{mmGB_TILE_MODE26},
	{mmGB_TILE_MODE27},
	{mmGB_TILE_MODE28},
	{mmGB_TILE_MODE29},
	{mmGB_TILE_MODE30},
	{mmGB_TILE_MODE31},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};
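
/*
 * Entries above flagged with "true" are banked by GRBM_GFX_INDEX.  For
 * those, vi_get_register_value() below either returns the value cached
 * for the requested SE/SH instance or selects that instance before
 * reading the register directly.
 */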
static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		bool indexed = vi_allowed_read_registers[i].grbm_indexed;

		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
					       reg_offset);
		return 0;
	}
	return -EINVAL;
}
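
/*
 * While the ASIC is held in reset, mmCONFIG_MEMSIZE reads back as
 * 0xffffffff; the loop below polls it until any other value appears,
 * which signals that the chip has come back up after the PCI config
 * reset.
 */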
static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_device_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	if (r)
		return r;

	return 0;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(ixCG_ECLK_CNTL);
	tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
		 CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(ixCG_ECLK_CNTL, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{

	if (amdgpu_aspm == 0)
		return;

	/* todo */
}
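
/*
 * Doorbell writes are only forwarded to the engines once the BIF
 * doorbell aperture is enabled.  On APUs such as Carrizo this step is
 * reportedly unnecessary, which is why the helper below returns early
 * for them.
 */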
static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS	0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT	9
#define ATI_REV_ID_FUSE_MACRO__MASK	0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
};

#define CZ_REV_BRISTOL(rev)	 \
	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))

static int vi_common_early_init(void *handle)
{
	bool smc_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
		smc_enabled = true;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	/* VI uses SMC firmware loading by default */
	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	amdgpu_device_get_pcie_info(adev);

	return 0;
}

static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}
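
/*
 * The clockgating helpers below share one pattern: read the current
 * register value, set or clear the enable bits according to the
 * requested state and adev->cg_flags, and write the register back only
 * if something actually changed, sparing a register write on the
 * common no-op path.
 */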
static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (temp != data)
		WREG32(0x157a, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}
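
/*
 * On Tonga and the Polaris parts, system-level clockgating is owned by
 * the SMU rather than programmed directly.  Each PP_CG_MSG_ID() packs a
 * group, a block, the gating features the driver supports and the
 * requested state into a single message; ungating is requested by
 * sending a zero state.
 */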
static int vi_common_set_clockgating_state_by_smu(void *handle,
						  enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = AMD_CG_SUPPORT_MC_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_MC_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_MC,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = AMD_CG_SUPPORT_SDMA_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_SDMA,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = AMD_CG_SUPPORT_HDP_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_HDP,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}


	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {

		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_DRM,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {

		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_ROM,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	return 0;
}

static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
						       state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
						       state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		vi_update_drm_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		vi_common_set_clockgating_state_by_smu(adev, state);
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void vi_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};
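
/*
 * IP blocks are added in bring-up order: common first, then GMC and the
 * interrupt handler, powerplay, a display block (virtual, DC or DCE,
 * depending on configuration), GFX, SDMA and finally UVD/VCE/ACP where
 * present.  Under SR-IOV on Fiji and Tonga the multimedia blocks are
 * skipped entirely.
 */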
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	/* in early init stage, vbios code won't work */
	vi_detect_hw_virtualization(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_vi_virt_ops;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}