/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/slab.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#include "amdgpu_powerplay.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"
#include "mxgpu_vi.h"

/*
 * Indirect registers accessor
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	r = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	WREG32(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}


/* golden register init tables: { register offset, AND mask, value } triplets
 * consumed by amdgpu_program_register_sequence()
 */
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances. Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
	/* bit0: 0 means pf and 1 means vf */
	/* bit31: 0 means disable IOV and 1 means enable */
	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes SR-IOV mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

static const struct amdgpu_allowed_register_entry
vi_allowed_read_registers[] = {
	{mmGRBM_STATUS},
	{mmGRBM_STATUS2},
	{mmGRBM_STATUS_SE0},
	{mmGRBM_STATUS_SE1},
	{mmGRBM_STATUS_SE2},
	{mmGRBM_STATUS_SE3},
	{mmSRBM_STATUS},
	{mmSRBM_STATUS2},
	{mmSRBM_STATUS3},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STAT},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPF_STATUS},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmCP_CPC_STATUS},
	{mmGB_ADDR_CONFIG},
	{mmMC_ARB_RAMCFG},
	{mmGB_TILE_MODE0},
	{mmGB_TILE_MODE1},
	{mmGB_TILE_MODE2},
	{mmGB_TILE_MODE3},
	{mmGB_TILE_MODE4},
	{mmGB_TILE_MODE5},
	{mmGB_TILE_MODE6},
	{mmGB_TILE_MODE7},
	{mmGB_TILE_MODE8},
	{mmGB_TILE_MODE9},
	{mmGB_TILE_MODE10},
	{mmGB_TILE_MODE11},
	{mmGB_TILE_MODE12},
	{mmGB_TILE_MODE13},
	{mmGB_TILE_MODE14},
	{mmGB_TILE_MODE15},
	{mmGB_TILE_MODE16},
	{mmGB_TILE_MODE17},
	{mmGB_TILE_MODE18},
	{mmGB_TILE_MODE19},
	{mmGB_TILE_MODE20},
	{mmGB_TILE_MODE21},
	{mmGB_TILE_MODE22},
	{mmGB_TILE_MODE23},
	{mmGB_TILE_MODE24},
	{mmGB_TILE_MODE25},
	{mmGB_TILE_MODE26},
	{mmGB_TILE_MODE27},
	{mmGB_TILE_MODE28},
	{mmGB_TILE_MODE29},
	{mmGB_TILE_MODE30},
	{mmGB_TILE_MODE31},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};

static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ?
			0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		bool indexed = vi_allowed_read_registers[i].grbm_indexed;

		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
					       reg_offset);
		return 0;
	}
	return -EINVAL;
}

static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if
		    (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	if (r)
		return r;

	return 0;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(ixCG_ECLK_CNTL);
	tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
		 CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(ixCG_ECLK_CNTL, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{

	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN,
				    BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS	0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT	9
#define ATI_REV_ID_FUSE_MACRO__MASK	0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
};

#define CZ_REV_BRISTOL(rev) \
	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))

static int vi_common_early_init(void *handle)
{
	bool smc_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
		smc_enabled = true;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
				AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	/* vi uses smc load by default */
	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	amdgpu_get_pcie_info(adev);

	return 0;
}

static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (temp != data)
		WREG32(0x157a, data);
}


static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}

static int vi_common_set_clockgating_state_by_smu(void *handle,
						  enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	void *pp_handle = adev->powerplay.pp_handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = AMD_CG_SUPPORT_MC_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_MC_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_MC,
				      pp_support_state,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = AMD_CG_SUPPORT_SDMA_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_SDMA,
				      pp_support_state,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = AMD_CG_SUPPORT_HDP_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_HDP,
				      pp_support_state,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}


	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {

		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_DRM,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {

		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_ROM,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}
	return 0;
}

static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		vi_common_set_clockgating_state_by_smu(adev, state);
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void vi_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};

int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	/* in early init stage, vbios code won't work */
	vi_detect_hw_virtualization(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_vi_virt_ops;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}