/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/slab.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#include "amdgpu_powerplay.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"
#include "mxgpu_vi.h"

/*
 * Indirect registers accessor
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	r = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	WREG32(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}


static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances. Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
	/* bit0: 0 means pf and 1 means vf */
	/* bit31: 0 means disable IOV and 1 means enable */
	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

static const struct amdgpu_allowed_register_entry
tonga_allowed_read_registers[] = {
};

static const struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
};

static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS, false},
	{mmGRBM_STATUS2, false},
	{mmGRBM_STATUS_SE0, false},
	{mmGRBM_STATUS_SE1, false},
	{mmGRBM_STATUS_SE2, false},
	{mmGRBM_STATUS_SE3, false},
	{mmSRBM_STATUS, false},
	{mmSRBM_STATUS2, false},
	{mmSRBM_STATUS3, false},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
	{mmCP_STAT, false},
	{mmCP_STALLED_STAT1, false},
	{mmCP_STALLED_STAT2, false},
	{mmCP_STALLED_STAT3, false},
	{mmCP_CPF_BUSY_STAT, false},
	{mmCP_CPF_STALLED_STAT1, false},
	{mmCP_CPF_STATUS, false},
	{mmCP_CPC_BUSY_STAT, false},
	{mmCP_CPC_STALLED_STAT1, false},
	{mmCP_CPC_STATUS, false},
	{mmGB_ADDR_CONFIG, false},
	{mmMC_ARB_RAMCFG, false},
	{mmGB_TILE_MODE0, false},
	{mmGB_TILE_MODE1, false},
	{mmGB_TILE_MODE2, false},
	{mmGB_TILE_MODE3, false},
	{mmGB_TILE_MODE4, false},
	{mmGB_TILE_MODE5, false},
	{mmGB_TILE_MODE6, false},
	{mmGB_TILE_MODE7, false},
	{mmGB_TILE_MODE8, false},
	{mmGB_TILE_MODE9, false},
	{mmGB_TILE_MODE10, false},
	{mmGB_TILE_MODE11, false},
	{mmGB_TILE_MODE12, false},
	{mmGB_TILE_MODE13, false},
	{mmGB_TILE_MODE14, false},
	{mmGB_TILE_MODE15, false},
	{mmGB_TILE_MODE16, false},
	{mmGB_TILE_MODE17, false},
	{mmGB_TILE_MODE18, false},
	{mmGB_TILE_MODE19, false},
	{mmGB_TILE_MODE20, false},
	{mmGB_TILE_MODE21, false},
	{mmGB_TILE_MODE22, false},
	{mmGB_TILE_MODE23, false},
	{mmGB_TILE_MODE24, false},
	{mmGB_TILE_MODE25, false},
	{mmGB_TILE_MODE26, false},
	{mmGB_TILE_MODE27, false},
	{mmGB_TILE_MODE28, false},
	{mmGB_TILE_MODE29, false},
	{mmGB_TILE_MODE30, false},
	{mmGB_TILE_MODE31, false},
	{mmGB_MACROTILE_MODE0, false},
	{mmGB_MACROTILE_MODE1, false},
	{mmGB_MACROTILE_MODE2, false},
	{mmGB_MACROTILE_MODE3, false},
	{mmGB_MACROTILE_MODE4, false},
	{mmGB_MACROTILE_MODE5, false},
	{mmGB_MACROTILE_MODE6, false},
	{mmGB_MACROTILE_MODE7, false},
	{mmGB_MACROTILE_MODE8, false},
	{mmGB_MACROTILE_MODE9, false},
	{mmGB_MACROTILE_MODE10, false},
	{mmGB_MACROTILE_MODE11, false},
	{mmGB_MACROTILE_MODE12, false},
	{mmGB_MACROTILE_MODE13, false},
	{mmGB_MACROTILE_MODE14, false},
	{mmGB_MACROTILE_MODE15, false},
	{mmCC_RB_BACKEND_DISABLE, false, true},
	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
	{mmGB_BACKEND_MAP, false, false},
	{mmPA_SC_RASTER_CONFIG, false, true},
	{mmPA_SC_RASTER_CONFIG_1, false, true},
};

static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ?
			0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	const struct amdgpu_allowed_register_entry *asic_register_table = NULL;
	const struct amdgpu_allowed_register_entry *asic_register_entry;
	uint32_t size, i;

	*value = 0;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		asic_register_table = tonga_allowed_read_registers;
		size = ARRAY_SIZE(tonga_allowed_read_registers);
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		asic_register_table = cz_allowed_read_registers;
		size = ARRAY_SIZE(cz_allowed_read_registers);
		break;
	default:
		return -EINVAL;
	}

	if (asic_register_table) {
		for (i = 0; i < size; i++) {
			asic_register_entry = asic_register_table + i;
			if (reg_offset != asic_register_entry->reg_offset)
				continue;
			if (!asic_register_entry->untouched)
				*value = vi_get_register_value(adev,
							       asic_register_entry->grbm_indexed,
							       se_num, sh_num, reg_offset);
			return 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		if (!vi_allowed_read_registers[i].untouched)
			*value = vi_get_register_value(adev,
						       vi_allowed_read_registers[i].grbm_indexed,
						       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	if (r)
		return r;

	return 0;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(ixCG_ECLK_CNTL);
	tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
		 CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(ixCG_ECLK_CNTL, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{

	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS	0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT	9
#define ATI_REV_ID_FUSE_MACRO__MASK	0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
};

#define CZ_REV_BRISTOL(rev) \
	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))

static int vi_common_early_init(void *handle)
{
	bool smc_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
		smc_enabled = true;

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags =
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |=
				AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	/* VI uses smc load by default */
	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	amdgpu_get_pcie_info(adev);

	return 0;
}

static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

static void
vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (temp != data)
		WREG32(0x157a, data);
}


static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}

static int vi_common_set_clockgating_state_by_smu(void *handle,
						  enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	void *pp_handle = adev->powerplay.pp_handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = AMD_CG_SUPPORT_MC_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_MC_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_MC,
				      pp_support_state,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = AMD_CG_SUPPORT_SDMA_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |=
				AMD_CG_SUPPORT_SDMA_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_SDMA,
				      pp_support_state,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = AMD_CG_SUPPORT_HDP_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_HDP,
				      pp_support_state,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}


	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {

		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_DRM,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {

		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_ROM,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}
	return 0;
}

static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		vi_common_set_clockgating_state_by_smu(adev, state);
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void
vi_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};

int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	/* in early init stage, vbios code won't work */
	vi_detect_hw_virtualization(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_vi_virt_ops;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}