/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#include "amdgpu_powerplay.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"

MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin");
MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");

/*
 * Indirect registers accessors
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
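	/* program the index register, then read it back so the write is
	 * posted before the data register is touched
	 */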
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	r = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	WREG32(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
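
/* golden register tables: amdgpu_program_register_sequence() consumes
 * these as { register offset, AND mask, OR value } triplets; an AND
 * mask of 0xffffffff writes the value directly instead of doing a
 * read-modify-write
 */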
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances. Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;

	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);

	/* bit0: 0 means pf and 1 means vf */
	/* bit31: 0 means disable IOV and 1 means enable */
	if (reg & 1)
		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
			adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}
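
/* register read whitelists: vi_read_register() only services offsets
 * listed in these tables (e.g. for the userspace INFO ioctl); entries
 * flagged as untouched always read back as 0
 */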
static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
	{mmGB_MACROTILE_MODE7, true},
};

static const struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
	{mmGB_TILE_MODE7, true},
	{mmGB_TILE_MODE12, true},
	{mmGB_TILE_MODE17, true},
	{mmGB_TILE_MODE23, true},
	{mmGB_MACROTILE_MODE7, true},
};

static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS, false},
	{mmGRBM_STATUS2, false},
	{mmGRBM_STATUS_SE0, false},
	{mmGRBM_STATUS_SE1, false},
	{mmGRBM_STATUS_SE2, false},
	{mmGRBM_STATUS_SE3, false},
	{mmSRBM_STATUS, false},
	{mmSRBM_STATUS2, false},
	{mmSRBM_STATUS3, false},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
	{mmCP_STAT, false},
	{mmCP_STALLED_STAT1, false},
	{mmCP_STALLED_STAT2, false},
	{mmCP_STALLED_STAT3, false},
	{mmCP_CPF_BUSY_STAT, false},
	{mmCP_CPF_STALLED_STAT1, false},
	{mmCP_CPF_STATUS, false},
	{mmCP_CPC_BUSY_STAT, false},
	{mmCP_CPC_STALLED_STAT1, false},
	{mmCP_CPC_STATUS, false},
	{mmGB_ADDR_CONFIG, false},
	{mmMC_ARB_RAMCFG, false},
	{mmGB_TILE_MODE0, false},
	{mmGB_TILE_MODE1, false},
	{mmGB_TILE_MODE2, false},
	{mmGB_TILE_MODE3, false},
	{mmGB_TILE_MODE4, false},
	{mmGB_TILE_MODE5, false},
	{mmGB_TILE_MODE6, false},
	{mmGB_TILE_MODE7, false},
	{mmGB_TILE_MODE8, false},
	{mmGB_TILE_MODE9, false},
	{mmGB_TILE_MODE10, false},
	{mmGB_TILE_MODE11, false},
	{mmGB_TILE_MODE12, false},
	{mmGB_TILE_MODE13, false},
	{mmGB_TILE_MODE14, false},
	{mmGB_TILE_MODE15, false},
	{mmGB_TILE_MODE16, false},
	{mmGB_TILE_MODE17, false},
	{mmGB_TILE_MODE18, false},
	{mmGB_TILE_MODE19, false},
	{mmGB_TILE_MODE20, false},
	{mmGB_TILE_MODE21, false},
	{mmGB_TILE_MODE22, false},
	{mmGB_TILE_MODE23, false},
	{mmGB_TILE_MODE24, false},
	{mmGB_TILE_MODE25, false},
	{mmGB_TILE_MODE26, false},
	{mmGB_TILE_MODE27, false},
	{mmGB_TILE_MODE28, false},
	{mmGB_TILE_MODE29, false},
	{mmGB_TILE_MODE30, false},
	{mmGB_TILE_MODE31, false},
	{mmGB_MACROTILE_MODE0, false},
	{mmGB_MACROTILE_MODE1, false},
	{mmGB_MACROTILE_MODE2, false},
	{mmGB_MACROTILE_MODE3, false},
	{mmGB_MACROTILE_MODE4, false},
	{mmGB_MACROTILE_MODE5, false},
	{mmGB_MACROTILE_MODE6, false},
	{mmGB_MACROTILE_MODE7, false},
	{mmGB_MACROTILE_MODE8, false},
	{mmGB_MACROTILE_MODE9, false},
	{mmGB_MACROTILE_MODE10, false},
	{mmGB_MACROTILE_MODE11, false},
	{mmGB_MACROTILE_MODE12, false},
	{mmGB_MACROTILE_MODE13, false},
	{mmGB_MACROTILE_MODE14, false},
	{mmGB_MACROTILE_MODE15, false},
	{mmCC_RB_BACKEND_DISABLE, false, true},
	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
	{mmGB_BACKEND_MAP, false, false},
	{mmPA_SC_RASTER_CONFIG, false, true},
	{mmPA_SC_RASTER_CONFIG_1, false, true},
};

static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
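
		/* the per-SE/per-SH RB registers below come from the
		 * cached gfx config, so no GRBM bank switch is needed
		 */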
		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}
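
/**
 * vi_read_register - read a register if it is on an allowed list
 *
 * @adev: amdgpu_device pointer
 * @se_num: shader engine to address (0xffffffff for broadcast)
 * @sh_num: sh block to address (0xffffffff for broadcast)
 * @reg_offset: register to read
 * @value: where the register value is stored
 *
 * Checks the per-ASIC and common whitelists above and fetches the
 * value either from the cached config or from the hardware.
 * Returns 0 on success, -EINVAL if the register is not allowed.
 */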
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	const struct amdgpu_allowed_register_entry *asic_register_table = NULL;
	const struct amdgpu_allowed_register_entry *asic_register_entry;
	uint32_t size, i;

	*value = 0;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		asic_register_table = tonga_allowed_read_registers;
		size = ARRAY_SIZE(tonga_allowed_read_registers);
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		asic_register_table = cz_allowed_read_registers;
		size = ARRAY_SIZE(cz_allowed_read_registers);
		break;
	default:
		return -EINVAL;
	}

	if (asic_register_table) {
		for (i = 0; i < size; i++) {
			asic_register_entry = asic_register_table + i;
			if (reg_offset != asic_register_entry->reg_offset)
				continue;
			if (!asic_register_entry->untouched)
				*value = vi_get_register_value(adev,
							       asic_register_entry->grbm_indexed,
							       se_num, sh_num, reg_offset);
			return 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		if (!vi_allowed_read_registers[i].untouched)
			*value = vi_get_register_value(adev,
						       vi_allowed_read_registers[i].grbm_indexed,
						       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	if (r)
		return r;

	return 0;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS	0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT	9
#define ATI_REV_ID_FUSE_MACRO__MASK	0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.detect_hw_virtualization = vi_detect_hw_virtualization,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
};

static int vi_common_early_init(void *handle)
{
	bool smc_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
		smc_enabled = true;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
				AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	/* in early init stage, vbios code won't work */
	if (adev->asic_funcs->detect_hw_virtualization)
		amdgpu_asic_detect_hw_virtualization(adev);

	if (amdgpu_smc_load_fw && smc_enabled)
		adev->firmware.smu_load = true;

	amdgpu_get_pcie_info(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}
static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}

static int vi_common_set_clockgating_state_by_smu(void *handle,
						  enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	void *pp_handle = adev->powerplay.pp_handle;
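
	/* each PP_CG_MSG_ID() below packs the target block, the gating
	 * features the ASIC supports for it and the requested state
	 * into a single message for the SMU
	 */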
	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = AMD_CG_SUPPORT_MC_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_MC_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_MC,
				      pp_support_state,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = AMD_CG_SUPPORT_SDMA_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_SDMA,
				      pp_support_state,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = AMD_CG_SUPPORT_HDP_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_HDP,
				      pp_support_state,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_DRM,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_ROM,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}
	return 0;
}

static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
		vi_common_set_clockgating_state_by_smu(adev, state);
		break;
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = NULL,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};
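
/* IP blocks are brought up in the order they are added here: common
 * first, then GMC and IH before anything that needs memory access or
 * interrupts
 */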
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		break;
	case CHIP_TONGA:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}