/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#include "amdgpu_powerplay.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif

MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");

/*
 * Indirect registers accessor
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
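/*
 * The accessors above and below all share the same index/data scheme:
 * writing the INDEX register selects an internal register and the DATA
 * register then reads or writes it, with a per-aperture spinlock keeping
 * the index/data pair atomic against concurrent users.  The extra
 * (void)RREG32() of the PCIE index flushes the posted write so the index
 * is latched before the data access.
 */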
static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, (reg));
	r = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, (reg));
	WREG32(mmSMC_IND_DATA_0, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
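/*
 * The golden register tables below are consumed three entries at a time by
 * amdgpu_program_register_sequence(): {register, AND mask, value}.  A mask
 * of 0xffffffff writes the value directly; anything else is applied
 * read-modify-write, replacing only the masked bits.
 */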
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances. Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}
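/*
 * Note: vi_srbm_select() only programs SRBM_GFX_CNTL; callers such as the
 * GFX and SDMA blocks are expected to hold adev->srbm_mutex around the
 * banked accesses and to restore the default instance afterwards with
 * vi_srbm_select(adev, 0, 0, 0, 0).
 */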
static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_0, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

static u32 vi_get_virtual_caps(struct amdgpu_device *adev)
{
	u32 caps = 0;
	u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);

	if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
		caps |= AMDGPU_VIRT_CAPS_SRIOV_EN;

	if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
		caps |= AMDGPU_VIRT_CAPS_IS_VF;

	return caps;
}
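/*
 * Register allow-lists for the read_register ASIC callback (backing the
 * AMDGPU_INFO_READ_MMR_REG userspace query).  Each
 * amdgpu_allowed_register_entry is {reg_offset, untouched, grbm_indexed}:
 * "untouched" entries are accepted but report 0 rather than a live read,
 * and "grbm_indexed" entries are read through the GRBM SE/SH banking (see
 * vi_read_indexed_register() below).
 */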
static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
	{mmGB_MACROTILE_MODE7, true},
};

static const struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
	{mmGB_TILE_MODE7, true},
	{mmGB_TILE_MODE12, true},
	{mmGB_TILE_MODE17, true},
	{mmGB_TILE_MODE23, true},
	{mmGB_MACROTILE_MODE7, true},
};

static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS, false},
	{mmGRBM_STATUS2, false},
	{mmGRBM_STATUS_SE0, false},
	{mmGRBM_STATUS_SE1, false},
	{mmGRBM_STATUS_SE2, false},
	{mmGRBM_STATUS_SE3, false},
	{mmSRBM_STATUS, false},
	{mmSRBM_STATUS2, false},
	{mmSRBM_STATUS3, false},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
	{mmCP_STAT, false},
	{mmCP_STALLED_STAT1, false},
	{mmCP_STALLED_STAT2, false},
	{mmCP_STALLED_STAT3, false},
	{mmCP_CPF_BUSY_STAT, false},
	{mmCP_CPF_STALLED_STAT1, false},
	{mmCP_CPF_STATUS, false},
	{mmCP_CPC_BUSY_STAT, false},
	{mmCP_CPC_STALLED_STAT1, false},
	{mmCP_CPC_STATUS, false},
	{mmGB_ADDR_CONFIG, false},
	{mmMC_ARB_RAMCFG, false},
	{mmGB_TILE_MODE0, false},
	{mmGB_TILE_MODE1, false},
	{mmGB_TILE_MODE2, false},
	{mmGB_TILE_MODE3, false},
	{mmGB_TILE_MODE4, false},
	{mmGB_TILE_MODE5, false},
	{mmGB_TILE_MODE6, false},
	{mmGB_TILE_MODE7, false},
	{mmGB_TILE_MODE8, false},
	{mmGB_TILE_MODE9, false},
	{mmGB_TILE_MODE10, false},
	{mmGB_TILE_MODE11, false},
	{mmGB_TILE_MODE12, false},
	{mmGB_TILE_MODE13, false},
	{mmGB_TILE_MODE14, false},
	{mmGB_TILE_MODE15, false},
	{mmGB_TILE_MODE16, false},
	{mmGB_TILE_MODE17, false},
	{mmGB_TILE_MODE18, false},
	{mmGB_TILE_MODE19, false},
	{mmGB_TILE_MODE20, false},
	{mmGB_TILE_MODE21, false},
	{mmGB_TILE_MODE22, false},
	{mmGB_TILE_MODE23, false},
	{mmGB_TILE_MODE24, false},
	{mmGB_TILE_MODE25, false},
	{mmGB_TILE_MODE26, false},
	{mmGB_TILE_MODE27, false},
	{mmGB_TILE_MODE28, false},
	{mmGB_TILE_MODE29, false},
	{mmGB_TILE_MODE30, false},
	{mmGB_TILE_MODE31, false},
	{mmGB_MACROTILE_MODE0, false},
	{mmGB_MACROTILE_MODE1, false},
	{mmGB_MACROTILE_MODE2, false},
	{mmGB_MACROTILE_MODE3, false},
	{mmGB_MACROTILE_MODE4, false},
	{mmGB_MACROTILE_MODE5, false},
	{mmGB_MACROTILE_MODE6, false},
	{mmGB_MACROTILE_MODE7, false},
	{mmGB_MACROTILE_MODE8, false},
	{mmGB_MACROTILE_MODE9, false},
	{mmGB_MACROTILE_MODE10, false},
	{mmGB_MACROTILE_MODE11, false},
	{mmGB_MACROTILE_MODE12, false},
	{mmGB_MACROTILE_MODE13, false},
	{mmGB_MACROTILE_MODE14, false},
	{mmGB_MACROTILE_MODE15, false},
	{mmCC_RB_BACKEND_DISABLE, false, true},
	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
	{mmGB_BACKEND_MAP, false, false},
	{mmPA_SC_RASTER_CONFIG, false, true},
	{mmPA_SC_RASTER_CONFIG_1, false, true},
};

static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}
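/*
 * Look up the requested register in the ASIC-specific table first, then in
 * the common VI list; anything that appears in neither table is refused
 * with -EINVAL so userspace cannot read arbitrary MMIO.
 */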
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	const struct amdgpu_allowed_register_entry *asic_register_table = NULL;
	const struct amdgpu_allowed_register_entry *asic_register_entry;
	uint32_t size, i;

	*value = 0;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		asic_register_table = tonga_allowed_read_registers;
		size = ARRAY_SIZE(tonga_allowed_read_registers);
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		asic_register_table = cz_allowed_read_registers;
		size = ARRAY_SIZE(cz_allowed_read_registers);
		break;
	default:
		return -EINVAL;
	}

	if (asic_register_table) {
		for (i = 0; i < size; i++) {
			asic_register_entry = asic_register_table + i;
			if (reg_offset != asic_register_entry->reg_offset)
				continue;
			if (!asic_register_entry->untouched)
				*value = asic_register_entry->grbm_indexed ?
					vi_read_indexed_register(adev, se_num,
								 sh_num, reg_offset) :
					RREG32(reg_offset);
			return 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		if (!vi_allowed_read_registers[i].untouched)
			*value = vi_allowed_read_registers[i].grbm_indexed ?
				vi_read_indexed_register(adev, se_num,
							 sh_num, reg_offset) :
				RREG32(reg_offset);
		return 0;
	}
	return -EINVAL;
}

static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}

static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
{
	u32 tmp = RREG32(mmBIOS_SCRATCH_3);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(mmBIOS_SCRATCH_3, tmp);
}
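/*
 * The BIOS scratch registers are shared with the VBIOS; flagging
 * ATOM_S3_ASIC_GUI_ENGINE_HUNG around a reset lets the atombios code know
 * that the GUI engine is hung and should not be relied on until the reset
 * has completed.
 */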
/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	vi_set_bios_scratch_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	vi_set_bios_scratch_engine_hung(adev, false);

	return r;
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	/* wait up to ~1 second (100 * 10 ms) for the new divider to latch */
	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);

	/* propagate a dclk failure instead of silently returning success */
	return r;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{

	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}
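/*
 * Per-ASIC IP block tables.  Blocks are initialized in array order and torn
 * down in reverse, so the ordering encodes the hardware dependencies:
 * COMMON first (register accessors, golden registers), GMC before anything
 * that needs VRAM, IH before blocks that register interrupt sources, and
 * the engines (GFX/SDMA/UVD/VCE) last.
 */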
/* topaz has no DCE, UVD, VCE */
static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 4,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 4,
		.rev = 0,
		.funcs = &iceland_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 4,
		.rev = 0,
		.funcs = &sdma_v2_4_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 10,
		.minor = 0,
		.rev = 0,
		.funcs = &dce_v10_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 5,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v5_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 5,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 10,
		.minor = 1,
		.rev = 0,
		.funcs = &dce_v10_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};
static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 1,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 1,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 2,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 11,
		.minor = 2,
		.rev = 0,
		.funcs = &dce_v11_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 1,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 3,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 4,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version cz_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &cz_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 11,
		.minor = 0,
		.rev = 0,
		.funcs = &dce_v11_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
#if defined(CONFIG_DRM_AMD_ACP)
	{
		.type = AMD_IP_BLOCK_TYPE_ACP,
		.major = 2,
		.minor = 2,
		.rev = 0,
		.funcs = &acp_ip_funcs,
	},
#endif
};
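/* Polaris10 shares the Polaris11 table and Stoney shares Carrizo's. */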
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->ip_blocks = topaz_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
		break;
	case CHIP_FIJI:
		adev->ip_blocks = fiji_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
		break;
	case CHIP_TONGA:
		adev->ip_blocks = tonga_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
		adev->ip_blocks = polaris11_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		adev->ip_blocks = cz_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS	0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT	9
#define ATI_REV_ID_FUSE_MACRO__MASK	0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_virtual_caps = &vi_get_virtual_caps,
};

static int vi_common_early_init(void *handle)
{
	bool smc_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
		smc_enabled = true;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_smc_load_fw && smc_enabled)
		adev->firmware.smu_load = true;

	amdgpu_get_pcie_info(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}
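/*
 * Clockgating helpers: each one reads the current register value, sets or
 * clears the feature bits according to "enable" and the ASIC's cg_flags,
 * and writes the register back only when something actually changed, so
 * repeated state transitions stay cheap.
 */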
static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}

static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool gate = (state == AMD_CG_STATE_GATE);

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev, gate);
		vi_update_hdp_medium_grain_clock_gating(adev, gate);
		vi_update_hdp_light_sleep(adev, gate);
		vi_update_rom_medium_grain_clock_gating(adev, gate);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev, gate);
		vi_update_hdp_medium_grain_clock_gating(adev, gate);
		vi_update_hdp_light_sleep(adev, gate);
		break;
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = NULL,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
};