/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#include "amdgpu_powerplay.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif

MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");

/*
 * Indirect register accessors
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

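/*
 * All of the accessors in this file follow the same index/data pattern:
 * write the target offset to an INDEX register, then access the paired
 * DATA register, with a spinlock held so the two MMIO operations cannot
 * be interleaved by another context. As an illustrative sketch (not a
 * call site in this file), reading and updating an indirect PCIE
 * register through the hooks installed in vi_common_early_init() below
 * would look like:
 *
 *	u32 val = adev->pcie_rreg(adev, ixPCIE_CNTL2);
 *	adev->pcie_wreg(adev, ixPCIE_CNTL2,
 *			val | PCIE_CNTL2__SLV_MEM_LS_EN_MASK);
 */
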
static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, (reg));
	r = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, (reg));
	WREG32(mmSMC_IND_DATA_0, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

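/*
 * The tables below are flat arrays of (register, and_mask, or_value)
 * triplets consumed by amdgpu_program_register_sequence(): for each
 * triplet the register is read, the bits in and_mask are cleared, the
 * bits in or_value are set, and the result is written back (an and_mask
 * of 0xffffffff writes or_value directly). For example, the first
 * tonga entry below forces CGTT_DRM_CLK_CTRL0 to 0x00600100.
 */
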
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances. Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;

	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

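/*
 * Callers are expected to bracket instanced register programming with a
 * select/restore pair under adev->srbm_mutex, roughly (a sketch based
 * on how the gfx code uses this helper):
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	vi_srbm_select(adev, me, pipe, queue, vmid);
 *	... program the per-instance registers ...
 *	vi_srbm_select(adev, 0, 0, 0, 0);
 *	mutex_unlock(&adev->srbm_mutex);
 */
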
static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of the system bios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_0, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

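/*
 * Register whitelists for the read_register asic hook below, which
 * backs userspace register reads: entries marked "untouched" are
 * reported as 0 without touching the hardware, and "grbm_indexed"
 * entries are read through vi_read_indexed_register() with a specific
 * SE/SH bank selected.
 */
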
static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
	{mmGB_MACROTILE_MODE7, true},
};

static const struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
	{mmGB_TILE_MODE7, true},
	{mmGB_TILE_MODE12, true},
	{mmGB_TILE_MODE17, true},
	{mmGB_TILE_MODE23, true},
	{mmGB_MACROTILE_MODE7, true},
};

static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS, false},
	{mmGRBM_STATUS2, false},
	{mmGRBM_STATUS_SE0, false},
	{mmGRBM_STATUS_SE1, false},
	{mmGRBM_STATUS_SE2, false},
	{mmGRBM_STATUS_SE3, false},
	{mmSRBM_STATUS, false},
	{mmSRBM_STATUS2, false},
	{mmSRBM_STATUS3, false},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
	{mmCP_STAT, false},
	{mmCP_STALLED_STAT1, false},
	{mmCP_STALLED_STAT2, false},
	{mmCP_STALLED_STAT3, false},
	{mmCP_CPF_BUSY_STAT, false},
	{mmCP_CPF_STALLED_STAT1, false},
	{mmCP_CPF_STATUS, false},
	{mmCP_CPC_BUSY_STAT, false},
	{mmCP_CPC_STALLED_STAT1, false},
	{mmCP_CPC_STATUS, false},
	{mmGB_ADDR_CONFIG, false},
	{mmMC_ARB_RAMCFG, false},
	{mmGB_TILE_MODE0, false},
	{mmGB_TILE_MODE1, false},
	{mmGB_TILE_MODE2, false},
	{mmGB_TILE_MODE3, false},
	{mmGB_TILE_MODE4, false},
	{mmGB_TILE_MODE5, false},
	{mmGB_TILE_MODE6, false},
	{mmGB_TILE_MODE7, false},
	{mmGB_TILE_MODE8, false},
	{mmGB_TILE_MODE9, false},
	{mmGB_TILE_MODE10, false},
	{mmGB_TILE_MODE11, false},
	{mmGB_TILE_MODE12, false},
	{mmGB_TILE_MODE13, false},
	{mmGB_TILE_MODE14, false},
	{mmGB_TILE_MODE15, false},
	{mmGB_TILE_MODE16, false},
	{mmGB_TILE_MODE17, false},
	{mmGB_TILE_MODE18, false},
	{mmGB_TILE_MODE19, false},
	{mmGB_TILE_MODE20, false},
	{mmGB_TILE_MODE21, false},
	{mmGB_TILE_MODE22, false},
	{mmGB_TILE_MODE23, false},
	{mmGB_TILE_MODE24, false},
	{mmGB_TILE_MODE25, false},
	{mmGB_TILE_MODE26, false},
	{mmGB_TILE_MODE27, false},
	{mmGB_TILE_MODE28, false},
	{mmGB_TILE_MODE29, false},
	{mmGB_TILE_MODE30, false},
	{mmGB_TILE_MODE31, false},
	{mmGB_MACROTILE_MODE0, false},
	{mmGB_MACROTILE_MODE1, false},
	{mmGB_MACROTILE_MODE2, false},
	{mmGB_MACROTILE_MODE3, false},
	{mmGB_MACROTILE_MODE4, false},
	{mmGB_MACROTILE_MODE5, false},
	{mmGB_MACROTILE_MODE6, false},
	{mmGB_MACROTILE_MODE7, false},
	{mmGB_MACROTILE_MODE8, false},
	{mmGB_MACROTILE_MODE9, false},
	{mmGB_MACROTILE_MODE10, false},
	{mmGB_MACROTILE_MODE11, false},
	{mmGB_MACROTILE_MODE12, false},
	{mmGB_MACROTILE_MODE13, false},
	{mmGB_MACROTILE_MODE14, false},
	{mmGB_MACROTILE_MODE15, false},
	{mmCC_RB_BACKEND_DISABLE, false, true},
	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
	{mmGB_BACKEND_MAP, false, false},
	{mmPA_SC_RASTER_CONFIG, false, true},
	{mmPA_SC_RASTER_CONFIG_1, false, true},
};

static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		gfx_v8_0_select_se_sh(adev, se_num, sh_num);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	const struct amdgpu_allowed_register_entry *asic_register_table = NULL;
	const struct amdgpu_allowed_register_entry *asic_register_entry;
	uint32_t size, i;

	*value = 0;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		asic_register_table = tonga_allowed_read_registers;
		size = ARRAY_SIZE(tonga_allowed_read_registers);
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		asic_register_table = cz_allowed_read_registers;
		size = ARRAY_SIZE(cz_allowed_read_registers);
		break;
	default:
		return -EINVAL;
	}

	if (asic_register_table) {
		for (i = 0; i < size; i++) {
			asic_register_entry = asic_register_table + i;
			if (reg_offset != asic_register_entry->reg_offset)
				continue;
			if (!asic_register_entry->untouched)
				*value = asic_register_entry->grbm_indexed ?
					vi_read_indexed_register(adev, se_num,
								 sh_num, reg_offset) :
					RREG32(reg_offset);
			return 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		if (!vi_allowed_read_registers[i].untouched)
			*value = vi_allowed_read_registers[i].grbm_indexed ?
				vi_read_indexed_register(adev, se_num,
							 sh_num, reg_offset) :
				RREG32(reg_offset);
		return 0;
	}
	return -EINVAL;
}

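/*
 * Illustrative use of the hook above (a hypothetical call site, not
 * code from this driver): passing 0xffffffff for se_num/sh_num reads
 * the register without selecting a specific shader engine bank.
 *
 *	u32 val;
 *
 *	if (!vi_read_register(adev, 0xffffffff, 0xffffffff,
 *			      mmGRBM_STATUS, &val))
 *		dev_info(adev->dev, "GRBM_STATUS: 0x%08x\n", val);
 */
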
static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
			break;
		udelay(1);
	}
}

static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
{
	u32 tmp = RREG32(mmBIOS_SCRATCH_3);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(mmBIOS_SCRATCH_3, tmp);
}

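/*
 * The GUI-engine-hung flag in BIOS scratch register 3 signals to the
 * VBIOS that a reset is in progress; vi_asic_reset() below sets it
 * before the PCI config reset and clears it again afterwards.
 */
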
/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Set the engine-hung bios scratch flag, perform a PCI config
 * reset of the ASIC, then clear the flag again (VI).
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	vi_set_bios_scratch_engine_hung(adev, true);

	vi_gpu_pci_config_reset(adev);

	vi_set_bios_scratch_engine_hung(adev, false);

	return 0;
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);

	return r;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

/* topaz has no DCE, UVD, VCE */
static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 4,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 4,
		.rev = 0,
		.funcs = &iceland_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 4,
		.rev = 0,
		.funcs = &sdma_v2_4_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 10,
		.minor = 0,
		.rev = 0,
		.funcs = &dce_v10_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 5,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v5_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 5,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 10,
		.minor = 1,
		.rev = 0,
		.funcs = &dce_v10_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 1,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 1,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 2,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 11,
		.minor = 2,
		.rev = 0,
		.funcs = &dce_v11_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 1,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 3,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 4,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version cz_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &cz_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 11,
		.minor = 0,
		.rev = 0,
		.funcs = &dce_v11_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
#if defined(CONFIG_DRM_AMD_ACP)
	{
		.type = AMD_IP_BLOCK_TYPE_ACP,
		.major = 2,
		.minor = 2,
		.rev = 0,
		.funcs = &acp_ip_funcs,
	},
#endif
};

int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->ip_blocks = topaz_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
		break;
	case CHIP_FIJI:
		adev->ip_blocks = fiji_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
		break;
	case CHIP_TONGA:
		adev->ip_blocks = tonga_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
		adev->ip_blocks = polaris11_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		adev->ip_blocks = cz_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

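/*
 * The core driver walks adev->ip_blocks in array order when bringing
 * the device up, which is why the tables above are marked
 * "ORDER MATTERS!". A simplified sketch of the consumer loop (error
 * handling elided):
 *
 *	for (i = 0; i < adev->num_ip_blocks; i++) {
 *		r = adev->ip_blocks[i].funcs->early_init((void *)adev);
 *		if (r)
 *			return r;
 *	}
 */
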
#define ATI_REV_ID_FUSE_MACRO__ADDRESS	0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT	9
#define ATI_REV_ID_FUSE_MACRO__MASK	0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	/* these should be moved to their own ip modules */
	.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
	.wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
};

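/*
 * These hooks are normally reached through the amdgpu_asic_*() wrapper
 * macros rather than called directly; e.g. amdgpu_asic_reset(adev)
 * expands (roughly) to adev->asic_funcs->reset(adev).
 */
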
static int vi_common_early_init(void *handle)
{
	bool smc_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
		smc_enabled = true;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_smc_load_fw && smc_enabled)
		adev->firmware.smu_load = true;

	amdgpu_get_pcie_info(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

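/*
 * The vi_update_*() helpers below share one read-modify-write pattern:
 * sample the current register value, compute the new value from the
 * requested state and the ASIC's cg_flags, and only issue the write
 * when the value actually changed, avoiding redundant MMIO traffic.
 */
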
static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}

static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
						       state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
						       state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = NULL,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
};