1 /* 2 * Copyright 2014 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#include "amdgpu_powerplay.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"

MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin");
MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");

/*
 * Indirect registers accessors
 *
 * Several register blocks (PCIE, SMC, UVD context, DIDT, GC_CAC) are only
 * reachable through an index/data register pair.  Each accessor holds the
 * matching spinlock with IRQs disabled so the index write and the data
 * access cannot be interleaved with another user of the same pair.
 */

/* Read a PCIE indirect register through the mmPCIE_INDEX/DATA pair. */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);	/* read back to post the index write */
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

/* Write a PCIE indirect register; both writes are read back to post them. */
static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/* Read an SMC indirect register through SMC_IND_INDEX_0/DATA_0. */
static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, (reg));
	r = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

/* Write an SMC indirect register through SMC_IND_INDEX_0/DATA_0. */
static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, (reg));
	WREG32(mmSMC_IND_DATA_0, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

/* Carrizo-family SMC read: same pattern, different index/data pair. */
static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

/* Carrizo-family SMC write through MP0PUB_IND_INDEX/DATA. */
static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* Read a UVD context register; the index is limited to 9 bits (0x1ff). */
static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

/* Write a UVD context register; the index is limited to 9 bits (0x1ff). */
static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

/* Read a DIDT indirect register. */
static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

/* Write a DIDT indirect register. */
static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

/* Read a GC_CAC indirect register. */
static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

/* Write a GC_CAC indirect register. */
static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}


/* Golden register init triples: {register, AND mask, OR value}. */
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4,
0xffffffff, 0xC060000C, 241 mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100, 242 mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100, 243 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, 244 }; 245 246 static const u32 fiji_mgcg_cgcg_init[] = 247 { 248 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100, 249 mmPCIE_INDEX, 0xffffffff, 0x0140001c, 250 mmPCIE_DATA, 0x000f0000, 0x00000000, 251 mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C, 252 mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100, 253 mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100, 254 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, 255 }; 256 257 static const u32 iceland_mgcg_cgcg_init[] = 258 { 259 mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2, 260 mmPCIE_DATA, 0x000f0000, 0x00000000, 261 mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0, 262 mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100, 263 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, 264 }; 265 266 static const u32 cz_mgcg_cgcg_init[] = 267 { 268 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100, 269 mmPCIE_INDEX, 0xffffffff, 0x0140001c, 270 mmPCIE_DATA, 0x000f0000, 0x00000000, 271 mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100, 272 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, 273 }; 274 275 static const u32 stoney_mgcg_cgcg_init[] = 276 { 277 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100, 278 mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104, 279 mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027, 280 }; 281 282 static void vi_init_golden_registers(struct amdgpu_device *adev) 283 { 284 /* Some of the registers might be dependent on GRBM_GFX_INDEX */ 285 mutex_lock(&adev->grbm_idx_mutex); 286 287 switch (adev->asic_type) { 288 case CHIP_TOPAZ: 289 amdgpu_program_register_sequence(adev, 290 iceland_mgcg_cgcg_init, 291 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init)); 292 break; 293 case CHIP_FIJI: 294 amdgpu_program_register_sequence(adev, 295 fiji_mgcg_cgcg_init, 296 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init)); 297 break; 298 case CHIP_TONGA: 299 amdgpu_program_register_sequence(adev, 300 
tonga_mgcg_cgcg_init, 301 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init)); 302 break; 303 case CHIP_CARRIZO: 304 amdgpu_program_register_sequence(adev, 305 cz_mgcg_cgcg_init, 306 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init)); 307 break; 308 case CHIP_STONEY: 309 amdgpu_program_register_sequence(adev, 310 stoney_mgcg_cgcg_init, 311 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init)); 312 break; 313 case CHIP_POLARIS11: 314 case CHIP_POLARIS10: 315 default: 316 break; 317 } 318 mutex_unlock(&adev->grbm_idx_mutex); 319 } 320 321 /** 322 * vi_get_xclk - get the xclk 323 * 324 * @adev: amdgpu_device pointer 325 * 326 * Returns the reference clock used by the gfx engine 327 * (VI). 328 */ 329 static u32 vi_get_xclk(struct amdgpu_device *adev) 330 { 331 u32 reference_clock = adev->clock.spll.reference_freq; 332 u32 tmp; 333 334 if (adev->flags & AMD_IS_APU) 335 return reference_clock; 336 337 tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2); 338 if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK)) 339 return 1000; 340 341 tmp = RREG32_SMC(ixCG_CLKPIN_CNTL); 342 if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE)) 343 return reference_clock / 4; 344 345 return reference_clock; 346 } 347 348 /** 349 * vi_srbm_select - select specific register instances 350 * 351 * @adev: amdgpu_device pointer 352 * @me: selected ME (micro engine) 353 * @pipe: pipe 354 * @queue: queue 355 * @vmid: VMID 356 * 357 * Switches the currently active registers instances. Some 358 * registers are instanced per VMID, others are instanced per 359 * me/pipe/queue combination. 
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

/* Enable/disable VGA state; not yet implemented for VI. */
static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

/*
 * Read the vbios while the GPU is otherwise disabled: save the ROM/VGA
 * register state, enable the ROM and disable VGA mode, read the bios,
 * then restore every saved register.  Returns true if the read succeeded.
 */
static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	/* save the current state of the ROM/VGA related registers */
	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

/*
 * Read the vbios image directly from the ROM via the SMC index/data
 * registers, copying length_bytes (rounded up to dwords) into bios.
 * Returns false for NULL/zero-length buffers and on APUs, where the
 * vbios is part of the system bios image instead.
 */
static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_0, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

/*
 * Detect SR-IOV/passthrough capabilities from BIF_IOV_FUNC_IDENTIFIER
 * and record them in adev->virtualization.virtual_caps.
 */
static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
	/* bit0: 0 means pf and 1 means vf */
	/* bit31: 0 means disable IOV and 1 means enable */
	if (reg & 1)
		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
			adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

/* Extra registers readable on Topaz; second field marks untouched entries. */
static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
	{mmGB_MACROTILE_MODE7, true},
};

/* Extra registers readable on CZ-class and newer VI parts. */
static const struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
	{mmGB_TILE_MODE7, true},
	{mmGB_TILE_MODE12, true},
	{mmGB_TILE_MODE17, true},
	{mmGB_TILE_MODE23, true},
	{mmGB_MACROTILE_MODE7, true},
};

/* Default register whitelist used by vi_read_register() on all VI parts. */
static const struct amdgpu_allowed_register_entry
vi_allowed_read_registers[] = {
	/* fields: {reg_offset, untouched, grbm_indexed} */
	{mmGRBM_STATUS, false},
	{mmGRBM_STATUS2, false},
	{mmGRBM_STATUS_SE0, false},
	{mmGRBM_STATUS_SE1, false},
	{mmGRBM_STATUS_SE2, false},
	{mmGRBM_STATUS_SE3, false},
	{mmSRBM_STATUS, false},
	{mmSRBM_STATUS2, false},
	{mmSRBM_STATUS3, false},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
	{mmCP_STAT, false},
	{mmCP_STALLED_STAT1, false},
	{mmCP_STALLED_STAT2, false},
	{mmCP_STALLED_STAT3, false},
	{mmCP_CPF_BUSY_STAT, false},
	{mmCP_CPF_STALLED_STAT1, false},
	{mmCP_CPF_STATUS, false},
	{mmCP_CPC_BUSY_STAT, false},
	{mmCP_CPC_STALLED_STAT1, false},
	{mmCP_CPC_STATUS, false},
	{mmGB_ADDR_CONFIG, false},
	{mmMC_ARB_RAMCFG, false},
	{mmGB_TILE_MODE0, false},
	{mmGB_TILE_MODE1, false},
	{mmGB_TILE_MODE2, false},
	{mmGB_TILE_MODE3, false},
	{mmGB_TILE_MODE4, false},
	{mmGB_TILE_MODE5, false},
	{mmGB_TILE_MODE6, false},
	{mmGB_TILE_MODE7, false},
	{mmGB_TILE_MODE8, false},
	{mmGB_TILE_MODE9, false},
	{mmGB_TILE_MODE10, false},
	{mmGB_TILE_MODE11, false},
	{mmGB_TILE_MODE12, false},
	{mmGB_TILE_MODE13, false},
	{mmGB_TILE_MODE14, false},
	{mmGB_TILE_MODE15, false},
	{mmGB_TILE_MODE16, false},
	{mmGB_TILE_MODE17, false},
	{mmGB_TILE_MODE18, false},
	{mmGB_TILE_MODE19, false},
	{mmGB_TILE_MODE20, false},
	{mmGB_TILE_MODE21, false},
	{mmGB_TILE_MODE22, false},
	{mmGB_TILE_MODE23, false},
	{mmGB_TILE_MODE24, false},
	{mmGB_TILE_MODE25, false},
	{mmGB_TILE_MODE26, false},
	{mmGB_TILE_MODE27, false},
	{mmGB_TILE_MODE28, false},
	{mmGB_TILE_MODE29, false},
	{mmGB_TILE_MODE30, false},
	{mmGB_TILE_MODE31, false},
	{mmGB_MACROTILE_MODE0, false},
	{mmGB_MACROTILE_MODE1, false},
	{mmGB_MACROTILE_MODE2, false},
	{mmGB_MACROTILE_MODE3, false},
	{mmGB_MACROTILE_MODE4, false},
	{mmGB_MACROTILE_MODE5, false},
	{mmGB_MACROTILE_MODE6, false},
	{mmGB_MACROTILE_MODE7, false},
	{mmGB_MACROTILE_MODE8, false},
	{mmGB_MACROTILE_MODE9, false},
	{mmGB_MACROTILE_MODE10, false},
	{mmGB_MACROTILE_MODE11, false},
	{mmGB_MACROTILE_MODE12, false},
	{mmGB_MACROTILE_MODE13, false},
	{mmGB_MACROTILE_MODE14, false},
	{mmGB_MACROTILE_MODE15, false},
	{mmCC_RB_BACKEND_DISABLE, false, true},
	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
	{mmGB_BACKEND_MAP, false, false},
	{mmPA_SC_RASTER_CONFIG, false, true},
	{mmPA_SC_RASTER_CONFIG_1, false, true},
};

/*
 * Read a GRBM-indexed register for a specific SE/SH instance.  A se_num
 * or sh_num of 0xffffffff selects the broadcast instance; the selection
 * is restored to broadcast afterwards under grbm_idx_mutex.
 */
static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

/*
 * Whitelisted register read: the per-ASIC table is checked first, then
 * the common VI table.  Untouched entries report 0; grbm_indexed entries
 * are read through vi_read_indexed_register().  Returns -EINVAL for
 * unknown ASICs or registers not in either table.
 */
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	const struct amdgpu_allowed_register_entry *asic_register_table = NULL;
	const struct amdgpu_allowed_register_entry *asic_register_entry;
	uint32_t size, i;

	*value = 0;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		asic_register_table = tonga_allowed_read_registers;
		size = ARRAY_SIZE(tonga_allowed_read_registers);
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		asic_register_table = cz_allowed_read_registers;
		size = ARRAY_SIZE(cz_allowed_read_registers);
		break;
	default:
		return -EINVAL;
	}

	if (asic_register_table) {
		for (i = 0; i < size; i++) {
			asic_register_entry = asic_register_table + i;
			if (reg_offset != asic_register_entry->reg_offset)
				continue;
			if (!asic_register_entry->untouched)
				*value = asic_register_entry->grbm_indexed ?
					vi_read_indexed_register(adev, se_num,
								 sh_num, reg_offset) :
					RREG32(reg_offset);
			return 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		if (!vi_allowed_read_registers[i].untouched)
			*value = vi_allowed_read_registers[i].grbm_indexed ?
				vi_read_indexed_register(adev, se_num,
							 sh_num, reg_offset) :
				RREG32(reg_offset);
		return 0;
	}
	return -EINVAL;
}

/*
 * Reset the GPU via PCI config space: disable bus mastering, trigger the
 * config reset, then poll mmCONFIG_MEMSIZE until the ASIC responds again.
 * Returns 0 on success or -EINVAL if the ASIC never comes back.
 */
static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}

/*
 * Set or clear the "GUI engine hung" flag in the vbios scratch register
 * so the vbios knows whether a re-post is needed.
 */
static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
{
	u32 tmp = RREG32(mmBIOS_SCRATCH_3);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(mmBIOS_SCRATCH_3, tmp);
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
677 */ 678 static int vi_asic_reset(struct amdgpu_device *adev) 679 { 680 int r; 681 682 vi_set_bios_scratch_engine_hung(adev, true); 683 684 r = vi_gpu_pci_config_reset(adev); 685 686 vi_set_bios_scratch_engine_hung(adev, false); 687 688 return r; 689 } 690 691 static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock, 692 u32 cntl_reg, u32 status_reg) 693 { 694 int r, i; 695 struct atom_clock_dividers dividers; 696 uint32_t tmp; 697 698 r = amdgpu_atombios_get_clock_dividers(adev, 699 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 700 clock, false, ÷rs); 701 if (r) 702 return r; 703 704 tmp = RREG32_SMC(cntl_reg); 705 tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK | 706 CG_DCLK_CNTL__DCLK_DIVIDER_MASK); 707 tmp |= dividers.post_divider; 708 WREG32_SMC(cntl_reg, tmp); 709 710 for (i = 0; i < 100; i++) { 711 if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK) 712 break; 713 mdelay(10); 714 } 715 if (i == 100) 716 return -ETIMEDOUT; 717 718 return 0; 719 } 720 721 static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk) 722 { 723 int r; 724 725 r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS); 726 if (r) 727 return r; 728 729 r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS); 730 731 return 0; 732 } 733 734 static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk) 735 { 736 /* todo */ 737 738 return 0; 739 } 740 741 static void vi_pcie_gen3_enable(struct amdgpu_device *adev) 742 { 743 if (pci_is_root_bus(adev->pdev->bus)) 744 return; 745 746 if (amdgpu_pcie_gen2 == 0) 747 return; 748 749 if (adev->flags & AMD_IS_APU) 750 return; 751 752 if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | 753 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3))) 754 return; 755 756 /* todo */ 757 } 758 759 static void vi_program_aspm(struct amdgpu_device *adev) 760 { 761 762 if (amdgpu_aspm == 0) 763 return; 764 765 /* todo */ 766 } 767 768 static void vi_enable_doorbell_aperture(struct 
amdgpu_device *adev, 769 bool enable) 770 { 771 u32 tmp; 772 773 /* not necessary on CZ */ 774 if (adev->flags & AMD_IS_APU) 775 return; 776 777 tmp = RREG32(mmBIF_DOORBELL_APER_EN); 778 if (enable) 779 tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1); 780 else 781 tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0); 782 783 WREG32(mmBIF_DOORBELL_APER_EN, tmp); 784 } 785 786 /* topaz has no DCE, UVD, VCE */ 787 static const struct amdgpu_ip_block_version topaz_ip_blocks[] = 788 { 789 /* ORDER MATTERS! */ 790 { 791 .type = AMD_IP_BLOCK_TYPE_COMMON, 792 .major = 2, 793 .minor = 0, 794 .rev = 0, 795 .funcs = &vi_common_ip_funcs, 796 }, 797 { 798 .type = AMD_IP_BLOCK_TYPE_GMC, 799 .major = 7, 800 .minor = 4, 801 .rev = 0, 802 .funcs = &gmc_v7_0_ip_funcs, 803 }, 804 { 805 .type = AMD_IP_BLOCK_TYPE_IH, 806 .major = 2, 807 .minor = 4, 808 .rev = 0, 809 .funcs = &iceland_ih_ip_funcs, 810 }, 811 { 812 .type = AMD_IP_BLOCK_TYPE_SMC, 813 .major = 7, 814 .minor = 1, 815 .rev = 0, 816 .funcs = &amdgpu_pp_ip_funcs, 817 }, 818 { 819 .type = AMD_IP_BLOCK_TYPE_GFX, 820 .major = 8, 821 .minor = 0, 822 .rev = 0, 823 .funcs = &gfx_v8_0_ip_funcs, 824 }, 825 { 826 .type = AMD_IP_BLOCK_TYPE_SDMA, 827 .major = 2, 828 .minor = 4, 829 .rev = 0, 830 .funcs = &sdma_v2_4_ip_funcs, 831 }, 832 }; 833 834 static const struct amdgpu_ip_block_version topaz_ip_blocks_vd[] = 835 { 836 /* ORDER MATTERS! 
	 */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 4,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 4,
		.rev = 0,
		.funcs = &iceland_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		/* virtual display variant: DCE handled by dce_virtual */
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &dce_virtual_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 4,
		.rev = 0,
		.funcs = &sdma_v2_4_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 10,
		.minor = 0,
		.rev = 0,
		.funcs = &dce_v10_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 5,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v5_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version tonga_ip_blocks_vd[] =
{
	/* ORDER MATTERS!
	 */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		/* virtual display variant: DCE handled by dce_virtual */
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 10,
		.minor = 0,
		.rev = 0,
		.funcs = &dce_virtual_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 5,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v5_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 5,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 10,
		.minor = 1,
		.rev = 0,
		.funcs = &dce_v10_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version fiji_ip_blocks_vd[] =
{
	/* ORDER MATTERS!
	 */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 5,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		/* virtual display variant: DCE handled by dce_virtual */
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 10,
		.minor = 1,
		.rev = 0,
		.funcs = &dce_virtual_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 1,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 1,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 2,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 11,
		.minor = 2,
		.rev = 0,
		.funcs = &dce_v11_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 1,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 3,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 4,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version polaris11_ip_blocks_vd[] =
{
	/* ORDER MATTERS!
	 */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 1,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 1,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 2,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		/* virtual display variant: DCE handled by dce_virtual */
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 11,
		.minor = 2,
		.rev = 0,
		.funcs = &dce_virtual_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 1,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 3,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 4,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version cz_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &cz_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 11,
		.minor = 0,
		.rev = 0,
		.funcs = &dce_v11_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
#if defined(CONFIG_DRM_AMD_ACP)
	{
		.type = AMD_IP_BLOCK_TYPE_ACP,
		.major = 2,
		.minor = 2,
		.rev = 0,
		.funcs = &acp_ip_funcs,
	},
#endif
};

static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] =
{
	/* ORDER MATTERS!
*/ 1376 { 1377 .type = AMD_IP_BLOCK_TYPE_COMMON, 1378 .major = 2, 1379 .minor = 0, 1380 .rev = 0, 1381 .funcs = &vi_common_ip_funcs, 1382 }, 1383 { 1384 .type = AMD_IP_BLOCK_TYPE_GMC, 1385 .major = 8, 1386 .minor = 0, 1387 .rev = 0, 1388 .funcs = &gmc_v8_0_ip_funcs, 1389 }, 1390 { 1391 .type = AMD_IP_BLOCK_TYPE_IH, 1392 .major = 3, 1393 .minor = 0, 1394 .rev = 0, 1395 .funcs = &cz_ih_ip_funcs, 1396 }, 1397 { 1398 .type = AMD_IP_BLOCK_TYPE_SMC, 1399 .major = 8, 1400 .minor = 0, 1401 .rev = 0, 1402 .funcs = &amdgpu_pp_ip_funcs 1403 }, 1404 { 1405 .type = AMD_IP_BLOCK_TYPE_DCE, 1406 .major = 11, 1407 .minor = 0, 1408 .rev = 0, 1409 .funcs = &dce_virtual_ip_funcs, 1410 }, 1411 { 1412 .type = AMD_IP_BLOCK_TYPE_GFX, 1413 .major = 8, 1414 .minor = 0, 1415 .rev = 0, 1416 .funcs = &gfx_v8_0_ip_funcs, 1417 }, 1418 { 1419 .type = AMD_IP_BLOCK_TYPE_SDMA, 1420 .major = 3, 1421 .minor = 0, 1422 .rev = 0, 1423 .funcs = &sdma_v3_0_ip_funcs, 1424 }, 1425 { 1426 .type = AMD_IP_BLOCK_TYPE_UVD, 1427 .major = 6, 1428 .minor = 0, 1429 .rev = 0, 1430 .funcs = &uvd_v6_0_ip_funcs, 1431 }, 1432 { 1433 .type = AMD_IP_BLOCK_TYPE_VCE, 1434 .major = 3, 1435 .minor = 0, 1436 .rev = 0, 1437 .funcs = &vce_v3_0_ip_funcs, 1438 }, 1439 #if defined(CONFIG_DRM_AMD_ACP) 1440 { 1441 .type = AMD_IP_BLOCK_TYPE_ACP, 1442 .major = 2, 1443 .minor = 2, 1444 .rev = 0, 1445 .funcs = &acp_ip_funcs, 1446 }, 1447 #endif 1448 }; 1449 1450 int vi_set_ip_blocks(struct amdgpu_device *adev) 1451 { 1452 if (adev->enable_virtual_display) { 1453 switch (adev->asic_type) { 1454 case CHIP_TOPAZ: 1455 adev->ip_blocks = topaz_ip_blocks_vd; 1456 adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks_vd); 1457 break; 1458 case CHIP_FIJI: 1459 adev->ip_blocks = fiji_ip_blocks_vd; 1460 adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks_vd); 1461 break; 1462 case CHIP_TONGA: 1463 adev->ip_blocks = tonga_ip_blocks_vd; 1464 adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks_vd); 1465 break; 1466 case CHIP_POLARIS11: 1467 case CHIP_POLARIS10: 
1468 adev->ip_blocks = polaris11_ip_blocks_vd; 1469 adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks_vd); 1470 break; 1471 1472 case CHIP_CARRIZO: 1473 case CHIP_STONEY: 1474 adev->ip_blocks = cz_ip_blocks_vd; 1475 adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_vd); 1476 break; 1477 default: 1478 /* FIXME: not supported yet */ 1479 return -EINVAL; 1480 } 1481 } else { 1482 switch (adev->asic_type) { 1483 case CHIP_TOPAZ: 1484 adev->ip_blocks = topaz_ip_blocks; 1485 adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks); 1486 break; 1487 case CHIP_FIJI: 1488 adev->ip_blocks = fiji_ip_blocks; 1489 adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks); 1490 break; 1491 case CHIP_TONGA: 1492 adev->ip_blocks = tonga_ip_blocks; 1493 adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks); 1494 break; 1495 case CHIP_POLARIS11: 1496 case CHIP_POLARIS10: 1497 adev->ip_blocks = polaris11_ip_blocks; 1498 adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks); 1499 break; 1500 case CHIP_CARRIZO: 1501 case CHIP_STONEY: 1502 adev->ip_blocks = cz_ip_blocks; 1503 adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks); 1504 break; 1505 default: 1506 /* FIXME: not supported yet */ 1507 return -EINVAL; 1508 } 1509 } 1510 1511 return 0; 1512 } 1513 1514 #define ATI_REV_ID_FUSE_MACRO__ADDRESS 0xC0014044 1515 #define ATI_REV_ID_FUSE_MACRO__SHIFT 9 1516 #define ATI_REV_ID_FUSE_MACRO__MASK 0x00001E00 1517 1518 static uint32_t vi_get_rev_id(struct amdgpu_device *adev) 1519 { 1520 if (adev->flags & AMD_IS_APU) 1521 return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK) 1522 >> ATI_REV_ID_FUSE_MACRO__SHIFT; 1523 else 1524 return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK) 1525 >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT; 1526 } 1527 1528 static const struct amdgpu_asic_funcs vi_asic_funcs = 1529 { 1530 .read_disabled_bios = &vi_read_disabled_bios, 1531 .read_bios_from_rom = &vi_read_bios_from_rom, 1532 .detect_hw_virtualization = vi_detect_hw_virtualization, 1533 
.read_register = &vi_read_register, 1534 .reset = &vi_asic_reset, 1535 .set_vga_state = &vi_vga_set_state, 1536 .get_xclk = &vi_get_xclk, 1537 .set_uvd_clocks = &vi_set_uvd_clocks, 1538 .set_vce_clocks = &vi_set_vce_clocks, 1539 }; 1540 1541 static int vi_common_early_init(void *handle) 1542 { 1543 bool smc_enabled = false; 1544 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1545 1546 if (adev->flags & AMD_IS_APU) { 1547 adev->smc_rreg = &cz_smc_rreg; 1548 adev->smc_wreg = &cz_smc_wreg; 1549 } else { 1550 adev->smc_rreg = &vi_smc_rreg; 1551 adev->smc_wreg = &vi_smc_wreg; 1552 } 1553 adev->pcie_rreg = &vi_pcie_rreg; 1554 adev->pcie_wreg = &vi_pcie_wreg; 1555 adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg; 1556 adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg; 1557 adev->didt_rreg = &vi_didt_rreg; 1558 adev->didt_wreg = &vi_didt_wreg; 1559 adev->gc_cac_rreg = &vi_gc_cac_rreg; 1560 adev->gc_cac_wreg = &vi_gc_cac_wreg; 1561 1562 adev->asic_funcs = &vi_asic_funcs; 1563 1564 if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) && 1565 (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC))) 1566 smc_enabled = true; 1567 1568 adev->rev_id = vi_get_rev_id(adev); 1569 adev->external_rev_id = 0xFF; 1570 switch (adev->asic_type) { 1571 case CHIP_TOPAZ: 1572 adev->cg_flags = 0; 1573 adev->pg_flags = 0; 1574 adev->external_rev_id = 0x1; 1575 break; 1576 case CHIP_FIJI: 1577 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | 1578 AMD_CG_SUPPORT_GFX_MGLS | 1579 AMD_CG_SUPPORT_GFX_RLC_LS | 1580 AMD_CG_SUPPORT_GFX_CP_LS | 1581 AMD_CG_SUPPORT_GFX_CGTS | 1582 AMD_CG_SUPPORT_GFX_CGTS_LS | 1583 AMD_CG_SUPPORT_GFX_CGCG | 1584 AMD_CG_SUPPORT_GFX_CGLS | 1585 AMD_CG_SUPPORT_SDMA_MGCG | 1586 AMD_CG_SUPPORT_SDMA_LS | 1587 AMD_CG_SUPPORT_BIF_LS | 1588 AMD_CG_SUPPORT_HDP_MGCG | 1589 AMD_CG_SUPPORT_HDP_LS | 1590 AMD_CG_SUPPORT_ROM_MGCG | 1591 AMD_CG_SUPPORT_MC_MGCG | 1592 AMD_CG_SUPPORT_MC_LS; 1593 adev->pg_flags = 0; 1594 adev->external_rev_id = adev->rev_id + 0x3c; 1595 break; 1596 case CHIP_TONGA: 1597 
adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG; 1598 adev->pg_flags = 0; 1599 adev->external_rev_id = adev->rev_id + 0x14; 1600 break; 1601 case CHIP_POLARIS11: 1602 adev->cg_flags = 0; 1603 adev->pg_flags = 0; 1604 adev->external_rev_id = adev->rev_id + 0x5A; 1605 break; 1606 case CHIP_POLARIS10: 1607 adev->cg_flags = 0; 1608 adev->pg_flags = 0; 1609 adev->external_rev_id = adev->rev_id + 0x50; 1610 break; 1611 case CHIP_CARRIZO: 1612 adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG | 1613 AMD_CG_SUPPORT_GFX_MGCG | 1614 AMD_CG_SUPPORT_GFX_MGLS | 1615 AMD_CG_SUPPORT_GFX_RLC_LS | 1616 AMD_CG_SUPPORT_GFX_CP_LS | 1617 AMD_CG_SUPPORT_GFX_CGTS | 1618 AMD_CG_SUPPORT_GFX_MGLS | 1619 AMD_CG_SUPPORT_GFX_CGTS_LS | 1620 AMD_CG_SUPPORT_GFX_CGCG | 1621 AMD_CG_SUPPORT_GFX_CGLS | 1622 AMD_CG_SUPPORT_BIF_LS | 1623 AMD_CG_SUPPORT_HDP_MGCG | 1624 AMD_CG_SUPPORT_HDP_LS | 1625 AMD_CG_SUPPORT_SDMA_MGCG | 1626 AMD_CG_SUPPORT_SDMA_LS | 1627 AMD_CG_SUPPORT_VCE_MGCG; 1628 /* rev0 hardware requires workarounds to support PG */ 1629 adev->pg_flags = 0; 1630 if (adev->rev_id != 0x00) { 1631 adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | 1632 AMD_PG_SUPPORT_GFX_SMG | 1633 AMD_PG_SUPPORT_GFX_PIPELINE | 1634 AMD_PG_SUPPORT_UVD | 1635 AMD_PG_SUPPORT_VCE; 1636 } 1637 adev->external_rev_id = adev->rev_id + 0x1; 1638 break; 1639 case CHIP_STONEY: 1640 adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG | 1641 AMD_CG_SUPPORT_GFX_MGCG | 1642 AMD_CG_SUPPORT_GFX_MGLS | 1643 AMD_CG_SUPPORT_GFX_RLC_LS | 1644 AMD_CG_SUPPORT_GFX_CP_LS | 1645 AMD_CG_SUPPORT_GFX_CGTS | 1646 AMD_CG_SUPPORT_GFX_MGLS | 1647 AMD_CG_SUPPORT_GFX_CGTS_LS | 1648 AMD_CG_SUPPORT_GFX_CGCG | 1649 AMD_CG_SUPPORT_GFX_CGLS | 1650 AMD_CG_SUPPORT_BIF_LS | 1651 AMD_CG_SUPPORT_HDP_MGCG | 1652 AMD_CG_SUPPORT_HDP_LS | 1653 AMD_CG_SUPPORT_SDMA_MGCG | 1654 AMD_CG_SUPPORT_SDMA_LS | 1655 AMD_CG_SUPPORT_VCE_MGCG; 1656 adev->pg_flags = AMD_PG_SUPPORT_GFX_PG | 1657 AMD_PG_SUPPORT_GFX_SMG | 1658 AMD_PG_SUPPORT_GFX_PIPELINE | 1659 AMD_PG_SUPPORT_UVD | 1660 AMD_PG_SUPPORT_VCE; 1661 
adev->external_rev_id = adev->rev_id + 0x61; 1662 break; 1663 default: 1664 /* FIXME: not supported yet */ 1665 return -EINVAL; 1666 } 1667 1668 /* in early init stage, vbios code won't work */ 1669 if (adev->asic_funcs->detect_hw_virtualization) 1670 amdgpu_asic_detect_hw_virtualization(adev); 1671 1672 if (amdgpu_smc_load_fw && smc_enabled) 1673 adev->firmware.smu_load = true; 1674 1675 amdgpu_get_pcie_info(adev); 1676 1677 return 0; 1678 } 1679 1680 static int vi_common_sw_init(void *handle) 1681 { 1682 return 0; 1683 } 1684 1685 static int vi_common_sw_fini(void *handle) 1686 { 1687 return 0; 1688 } 1689 1690 static int vi_common_hw_init(void *handle) 1691 { 1692 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1693 1694 /* move the golden regs per IP block */ 1695 vi_init_golden_registers(adev); 1696 /* enable pcie gen2/3 link */ 1697 vi_pcie_gen3_enable(adev); 1698 /* enable aspm */ 1699 vi_program_aspm(adev); 1700 /* enable the doorbell aperture */ 1701 vi_enable_doorbell_aperture(adev, true); 1702 1703 return 0; 1704 } 1705 1706 static int vi_common_hw_fini(void *handle) 1707 { 1708 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1709 1710 /* enable the doorbell aperture */ 1711 vi_enable_doorbell_aperture(adev, false); 1712 1713 return 0; 1714 } 1715 1716 static int vi_common_suspend(void *handle) 1717 { 1718 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1719 1720 return vi_common_hw_fini(adev); 1721 } 1722 1723 static int vi_common_resume(void *handle) 1724 { 1725 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1726 1727 return vi_common_hw_init(adev); 1728 } 1729 1730 static bool vi_common_is_idle(void *handle) 1731 { 1732 return true; 1733 } 1734 1735 static int vi_common_wait_for_idle(void *handle) 1736 { 1737 return 0; 1738 } 1739 1740 static int vi_common_soft_reset(void *handle) 1741 { 1742 return 0; 1743 } 1744 1745 static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev, 
1746 bool enable) 1747 { 1748 uint32_t temp, data; 1749 1750 temp = data = RREG32_PCIE(ixPCIE_CNTL2); 1751 1752 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) 1753 data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK | 1754 PCIE_CNTL2__MST_MEM_LS_EN_MASK | 1755 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK; 1756 else 1757 data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK | 1758 PCIE_CNTL2__MST_MEM_LS_EN_MASK | 1759 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK); 1760 1761 if (temp != data) 1762 WREG32_PCIE(ixPCIE_CNTL2, data); 1763 } 1764 1765 static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev, 1766 bool enable) 1767 { 1768 uint32_t temp, data; 1769 1770 temp = data = RREG32(mmHDP_HOST_PATH_CNTL); 1771 1772 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG)) 1773 data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK; 1774 else 1775 data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK; 1776 1777 if (temp != data) 1778 WREG32(mmHDP_HOST_PATH_CNTL, data); 1779 } 1780 1781 static void vi_update_hdp_light_sleep(struct amdgpu_device *adev, 1782 bool enable) 1783 { 1784 uint32_t temp, data; 1785 1786 temp = data = RREG32(mmHDP_MEM_POWER_LS); 1787 1788 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) 1789 data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK; 1790 else 1791 data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK; 1792 1793 if (temp != data) 1794 WREG32(mmHDP_MEM_POWER_LS, data); 1795 } 1796 1797 static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev, 1798 bool enable) 1799 { 1800 uint32_t temp, data; 1801 1802 temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0); 1803 1804 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG)) 1805 data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK | 1806 CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK); 1807 else 1808 data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK | 1809 CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK; 1810 1811 if (temp != data) 1812 WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data); 1813 } 1814 1815 static int 
vi_common_set_clockgating_state_by_smu(void *handle, 1816 enum amd_clockgating_state state) 1817 { 1818 uint32_t msg_id, pp_state; 1819 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1820 void *pp_handle = adev->powerplay.pp_handle; 1821 1822 if (state == AMD_CG_STATE_UNGATE) 1823 pp_state = 0; 1824 else 1825 pp_state = PP_STATE_CG | PP_STATE_LS; 1826 1827 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, 1828 PP_BLOCK_SYS_MC, 1829 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, 1830 pp_state); 1831 amd_set_clockgating_by_smu(pp_handle, msg_id); 1832 1833 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, 1834 PP_BLOCK_SYS_SDMA, 1835 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, 1836 pp_state); 1837 amd_set_clockgating_by_smu(pp_handle, msg_id); 1838 1839 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, 1840 PP_BLOCK_SYS_HDP, 1841 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, 1842 pp_state); 1843 amd_set_clockgating_by_smu(pp_handle, msg_id); 1844 1845 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, 1846 PP_BLOCK_SYS_BIF, 1847 PP_STATE_SUPPORT_LS, 1848 pp_state); 1849 amd_set_clockgating_by_smu(pp_handle, msg_id); 1850 1851 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, 1852 PP_BLOCK_SYS_BIF, 1853 PP_STATE_SUPPORT_CG, 1854 pp_state); 1855 amd_set_clockgating_by_smu(pp_handle, msg_id); 1856 1857 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, 1858 PP_BLOCK_SYS_DRM, 1859 PP_STATE_SUPPORT_LS, 1860 pp_state); 1861 amd_set_clockgating_by_smu(pp_handle, msg_id); 1862 1863 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, 1864 PP_BLOCK_SYS_ROM, 1865 PP_STATE_SUPPORT_CG, 1866 pp_state); 1867 amd_set_clockgating_by_smu(pp_handle, msg_id); 1868 1869 return 0; 1870 } 1871 1872 static int vi_common_set_clockgating_state(void *handle, 1873 enum amd_clockgating_state state) 1874 { 1875 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1876 1877 switch (adev->asic_type) { 1878 case CHIP_FIJI: 1879 vi_update_bif_medium_grain_light_sleep(adev, 1880 state == AMD_CG_STATE_GATE ? 
true : false); 1881 vi_update_hdp_medium_grain_clock_gating(adev, 1882 state == AMD_CG_STATE_GATE ? true : false); 1883 vi_update_hdp_light_sleep(adev, 1884 state == AMD_CG_STATE_GATE ? true : false); 1885 vi_update_rom_medium_grain_clock_gating(adev, 1886 state == AMD_CG_STATE_GATE ? true : false); 1887 break; 1888 case CHIP_CARRIZO: 1889 case CHIP_STONEY: 1890 vi_update_bif_medium_grain_light_sleep(adev, 1891 state == AMD_CG_STATE_GATE ? true : false); 1892 vi_update_hdp_medium_grain_clock_gating(adev, 1893 state == AMD_CG_STATE_GATE ? true : false); 1894 vi_update_hdp_light_sleep(adev, 1895 state == AMD_CG_STATE_GATE ? true : false); 1896 break; 1897 case CHIP_TONGA: 1898 case CHIP_POLARIS10: 1899 case CHIP_POLARIS11: 1900 vi_common_set_clockgating_state_by_smu(adev, state); 1901 default: 1902 break; 1903 } 1904 return 0; 1905 } 1906 1907 static int vi_common_set_powergating_state(void *handle, 1908 enum amd_powergating_state state) 1909 { 1910 return 0; 1911 } 1912 1913 const struct amd_ip_funcs vi_common_ip_funcs = { 1914 .name = "vi_common", 1915 .early_init = vi_common_early_init, 1916 .late_init = NULL, 1917 .sw_init = vi_common_sw_init, 1918 .sw_fini = vi_common_sw_fini, 1919 .hw_init = vi_common_hw_init, 1920 .hw_fini = vi_common_hw_fini, 1921 .suspend = vi_common_suspend, 1922 .resume = vi_common_resume, 1923 .is_idle = vi_common_is_idle, 1924 .wait_for_idle = vi_common_wait_for_idle, 1925 .soft_reset = vi_common_soft_reset, 1926 .set_clockgating_state = vi_common_set_clockgating_state, 1927 .set_powergating_state = vi_common_set_powergating_state, 1928 }; 1929 1930