/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/slab.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"

/*
 * Indirect register accessors
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	r = RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	WREG32_NO_KIQ(mmPCIE_DATA, v);
	(void)RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
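/*
 * All of the indexed accessors below share the same shape: take the
 * spinlock that guards an INDEX/DATA register pair, write the target
 * offset into the INDEX register, then read or write the DATA
 * register.  The dummy read-back of the INDEX register in the PCIE
 * accessors above flushes the posted write, so the index is known to
 * have landed before DATA is touched.
 */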
static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};
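/*
 * The *_mgcg_cgcg_init tables above and below are golden-register
 * sequences: flat arrays of {offset, and_mask, or_value} triplets
 * consumed by amdgpu_device_program_register_sequence().  Assuming the
 * usual read-modify-write semantics of that helper, each triplet is
 * applied roughly as:
 *
 *	tmp = RREG32(offset);
 *	tmp &= ~and_mask;
 *	tmp |= (or_value & and_mask);
 *	WREG32(offset, tmp);
 *
 * with an and_mask of 0xffffffff treated as a straight write of
 * or_value.
 */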
static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}
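/*
 * Callers of vi_srbm_select() are expected to hold adev->srbm_mutex
 * across the instanced accesses and to restore the default instance
 * afterwards, e.g.:
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	vi_srbm_select(adev, me, pipe, queue, vmid);
 *	... access the instanced registers ...
 *	vi_srbm_select(adev, 0, 0, 0, 0);
 *	mutex_unlock(&adev->srbm_mutex);
 */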
static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = 0;

	if (adev->asic_type == CHIP_TONGA ||
	    adev->asic_type == CHIP_FIJI) {
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		/* bit0: 0 means pf and 1 means vf */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
		/* bit31: 0 means disable IOV and 1 means enable */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
	}

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}
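/*
 * Whitelist of registers userspace is allowed to read via the
 * AMDGPU_INFO read-mmr path.  Entries flagged grbm_indexed are
 * instanced per SE/SH and are answered from cached gfx config state
 * (or read under GRBM_GFX_INDEX) by vi_get_register_value() below.
 */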
static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS},
	{mmGRBM_STATUS2},
	{mmGRBM_STATUS_SE0},
	{mmGRBM_STATUS_SE1},
	{mmGRBM_STATUS_SE2},
	{mmGRBM_STATUS_SE3},
	{mmSRBM_STATUS},
	{mmSRBM_STATUS2},
	{mmSRBM_STATUS3},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STAT},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPF_STATUS},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmCP_CPC_STATUS},
	{mmGB_ADDR_CONFIG},
	{mmMC_ARB_RAMCFG},
	{mmGB_TILE_MODE0},
	{mmGB_TILE_MODE1},
	{mmGB_TILE_MODE2},
	{mmGB_TILE_MODE3},
	{mmGB_TILE_MODE4},
	{mmGB_TILE_MODE5},
	{mmGB_TILE_MODE6},
	{mmGB_TILE_MODE7},
	{mmGB_TILE_MODE8},
	{mmGB_TILE_MODE9},
	{mmGB_TILE_MODE10},
	{mmGB_TILE_MODE11},
	{mmGB_TILE_MODE12},
	{mmGB_TILE_MODE13},
	{mmGB_TILE_MODE14},
	{mmGB_TILE_MODE15},
	{mmGB_TILE_MODE16},
	{mmGB_TILE_MODE17},
	{mmGB_TILE_MODE18},
	{mmGB_TILE_MODE19},
	{mmGB_TILE_MODE20},
	{mmGB_TILE_MODE21},
	{mmGB_TILE_MODE22},
	{mmGB_TILE_MODE23},
	{mmGB_TILE_MODE24},
	{mmGB_TILE_MODE25},
	{mmGB_TILE_MODE26},
	{mmGB_TILE_MODE27},
	{mmGB_TILE_MODE28},
	{mmGB_TILE_MODE29},
	{mmGB_TILE_MODE30},
	{mmGB_TILE_MODE31},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};

static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}
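/*
 * Hypothetical usage sketch: reading the global (broadcast) instance
 * of a whitelisted register the way the ioctl path does, with se_num
 * and sh_num set to 0xffffffff:
 *
 *	u32 val;
 *
 *	if (!vi_read_register(adev, 0xffffffff, 0xffffffff,
 *			      mmGB_ADDR_CONFIG, &val))
 *		DRM_INFO("GB_ADDR_CONFIG = 0x%08x\n", val);
 */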
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		bool indexed = vi_allowed_read_registers[i].grbm_indexed;

		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
					       reg_offset);
		return 0;
	}
	return -EINVAL;
}

static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_device_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}
/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);

	if (adev->flags & AMD_IS_APU)
		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
	else
		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
			 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		tmp = RREG32_SMC(status_reg);
		if (adev->flags & AMD_IS_APU) {
			if (tmp & 0x10000)
				break;
		} else {
			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
				break;
		}
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;
	return 0;
}

#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
#define ixGNB_CLK1_STATUS   0xD822010C
#define ixGNB_CLK2_DFS_CNTL 0xD8220110
#define ixGNB_CLK2_STATUS   0xD822012C
#define ixGNB_CLK3_DFS_CNTL 0xD8220130
#define ixGNB_CLK3_STATUS   0xD822014C

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	if (adev->flags & AMD_IS_APU) {
		r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
		if (r)
			return r;
	} else {
		r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
		if (r)
			return r;
	}

	return 0;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;
	u32 reg_ctrl;
	u32 reg_status;
	u32 status_mask;
	u32 reg_mask;

	if (adev->flags & AMD_IS_APU) {
		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
		reg_status = ixGNB_CLK3_STATUS;
		status_mask = 0x00010000;
		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	} else {
		reg_ctrl = ixCG_ECLK_CNTL;
		reg_status = ixCG_ECLK_STATUS;
		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	}

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(reg_ctrl);
	tmp &= ~reg_mask;
	tmp |= dividers.post_divider;
	WREG32_SMC(reg_ctrl, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
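/*
 * Note the shared pattern in the two clock setters above: the target
 * frequency is converted to a post divider by the atom interpreter,
 * the divider field is read-modify-written into the control register,
 * and completion is detected by polling a status bit (a hardcoded
 * bit 16 on APUs, a named *_STATUS field otherwise) with a worst-case
 * timeout of 100 * 10ms.
 */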
static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS	0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT	9
#define ATI_REV_ID_FUSE_MACRO__MASK	0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
	}
}

static void vi_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_DEBUG0, 1);
		RREG32(mmHDP_DEBUG0);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
	}
}

static bool vi_need_full_reset(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* CZ has hang issues with full reset at the moment */
		return false;
	case CHIP_FIJI:
	case CHIP_TONGA:
		/* XXX: soft reset should work on fiji and tonga */
		return true;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
	default:
		/* change this when we support soft reset */
		return true;
	}
}

static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
			      uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}
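/*
 * The counters sampled above are 32 bits wide in hardware;
 * COUNTER0_UPPER/COUNTER1_UPPER carry the overflow, which is why the
 * shadowed low words are OR'd with the upper fields shifted left by 32
 * to form the 64-bit totals.  The pcie_bw sysfs file consumes these
 * counts and multiplies them by the link's max payload size to
 * estimate bandwidth.
 */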
static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e. the number of replays */
	return (nak_r + nak_g);
}

static bool vi_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 clock_cntl, pc;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* check if the SMC is already running */
	clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	pc = RREG32_SMC(ixSMC_PC_C);
	if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
	    (0x20100 <= pc))
		return true;

	return false;
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
	.flush_hdp = &vi_flush_hdp,
	.invalidate_hdp = &vi_invalidate_hdp,
	.need_full_reset = &vi_need_full_reset,
	.init_doorbell_index = &legacy_doorbell_index_init,
	.get_pcie_usage = &vi_get_pcie_usage,
	.need_reset_on_init = &vi_need_reset_on_init,
	.get_pcie_replay_count = &vi_get_pcie_replay_count,
};

#define CZ_REV_BRISTOL(rev)	 \
	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))
static int vi_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_VEGAM:
		adev->cg_flags = 0;
		/*AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;*/
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x6E;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}
static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	/* 0x157a has no symbolic define in the VI register headers;
	 * judging by the cg flag checked below, it controls DRM engine
	 * light sleep (bit 0 is the enable). */
	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (temp != data)
		WREG32(0x157a, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}
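/*
 * The SMU-managed gating path below encodes each request in a single
 * message ID: PP_CG_MSG_ID() packs the group (SYS here), the block
 * within the group, a mask of which features the driver supports for
 * that block (LS and/or CG), and the requested state.  Ungating is
 * requested by sending a pp_state of 0 with the same support mask.
 */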
static int vi_common_set_clockgating_state_by_smu(void *handle,
						  enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_MC,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_SDMA,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_HDP,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_DRM,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_ROM,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	return 0;
}
static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
						       state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
						       state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		vi_update_drm_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		vi_common_set_clockgating_state_by_smu(adev, state);
		break;
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void vi_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};
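/*
 * IP block list construction.  Every ASIC gets the common block first,
 * then GMC, IH, GFX, SDMA and the powerplay SMU block; the display
 * block is one of virtual DCE, DC (when CONFIG_DRM_AMD_DC is set and
 * the ASIC supports it) or the legacy DCE block; UVD, VCE and (on
 * APUs) ACP follow where applicable.
 */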
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	/* in early init stage, vbios code won't work */
	vi_detect_hw_virtualization(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_vi_virt_ops;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}
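/*
 * Pre-Vega ASICs use a fixed doorbell layout; the AMDGPU_DOORBELL_*
 * constants below are fixed slot indices into the doorbell BAR,
 * assigned here rather than computed per-ASIC as on later generations.
 */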
void legacy_doorbell_index_init(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
}