/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "amdgpu_vkms.h"
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"

#define ixPCIE_LC_L1_PM_SUBSTATE	0x100100C6
#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK	0x00000001L
#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK	0x00000002L
#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK	0x00000004L
#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK	0x00000008L
#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK	0x00000010L
#define ixPCIE_L1_PM_SUB_CNTL	0x378
#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK	0x00000004L
#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK	0x00000008L
#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK	0x00000001L
#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK	0x00000002L
#define PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK	0x00200000L
#define LINK_CAP	0x64
#define PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK	0x00040000L
#define ixCPM_CONTROL	0x1400118
#define ixPCIE_LC_CNTL7	0x100100BC
#define PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK	0x00000400L
#define PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT	0x00000007
#define PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT	0x00000009
#define CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK	0x01000000L
#define PCIE_L1_PM_SUB_CNTL	0x378
#define ASIC_IS_P22(asic_type, rid)	((asic_type >= CHIP_POLARIS10) && \
					 (asic_type <= CHIP_POLARIS12) && \
					 (rid >= 0x6E))

/* Topaz */
static const struct amdgpu_video_codecs topaz_video_codecs_encode =
{
	.codec_count = 0,
	.codec_array = NULL,
};

/* Tonga, CZ, ST, Fiji */
static const struct amdgpu_video_codec_info tonga_video_codecs_encode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
};

static const struct amdgpu_video_codecs tonga_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(tonga_video_codecs_encode_array),
	.codec_array = tonga_video_codecs_encode_array,
};

/* Polaris */
static const struct amdgpu_video_codec_info polaris_video_codecs_encode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
};

static const struct amdgpu_video_codecs polaris_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(polaris_video_codecs_encode_array),
	.codec_array = polaris_video_codecs_encode_array,
};

/* Topaz */
static const struct amdgpu_video_codecs topaz_video_codecs_decode =
{
	.codec_count = 0,
	.codec_array = NULL,
};

/* Tonga */
static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 4,
	},
};

static const struct amdgpu_video_codecs tonga_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(tonga_video_codecs_decode_array),
	.codec_array = tonga_video_codecs_decode_array,
};

/* CZ, ST, Fiji, Polaris */
static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 4,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 186,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
};

static const struct amdgpu_video_codecs cz_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(cz_video_codecs_decode_array),
	.codec_array = cz_video_codecs_decode_array,
};
static int vi_query_video_codecs(struct amdgpu_device *adev, bool encode,
				 const struct amdgpu_video_codecs **codecs)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		if (encode)
			*codecs = &topaz_video_codecs_encode;
		else
			*codecs = &topaz_video_codecs_decode;
		return 0;
	case CHIP_TONGA:
		if (encode)
			*codecs = &tonga_video_codecs_encode;
		else
			*codecs = &tonga_video_codecs_decode;
		return 0;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		if (encode)
			*codecs = &polaris_video_codecs_encode;
		else
			*codecs = &cz_video_codecs_decode;
		return 0;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (encode)
			*codecs = &tonga_video_codecs_encode;
		else
			*codecs = &cz_video_codecs_decode;
		return 0;
	default:
		return -EINVAL;
	}
}
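/*
 * The codec tables above are what userspace sees through the
 * AMDGPU_INFO_VIDEO_CAPS query, routed here via the asic_funcs hook.
 * A minimal sketch of the in-kernel call path (illustrative only; it
 * assumes the amdgpu_asic_query_video_codecs() wrapper from amdgpu.h):
 *
 *	const struct amdgpu_video_codecs *codecs;
 *
 *	if (!amdgpu_asic_query_video_codecs(adev, false, &codecs))
 *		DRM_INFO("%d decode codecs reported\n", codecs->codec_count);
 */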
/*
 * Indirect registers accessor
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	r = RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	WREG32_NO_KIQ(mmPCIE_DATA, v);
	(void)RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
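/*
 * All of the accessors above follow the same INDEX/DATA pattern: the
 * indirect offset is written to an index register, then the data register
 * is read or written, with a per-aperture spinlock held so the two MMIO
 * accesses cannot be interleaved by another thread. A minimal sketch of
 * the pattern (mmFOO_INDEX/mmFOO_DATA and foo_idx_lock are placeholders,
 * not real registers):
 *
 *	spin_lock_irqsave(&adev->foo_idx_lock, flags);
 *	WREG32(mmFOO_INDEX, reg);
 *	val = RREG32(mmFOO_DATA);
 *	spin_unlock_irqrestore(&adev->foo_idx_lock, flags);
 */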
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}
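/*
 * Worked example (illustrative numbers, assuming amdgpu's usual 10 kHz
 * clock units): a 100 MHz SPLL reference reads back as
 * reference_freq == 10000. With MUX_TCLK_TO_XCLK set the function reports
 * 1000 (i.e. the 10 MHz TCLK), and with XTALIN_DIVIDE set it reports
 * 10000 / 4 = 2500.
 */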
/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances. Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;

	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}
static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS},
	{mmGRBM_STATUS2},
	{mmGRBM_STATUS_SE0},
	{mmGRBM_STATUS_SE1},
	{mmGRBM_STATUS_SE2},
	{mmGRBM_STATUS_SE3},
	{mmSRBM_STATUS},
	{mmSRBM_STATUS2},
	{mmSRBM_STATUS3},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STAT},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPF_STATUS},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmCP_CPC_STATUS},
	{mmGB_ADDR_CONFIG},
	{mmMC_ARB_RAMCFG},
	{mmGB_TILE_MODE0},
	{mmGB_TILE_MODE1},
	{mmGB_TILE_MODE2},
	{mmGB_TILE_MODE3},
	{mmGB_TILE_MODE4},
	{mmGB_TILE_MODE5},
	{mmGB_TILE_MODE6},
	{mmGB_TILE_MODE7},
	{mmGB_TILE_MODE8},
	{mmGB_TILE_MODE9},
	{mmGB_TILE_MODE10},
	{mmGB_TILE_MODE11},
	{mmGB_TILE_MODE12},
	{mmGB_TILE_MODE13},
	{mmGB_TILE_MODE14},
	{mmGB_TILE_MODE15},
	{mmGB_TILE_MODE16},
	{mmGB_TILE_MODE17},
	{mmGB_TILE_MODE18},
	{mmGB_TILE_MODE19},
	{mmGB_TILE_MODE20},
	{mmGB_TILE_MODE21},
	{mmGB_TILE_MODE22},
	{mmGB_TILE_MODE23},
	{mmGB_TILE_MODE24},
	{mmGB_TILE_MODE25},
	{mmGB_TILE_MODE26},
	{mmGB_TILE_MODE27},
	{mmGB_TILE_MODE28},
	{mmGB_TILE_MODE29},
	{mmGB_TILE_MODE30},
	{mmGB_TILE_MODE31},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};

static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		bool indexed = vi_allowed_read_registers[i].grbm_indexed;

		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
					       reg_offset);
		return 0;
	}
	return -EINVAL;
}

/**
 * vi_asic_pci_config_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Use PCI Config method to reset the GPU.
 *
 * Returns 0 for success.
 */
static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;
	int r = -EINVAL;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_device_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			r = 0;
			break;
		}
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

static bool vi_asic_supports_baco(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		return amdgpu_dpm_is_baco_supported(adev);
	default:
		return false;
	}
}

static enum amd_reset_method
vi_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset;

	if (amdgpu_reset_method == AMD_RESET_METHOD_LEGACY ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		baco_reset = amdgpu_dpm_is_baco_supported(adev);
		break;
	default:
		baco_reset = false;
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_LEGACY;
}
/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	/* APUs don't have full asic reset */
	if (adev->flags & AMD_IS_APU)
		return 0;

	if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		dev_info(adev->dev, "BACO reset\n");
		r = amdgpu_dpm_baco_reset(adev);
	} else {
		dev_info(adev->dev, "PCI CONFIG reset\n");
		r = vi_asic_pci_config_reset(adev);
	}

	return r;
}

static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);

	if (adev->flags & AMD_IS_APU)
		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
	else
		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
			 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		tmp = RREG32_SMC(status_reg);
		if (adev->flags & AMD_IS_APU) {
			if (tmp & 0x10000)
				break;
		} else {
			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
				break;
		}
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;
	return 0;
}

#define ixGNB_CLK1_DFS_CNTL	0xD82200F0
#define ixGNB_CLK1_STATUS	0xD822010C
#define ixGNB_CLK2_DFS_CNTL	0xD8220110
#define ixGNB_CLK2_STATUS	0xD822012C
#define ixGNB_CLK3_DFS_CNTL	0xD8220130
#define ixGNB_CLK3_STATUS	0xD822014C

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	if (adev->flags & AMD_IS_APU) {
		r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
		if (r)
			return r;
	} else {
		r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
		if (r)
			return r;
	}

	return 0;
}
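/*
 * Callers reach these setters through the asic_funcs table rather than
 * directly; e.g. the UVD code requests new video/decode clocks with
 * something like the following (values in 10 kHz units, chosen here purely
 * for illustration; the amdgpu_asic_set_uvd_clocks() wrapper comes from
 * amdgpu.h):
 *
 *	amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
 *
 * which lands in vi_set_uvd_clocks() and programs VCLK, then DCLK.
 */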
static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;
	u32 reg_ctrl;
	u32 reg_status;
	u32 status_mask;
	u32 reg_mask;

	if (adev->flags & AMD_IS_APU) {
		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
		reg_status = ixGNB_CLK3_STATUS;
		status_mask = 0x00010000;
		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	} else {
		reg_ctrl = ixCG_ECLK_CNTL;
		reg_status = ixCG_ECLK_STATUS;
		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	}

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(reg_ctrl);
	tmp &= ~reg_mask;
	tmp |= dividers.post_divider;
	WREG32_SMC(reg_ctrl, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static void vi_enable_aspm(struct amdgpu_device *adev)
{
	u32 data, orig;

	orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
	data |= PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT <<
		PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
	data |= PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT <<
		PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
	data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	data |= PCIE_LC_CNTL__LC_DELAY_L1_EXIT_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_CNTL, data);
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	u32 data, data1, orig;
	bool bL1SS = false;
	bool bClkReqSupport = true;

	if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
		return;

	if (adev->flags & AMD_IS_APU ||
	    adev->asic_type < CHIP_POLARIS10)
		return;

	orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
	data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
	data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
	data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_CNTL, data);

	orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
	data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
	data |= 0x0024 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT;
	data |= PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_N_FTS_CNTL, data);

	orig = data = RREG32_PCIE(ixPCIE_LC_CNTL3);
	data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_CNTL3, data);

	orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
	data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_P_CNTL, data);

	data = RREG32_PCIE(ixPCIE_LC_L1_PM_SUBSTATE);
	pci_read_config_dword(adev->pdev, PCIE_L1_PM_SUB_CNTL, &data1);
	if (data & PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK &&
	    (data & (PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK |
		     PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK |
		     PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK |
		     PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK))) {
		bL1SS = true;
	} else if (data1 & (PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK |
			    PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK |
			    PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK |
			    PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK)) {
		bL1SS = true;
	}

	orig = data = RREG32_PCIE(ixPCIE_LC_CNTL6);
	data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_CNTL6, data);

	orig = data = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL);
	data |= PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK;
	if (orig != data)
		WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, data);

	pci_read_config_dword(adev->pdev, LINK_CAP, &data);
	if (!(data & PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK))
		bClkReqSupport = false;

	if (bClkReqSupport) {
		orig = data = RREG32_SMC(ixTHM_CLK_CNTL);
		data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK | THM_CLK_CNTL__TMON_CLK_SEL_MASK);
		data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) |
			(1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
		if (orig != data)
			WREG32_SMC(ixTHM_CLK_CNTL, data);

		orig = data = RREG32_SMC(ixMISC_CLK_CTRL);
		data &= ~(MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK |
			  MISC_CLK_CTRL__ZCLK_SEL_MASK | MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK);
		data |= (1 << MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT) |
			(1 << MISC_CLK_CTRL__ZCLK_SEL__SHIFT);
		data |= (0x20 << MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT);
		if (orig != data)
			WREG32_SMC(ixMISC_CLK_CTRL, data);

		orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL);
		data |= CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK;
		if (orig != data)
			WREG32_SMC(ixCG_CLKPIN_CNTL, data);

		orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
		data |= CG_CLKPIN_CNTL_2__ENABLE_XCLK_MASK;
		if (orig != data)
			WREG32_SMC(ixCG_CLKPIN_CNTL_2, data);

		orig = data = RREG32_SMC(ixMPLL_BYPASSCLK_SEL);
		data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
		data |= (4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT);
		if (orig != data)
			WREG32_SMC(ixMPLL_BYPASSCLK_SEL, data);

		orig = data = RREG32_PCIE(ixCPM_CONTROL);
		data |= (CPM_CONTROL__REFCLK_XSTCLK_ENABLE_MASK |
			 CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK);
		if (orig != data)
			WREG32_PCIE(ixCPM_CONTROL, data);

		orig = data = RREG32_PCIE(ixPCIE_CONFIG_CNTL);
		data &= ~PCIE_CONFIG_CNTL__DYN_CLK_LATENCY_MASK;
		data |= (0xE << PCIE_CONFIG_CNTL__DYN_CLK_LATENCY__SHIFT);
		if (orig != data)
			WREG32_PCIE(ixPCIE_CONFIG_CNTL, data);

		orig = data = RREG32(mmBIF_CLK_CTRL);
		data |= BIF_CLK_CTRL__BIF_XSTCLK_READY_MASK;
		if (orig != data)
			WREG32(mmBIF_CLK_CTRL, data);

		orig = data = RREG32_PCIE(ixPCIE_LC_CNTL7);
		data |= PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK;
		if (orig != data)
			WREG32_PCIE(ixPCIE_LC_CNTL7, data);

		orig = data = RREG32_PCIE(ixPCIE_HW_DEBUG);
		data |= PCIE_HW_DEBUG__HW_01_DEBUG_MASK;
		if (orig != data)
			WREG32_PCIE(ixPCIE_HW_DEBUG, data);

		orig = data = RREG32_PCIE(ixPCIE_LC_CNTL2);
		data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
		data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK;
		if (bL1SS)
			data &= ~PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK;
		if (orig != data)
			WREG32_PCIE(ixPCIE_LC_CNTL2, data);
	}

	vi_enable_aspm(adev);

	data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
	data1 = RREG32_PCIE(ixPCIE_LC_STATUS1);
	if (((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) == PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) &&
	    data1 & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK &&
	    data1 & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK) {
		orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
		data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
		if (orig != data)
			WREG32_PCIE(ixPCIE_LC_CNTL, data);
	}

	if ((adev->asic_type == CHIP_POLARIS12 &&
	     !(ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))) ||
	    ASIC_IS_P22(adev->asic_type, adev->external_rev_id)) {
		orig = data = RREG32_PCIE(ixPCIE_LC_TRAINING_CNTL);
		data &= ~PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH_MASK;
		if (orig != data)
			WREG32_PCIE(ixPCIE_LC_TRAINING_CNTL, data);
	}
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}
#define ATI_REV_ID_FUSE_MACRO__ADDRESS	0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT	9
#define ATI_REV_ID_FUSE_MACRO__MASK	0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
	}
}

static void vi_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_DEBUG0, 1);
		RREG32(mmHDP_DEBUG0);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
	}
}

static bool vi_need_full_reset(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* CZ has hang issues with full reset at the moment */
		return false;
	case CHIP_FIJI:
	case CHIP_TONGA:
		/* XXX: soft reset should work on fiji and tonga */
		return true;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
	default:
		/* change this when we support soft reset */
		return true;
	}
}

static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
			      uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}
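/*
 * The TXCLK perf counters are 32 bits wide with separate upper-overflow
 * fields, hence the (overflow << 32) | low reconstruction above. A
 * consumer such as the pcie_bw sysfs file samples them through the
 * asic_funcs hook; a sketch of that call pattern (illustrative only):
 *
 *	uint64_t cnt0, cnt1;
 *
 *	amdgpu_asic_get_pcie_usage(adev, &cnt0, &cnt1);
 *	// approximate bandwidth: (cnt0 + cnt1) * PCIe max payload size,
 *	// accumulated over the one second msleep() window above
 */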
static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e. the number of replays */
	return (nak_r + nak_g);
}

static bool vi_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 clock_cntl, pc;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* check if the SMC is already running */
	clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	pc = RREG32_SMC(ixSMC_PC_C);
	if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
	    (0x20100 <= pc))
		return true;

	return false;
}

static void vi_pre_asic_init(struct amdgpu_device *adev)
{
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.reset_method = &vi_asic_reset_method,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
	.flush_hdp = &vi_flush_hdp,
	.invalidate_hdp = &vi_invalidate_hdp,
	.need_full_reset = &vi_need_full_reset,
	.init_doorbell_index = &legacy_doorbell_index_init,
	.get_pcie_usage = &vi_get_pcie_usage,
	.need_reset_on_init = &vi_need_reset_on_init,
	.get_pcie_replay_count = &vi_get_pcie_replay_count,
	.supports_baco = &vi_asic_supports_baco,
	.pre_asic_init = &vi_pre_asic_init,
	.query_video_codecs = &vi_query_video_codecs,
};

#define CZ_REV_BRISTOL(rev) \
	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))
static int vi_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_VEGAM:
		adev->cg_flags = 0;
			/*AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;*/
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x6E;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}
static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (temp != data)
		WREG32(0x157a, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}
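/*
 * Unlike the direct register writes above, the SMU path below encodes each
 * gating request with PP_CG_MSG_ID(group, block, support, state) and hands
 * it to the powerplay layer. For example, gating the MC block when both LS
 * and CG are supported would be encoded as (illustrative, using the PP_*
 * encodings from amd_shared.h):
 *
 *	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, PP_BLOCK_SYS_MC,
 *			      PP_STATE_SUPPORT_LS | PP_STATE_SUPPORT_CG,
 *			      PP_STATE_LS | PP_STATE_CG);
 *	amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
 */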
static int vi_common_set_clockgating_state_by_smu(void *handle,
						  enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_MC,
				      pp_support_state,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_SDMA,
				      pp_support_state,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_HDP,
				      pp_support_state,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_DRM,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_ROM,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	return 0;
}

static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
						       state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
						       state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		vi_update_drm_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		vi_common_set_clockgating_state_by_smu(adev, state);
		break;
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}
static void vi_common_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};

void vi_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_vi_virt_ops;
}
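
/*
 * Register the per-ASIC IP blocks in initialization order: common, GMC,
 * IH, GFX, SDMA, SMU, then display (virtual, DC or DCE) and the
 * multimedia and ACP engines where present.
 */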
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	amdgpu_device_set_sriov_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}
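
/**
 * legacy_doorbell_index_init - init doorbell interface
 *
 * @adev: amdgpu_device pointer
 *
 * Assigns the fixed legacy AMDGPU_DOORBELL_* slots used by VI parts.
 */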
void legacy_doorbell_index_init(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
}