/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"

/* Topaz */
static const struct amdgpu_video_codecs topaz_video_codecs_encode =
{
	.codec_count = 0,
	.codec_array = NULL,
};

/* Tonga, CZ, ST, Fiji */
static const struct amdgpu_video_codec_info tonga_video_codecs_encode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
};

static const struct amdgpu_video_codecs tonga_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(tonga_video_codecs_encode_array),
	.codec_array = tonga_video_codecs_encode_array,
};

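/*
 * Polaris parts carry VCE 3.4 (see vi_set_ip_blocks() below), which adds
 * HEVC encode on top of the H.264 encode shared with the older ASICs,
 * hence the separate caps table.
 */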
/* Polaris */
static const struct amdgpu_video_codec_info polaris_video_codecs_encode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
};

static const struct amdgpu_video_codecs polaris_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(polaris_video_codecs_encode_array),
	.codec_array = polaris_video_codecs_encode_array,
};

/* Topaz */
static const struct amdgpu_video_codecs topaz_video_codecs_decode =
{
	.codec_count = 0,
	.codec_array = NULL,
};

/* Tonga */
static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 4,
	},
};

static const struct amdgpu_video_codecs tonga_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(tonga_video_codecs_decode_array),
	.codec_array = tonga_video_codecs_decode_array,
};

/* CZ, ST, Fiji, Polaris */
static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 4,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 186,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
};

static const struct amdgpu_video_codecs cz_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(cz_video_codecs_decode_array),
	.codec_array = cz_video_codecs_decode_array,
};

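/**
 * vi_query_video_codecs - query the supported video codec caps
 *
 * @adev: amdgpu_device pointer
 * @encode: true to return the encode caps, false for the decode caps
 * @codecs: filled with a pointer to the matching caps table above
 *
 * Returns 0 on success, -EINVAL for an unhandled ASIC.
 */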
static int vi_query_video_codecs(struct amdgpu_device *adev, bool encode,
				 const struct amdgpu_video_codecs **codecs)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		if (encode)
			*codecs = &topaz_video_codecs_encode;
		else
			*codecs = &topaz_video_codecs_decode;
		return 0;
	case CHIP_TONGA:
		if (encode)
			*codecs = &tonga_video_codecs_encode;
		else
			*codecs = &tonga_video_codecs_decode;
		return 0;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		if (encode)
			*codecs = &polaris_video_codecs_encode;
		else
			*codecs = &cz_video_codecs_decode;
		return 0;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (encode)
			*codecs = &tonga_video_codecs_encode;
		else
			*codecs = &cz_video_codecs_decode;
		return 0;
	default:
		return -EINVAL;
	}
}

/*
 * Indirect registers accessor
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	r = RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	WREG32_NO_KIQ(mmPCIE_DATA, v);
	(void)RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

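/*
 * On APUs (Carrizo/Stoney) the SMC is reached through the MP0PUB
 * index/data pair below rather than SMC_IND_INDEX_11; the matching
 * accessors are selected in vi_common_early_init().
 */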
/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

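/*
 * Per-ASIC golden register init sequences: { register, mask, value }
 * triplets consumed by amdgpu_device_program_register_sequence().
 */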
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

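/**
 * vi_init_golden_registers - program the golden register overrides
 *
 * @adev: amdgpu_device pointer
 *
 * Applies the per-ASIC init sequence above; SR-IOV VFs take their own
 * list from xgpu_vi_init_golden_registers() instead.
 */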
static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances. Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;

	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

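/**
 * vi_read_bios_from_rom - read the VBIOS directly from the ROM
 *
 * @adev: amdgpu_device pointer
 * @bios: destination buffer
 * @length_bytes: number of bytes to read
 *
 * Streams the VBIOS image out dword by dword through the SMC ROM
 * index/data registers. Not used on APUs, where the VBIOS is embedded
 * in the system BIOS image.
 */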
static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS},
	{mmGRBM_STATUS2},
	{mmGRBM_STATUS_SE0},
	{mmGRBM_STATUS_SE1},
	{mmGRBM_STATUS_SE2},
	{mmGRBM_STATUS_SE3},
	{mmSRBM_STATUS},
	{mmSRBM_STATUS2},
	{mmSRBM_STATUS3},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STAT},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPF_STATUS},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmCP_CPC_STATUS},
	{mmGB_ADDR_CONFIG},
	{mmMC_ARB_RAMCFG},
	{mmGB_TILE_MODE0},
	{mmGB_TILE_MODE1},
	{mmGB_TILE_MODE2},
	{mmGB_TILE_MODE3},
	{mmGB_TILE_MODE4},
	{mmGB_TILE_MODE5},
	{mmGB_TILE_MODE6},
	{mmGB_TILE_MODE7},
	{mmGB_TILE_MODE8},
	{mmGB_TILE_MODE9},
	{mmGB_TILE_MODE10},
	{mmGB_TILE_MODE11},
	{mmGB_TILE_MODE12},
	{mmGB_TILE_MODE13},
	{mmGB_TILE_MODE14},
	{mmGB_TILE_MODE15},
	{mmGB_TILE_MODE16},
	{mmGB_TILE_MODE17},
	{mmGB_TILE_MODE18},
	{mmGB_TILE_MODE19},
	{mmGB_TILE_MODE20},
	{mmGB_TILE_MODE21},
	{mmGB_TILE_MODE22},
	{mmGB_TILE_MODE23},
	{mmGB_TILE_MODE24},
	{mmGB_TILE_MODE25},
	{mmGB_TILE_MODE26},
	{mmGB_TILE_MODE27},
	{mmGB_TILE_MODE28},
	{mmGB_TILE_MODE29},
	{mmGB_TILE_MODE30},
	{mmGB_TILE_MODE31},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};

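/*
 * vi_get_register_value - return the value of an allowed register, either
 * from the cached gfx config state (tile modes, raster config, ...) or by
 * an MMIO read, taking the GRBM se/sh index for the instanced registers.
 */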
static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		bool indexed = vi_allowed_read_registers[i].grbm_indexed;

		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
					       reg_offset);
		return 0;
	}
	return -EINVAL;
}

/**
 * vi_asic_pci_config_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Use PCI Config method to reset the GPU.
 *
 * Returns 0 for success.
 */
static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;
	int r = -EINVAL;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_device_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			r = 0;
			break;
		}
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

static bool vi_asic_supports_baco(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		return amdgpu_dpm_is_baco_supported(adev);
	default:
		return false;
	}
}

static enum amd_reset_method
vi_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset;

	if (amdgpu_reset_method == AMD_RESET_METHOD_LEGACY ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		baco_reset = amdgpu_dpm_is_baco_supported(adev);
		break;
	default:
		baco_reset = false;
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_LEGACY;
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		dev_info(adev->dev, "BACO reset\n");
		r = amdgpu_dpm_baco_reset(adev);
	} else {
		dev_info(adev->dev, "PCI CONFIG reset\n");
		r = vi_asic_pci_config_reset(adev);
	}

	return r;
}

static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}

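/**
 * vi_set_uvd_clock - program one UVD clock (vclk or dclk) divider
 *
 * @adev: amdgpu_device pointer
 * @clock: requested clock
 * @cntl_reg: divider control register (SMC indirect space)
 * @status_reg: status register to poll until the divider settles
 *
 * Looks up the post divider for the requested clock via atombios,
 * programs it and waits for the status bit. Returns 0 on success,
 * -ETIMEDOUT if the clock never settles.
 */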
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);

	if (adev->flags & AMD_IS_APU)
		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
	else
		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
			 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		tmp = RREG32_SMC(status_reg);
		if (adev->flags & AMD_IS_APU) {
			if (tmp & 0x10000)
				break;
		} else {
			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
				break;
		}
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;
	return 0;
}

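/*
 * GNB DFS clock control/status registers (SMC indirect space); on APUs
 * these take the place of the CG_*CLK registers for the UVD/VCE clocks.
 */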
#define ixGNB_CLK1_DFS_CNTL	0xD82200F0
#define ixGNB_CLK1_STATUS	0xD822010C
#define ixGNB_CLK2_DFS_CNTL	0xD8220110
#define ixGNB_CLK2_STATUS	0xD822012C
#define ixGNB_CLK3_DFS_CNTL	0xD8220130
#define ixGNB_CLK3_STATUS	0xD822014C

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	if (adev->flags & AMD_IS_APU) {
		r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
		if (r)
			return r;
	} else {
		r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
		if (r)
			return r;
	}

	return 0;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;
	u32 reg_ctrl;
	u32 reg_status;
	u32 status_mask;
	u32 reg_mask;

	if (adev->flags & AMD_IS_APU) {
		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
		reg_status = ixGNB_CLK3_STATUS;
		status_mask = 0x00010000;
		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	} else {
		reg_ctrl = ixCG_ECLK_CNTL;
		reg_status = ixCG_ECLK_STATUS;
		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	}

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(reg_ctrl);
	tmp &= ~reg_mask;
	tmp |= dividers.post_divider;
	WREG32_SMC(reg_ctrl, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS	0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT	9
#define ATI_REV_ID_FUSE_MACRO__MASK	0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
	}
}

static void vi_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_DEBUG0, 1);
		RREG32(mmHDP_DEBUG0);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
	}
}

static bool vi_need_full_reset(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* CZ has hang issues with full reset at the moment */
		return false;
	case CHIP_FIJI:
	case CHIP_TONGA:
		/* XXX: soft reset should work on fiji and tonga */
		return true;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
	default:
		/* change this when we support soft reset */
		return true;
	}
}

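/**
 * vi_get_pcie_usage - sample the PCIE TXCLK perf counters
 *
 * @adev: amdgpu_device pointer
 * @count0: number of messages received (event 40)
 * @count1: number of posted requests sent (event 104)
 *
 * Counts traffic over a one second window and folds the >32 bit
 * overflow fields back in. Reports nothing on APUs.
 */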
static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
			      uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}

static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e. the number of replays */
	return (nak_r + nak_g);
}

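/*
 * vi_need_reset_on_init - returns true if the SMC firmware is already up
 * and past its init point when the driver loads, i.e. the ASIC was not
 * freshly reset and needs a reset before it can be reinitialized.
 */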
static bool vi_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 clock_cntl, pc;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* check if the SMC is already running */
	clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	pc = RREG32_SMC(ixSMC_PC_C);
	if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
	    (0x20100 <= pc))
		return true;

	return false;
}

static void vi_pre_asic_init(struct amdgpu_device *adev)
{
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.reset_method = &vi_asic_reset_method,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
	.flush_hdp = &vi_flush_hdp,
	.invalidate_hdp = &vi_invalidate_hdp,
	.need_full_reset = &vi_need_full_reset,
	.init_doorbell_index = &legacy_doorbell_index_init,
	.get_pcie_usage = &vi_get_pcie_usage,
	.need_reset_on_init = &vi_need_reset_on_init,
	.get_pcie_replay_count = &vi_get_pcie_replay_count,
	.supports_baco = &vi_asic_supports_baco,
	.pre_asic_init = &vi_pre_asic_init,
	.query_video_codecs = &vi_query_video_codecs,
};

#define CZ_REV_BRISTOL(rev) \
	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))

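/*
 * vi_common_early_init - wire up the indirect register accessors and the
 * asic callbacks, then set the clockgating/powergating flags and the
 * external revision id for each VI variant.
 */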
static int vi_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_VEGAM:
		adev->cg_flags = 0;
		/*AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;*/
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x6E;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (temp != data)
		WREG32(0x157a, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}

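/*
 * vi_common_set_clockgating_state_by_smu - for each system block the ASIC
 * supports (MC, SDMA, HDP, BIF, DRM, ROM), build a PP_CG_MSG_ID encoding
 * the requested CG/LS state and hand it to the SMU.
 */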
static int vi_common_set_clockgating_state_by_smu(void *handle,
						  enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_MC,
				      pp_support_state,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_SDMA,
				      pp_support_state,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_HDP,
				      pp_support_state,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_DRM,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_ROM,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	return 0;
}

static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
						       state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
						       state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		vi_update_drm_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		vi_common_set_clockgating_state_by_smu(adev, state);
		break;
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void vi_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};

void vi_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_vi_virt_ops;
}

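/*
 * vi_set_ip_blocks - register the IP blocks for the given VI variant:
 * common, GMC, IH, GFX, SDMA and SMU for everyone, plus a display block
 * (virtual, DC or DCE) and, where present, UVD/VCE and ACP.
 */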
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

void legacy_doorbell_index_init(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
}