/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
#include "nbio/nbio_7_0_smn.h"
#include "mp/mp_9_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "navi10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "smuio_v11_0.h"
#include "smuio_v13_0.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>

#define mmMP0_MISC_CGTT_CTRL0			0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX		0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL		0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX	0

/* Vega, Raven, Arcturus */
static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
};

static const struct amdgpu_video_codecs vega_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(vega_video_codecs_encode_array),
	.codec_array = vega_video_codecs_encode_array,
};

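/*
 * Note that the encode table above is shared by every ASIC handled in this
 * file (soc15_query_video_codecs() below returns it for Vega, Raven,
 * Renoir, Arcturus and Aldebaran alike); only the decode tables that
 * follow differ per family.
 */
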
/* Vega */
static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 4,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 186,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
};

static const struct amdgpu_video_codecs vega_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(vega_video_codecs_decode_array),
	.codec_array = vega_video_codecs_decode_array,
};

/* Raven */
static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 4,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 186,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
};

static const struct amdgpu_video_codecs rv_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(rv_video_codecs_decode_array),
	.codec_array = rv_video_codecs_decode_array,
};

/* Renoir, Arcturus */
static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 4,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 186,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
};

static const struct amdgpu_video_codecs rn_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(rn_video_codecs_decode_array),
	.codec_array = rn_video_codecs_decode_array,
};

static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
				    const struct amdgpu_video_codecs **codecs)
{
	switch (adev->asic_type) {
	case CHIP_VEGA20:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
		if (encode)
			*codecs = &vega_video_codecs_encode;
		else
			*codecs = &vega_video_codecs_decode;
		return 0;
	case CHIP_RAVEN:
		if (encode)
			*codecs = &vega_video_codecs_encode;
		else
			*codecs = &rv_video_codecs_decode;
		return 0;
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
	case CHIP_RENOIR:
		if (encode)
			*codecs = &vega_video_codecs_encode;
		else
			*codecs = &rn_video_codecs_decode;
		return 0;
	default:
		return -EINVAL;
	}
}

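/*
 * Illustrative (hypothetical) caller, to show how the tables above are
 * consumed; in practice this hook is reached through
 * adev->asic_funcs->query_video_codecs when userspace issues an
 * AMDGPU_INFO video caps query:
 *
 *	const struct amdgpu_video_codecs *codecs;
 *
 *	if (!soc15_query_video_codecs(adev, false, &codecs))
 *		DRM_INFO("%d decode codecs\n", codecs->codec_count);
 */
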
/*
 * Indirect registers accessor
 */
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;
	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg(adev, address, data, reg);
}

static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg(adev, address, data, reg, v);
}

static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;
	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg64(adev, address, data, reg);
}

static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
}

static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

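/*
 * The indirect accessors in this file use the classic index/data pair
 * pattern: the register index is written to an INDEX register and the
 * value is then read or written through the paired DATA register.  The
 * UVD_CTX (and DIDT/CAC, below) accessors hold a spinlock with
 * interrupts disabled so the index cannot change between the two MMIO
 * accesses; the PCIE accessors get the same protection from the
 * amdgpu_device_indirect_*() helpers, which take adev->pcie_idx_lock
 * internally.
 */
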
static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
	return r;
}

static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}

static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;

	if (adev->asic_type == CHIP_RENOIR)
		return 10000;
	if (adev->asic_type == CHIP_RAVEN)
		return reference_clock / 4;

	return reference_clock;
}

void soc15_grbm_select(struct amdgpu_device *adev,
		       u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}

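/*
 * Usage note for soc15_grbm_select(): it redirects subsequent GRBM
 * register accesses to the selected ME/pipe/queue/VMID, so callers
 * (e.g. in gfx_v9_0.c) bracket it roughly like this, holding
 * adev->srbm_mutex and restoring the default selection afterwards:
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	soc15_grbm_select(adev, me, pipe, queue, vmid);
 *	... access per-queue registers ...
 *	soc15_grbm_select(adev, 0, 0, 0, 0);
 *	mutex_unlock(&adev->srbm_mutex);
 */
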
static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;
	uint32_t rom_index_offset;
	uint32_t rom_data_offset;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	rom_index_offset =
		adev->smuio.funcs->get_rom_index_offset(adev);
	rom_data_offset =
		adev->smuio.funcs->get_rom_data_offset(adev);

	/* set rom index to 0 */
	WREG32(rom_index_offset, 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(rom_data_offset);

	return true;
}

static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};

static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
			return adev->gfx.config.db_debug2;
		return RREG32(reg_offset);
	}
}

static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
		en = &soc15_allowed_read_registers[i];
		if (adev->reg_offset[en->hwip][en->inst] &&
		    reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
				   + en->reg_offset))
			continue;

		*value = soc15_get_register_value(adev,
						  soc15_allowed_read_registers[i].grbm_indexed,
						  se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

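/*
 * soc15_read_register() above only services offsets found in
 * soc15_allowed_read_registers[] and returns -EINVAL for anything else,
 * so it acts as a whitelist for the userspace register-read interface
 * (the AMDGPU_INFO_READ_MMR_REG query): arbitrary MMIO cannot be read
 * from user context.
 */
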
/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void soc15_program_register_sequence(struct amdgpu_device *adev,
				     const struct soc15_reg_golden *regs,
				     const u32 array_size)
{
	const struct soc15_reg_golden *entry;
	u32 tmp, reg;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

		if (entry->and_mask == 0xffffffff) {
			tmp = entry->or_mask;
		} else {
			tmp = (entry->hwip == GC_HWIP) ?
				RREG32_SOC15_IP(GC, reg) : RREG32(reg);

			tmp &= ~(entry->and_mask);
			tmp |= (entry->or_mask & entry->and_mask);
		}

		if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
			WREG32_RLC(reg, tmp);
		else
			(entry->hwip == GC_HWIP) ?
				WREG32_SOC15_IP(GC, reg, tmp) : WREG32(reg, tmp);

	}

}

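/*
 * Golden register tables for soc15_program_register_sequence() are
 * normally built with the SOC15_REG_GOLDEN_VALUE() macro from soc15.h.
 * A minimal sketch (the mask/value pair is illustrative only):
 *
 *	static const struct soc15_reg_golden golden_settings_example[] = {
 *		SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
 *	};
 *
 *	soc15_program_register_sequence(adev, golden_settings_example,
 *					ARRAY_SIZE(golden_settings_example));
 *
 * An and_mask of 0xffffffff is treated as a direct write of or_mask,
 * skipping the read-modify-write.
 */
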
static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/* avoid the NBIF getting stuck when doing RAS recovery with a BACO reset */
	if (ras && adev->ras_enabled)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	ret = amdgpu_dpm_baco_reset(adev);
	if (ret)
		return ret;

	/* re-enable doorbell interrupt after BACO exit */
	if (ras && adev->ras_enabled)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	return 0;
}

static enum amd_reset_method
soc15_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset = false;
	bool connected_to_cpu = false;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (adev->gmc.xgmi.supported && adev->gmc.xgmi.connected_to_cpu)
		connected_to_cpu = true;

	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
	    amdgpu_reset_method == AMD_RESET_METHOD_PCI) {
		/* If connected to a CPU, the driver only supports MODE2 */
		if (connected_to_cpu)
			return AMD_RESET_METHOD_MODE2;
		return amdgpu_reset_method;
	}

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		return AMD_RESET_METHOD_MODE2;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_ARCTURUS:
		baco_reset = amdgpu_dpm_is_baco_supported(adev);
		break;
	case CHIP_VEGA20:
		if (adev->psp.sos_fw_version >= 0x80067)
			baco_reset = amdgpu_dpm_is_baco_supported(adev);

		/*
		 * 1. PMFW version > 0x284300: all cases use baco
		 * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
		 */
		if (ras && adev->ras_enabled &&
		    adev->pm.fw_version <= 0x283400)
			baco_reset = false;
		break;
	case CHIP_ALDEBARAN:
		/*
		 * 1. connected to a CPU: driver issues a MODE2 reset
		 * 2. discrete GPU: driver issues a MODE1 reset
		 */
		if (connected_to_cpu)
			return AMD_RESET_METHOD_MODE2;
		break;
	default:
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}

static int soc15_asic_reset(struct amdgpu_device *adev)
{
	/* original raven doesn't have full asic reset */
	if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
	    !(adev->apu_flags & AMD_APU_IS_RAVEN2))
		return 0;

	switch (soc15_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_PCI:
		dev_info(adev->dev, "PCI reset\n");
		return amdgpu_device_pci_reset(adev);
	case AMD_RESET_METHOD_BACO:
		dev_info(adev->dev, "BACO reset\n");
		return soc15_asic_baco_reset(adev);
	case AMD_RESET_METHOD_MODE2:
		dev_info(adev->dev, "MODE2 reset\n");
		return amdgpu_dpm_mode2_reset(adev);
	default:
		dev_info(adev->dev, "MODE1 reset\n");
		return amdgpu_device_mode1_reset(adev);
	}
}

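/*
 * Reset method summary: BACO ("Bus Active, Chip Off") powers the chip
 * down while keeping the PCIe link active and is preferred where the
 * firmware supports it; MODE1 resets the whole ASIC; MODE2 is the
 * lighter reset used for APUs and for parts connected to the CPU over
 * XGMI, as selected in soc15_asic_reset_method() above.
 */
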
static bool soc15_supports_baco(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_ARCTURUS:
		return amdgpu_dpm_is_baco_supported(adev);
	case CHIP_VEGA20:
		if (adev->psp.sos_fw_version >= 0x80067)
			return amdgpu_dpm_is_baco_supported(adev);
		return false;
	default:
		return false;
	}
}

/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	return 0;
}*/

static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}

static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void soc15_program_aspm(struct amdgpu_device *adev)
{
	if (!amdgpu_aspm)
		return;

	if (!(adev->flags & AMD_IS_APU) &&
	    (adev->nbio.funcs->program_aspm))
		adev->nbio.funcs->program_aspm(adev);
}

static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};

static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static void soc15_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		break;
	case CHIP_RENOIR:
		/* It's safe to do IP discovery here for Renoir,
		 * as it doesn't support SR-IOV. */
		if (amdgpu_discovery) {
			r = amdgpu_discovery_reg_base_init(adev);
			if (r == 0)
				break;
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "fallback to legacy init method\n");
		}
		vega10_reg_base_init(adev);
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		break;
	case CHIP_ALDEBARAN:
		aldebaran_reg_base_init(adev);
		break;
	default:
		DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type);
		break;
	}
}

void soc15_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_ai_virt_ops;

	/* init soc15 reg base early enough so we can
	 * request full access for SR-IOV before
	 * set_ip_blocks. */
	soc15_reg_base_init(adev);
}

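/*
 * soc15_set_ip_blocks() below registers the IP blocks in the order they
 * must be brought up: amdgpu_device_ip_block_add() appends to
 * adev->ip_blocks, and the common device code initializes the blocks in
 * that order.  This is why the SR-IOV paths add PSP ahead of IH while
 * bare metal does the opposite.
 */
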
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	/* for bare metal case */
	if (!amdgpu_sriov_vf(adev))
		soc15_reg_base_init(adev);

	if (adev->flags & AMD_IS_APU) {
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
	} else if (adev->asic_type == CHIP_VEGA20 ||
		   adev->asic_type == CHIP_ARCTURUS ||
		   adev->asic_type == CHIP_ALDEBARAN) {
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
	} else {
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
	}
	adev->hdp.funcs = &hdp_v4_0_funcs;

	if (adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_ARCTURUS ||
	    adev->asic_type == CHIP_ALDEBARAN)
		adev->df.funcs = &df_v3_6_funcs;
	else
		adev->df.funcs = &df_v1_7_funcs;

	if (adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_ARCTURUS)
		adev->smuio.funcs = &smuio_v11_0_funcs;
	else if (adev->asic_type == CHIP_ALDEBARAN)
		adev->smuio.funcs = &smuio_v13_0_funcs;
	else
		adev->smuio.funcs = &smuio_v9_0_funcs;

	adev->rev_id = soc15_get_rev_id(adev);

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		/* For Vega10 SR-IOV, PSP needs to be initialized before IH */
		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
			if (adev->asic_type == CHIP_VEGA20)
				amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
			else
				amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		} else {
			if (adev->asic_type == CHIP_VEGA20)
				amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
			else
				amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
		}
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (is_support_sw_smu(adev)) {
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		}
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
			amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
		}
		break;
	case CHIP_RAVEN:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	case CHIP_ARCTURUS:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		}

		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);

		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
		}
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
		break;
	case CHIP_RENOIR:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	case CHIP_ALDEBARAN:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
		}

		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);

		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
	/* change this when we implement soft reset */
	return true;
}

static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				 uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	/* Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}

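/*
 * The counters sampled above (and in the Vega20 variant below) back the
 * "pcie_bw" sysfs file in amdgpu_pm.c: received messages and posted
 * requests sent are counted over a one second window.  Vega20 needs its
 * own version because the posted-request event moved from 104 to 108
 * and the counters live in the TXCLK3 block.
 */
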
static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				  uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	/* Reg 108 is # of posted requests sent on VG20 */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
				EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
				EVENT1_SEL, 108);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
}

static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	/* Just return false for soc15 GPUs. Reset does not seem to
	 * be necessary.
	 */
	if (!amdgpu_passthrough(adev))
		return false;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign-of-life register to confirm that the sys
	 * driver and sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e. the number of replays */
	return (nak_r + nak_g);
}

static void soc15_pre_asic_init(struct amdgpu_device *adev)
{
	gmc_v9_0_restore_registers(adev);
}

static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega10_doorbell_index_init,
	.get_pcie_usage = &soc15_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
	.supports_baco = &soc15_supports_baco,
	.pre_asic_init = &soc15_pre_asic_init,
	.query_video_codecs = &soc15_query_video_codecs,
};

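/*
 * vega20_asic_funcs below (used for Vega20, Arcturus and Aldebaran)
 * differs from soc15_asic_funcs only in the doorbell index init and in
 * the TXCLK3-based PCIe usage counters.
 */
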
static const struct amdgpu_asic_funcs vega20_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega20_doorbell_index_init,
	.get_pcie_usage = &vega20_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
	.supports_baco = &soc15_supports_baco,
	.pre_asic_init = &soc15_pre_asic_init,
	.query_video_codecs = &soc15_query_video_codecs,
};

static int soc15_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->pcie_rreg64 = &soc15_pcie_rreg64;
	adev->pcie_wreg64 = &soc15_pcie_wreg64;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;

	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_VEGA12:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_VEGA20:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_RAVEN:
		adev->asic_funcs = &soc15_asic_funcs;
		if (adev->pdev->device == 0x15dd)
			adev->apu_flags |= AMD_APU_IS_RAVEN;
		if (adev->pdev->device == 0x15d8)
			adev->apu_flags |= AMD_APU_IS_PICASSO;
		if (adev->rev_id >= 0x8)
			adev->apu_flags |= AMD_APU_IS_RAVEN2;

		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			adev->external_rev_id = adev->rev_id + 0x79;
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			adev->external_rev_id = adev->rev_id + 0x41;
		else if (adev->rev_id == 1)
			adev->external_rev_id = adev->rev_id + 0x20;
		else
			adev->external_rev_id = adev->rev_id + 0x01;

		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		} else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
				AMD_PG_SUPPORT_MMHUB |
				AMD_PG_SUPPORT_VCN;
		} else {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_RLC_LS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_MGCG |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_MGCG |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_DRM_MGCG |
				AMD_CG_SUPPORT_DRM_LS |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		}
		break;
	case CHIP_ARCTURUS:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	case CHIP_RENOIR:
		adev->asic_funcs = &soc15_asic_funcs;
		if ((adev->pdev->device == 0x1636) ||
		    (adev->pdev->device == 0x164c))
			adev->apu_flags |= AMD_APU_IS_RENOIR;
		else
			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;

		if (adev->apu_flags & AMD_APU_IS_RENOIR)
			adev->external_rev_id = adev->rev_id + 0x91;
		else
			adev->external_rev_id = adev->rev_id + 0xa1;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_DF_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		break;
	case CHIP_ALDEBARAN:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = 0;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	if (adev->nbio.ras_funcs &&
	    adev->nbio.ras_funcs->ras_late_init)
		r = adev->nbio.ras_funcs->ras_late_init(adev);

	return r;
}

static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	adev->df.funcs->sw_init(adev);

	return 0;
}

static int soc15_common_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->nbio.ras_funcs &&
	    adev->nbio.ras_funcs->ras_fini)
		adev->nbio.ras_funcs->ras_fini(adev);
	adev->df.funcs->sw_fini(adev);
	return 0;
}

static void soc15_doorbell_range_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ring *ring;

	/* SDMA/IH doorbell ranges are programmed by the hypervisor */
	if (!amdgpu_sriov_vf(adev)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			ring = &adev->sdma.instance[i].ring;
			adev->nbio.funcs->sdma_doorbell_range(adev, i,
				ring->use_doorbell, ring->doorbell_index,
				adev->doorbell_index.sdma_doorbell_range);
		}

		adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
						    adev->irq.ih.doorbell_index);
	}
}

static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in MMIO space,
	 * in order to expose those registers
	 * to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);

	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);
	/* HW doorbell routing policy: doorbell writes not
	 * in the SDMA/IH/MM/ACV ranges are routed to CP, so
	 * we need to init the SDMA/IH/MM/ACV doorbell ranges prior
	 * to the CP IP block init and ring test.
	 */
	soc15_doorbell_range_init(adev);

	return 0;
}

static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	if (adev->nbio.ras_if &&
	    amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
		if (adev->nbio.ras_funcs &&
		    adev->nbio.ras_funcs->init_ras_controller_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
		if (adev->nbio.ras_funcs &&
		    adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
	}

	return 0;
}

static int soc15_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_fini(adev);
}

static int soc15_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_init(adev);
}

static bool soc15_common_is_idle(void *handle)
{
	return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc15_common_soft_reset(void *handle)
{
	return 0;
}

static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
		data &= ~(0x01000000 |
			  0x02000000 |
			  0x04000000 |
			  0x08000000 |
			  0x10000000 |
			  0x20000000 |
			  0x40000000 |
			  0x80000000);
	else
		data |= (0x01000000 |
			 0x02000000 |
			 0x04000000 |
			 0x08000000 |
			 0x10000000 |
			 0x20000000 |
			 0x40000000 |
			 0x80000000);

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}

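/*
 * The raw masks above cover bits 24..31 of MP0_MISC_CGTT_CTRL0, which
 * appear to be per-client CGTT clock-gating override bits: setting them
 * forces the clocks on, clearing them lets DRM medium grain clock
 * gating engage.
 */
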
static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}

static int soc15_common_set_clockgating_state(void *handle,
					      enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->smuio.funcs->update_rom_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->df.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	adev->hdp.funcs->get_clock_gating_state(adev, flags);

	if (adev->asic_type != CHIP_ALDEBARAN) {

		/* AMD_CG_SUPPORT_DRM_MGCG */
		data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
		if (!(data & 0x01000000))
			*flags |= AMD_CG_SUPPORT_DRM_MGCG;

		/* AMD_CG_SUPPORT_DRM_LS */
		data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
		if (data & 0x1)
			*flags |= AMD_CG_SUPPORT_DRM_LS;
	}

	/* AMD_CG_SUPPORT_ROM_MGCG */
	adev->smuio.funcs->get_clock_gating_state(adev, flags);

	adev->df.funcs->get_clockgating_state(adev, flags);
}

static int soc15_common_set_powergating_state(void *handle,
					      enum amd_powergating_state state)
{
	/* todo */
	return 0;
}

const struct amd_ip_funcs soc15_common_ip_funcs = {
	.name = "soc15_common",
	.early_init = soc15_common_early_init,
	.late_init = soc15_common_late_init,
	.sw_init = soc15_common_sw_init,
	.sw_fini = soc15_common_sw_fini,
	.hw_init = soc15_common_hw_init,
	.hw_fini = soc15_common_hw_fini,
	.suspend = soc15_common_suspend,
	.resume = soc15_common_resume,
	.is_idle = soc15_common_is_idle,
	.wait_for_idle = soc15_common_wait_for_idle,
	.soft_reset = soc15_common_soft_reset,
	.set_clockgating_state = soc15_common_set_clockgating_state,
	.set_powergating_state = soc15_common_set_powergating_state,
	.get_clockgating_state = soc15_common_get_clockgating_state,
};