/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_discovery.h"
#include "soc15_hw_ip.h"
#include "discovery.h"

#include "soc15.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v7_2.h"
#include "hdp_v5_0.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "amdgpu_vkms.h"
#include "mes_v10_1.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"
#include "smuio_v13_0.h"

MODULE_FIRMWARE("amdgpu/ip_discovery.bin");

#define mmRCC_CONFIG_MEMSIZE	0xde3
#define mmMM_INDEX		0x0
#define mmMM_INDEX_HI		0x6
#define mmMM_DATA		0x1

static const char *hw_id_names[HW_ID_MAX] = {
	[MP1_HWID] = "MP1",
	[MP2_HWID] = "MP2",
	[THM_HWID] = "THM",
	[SMUIO_HWID] = "SMUIO",
	[FUSE_HWID] = "FUSE",
	[CLKA_HWID] = "CLKA",
	[PWR_HWID] = "PWR",
	[GC_HWID] = "GC",
	[UVD_HWID] = "UVD",
	[AUDIO_AZ_HWID] = "AUDIO_AZ",
	[ACP_HWID] = "ACP",
	[DCI_HWID] = "DCI",
	[DMU_HWID] = "DMU",
	[DCO_HWID] = "DCO",
	[DIO_HWID] = "DIO",
	[XDMA_HWID] = "XDMA",
	[DCEAZ_HWID] = "DCEAZ",
	[DAZ_HWID] = "DAZ",
	[SDPMUX_HWID] = "SDPMUX",
	[NTB_HWID] = "NTB",
	[IOHC_HWID] = "IOHC",
	[L2IMU_HWID] = "L2IMU",
	[VCE_HWID] = "VCE",
	[MMHUB_HWID] = "MMHUB",
	[ATHUB_HWID] = "ATHUB",
	[DBGU_NBIO_HWID] = "DBGU_NBIO",
	[DFX_HWID] = "DFX",
	[DBGU0_HWID] = "DBGU0",
	[DBGU1_HWID] = "DBGU1",
	[OSSSYS_HWID] = "OSSSYS",
	[HDP_HWID] = "HDP",
	[SDMA0_HWID] = "SDMA0",
	[SDMA1_HWID] = "SDMA1",
	[SDMA2_HWID] = "SDMA2",
	[SDMA3_HWID] = "SDMA3",
	[ISP_HWID] = "ISP",
	[DBGU_IO_HWID] = "DBGU_IO",
	[DF_HWID] = "DF",
	[CLKB_HWID] = "CLKB",
	[FCH_HWID] = "FCH",
	[DFX_DAP_HWID] = "DFX_DAP",
	[L1IMU_PCIE_HWID] = "L1IMU_PCIE",
	[L1IMU_NBIF_HWID] = "L1IMU_NBIF",
	[L1IMU_IOAGR_HWID] = "L1IMU_IOAGR",
	[L1IMU3_HWID] = "L1IMU3",
	[L1IMU4_HWID] = "L1IMU4",
	[L1IMU5_HWID] = "L1IMU5",
	[L1IMU6_HWID] = "L1IMU6",
	[L1IMU7_HWID] = "L1IMU7",
	[L1IMU8_HWID] = "L1IMU8",
	[L1IMU9_HWID] = "L1IMU9",
	[L1IMU10_HWID] = "L1IMU10",
	[L1IMU11_HWID] = "L1IMU11",
	[L1IMU12_HWID] = "L1IMU12",
	[L1IMU13_HWID] = "L1IMU13",
	[L1IMU14_HWID] = "L1IMU14",
	[L1IMU15_HWID] = "L1IMU15",
	[WAFLC_HWID] = "WAFLC",
	[FCH_USB_PD_HWID] = "FCH_USB_PD",
	[PCIE_HWID] = "PCIE",
	[PCS_HWID] = "PCS",
	[DDCL_HWID] = "DDCL",
	[SST_HWID] = "SST",
	[IOAGR_HWID] = "IOAGR",
	[NBIF_HWID] = "NBIF",
	[IOAPIC_HWID] = "IOAPIC",
	[SYSTEMHUB_HWID] = "SYSTEMHUB",
	[NTBCCP_HWID] = "NTBCCP",
	[UMC_HWID] = "UMC",
	[SATA_HWID] = "SATA",
	[USB_HWID] = "USB",
	[CCXSEC_HWID] = "CCXSEC",
	[XGMI_HWID] = "XGMI",
	[XGBE_HWID] = "XGBE",
	[MP0_HWID] = "MP0",
};

static int hw_id_map[MAX_HWIP] = {
	[GC_HWIP] = GC_HWID,
	[HDP_HWIP] = HDP_HWID,
	[SDMA0_HWIP] = SDMA0_HWID,
	[SDMA1_HWIP] = SDMA1_HWID,
	[SDMA2_HWIP] = SDMA2_HWID,
	[SDMA3_HWIP] = SDMA3_HWID,
	[MMHUB_HWIP] = MMHUB_HWID,
	[ATHUB_HWIP] = ATHUB_HWID,
	[NBIO_HWIP] = NBIF_HWID,
	[MP0_HWIP] = MP0_HWID,
	[MP1_HWIP] = MP1_HWID,
	[UVD_HWIP] = UVD_HWID,
	[VCE_HWIP] = VCE_HWID,
	[DF_HWIP] = DF_HWID,
	[DCE_HWIP] = DMU_HWID,
	[OSSSYS_HWIP] = OSSSYS_HWID,
	[SMUIO_HWIP] = SMUIO_HWID,
	[PWR_HWIP] = PWR_HWID,
	[NBIF_HWIP] = NBIF_HWID,
	[THM_HWIP] = THM_HWID,
	[CLK_HWIP] = CLKA_HWID,
	[UMC_HWIP] = UMC_HWID,
	[XGMI_HWIP] = XGMI_HWID,
	[DCI_HWIP] = DCI_HWID,
};
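
/*
 * Read the IP discovery binary out of VRAM.  The binary sits
 * DISCOVERY_TMR_OFFSET bytes below the top of VRAM; the VRAM size is
 * read from RCC_CONFIG_MEMSIZE (reported in MB).
 */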
static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary)
{
	uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
	uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;

	amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
				  adev->mman.discovery_tmr_size, false);
	return 0;
}

static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
{
	uint16_t checksum = 0;
	int i;

	for (i = 0; i < size; i++)
		checksum += data[i];

	return checksum;
}

static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
						    uint16_t expected)
{
	return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
}
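
/*
 * Fetch the IP discovery binary (from the amdgpu/ip_discovery.bin file
 * when amdgpu_discovery == 2, otherwise from VRAM) and validate the
 * binary signature plus the checksums of the IP discovery and GC tables.
 */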
static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
	struct table_info *info;
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct gpu_info_header *ghdr;
	const struct firmware *fw;
	uint16_t offset;
	uint16_t size;
	uint16_t checksum;
	int r;

	adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
	adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
	if (!adev->mman.discovery_bin)
		return -ENOMEM;

	if (amdgpu_discovery == 2) {
		r = request_firmware(&fw, "amdgpu/ip_discovery.bin", adev->dev);
		if (r)
			goto get_from_vram;
		dev_info(adev->dev, "Using IP discovery from file\n");
		memcpy((u8 *)adev->mman.discovery_bin, (u8 *)fw->data,
		       adev->mman.discovery_tmr_size);
		release_firmware(fw);
	} else {
get_from_vram:
		r = amdgpu_discovery_read_binary(adev, adev->mman.discovery_bin);
		if (r) {
			DRM_ERROR("failed to read ip discovery binary\n");
			goto out;
		}
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;

	if (le32_to_cpu(bhdr->binary_signature) != BINARY_SIGNATURE) {
		DRM_ERROR("invalid ip discovery binary signature\n");
		r = -EINVAL;
		goto out;
	}

	offset = offsetof(struct binary_header, binary_checksum) +
		sizeof(bhdr->binary_checksum);
	size = le16_to_cpu(bhdr->binary_size) - offset;
	checksum = le16_to_cpu(bhdr->binary_checksum);

	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
					      size, checksum)) {
		DRM_ERROR("invalid ip discovery binary checksum\n");
		r = -EINVAL;
		goto out;
	}

	info = &bhdr->table_list[IP_DISCOVERY];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);

	if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
		DRM_ERROR("invalid ip discovery data table signature\n");
		r = -EINVAL;
		goto out;
	}

	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
					      le16_to_cpu(ihdr->size), checksum)) {
		DRM_ERROR("invalid ip discovery data table checksum\n");
		r = -EINVAL;
		goto out;
	}

	info = &bhdr->table_list[GC];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);
	ghdr = (struct gpu_info_header *)(adev->mman.discovery_bin + offset);

	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
					      le32_to_cpu(ghdr->size), checksum)) {
		DRM_ERROR("invalid gc data table checksum\n");
		r = -EINVAL;
		goto out;
	}

	return 0;

out:
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;

	return r;
}

void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;
}

static int amdgpu_discovery_validate_ip(const struct ip *ip)
{
	if (ip->number_instance >= HWIP_MAX_INSTANCE) {
		DRM_ERROR("Unexpected number_instance (%d) from ip discovery blob\n",
			  ip->number_instance);
		return -EINVAL;
	}
	if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
		DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
			  le16_to_cpu(ip->hw_id));
		return -EINVAL;
	}

	return 0;
}
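
/*
 * Walk every die and IP instance in the discovery table: record the
 * register base offsets in adev->reg_offset, the IP versions in
 * adev->ip_versions, and count the VCN and SDMA instances.
 */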
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip *ip;
	uint16_t die_offset;
	uint16_t ip_offset;
	uint16_t num_dies;
	uint16_t num_ips;
	uint8_t num_base_address;
	int hw_ip;
	int i, j, k;
	int r;

	r = amdgpu_discovery_init(adev);
	if (r) {
		DRM_ERROR("amdgpu_discovery_init failed\n");
		return r;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		if (le16_to_cpu(dhdr->die_id) != i) {
			DRM_ERROR("invalid die id %d, expected %d\n",
				  le16_to_cpu(dhdr->die_id), i);
			return -EINVAL;
		}

		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
			  le16_to_cpu(dhdr->die_id), num_ips);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);

			if (amdgpu_discovery_validate_ip(ip))
				goto next_ip;

			num_base_address = ip->num_base_address;

			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
				  hw_id_names[le16_to_cpu(ip->hw_id)],
				  le16_to_cpu(ip->hw_id),
				  ip->number_instance,
				  ip->major, ip->minor,
				  ip->revision);

			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
				if (amdgpu_sriov_vf(adev)) {
					/* SR-IOV modifies each VCN's revision (uint8)
					 * Bit [5:0]: original revision value
					 * Bit [7:6]: en/decode capability:
					 *     0b00 : VCN function normally
					 *     0b10 : encode is disabled
					 *     0b01 : decode is disabled
					 */
					adev->vcn.sriov_config[adev->vcn.num_vcn_inst] =
						(ip->revision & 0xc0) >> 6;
					ip->revision &= ~0xc0;
				}
				adev->vcn.num_vcn_inst++;
			}
			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA3_HWID)
				adev->sdma.num_instances++;

			for (k = 0; k < num_base_address; k++) {
				/*
				 * convert the endianness of base addresses in place,
				 * so that we don't need to convert them when accessing adev->reg_offset.
				 */
				ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
			}

			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) {
					DRM_DEBUG("set register base offset for %s\n",
						  hw_id_names[le16_to_cpu(ip->hw_id)]);
					adev->reg_offset[hw_ip][ip->number_instance] =
						ip->base_address;
					/* Instance support is somewhat inconsistent.
					 * SDMA is a good example. Sienna cichlid has 4 total
					 * SDMA instances, each enumerated separately (HWIDs
					 * 42, 43, 68, 69). Arcturus has 8 total SDMA instances,
					 * but they are enumerated as multiple instances of the
					 * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another
					 * example. On most chips there are multiple instances
					 * with the same HWID.
					 */
					adev->ip_versions[hw_ip][ip->number_instance] =
						IP_VERSION(ip->major, ip->minor, ip->revision);
				}
			}

next_ip:
			ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1);
		}
	}

	return 0;
}
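
/*
 * Look up the major/minor/revision of a specific hw_id instance by
 * scanning the discovery table directly.
 */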
int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance,
				    int *major, int *minor, int *revision)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip *ip;
	uint16_t die_offset;
	uint16_t ip_offset;
	uint16_t num_dies;
	uint16_t num_ips;
	int i, j;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);

			if ((le16_to_cpu(ip->hw_id) == hw_id) && (ip->number_instance == number_instance)) {
				if (major)
					*major = ip->major;
				if (minor)
					*minor = ip->minor;
				if (revision)
					*revision = ip->revision;
				return 0;
			}
			ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1);
		}
	}

	return -EINVAL;
}

int amdgpu_discovery_get_vcn_version(struct amdgpu_device *adev, int vcn_instance,
				     int *major, int *minor, int *revision)
{
	return amdgpu_discovery_get_ip_version(adev, VCN_HWID,
					       vcn_instance, major, minor, revision);
}
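
/*
 * Parse the harvest table and translate it into the VCN harvest_config
 * and the global harvest_ip_mask, with fixups for boards whose harvest
 * info is known to be missing or wrong.
 */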
void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	struct harvest_table *harvest_info;
	int i, vcn_harvest_count = 0;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	harvest_info = (struct harvest_table *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset));

	for (i = 0; i < 32; i++) {
		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
			break;

		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
		case VCN_HWID:
			vcn_harvest_count++;
			if (harvest_info->list[i].number_instance == 0)
				adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
			else
				adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
			break;
		case DMU_HWID:
			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
			break;
		default:
			break;
		}
	}
	/* some IP discovery tables on Navy Flounder don't have this set correctly */
	if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
	    (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2)))
		adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
	}
	if ((adev->pdev->device == 0x731E &&
	     (adev->pdev->revision == 0xC6 || adev->pdev->revision == 0xC7)) ||
	    (adev->pdev->device == 0x7340 && adev->pdev->revision == 0xC9) ||
	    (adev->pdev->device == 0x7360 && adev->pdev->revision == 0xC7)) {
		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
	}
}
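
/*
 * Fill the gfx config and CU info from the GC table in the discovery
 * binary.
 */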
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	struct gc_info_v1_0 *gc_info;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	gc_info = (struct gc_info_v1_0 *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[GC].offset));

	adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se);
	adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->gc_num_wgp0_per_sa) +
					      le32_to_cpu(gc_info->gc_num_wgp1_per_sa));
	adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->gc_num_sa_per_se);
	adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->gc_num_rb_per_se);
	adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->gc_num_gl2c);
	adev->gfx.config.max_gprs = le32_to_cpu(gc_info->gc_num_gprs);
	adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->gc_num_max_gs_thds);
	adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->gc_gs_table_depth);
	adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->gc_gsprim_buff_depth);
	adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->gc_double_offchip_lds_buffer);
	adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->gc_wave_size);
	adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->gc_max_waves_per_simd);
	adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->gc_max_scratch_slots_per_cu);
	adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->gc_lds_size);
	adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->gc_num_sc_per_se) /
					 le32_to_cpu(gc_info->gc_num_sa_per_se);
	adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->gc_num_packer_per_sc);

	return 0;
}
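
/*
 * The amdgpu_discovery_set_*_ip_blocks() helpers below translate the
 * detected IP versions into the corresponding IP block registrations.
 */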
static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
{
	/* what IP to use for this? */
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add common ip block(GC_HWIP:0x%x)\n",
			adev->ip_versions[GC_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
{
	/* use GC or MMHUB IP version */
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add gmc ip block(GC_HWIP:0x%x)\n",
			adev->ip_versions[GC_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[OSSSYS_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 3, 0):
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		break;
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 1):
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
			adev->ip_versions[OSSSYS_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MP0_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
		break;
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		break;
	case IP_VERSION(11, 0, 8):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
		break;
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
			adev->ip_versions[MP0_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(11, 0, 2):
		if (adev->asic_type == CHIP_ARCTURUS)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		else
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
			adev->ip_versions[MP1_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) {
		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
	} else if (adev->ip_versions[DCE_HWIP][0]) {
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
				adev->ip_versions[DCE_HWIP][0]);
			return -EINVAL;
		}
	} else if (adev->ip_versions[DCI_HWIP][0]) {
		switch (adev->ip_versions[DCI_HWIP][0]) {
		case IP_VERSION(12, 0, 0):
		case IP_VERSION(12, 0, 1):
		case IP_VERSION(12, 1, 0):
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
				adev->ip_versions[DCI_HWIP][0]);
			return -EINVAL;
		}
#endif
	}
	return 0;
}

static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 3):
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add gfx ip block(GC_HWIP:0x%x)\n",
			adev->ip_versions[GC_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[SDMA0_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 2):
	case IP_VERSION(4, 4, 0):
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 5):
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		break;
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 2):
	case IP_VERSION(5, 2, 4):
	case IP_VERSION(5, 2, 5):
	case IP_VERSION(5, 2, 3):
	case IP_VERSION(5, 2, 1):
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
			adev->ip_versions[SDMA0_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->ip_versions[VCE_HWIP][0]) {
		switch (adev->ip_versions[UVD_HWIP][0]) {
		case IP_VERSION(7, 0, 0):
		case IP_VERSION(7, 2, 0):
			/* UVD is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
				adev->ip_versions[UVD_HWIP][0]);
			return -EINVAL;
		}
		switch (adev->ip_versions[VCE_HWIP][0]) {
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 1, 0):
			/* VCE is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
				adev->ip_versions[VCE_HWIP][0]);
			return -EINVAL;
		}
	} else {
		switch (adev->ip_versions[UVD_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 2, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
			break;
		case IP_VERSION(2, 0, 3):
			break;
		case IP_VERSION(2, 5, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
			break;
		case IP_VERSION(2, 6, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
			break;
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 16):
		case IP_VERSION(3, 1, 1):
		case IP_VERSION(3, 0, 2):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
			break;
		case IP_VERSION(3, 0, 33):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
				adev->ip_versions[UVD_HWIP][0]);
			return -EINVAL;
		}
	}
	return 0;
}

static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
		amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	default:
		break;
	}
	return 0;
}
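
/*
 * Main entry point: for pre-discovery ASICs the IP versions are
 * hardcoded here, for everything else they come from the discovery
 * table.  The per-IP callbacks and IP blocks are then registered
 * based on those versions.
 */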
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
		break;
	case CHIP_VEGA12:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
		break;
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 1;
		adev->vcn.num_vcn_inst = 1;
		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
		} else {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
		}
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		adev->sdma.num_instances = 8;
		adev->vcn.num_vcn_inst = 2;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
		break;
	case CHIP_ALDEBARAN:
		aldebaran_reg_base_init(adev);
		adev->sdma.num_instances = 5;
		adev->vcn.num_vcn_inst = 2;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
		break;
	default:
		r = amdgpu_discovery_reg_base_init(adev);
		if (r)
			return -EINVAL;

		amdgpu_discovery_harvest_ip(adev);

		if (!adev->mman.discovery_bin) {
			DRM_ERROR("ip discovery uninitialized\n");
			return -EINVAL;
		}
		break;
	}

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		adev->family = AMDGPU_FAMILY_AI;
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
		adev->family = AMDGPU_FAMILY_RV;
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
		adev->family = AMDGPU_FAMILY_NV;
		break;
	case IP_VERSION(10, 3, 1):
		adev->family = AMDGPU_FAMILY_VGH;
		break;
	case IP_VERSION(10, 3, 3):
		adev->family = AMDGPU_FAMILY_YC;
		break;
	default:
		return -EINVAL;
	}

	if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0))
		adev->gmc.xgmi.supported = true;

	/* set NBIO version */
	switch (adev->ip_versions[NBIO_HWIP][0]) {
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 2, 0):
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
	case IP_VERSION(2, 5, 0):
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
		break;
	case IP_VERSION(7, 4, 0):
	case IP_VERSION(7, 4, 1):
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
		break;
	case IP_VERSION(7, 4, 4):
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg_ald;
		break;
	case IP_VERSION(7, 2, 0):
	case IP_VERSION(7, 2, 1):
	case IP_VERSION(7, 5, 0):
		adev->nbio.funcs = &nbio_v7_2_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
		break;
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 3, 1):
	case IP_VERSION(2, 3, 2):
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
		break;
	case IP_VERSION(3, 3, 0):
	case IP_VERSION(3, 3, 1):
	case IP_VERSION(3, 3, 2):
	case IP_VERSION(3, 3, 3):
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg_sc;
		break;
	default:
		break;
	}

	switch (adev->ip_versions[HDP_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
		adev->hdp.funcs = &hdp_v4_0_funcs;
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 0, 4):
	case IP_VERSION(5, 2, 0):
		adev->hdp.funcs = &hdp_v5_0_funcs;
		break;
	default:
		break;
	}

	switch (adev->ip_versions[DF_HWIP][0]) {
	case IP_VERSION(3, 6, 0):
	case IP_VERSION(3, 6, 1):
	case IP_VERSION(3, 6, 2):
		adev->df.funcs = &df_v3_6_funcs;
		break;
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 5, 0):
	case IP_VERSION(3, 5, 1):
	case IP_VERSION(3, 5, 2):
		adev->df.funcs = &df_v1_7_funcs;
		break;
	default:
		break;
	}

	switch (adev->ip_versions[SMUIO_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(10, 0, 2):
		adev->smuio.funcs = &smuio_v9_0_funcs;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
		adev->smuio.funcs = &smuio_v11_0_funcs;
		break;
	case IP_VERSION(11, 0, 6):
	case IP_VERSION(11, 0, 10):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(13, 0, 1):
		adev->smuio.funcs = &smuio_v11_0_6_funcs;
		break;
	case IP_VERSION(13, 0, 2):
		adev->smuio.funcs = &smuio_v13_0_funcs;
		break;
	default:
		break;
	}

	r = amdgpu_discovery_set_common_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
	if (r)
		return r;

	/* For SR-IOV, PSP needs to be initialized before IH */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_discovery_set_psp_ip_blocks(adev);
		if (r)
			return r;
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;
	} else {
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;

		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
			r = amdgpu_discovery_set_psp_ip_blocks(adev);
			if (r)
				return r;
		}
	}

	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_display_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gc_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
	    !amdgpu_sriov_vf(adev)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_mm_ip_blocks(adev);
	if (r)
		return r;

	if (adev->enable_mes) {
		r = amdgpu_discovery_set_mes_ip_blocks(adev);
		if (r)
			return r;
	}

	return 0;
}