1 /* 2 * Copyright 2018 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include <linux/firmware.h> 25 26 #include "amdgpu.h" 27 #include "amdgpu_discovery.h" 28 #include "soc15_hw_ip.h" 29 #include "discovery.h" 30 31 #include "soc15.h" 32 #include "gfx_v9_0.h" 33 #include "gmc_v9_0.h" 34 #include "df_v1_7.h" 35 #include "df_v3_6.h" 36 #include "nbio_v6_1.h" 37 #include "nbio_v7_0.h" 38 #include "nbio_v7_4.h" 39 #include "hdp_v4_0.h" 40 #include "vega10_ih.h" 41 #include "vega20_ih.h" 42 #include "sdma_v4_0.h" 43 #include "uvd_v7_0.h" 44 #include "vce_v4_0.h" 45 #include "vcn_v1_0.h" 46 #include "vcn_v2_5.h" 47 #include "jpeg_v2_5.h" 48 #include "smuio_v9_0.h" 49 #include "gmc_v10_0.h" 50 #include "gmc_v11_0.h" 51 #include "gfxhub_v2_0.h" 52 #include "mmhub_v2_0.h" 53 #include "nbio_v2_3.h" 54 #include "nbio_v4_3.h" 55 #include "nbio_v7_2.h" 56 #include "hdp_v5_0.h" 57 #include "hdp_v6_0.h" 58 #include "nv.h" 59 #include "soc21.h" 60 #include "navi10_ih.h" 61 #include "ih_v6_0.h" 62 #include "gfx_v10_0.h" 63 #include "sdma_v5_0.h" 64 #include "sdma_v5_2.h" 65 #include "vcn_v2_0.h" 66 #include "jpeg_v2_0.h" 67 #include "vcn_v3_0.h" 68 #include "jpeg_v3_0.h" 69 #include "amdgpu_vkms.h" 70 #include "mes_v10_1.h" 71 #include "smuio_v11_0.h" 72 #include "smuio_v11_0_6.h" 73 #include "smuio_v13_0.h" 74 #include "smuio_v13_0_6.h" 75 76 #define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin" 77 MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY); 78 79 #define mmRCC_CONFIG_MEMSIZE 0xde3 80 #define mmMM_INDEX 0x0 81 #define mmMM_INDEX_HI 0x6 82 #define mmMM_DATA 0x1 83 84 static const char *hw_id_names[HW_ID_MAX] = { 85 [MP1_HWID] = "MP1", 86 [MP2_HWID] = "MP2", 87 [THM_HWID] = "THM", 88 [SMUIO_HWID] = "SMUIO", 89 [FUSE_HWID] = "FUSE", 90 [CLKA_HWID] = "CLKA", 91 [PWR_HWID] = "PWR", 92 [GC_HWID] = "GC", 93 [UVD_HWID] = "UVD", 94 [AUDIO_AZ_HWID] = "AUDIO_AZ", 95 [ACP_HWID] = "ACP", 96 [DCI_HWID] = "DCI", 97 [DMU_HWID] = "DMU", 98 [DCO_HWID] = "DCO", 99 [DIO_HWID] = "DIO", 100 [XDMA_HWID] = "XDMA", 101 [DCEAZ_HWID] = "DCEAZ", 102 [DAZ_HWID] = "DAZ", 103 [SDPMUX_HWID] = "SDPMUX", 104 [NTB_HWID] = "NTB", 105 [IOHC_HWID] = "IOHC", 106 [L2IMU_HWID] = "L2IMU", 107 [VCE_HWID] = "VCE", 108 [MMHUB_HWID] = "MMHUB", 109 [ATHUB_HWID] = "ATHUB", 110 [DBGU_NBIO_HWID] = "DBGU_NBIO", 111 [DFX_HWID] = "DFX", 112 [DBGU0_HWID] = "DBGU0", 113 [DBGU1_HWID] = "DBGU1", 114 [OSSSYS_HWID] = "OSSSYS", 115 [HDP_HWID] = "HDP", 116 [SDMA0_HWID] = "SDMA0", 117 [SDMA1_HWID] = "SDMA1", 118 [SDMA2_HWID] = "SDMA2", 119 [SDMA3_HWID] = "SDMA3", 120 [ISP_HWID] = "ISP", 121 [DBGU_IO_HWID] = "DBGU_IO", 122 [DF_HWID] = "DF", 123 [CLKB_HWID] = "CLKB", 124 [FCH_HWID] = "FCH", 125 [DFX_DAP_HWID] = "DFX_DAP", 126 [L1IMU_PCIE_HWID] = "L1IMU_PCIE", 127 [L1IMU_NBIF_HWID] = "L1IMU_NBIF", 128 [L1IMU_IOAGR_HWID] = "L1IMU_IOAGR", 129 [L1IMU3_HWID] = "L1IMU3", 130 [L1IMU4_HWID] = "L1IMU4", 131 [L1IMU5_HWID] = "L1IMU5", 132 [L1IMU6_HWID] = "L1IMU6", 133 [L1IMU7_HWID] = "L1IMU7", 134 [L1IMU8_HWID] = "L1IMU8", 135 [L1IMU9_HWID] = "L1IMU9", 136 [L1IMU10_HWID] = "L1IMU10", 137 [L1IMU11_HWID] = "L1IMU11", 138 [L1IMU12_HWID] = "L1IMU12", 139 [L1IMU13_HWID] = "L1IMU13", 140 [L1IMU14_HWID] = "L1IMU14", 141 [L1IMU15_HWID] = "L1IMU15", 142 [WAFLC_HWID] = "WAFLC", 143 [FCH_USB_PD_HWID] = "FCH_USB_PD", 144 [PCIE_HWID] = "PCIE", 145 [PCS_HWID] = "PCS", 146 [DDCL_HWID] = "DDCL", 147 [SST_HWID] = "SST", 148 [IOAGR_HWID] = "IOAGR", 149 [NBIF_HWID] = "NBIF", 150 [IOAPIC_HWID] = "IOAPIC", 151 [SYSTEMHUB_HWID] = "SYSTEMHUB", 152 [NTBCCP_HWID] = "NTBCCP", 153 [UMC_HWID] = "UMC", 154 [SATA_HWID] = "SATA", 155 [USB_HWID] = "USB", 156 [CCXSEC_HWID] = "CCXSEC", 157 [XGMI_HWID] = "XGMI", 158 [XGBE_HWID] = "XGBE", 159 [MP0_HWID] = "MP0", 160 }; 161 162 static int hw_id_map[MAX_HWIP] = { 163 [GC_HWIP] = GC_HWID, 164 [HDP_HWIP] = HDP_HWID, 165 [SDMA0_HWIP] = SDMA0_HWID, 166 [SDMA1_HWIP] = SDMA1_HWID, 167 [SDMA2_HWIP] = SDMA2_HWID, 168 [SDMA3_HWIP] = SDMA3_HWID, 169 [MMHUB_HWIP] = MMHUB_HWID, 170 [ATHUB_HWIP] = ATHUB_HWID, 171 [NBIO_HWIP] = NBIF_HWID, 172 [MP0_HWIP] = MP0_HWID, 173 [MP1_HWIP] = MP1_HWID, 174 [UVD_HWIP] = UVD_HWID, 175 [VCE_HWIP] = VCE_HWID, 176 [DF_HWIP] = DF_HWID, 177 [DCE_HWIP] = DMU_HWID, 178 [OSSSYS_HWIP] = OSSSYS_HWID, 179 [SMUIO_HWIP] = SMUIO_HWID, 180 [PWR_HWIP] = PWR_HWID, 181 [NBIF_HWIP] = NBIF_HWID, 182 [THM_HWIP] = THM_HWID, 183 [CLK_HWIP] = CLKA_HWID, 184 [UMC_HWIP] = UMC_HWID, 185 [XGMI_HWIP] = XGMI_HWID, 186 [DCI_HWIP] = DCI_HWID, 187 }; 188 189 static int amdgpu_discovery_read_binary_from_vram(struct amdgpu_device *adev, uint8_t *binary) 190 { 191 uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; 192 uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET; 193 194 amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, 195 adev->mman.discovery_tmr_size, false); 196 return 0; 197 } 198 199 static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary) 200 { 201 const struct firmware *fw; 202 const char *fw_name; 203 int r; 204 205 switch (amdgpu_discovery) { 206 case 2: 207 fw_name = FIRMWARE_IP_DISCOVERY; 208 break; 209 default: 210 dev_warn(adev->dev, "amdgpu_discovery is not set properly\n"); 211 return -EINVAL; 212 } 213 214 r = request_firmware(&fw, fw_name, adev->dev); 215 if (r) { 216 dev_err(adev->dev, "can't load firmware \"%s\"\n", 217 fw_name); 218 return r; 219 } 220 221 memcpy((u8 *)binary, (u8 *)fw->data, adev->mman.discovery_tmr_size); 222 release_firmware(fw); 223 224 return 0; 225 } 226 227 static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size) 228 { 229 uint16_t checksum = 0; 230 int i; 231 232 for (i = 0; i < size; i++) 233 checksum += data[i]; 234 235 return checksum; 236 } 237 238 static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size, 239 uint16_t expected) 240 { 241 return !!(amdgpu_discovery_calculate_checksum(data, size) == expected); 242 } 243 244 static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary) 245 { 246 struct binary_header *bhdr; 247 bhdr = (struct binary_header *)binary; 248 249 return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE); 250 } 251 252 static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev) 253 { 254 /* 255 * So far, apply this quirk only on those Navy Flounder boards which 256 * have a bad harvest table of VCN config. 257 */ 258 if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) && 259 (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) { 260 switch (adev->pdev->revision) { 261 case 0xC1: 262 case 0xC2: 263 case 0xC3: 264 case 0xC5: 265 case 0xC7: 266 case 0xCF: 267 case 0xDF: 268 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; 269 break; 270 default: 271 break; 272 } 273 } 274 } 275 276 static int amdgpu_discovery_init(struct amdgpu_device *adev) 277 { 278 struct table_info *info; 279 struct binary_header *bhdr; 280 uint16_t offset; 281 uint16_t size; 282 uint16_t checksum; 283 int r; 284 285 adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE; 286 adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL); 287 if (!adev->mman.discovery_bin) 288 return -ENOMEM; 289 290 r = amdgpu_discovery_read_binary_from_vram(adev, adev->mman.discovery_bin); 291 if (r) { 292 dev_err(adev->dev, "failed to read ip discovery binary from vram\n"); 293 r = -EINVAL; 294 goto out; 295 } 296 297 if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) { 298 dev_warn(adev->dev, "get invalid ip discovery binary signature from vram\n"); 299 /* retry read ip discovery binary from file */ 300 r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin); 301 if (r) { 302 dev_err(adev->dev, "failed to read ip discovery binary from file\n"); 303 r = -EINVAL; 304 goto out; 305 } 306 /* check the ip discovery binary signature */ 307 if(!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) { 308 dev_warn(adev->dev, "get invalid ip discovery binary signature from file\n"); 309 r = -EINVAL; 310 goto out; 311 } 312 } 313 314 bhdr = (struct binary_header *)adev->mman.discovery_bin; 315 316 offset = offsetof(struct binary_header, binary_checksum) + 317 sizeof(bhdr->binary_checksum); 318 size = le16_to_cpu(bhdr->binary_size) - offset; 319 checksum = le16_to_cpu(bhdr->binary_checksum); 320 321 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 322 size, checksum)) { 323 dev_err(adev->dev, "invalid ip discovery binary checksum\n"); 324 r = -EINVAL; 325 goto out; 326 } 327 328 info = &bhdr->table_list[IP_DISCOVERY]; 329 offset = le16_to_cpu(info->offset); 330 checksum = le16_to_cpu(info->checksum); 331 332 if (offset) { 333 struct ip_discovery_header *ihdr = 334 (struct ip_discovery_header *)(adev->mman.discovery_bin + offset); 335 if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) { 336 dev_err(adev->dev, "invalid ip discovery data table signature\n"); 337 r = -EINVAL; 338 goto out; 339 } 340 341 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 342 le16_to_cpu(ihdr->size), checksum)) { 343 dev_err(adev->dev, "invalid ip discovery data table checksum\n"); 344 r = -EINVAL; 345 goto out; 346 } 347 } 348 349 info = &bhdr->table_list[GC]; 350 offset = le16_to_cpu(info->offset); 351 checksum = le16_to_cpu(info->checksum); 352 353 if (offset) { 354 struct gpu_info_header *ghdr = 355 (struct gpu_info_header *)(adev->mman.discovery_bin + offset); 356 357 if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) { 358 dev_err(adev->dev, "invalid ip discovery gc table id\n"); 359 r = -EINVAL; 360 goto out; 361 } 362 363 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 364 le32_to_cpu(ghdr->size), checksum)) { 365 dev_err(adev->dev, "invalid gc data table checksum\n"); 366 r = -EINVAL; 367 goto out; 368 } 369 } 370 371 info = &bhdr->table_list[HARVEST_INFO]; 372 offset = le16_to_cpu(info->offset); 373 checksum = le16_to_cpu(info->checksum); 374 375 if (offset) { 376 struct harvest_info_header *hhdr = 377 (struct harvest_info_header *)(adev->mman.discovery_bin + offset); 378 379 if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) { 380 dev_err(adev->dev, "invalid ip discovery harvest table signature\n"); 381 r = -EINVAL; 382 goto out; 383 } 384 385 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 386 sizeof(struct harvest_table), checksum)) { 387 dev_err(adev->dev, "invalid harvest data table checksum\n"); 388 r = -EINVAL; 389 goto out; 390 } 391 } 392 393 info = &bhdr->table_list[VCN_INFO]; 394 offset = le16_to_cpu(info->offset); 395 checksum = le16_to_cpu(info->checksum); 396 397 if (offset) { 398 struct vcn_info_header *vhdr = 399 (struct vcn_info_header *)(adev->mman.discovery_bin + offset); 400 401 if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) { 402 dev_err(adev->dev, "invalid ip discovery vcn table id\n"); 403 r = -EINVAL; 404 goto out; 405 } 406 407 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 408 le32_to_cpu(vhdr->size_bytes), checksum)) { 409 dev_err(adev->dev, "invalid vcn data table checksum\n"); 410 r = -EINVAL; 411 goto out; 412 } 413 } 414 415 info = &bhdr->table_list[MALL_INFO]; 416 offset = le16_to_cpu(info->offset); 417 checksum = le16_to_cpu(info->checksum); 418 419 if (0 && offset) { 420 struct mall_info_header *mhdr = 421 (struct mall_info_header *)(adev->mman.discovery_bin + offset); 422 423 if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) { 424 dev_err(adev->dev, "invalid ip discovery mall table id\n"); 425 r = -EINVAL; 426 goto out; 427 } 428 429 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 430 le32_to_cpu(mhdr->size_bytes), checksum)) { 431 dev_err(adev->dev, "invalid mall data table checksum\n"); 432 r = -EINVAL; 433 goto out; 434 } 435 } 436 437 return 0; 438 439 out: 440 kfree(adev->mman.discovery_bin); 441 adev->mman.discovery_bin = NULL; 442 443 return r; 444 } 445 446 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev); 447 448 void amdgpu_discovery_fini(struct amdgpu_device *adev) 449 { 450 amdgpu_discovery_sysfs_fini(adev); 451 kfree(adev->mman.discovery_bin); 452 adev->mman.discovery_bin = NULL; 453 } 454 455 static int amdgpu_discovery_validate_ip(const struct ip *ip) 456 { 457 if (ip->number_instance >= HWIP_MAX_INSTANCE) { 458 DRM_ERROR("Unexpected number_instance (%d) from ip discovery blob\n", 459 ip->number_instance); 460 return -EINVAL; 461 } 462 if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) { 463 DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n", 464 le16_to_cpu(ip->hw_id)); 465 return -EINVAL; 466 } 467 468 return 0; 469 } 470 471 static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev, 472 uint32_t *vcn_harvest_count) 473 { 474 struct binary_header *bhdr; 475 struct ip_discovery_header *ihdr; 476 struct die_header *dhdr; 477 struct ip *ip; 478 uint16_t die_offset, ip_offset, num_dies, num_ips; 479 int i, j; 480 481 bhdr = (struct binary_header *)adev->mman.discovery_bin; 482 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + 483 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); 484 num_dies = le16_to_cpu(ihdr->num_dies); 485 486 /* scan harvest bit of all IP data structures */ 487 for (i = 0; i < num_dies; i++) { 488 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); 489 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); 490 num_ips = le16_to_cpu(dhdr->num_ips); 491 ip_offset = die_offset + sizeof(*dhdr); 492 493 for (j = 0; j < num_ips; j++) { 494 ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); 495 496 if (amdgpu_discovery_validate_ip(ip)) 497 goto next_ip; 498 499 if (le16_to_cpu(ip->harvest) == 1) { 500 switch (le16_to_cpu(ip->hw_id)) { 501 case VCN_HWID: 502 (*vcn_harvest_count)++; 503 if (ip->number_instance == 0) 504 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0; 505 else 506 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; 507 break; 508 case DMU_HWID: 509 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; 510 break; 511 default: 512 break; 513 } 514 } 515 next_ip: 516 ip_offset += struct_size(ip, base_address, ip->num_base_address); 517 } 518 } 519 } 520 521 static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev, 522 uint32_t *vcn_harvest_count, 523 uint32_t *umc_harvest_count) 524 { 525 struct binary_header *bhdr; 526 struct harvest_table *harvest_info; 527 u16 offset; 528 int i; 529 530 bhdr = (struct binary_header *)adev->mman.discovery_bin; 531 offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset); 532 533 if (!offset) { 534 dev_err(adev->dev, "invalid harvest table offset\n"); 535 return; 536 } 537 538 harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset); 539 540 for (i = 0; i < 32; i++) { 541 if (le16_to_cpu(harvest_info->list[i].hw_id) == 0) 542 break; 543 544 switch (le16_to_cpu(harvest_info->list[i].hw_id)) { 545 case VCN_HWID: 546 (*vcn_harvest_count)++; 547 if (harvest_info->list[i].number_instance == 0) 548 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0; 549 else 550 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; 551 break; 552 case DMU_HWID: 553 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; 554 break; 555 case UMC_HWID: 556 (*umc_harvest_count)++; 557 break; 558 default: 559 break; 560 } 561 } 562 } 563 564 /* ================================================== */ 565 566 struct ip_hw_instance { 567 struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */ 568 569 int hw_id; 570 u8 num_instance; 571 u8 major, minor, revision; 572 u8 harvest; 573 574 int num_base_addresses; 575 u32 base_addr[]; 576 }; 577 578 struct ip_hw_id { 579 struct kset hw_id_kset; /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */ 580 int hw_id; 581 }; 582 583 struct ip_die_entry { 584 struct kset ip_kset; /* ip_discovery/die/#die/, contains ip_hw_id */ 585 u16 num_ips; 586 }; 587 588 /* -------------------------------------------------- */ 589 590 struct ip_hw_instance_attr { 591 struct attribute attr; 592 ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf); 593 }; 594 595 static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf) 596 { 597 return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id); 598 } 599 600 static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf) 601 { 602 return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance); 603 } 604 605 static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf) 606 { 607 return sysfs_emit(buf, "%d\n", ip_hw_instance->major); 608 } 609 610 static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf) 611 { 612 return sysfs_emit(buf, "%d\n", ip_hw_instance->minor); 613 } 614 615 static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf) 616 { 617 return sysfs_emit(buf, "%d\n", ip_hw_instance->revision); 618 } 619 620 static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf) 621 { 622 return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest); 623 } 624 625 static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf) 626 { 627 return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses); 628 } 629 630 static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf) 631 { 632 ssize_t res, at; 633 int ii; 634 635 for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) { 636 /* Here we satisfy the condition that, at + size <= PAGE_SIZE. 637 */ 638 if (at + 12 > PAGE_SIZE) 639 break; 640 res = sysfs_emit_at(buf, at, "0x%08X\n", 641 ip_hw_instance->base_addr[ii]); 642 if (res <= 0) 643 break; 644 at += res; 645 } 646 647 return res < 0 ? res : at; 648 } 649 650 static struct ip_hw_instance_attr ip_hw_attr[] = { 651 __ATTR_RO(hw_id), 652 __ATTR_RO(num_instance), 653 __ATTR_RO(major), 654 __ATTR_RO(minor), 655 __ATTR_RO(revision), 656 __ATTR_RO(harvest), 657 __ATTR_RO(num_base_addresses), 658 __ATTR_RO(base_addr), 659 }; 660 661 static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1]; 662 ATTRIBUTE_GROUPS(ip_hw_instance); 663 664 #define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj) 665 #define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr) 666 667 static ssize_t ip_hw_instance_attr_show(struct kobject *kobj, 668 struct attribute *attr, 669 char *buf) 670 { 671 struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj); 672 struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr); 673 674 if (!ip_hw_attr->show) 675 return -EIO; 676 677 return ip_hw_attr->show(ip_hw_instance, buf); 678 } 679 680 static const struct sysfs_ops ip_hw_instance_sysfs_ops = { 681 .show = ip_hw_instance_attr_show, 682 }; 683 684 static void ip_hw_instance_release(struct kobject *kobj) 685 { 686 struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj); 687 688 kfree(ip_hw_instance); 689 } 690 691 static struct kobj_type ip_hw_instance_ktype = { 692 .release = ip_hw_instance_release, 693 .sysfs_ops = &ip_hw_instance_sysfs_ops, 694 .default_groups = ip_hw_instance_groups, 695 }; 696 697 /* -------------------------------------------------- */ 698 699 #define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset) 700 701 static void ip_hw_id_release(struct kobject *kobj) 702 { 703 struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj); 704 705 if (!list_empty(&ip_hw_id->hw_id_kset.list)) 706 DRM_ERROR("ip_hw_id->hw_id_kset is not empty"); 707 kfree(ip_hw_id); 708 } 709 710 static struct kobj_type ip_hw_id_ktype = { 711 .release = ip_hw_id_release, 712 .sysfs_ops = &kobj_sysfs_ops, 713 }; 714 715 /* -------------------------------------------------- */ 716 717 static void die_kobj_release(struct kobject *kobj); 718 static void ip_disc_release(struct kobject *kobj); 719 720 struct ip_die_entry_attribute { 721 struct attribute attr; 722 ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf); 723 }; 724 725 #define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr) 726 727 static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf) 728 { 729 return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips); 730 } 731 732 /* If there are more ip_die_entry attrs, other than the number of IPs, 733 * we can make this intro an array of attrs, and then initialize 734 * ip_die_entry_attrs in a loop. 735 */ 736 static struct ip_die_entry_attribute num_ips_attr = 737 __ATTR_RO(num_ips); 738 739 static struct attribute *ip_die_entry_attrs[] = { 740 &num_ips_attr.attr, 741 NULL, 742 }; 743 ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */ 744 745 #define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset) 746 747 static ssize_t ip_die_entry_attr_show(struct kobject *kobj, 748 struct attribute *attr, 749 char *buf) 750 { 751 struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr); 752 struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj); 753 754 if (!ip_die_entry_attr->show) 755 return -EIO; 756 757 return ip_die_entry_attr->show(ip_die_entry, buf); 758 } 759 760 static void ip_die_entry_release(struct kobject *kobj) 761 { 762 struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj); 763 764 if (!list_empty(&ip_die_entry->ip_kset.list)) 765 DRM_ERROR("ip_die_entry->ip_kset is not empty"); 766 kfree(ip_die_entry); 767 } 768 769 static const struct sysfs_ops ip_die_entry_sysfs_ops = { 770 .show = ip_die_entry_attr_show, 771 }; 772 773 static struct kobj_type ip_die_entry_ktype = { 774 .release = ip_die_entry_release, 775 .sysfs_ops = &ip_die_entry_sysfs_ops, 776 .default_groups = ip_die_entry_groups, 777 }; 778 779 static struct kobj_type die_kobj_ktype = { 780 .release = die_kobj_release, 781 .sysfs_ops = &kobj_sysfs_ops, 782 }; 783 784 static struct kobj_type ip_discovery_ktype = { 785 .release = ip_disc_release, 786 .sysfs_ops = &kobj_sysfs_ops, 787 }; 788 789 struct ip_discovery_top { 790 struct kobject kobj; /* ip_discovery/ */ 791 struct kset die_kset; /* ip_discovery/die/, contains ip_die_entry */ 792 struct amdgpu_device *adev; 793 }; 794 795 static void die_kobj_release(struct kobject *kobj) 796 { 797 struct ip_discovery_top *ip_top = container_of(to_kset(kobj), 798 struct ip_discovery_top, 799 die_kset); 800 if (!list_empty(&ip_top->die_kset.list)) 801 DRM_ERROR("ip_top->die_kset is not empty"); 802 } 803 804 static void ip_disc_release(struct kobject *kobj) 805 { 806 struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top, 807 kobj); 808 struct amdgpu_device *adev = ip_top->adev; 809 810 adev->ip_top = NULL; 811 kfree(ip_top); 812 } 813 814 static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev, 815 struct ip_die_entry *ip_die_entry, 816 const size_t _ip_offset, const int num_ips) 817 { 818 int ii, jj, kk, res; 819 820 DRM_DEBUG("num_ips:%d", num_ips); 821 822 /* Find all IPs of a given HW ID, and add their instance to 823 * #die/#hw_id/#instance/<attributes> 824 */ 825 for (ii = 0; ii < HW_ID_MAX; ii++) { 826 struct ip_hw_id *ip_hw_id = NULL; 827 size_t ip_offset = _ip_offset; 828 829 for (jj = 0; jj < num_ips; jj++) { 830 struct ip *ip; 831 struct ip_hw_instance *ip_hw_instance; 832 833 ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); 834 if (amdgpu_discovery_validate_ip(ip) || 835 le16_to_cpu(ip->hw_id) != ii) 836 goto next_ip; 837 838 DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset); 839 840 /* We have a hw_id match; register the hw 841 * block if not yet registered. 842 */ 843 if (!ip_hw_id) { 844 ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL); 845 if (!ip_hw_id) 846 return -ENOMEM; 847 ip_hw_id->hw_id = ii; 848 849 kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii); 850 ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset; 851 ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype; 852 res = kset_register(&ip_hw_id->hw_id_kset); 853 if (res) { 854 DRM_ERROR("Couldn't register ip_hw_id kset"); 855 kfree(ip_hw_id); 856 return res; 857 } 858 if (hw_id_names[ii]) { 859 res = sysfs_create_link(&ip_die_entry->ip_kset.kobj, 860 &ip_hw_id->hw_id_kset.kobj, 861 hw_id_names[ii]); 862 if (res) { 863 DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n", 864 hw_id_names[ii], 865 kobject_name(&ip_die_entry->ip_kset.kobj)); 866 } 867 } 868 } 869 870 /* Now register its instance. 871 */ 872 ip_hw_instance = kzalloc(struct_size(ip_hw_instance, 873 base_addr, 874 ip->num_base_address), 875 GFP_KERNEL); 876 if (!ip_hw_instance) { 877 DRM_ERROR("no memory for ip_hw_instance"); 878 return -ENOMEM; 879 } 880 ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */ 881 ip_hw_instance->num_instance = ip->number_instance; 882 ip_hw_instance->major = ip->major; 883 ip_hw_instance->minor = ip->minor; 884 ip_hw_instance->revision = ip->revision; 885 ip_hw_instance->harvest = ip->harvest; 886 ip_hw_instance->num_base_addresses = ip->num_base_address; 887 888 for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) 889 ip_hw_instance->base_addr[kk] = ip->base_address[kk]; 890 891 kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype); 892 ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset; 893 res = kobject_add(&ip_hw_instance->kobj, NULL, 894 "%d", ip_hw_instance->num_instance); 895 next_ip: 896 ip_offset += struct_size(ip, base_address, ip->num_base_address); 897 } 898 } 899 900 return 0; 901 } 902 903 static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev) 904 { 905 struct binary_header *bhdr; 906 struct ip_discovery_header *ihdr; 907 struct die_header *dhdr; 908 struct kset *die_kset = &adev->ip_top->die_kset; 909 u16 num_dies, die_offset, num_ips; 910 size_t ip_offset; 911 int ii, res; 912 913 bhdr = (struct binary_header *)adev->mman.discovery_bin; 914 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + 915 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); 916 num_dies = le16_to_cpu(ihdr->num_dies); 917 918 DRM_DEBUG("number of dies: %d\n", num_dies); 919 920 for (ii = 0; ii < num_dies; ii++) { 921 struct ip_die_entry *ip_die_entry; 922 923 die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset); 924 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); 925 num_ips = le16_to_cpu(dhdr->num_ips); 926 ip_offset = die_offset + sizeof(*dhdr); 927 928 /* Add the die to the kset. 929 * 930 * dhdr->die_id == ii, which was checked in 931 * amdgpu_discovery_reg_base_init(). 932 */ 933 934 ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL); 935 if (!ip_die_entry) 936 return -ENOMEM; 937 938 ip_die_entry->num_ips = num_ips; 939 940 kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id)); 941 ip_die_entry->ip_kset.kobj.kset = die_kset; 942 ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype; 943 res = kset_register(&ip_die_entry->ip_kset); 944 if (res) { 945 DRM_ERROR("Couldn't register ip_die_entry kset"); 946 kfree(ip_die_entry); 947 return res; 948 } 949 950 amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips); 951 } 952 953 return 0; 954 } 955 956 static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev) 957 { 958 struct kset *die_kset; 959 int res, ii; 960 961 adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL); 962 if (!adev->ip_top) 963 return -ENOMEM; 964 965 adev->ip_top->adev = adev; 966 967 res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype, 968 &adev->dev->kobj, "ip_discovery"); 969 if (res) { 970 DRM_ERROR("Couldn't init and add ip_discovery/"); 971 goto Err; 972 } 973 974 die_kset = &adev->ip_top->die_kset; 975 kobject_set_name(&die_kset->kobj, "%s", "die"); 976 die_kset->kobj.parent = &adev->ip_top->kobj; 977 die_kset->kobj.ktype = &die_kobj_ktype; 978 res = kset_register(&adev->ip_top->die_kset); 979 if (res) { 980 DRM_ERROR("Couldn't register die_kset"); 981 goto Err; 982 } 983 984 for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++) 985 ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr; 986 ip_hw_instance_attrs[ii] = NULL; 987 988 res = amdgpu_discovery_sysfs_recurse(adev); 989 990 return res; 991 Err: 992 kobject_put(&adev->ip_top->kobj); 993 return res; 994 } 995 996 /* -------------------------------------------------- */ 997 998 #define list_to_kobj(el) container_of(el, struct kobject, entry) 999 1000 static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id) 1001 { 1002 struct list_head *el, *tmp; 1003 struct kset *hw_id_kset; 1004 1005 hw_id_kset = &ip_hw_id->hw_id_kset; 1006 spin_lock(&hw_id_kset->list_lock); 1007 list_for_each_prev_safe(el, tmp, &hw_id_kset->list) { 1008 list_del_init(el); 1009 spin_unlock(&hw_id_kset->list_lock); 1010 /* kobject is embedded in ip_hw_instance */ 1011 kobject_put(list_to_kobj(el)); 1012 spin_lock(&hw_id_kset->list_lock); 1013 } 1014 spin_unlock(&hw_id_kset->list_lock); 1015 kobject_put(&ip_hw_id->hw_id_kset.kobj); 1016 } 1017 1018 static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry) 1019 { 1020 struct list_head *el, *tmp; 1021 struct kset *ip_kset; 1022 1023 ip_kset = &ip_die_entry->ip_kset; 1024 spin_lock(&ip_kset->list_lock); 1025 list_for_each_prev_safe(el, tmp, &ip_kset->list) { 1026 list_del_init(el); 1027 spin_unlock(&ip_kset->list_lock); 1028 amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el))); 1029 spin_lock(&ip_kset->list_lock); 1030 } 1031 spin_unlock(&ip_kset->list_lock); 1032 kobject_put(&ip_die_entry->ip_kset.kobj); 1033 } 1034 1035 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev) 1036 { 1037 struct list_head *el, *tmp; 1038 struct kset *die_kset; 1039 1040 die_kset = &adev->ip_top->die_kset; 1041 spin_lock(&die_kset->list_lock); 1042 list_for_each_prev_safe(el, tmp, &die_kset->list) { 1043 list_del_init(el); 1044 spin_unlock(&die_kset->list_lock); 1045 amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el))); 1046 spin_lock(&die_kset->list_lock); 1047 } 1048 spin_unlock(&die_kset->list_lock); 1049 kobject_put(&adev->ip_top->die_kset.kobj); 1050 kobject_put(&adev->ip_top->kobj); 1051 } 1052 1053 /* ================================================== */ 1054 1055 static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) 1056 { 1057 struct binary_header *bhdr; 1058 struct ip_discovery_header *ihdr; 1059 struct die_header *dhdr; 1060 struct ip *ip; 1061 uint16_t die_offset; 1062 uint16_t ip_offset; 1063 uint16_t num_dies; 1064 uint16_t num_ips; 1065 uint8_t num_base_address; 1066 int hw_ip; 1067 int i, j, k; 1068 int r; 1069 1070 r = amdgpu_discovery_init(adev); 1071 if (r) { 1072 DRM_ERROR("amdgpu_discovery_init failed\n"); 1073 return r; 1074 } 1075 1076 bhdr = (struct binary_header *)adev->mman.discovery_bin; 1077 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + 1078 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); 1079 num_dies = le16_to_cpu(ihdr->num_dies); 1080 1081 DRM_DEBUG("number of dies: %d\n", num_dies); 1082 1083 for (i = 0; i < num_dies; i++) { 1084 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); 1085 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); 1086 num_ips = le16_to_cpu(dhdr->num_ips); 1087 ip_offset = die_offset + sizeof(*dhdr); 1088 1089 if (le16_to_cpu(dhdr->die_id) != i) { 1090 DRM_ERROR("invalid die id %d, expected %d\n", 1091 le16_to_cpu(dhdr->die_id), i); 1092 return -EINVAL; 1093 } 1094 1095 DRM_DEBUG("number of hardware IPs on die%d: %d\n", 1096 le16_to_cpu(dhdr->die_id), num_ips); 1097 1098 for (j = 0; j < num_ips; j++) { 1099 ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); 1100 1101 if (amdgpu_discovery_validate_ip(ip)) 1102 goto next_ip; 1103 1104 num_base_address = ip->num_base_address; 1105 1106 DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n", 1107 hw_id_names[le16_to_cpu(ip->hw_id)], 1108 le16_to_cpu(ip->hw_id), 1109 ip->number_instance, 1110 ip->major, ip->minor, 1111 ip->revision); 1112 1113 if (le16_to_cpu(ip->hw_id) == VCN_HWID) { 1114 /* Bit [5:0]: original revision value 1115 * Bit [7:6]: en/decode capability: 1116 * 0b00 : VCN function normally 1117 * 0b10 : encode is disabled 1118 * 0b01 : decode is disabled 1119 */ 1120 adev->vcn.vcn_config[adev->vcn.num_vcn_inst] = 1121 ip->revision & 0xc0; 1122 ip->revision &= ~0xc0; 1123 adev->vcn.num_vcn_inst++; 1124 } 1125 if (le16_to_cpu(ip->hw_id) == SDMA0_HWID || 1126 le16_to_cpu(ip->hw_id) == SDMA1_HWID || 1127 le16_to_cpu(ip->hw_id) == SDMA2_HWID || 1128 le16_to_cpu(ip->hw_id) == SDMA3_HWID) 1129 adev->sdma.num_instances++; 1130 1131 if (le16_to_cpu(ip->hw_id) == UMC_HWID) 1132 adev->gmc.num_umc++; 1133 1134 for (k = 0; k < num_base_address; k++) { 1135 /* 1136 * convert the endianness of base addresses in place, 1137 * so that we don't need to convert them when accessing adev->reg_offset. 1138 */ 1139 ip->base_address[k] = le32_to_cpu(ip->base_address[k]); 1140 DRM_DEBUG("\t0x%08x\n", ip->base_address[k]); 1141 } 1142 1143 for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) { 1144 if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) { 1145 DRM_DEBUG("set register base offset for %s\n", 1146 hw_id_names[le16_to_cpu(ip->hw_id)]); 1147 adev->reg_offset[hw_ip][ip->number_instance] = 1148 ip->base_address; 1149 /* Instance support is somewhat inconsistent. 1150 * SDMA is a good example. Sienna cichlid has 4 total 1151 * SDMA instances, each enumerated separately (HWIDs 1152 * 42, 43, 68, 69). Arcturus has 8 total SDMA instances, 1153 * but they are enumerated as multiple instances of the 1154 * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another 1155 * example. On most chips there are multiple instances 1156 * with the same HWID. 1157 */ 1158 adev->ip_versions[hw_ip][ip->number_instance] = 1159 IP_VERSION(ip->major, ip->minor, ip->revision); 1160 } 1161 } 1162 1163 next_ip: 1164 ip_offset += struct_size(ip, base_address, ip->num_base_address); 1165 } 1166 } 1167 1168 amdgpu_discovery_sysfs_init(adev); 1169 1170 return 0; 1171 } 1172 1173 int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance, 1174 int *major, int *minor, int *revision) 1175 { 1176 struct binary_header *bhdr; 1177 struct ip_discovery_header *ihdr; 1178 struct die_header *dhdr; 1179 struct ip *ip; 1180 uint16_t die_offset; 1181 uint16_t ip_offset; 1182 uint16_t num_dies; 1183 uint16_t num_ips; 1184 int i, j; 1185 1186 if (!adev->mman.discovery_bin) { 1187 DRM_ERROR("ip discovery uninitialized\n"); 1188 return -EINVAL; 1189 } 1190 1191 bhdr = (struct binary_header *)adev->mman.discovery_bin; 1192 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + 1193 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); 1194 num_dies = le16_to_cpu(ihdr->num_dies); 1195 1196 for (i = 0; i < num_dies; i++) { 1197 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); 1198 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); 1199 num_ips = le16_to_cpu(dhdr->num_ips); 1200 ip_offset = die_offset + sizeof(*dhdr); 1201 1202 for (j = 0; j < num_ips; j++) { 1203 ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); 1204 1205 if ((le16_to_cpu(ip->hw_id) == hw_id) && (ip->number_instance == number_instance)) { 1206 if (major) 1207 *major = ip->major; 1208 if (minor) 1209 *minor = ip->minor; 1210 if (revision) 1211 *revision = ip->revision; 1212 return 0; 1213 } 1214 ip_offset += struct_size(ip, base_address, ip->num_base_address); 1215 } 1216 } 1217 1218 return -EINVAL; 1219 } 1220 1221 static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) 1222 { 1223 int vcn_harvest_count = 0; 1224 int umc_harvest_count = 0; 1225 1226 /* 1227 * Harvest table does not fit Navi1x and legacy GPUs, 1228 * so read harvest bit per IP data structure to set 1229 * harvest configuration. 1230 */ 1231 if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0)) { 1232 if ((adev->pdev->device == 0x731E && 1233 (adev->pdev->revision == 0xC6 || 1234 adev->pdev->revision == 0xC7)) || 1235 (adev->pdev->device == 0x7340 && 1236 adev->pdev->revision == 0xC9) || 1237 (adev->pdev->device == 0x7360 && 1238 adev->pdev->revision == 0xC7)) 1239 amdgpu_discovery_read_harvest_bit_per_ip(adev, 1240 &vcn_harvest_count); 1241 } else { 1242 amdgpu_discovery_read_from_harvest_table(adev, 1243 &vcn_harvest_count, 1244 &umc_harvest_count); 1245 } 1246 1247 amdgpu_discovery_harvest_config_quirk(adev); 1248 1249 if (vcn_harvest_count == adev->vcn.num_vcn_inst) { 1250 adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK; 1251 adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK; 1252 } 1253 1254 if (umc_harvest_count < adev->gmc.num_umc) { 1255 adev->gmc.num_umc -= umc_harvest_count; 1256 } 1257 } 1258 1259 union gc_info { 1260 struct gc_info_v1_0 v1; 1261 struct gc_info_v1_1 v1_1; 1262 struct gc_info_v1_2 v1_2; 1263 struct gc_info_v2_0 v2; 1264 }; 1265 1266 static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) 1267 { 1268 struct binary_header *bhdr; 1269 union gc_info *gc_info; 1270 u16 offset; 1271 1272 if (!adev->mman.discovery_bin) { 1273 DRM_ERROR("ip discovery uninitialized\n"); 1274 return -EINVAL; 1275 } 1276 1277 bhdr = (struct binary_header *)adev->mman.discovery_bin; 1278 offset = le16_to_cpu(bhdr->table_list[GC].offset); 1279 1280 if (!offset) 1281 return 0; 1282 1283 gc_info = (union gc_info *)(adev->mman.discovery_bin + offset); 1284 1285 switch (le16_to_cpu(gc_info->v1.header.version_major)) { 1286 case 1: 1287 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se); 1288 adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) + 1289 le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa)); 1290 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se); 1291 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se); 1292 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c); 1293 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs); 1294 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds); 1295 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth); 1296 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth); 1297 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer); 1298 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size); 1299 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd); 1300 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu); 1301 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size); 1302 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) / 1303 le32_to_cpu(gc_info->v1.gc_num_sa_per_se); 1304 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc); 1305 if (gc_info->v1.header.version_minor >= 1) { 1306 adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa); 1307 adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface); 1308 adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps); 1309 } 1310 if (gc_info->v1.header.version_minor >= 2) { 1311 adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg); 1312 adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size); 1313 adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp); 1314 adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc); 1315 adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc); 1316 adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa); 1317 adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance); 1318 adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu); 1319 } 1320 break; 1321 case 2: 1322 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se); 1323 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh); 1324 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se); 1325 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se); 1326 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs); 1327 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs); 1328 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds); 1329 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth); 1330 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth); 1331 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer); 1332 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size); 1333 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd); 1334 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu); 1335 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size); 1336 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) / 1337 le32_to_cpu(gc_info->v2.gc_num_sh_per_se); 1338 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc); 1339 break; 1340 default: 1341 dev_err(adev->dev, 1342 "Unhandled GC info table %d.%d\n", 1343 le16_to_cpu(gc_info->v1.header.version_major), 1344 le16_to_cpu(gc_info->v1.header.version_minor)); 1345 return -EINVAL; 1346 } 1347 return 0; 1348 } 1349 1350 union mall_info { 1351 struct mall_info_v1_0 v1; 1352 }; 1353 1354 int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev) 1355 { 1356 struct binary_header *bhdr; 1357 union mall_info *mall_info; 1358 u32 u, mall_size_per_umc, m_s_present, half_use; 1359 u64 mall_size; 1360 u16 offset; 1361 1362 if (!adev->mman.discovery_bin) { 1363 DRM_ERROR("ip discovery uninitialized\n"); 1364 return -EINVAL; 1365 } 1366 1367 bhdr = (struct binary_header *)adev->mman.discovery_bin; 1368 offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset); 1369 1370 if (!offset) 1371 return 0; 1372 1373 mall_info = (union mall_info *)(adev->mman.discovery_bin + offset); 1374 1375 switch (le16_to_cpu(mall_info->v1.header.version_major)) { 1376 case 1: 1377 mall_size = 0; 1378 mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m); 1379 m_s_present = le32_to_cpu(mall_info->v1.m_s_present); 1380 half_use = le32_to_cpu(mall_info->v1.m_half_use); 1381 for (u = 0; u < adev->gmc.num_umc; u++) { 1382 if (m_s_present & (1 << u)) 1383 mall_size += mall_size_per_umc * 2; 1384 else if (half_use & (1 << u)) 1385 mall_size += mall_size_per_umc / 2; 1386 else 1387 mall_size += mall_size_per_umc; 1388 } 1389 adev->gmc.mall_size = mall_size; 1390 break; 1391 default: 1392 dev_err(adev->dev, 1393 "Unhandled MALL info table %d.%d\n", 1394 le16_to_cpu(mall_info->v1.header.version_major), 1395 le16_to_cpu(mall_info->v1.header.version_minor)); 1396 return -EINVAL; 1397 } 1398 return 0; 1399 } 1400 1401 union vcn_info { 1402 struct vcn_info_v1_0 v1; 1403 }; 1404 1405 static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev) 1406 { 1407 struct binary_header *bhdr; 1408 union vcn_info *vcn_info; 1409 u16 offset; 1410 int v; 1411 1412 if (!adev->mman.discovery_bin) { 1413 DRM_ERROR("ip discovery uninitialized\n"); 1414 return -EINVAL; 1415 } 1416 1417 if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) { 1418 dev_err(adev->dev, "invalid vcn instances\n"); 1419 return -EINVAL; 1420 } 1421 1422 bhdr = (struct binary_header *)adev->mman.discovery_bin; 1423 offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset); 1424 1425 if (!offset) 1426 return 0; 1427 1428 vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset); 1429 1430 switch (le16_to_cpu(vcn_info->v1.header.version_major)) { 1431 case 1: 1432 for (v = 0; v < adev->vcn.num_vcn_inst; v++) { 1433 adev->vcn.vcn_codec_disable_mask[v] = 1434 le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits); 1435 } 1436 break; 1437 default: 1438 dev_err(adev->dev, 1439 "Unhandled VCN info table %d.%d\n", 1440 le16_to_cpu(vcn_info->v1.header.version_major), 1441 le16_to_cpu(vcn_info->v1.header.version_minor)); 1442 return -EINVAL; 1443 } 1444 return 0; 1445 } 1446 1447 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev) 1448 { 1449 /* what IP to use for this? */ 1450 switch (adev->ip_versions[GC_HWIP][0]) { 1451 case IP_VERSION(9, 0, 1): 1452 case IP_VERSION(9, 1, 0): 1453 case IP_VERSION(9, 2, 1): 1454 case IP_VERSION(9, 2, 2): 1455 case IP_VERSION(9, 3, 0): 1456 case IP_VERSION(9, 4, 0): 1457 case IP_VERSION(9, 4, 1): 1458 case IP_VERSION(9, 4, 2): 1459 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); 1460 break; 1461 case IP_VERSION(10, 1, 10): 1462 case IP_VERSION(10, 1, 1): 1463 case IP_VERSION(10, 1, 2): 1464 case IP_VERSION(10, 1, 3): 1465 case IP_VERSION(10, 1, 4): 1466 case IP_VERSION(10, 3, 0): 1467 case IP_VERSION(10, 3, 1): 1468 case IP_VERSION(10, 3, 2): 1469 case IP_VERSION(10, 3, 3): 1470 case IP_VERSION(10, 3, 4): 1471 case IP_VERSION(10, 3, 5): 1472 case IP_VERSION(10, 3, 6): 1473 case IP_VERSION(10, 3, 7): 1474 amdgpu_device_ip_block_add(adev, &nv_common_ip_block); 1475 break; 1476 case IP_VERSION(11, 0, 0): 1477 amdgpu_device_ip_block_add(adev, &soc21_common_ip_block); 1478 break; 1479 default: 1480 dev_err(adev->dev, 1481 "Failed to add common ip block(GC_HWIP:0x%x)\n", 1482 adev->ip_versions[GC_HWIP][0]); 1483 return -EINVAL; 1484 } 1485 return 0; 1486 } 1487 1488 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev) 1489 { 1490 /* use GC or MMHUB IP version */ 1491 switch (adev->ip_versions[GC_HWIP][0]) { 1492 case IP_VERSION(9, 0, 1): 1493 case IP_VERSION(9, 1, 0): 1494 case IP_VERSION(9, 2, 1): 1495 case IP_VERSION(9, 2, 2): 1496 case IP_VERSION(9, 3, 0): 1497 case IP_VERSION(9, 4, 0): 1498 case IP_VERSION(9, 4, 1): 1499 case IP_VERSION(9, 4, 2): 1500 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); 1501 break; 1502 case IP_VERSION(10, 1, 10): 1503 case IP_VERSION(10, 1, 1): 1504 case IP_VERSION(10, 1, 2): 1505 case IP_VERSION(10, 1, 3): 1506 case IP_VERSION(10, 1, 4): 1507 case IP_VERSION(10, 3, 0): 1508 case IP_VERSION(10, 3, 1): 1509 case IP_VERSION(10, 3, 2): 1510 case IP_VERSION(10, 3, 3): 1511 case IP_VERSION(10, 3, 4): 1512 case IP_VERSION(10, 3, 5): 1513 case IP_VERSION(10, 3, 6): 1514 case IP_VERSION(10, 3, 7): 1515 amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); 1516 break; 1517 case IP_VERSION(11, 0, 0): 1518 amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block); 1519 break; 1520 default: 1521 dev_err(adev->dev, 1522 "Failed to add gmc ip block(GC_HWIP:0x%x)\n", 1523 adev->ip_versions[GC_HWIP][0]); 1524 return -EINVAL; 1525 } 1526 return 0; 1527 } 1528 1529 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev) 1530 { 1531 switch (adev->ip_versions[OSSSYS_HWIP][0]) { 1532 case IP_VERSION(4, 0, 0): 1533 case IP_VERSION(4, 0, 1): 1534 case IP_VERSION(4, 1, 0): 1535 case IP_VERSION(4, 1, 1): 1536 case IP_VERSION(4, 3, 0): 1537 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); 1538 break; 1539 case IP_VERSION(4, 2, 0): 1540 case IP_VERSION(4, 2, 1): 1541 case IP_VERSION(4, 4, 0): 1542 amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); 1543 break; 1544 case IP_VERSION(5, 0, 0): 1545 case IP_VERSION(5, 0, 1): 1546 case IP_VERSION(5, 0, 2): 1547 case IP_VERSION(5, 0, 3): 1548 case IP_VERSION(5, 2, 0): 1549 case IP_VERSION(5, 2, 1): 1550 amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); 1551 break; 1552 case IP_VERSION(6, 0, 0): 1553 amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block); 1554 break; 1555 default: 1556 dev_err(adev->dev, 1557 "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n", 1558 adev->ip_versions[OSSSYS_HWIP][0]); 1559 return -EINVAL; 1560 } 1561 return 0; 1562 } 1563 1564 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) 1565 { 1566 switch (adev->ip_versions[MP0_HWIP][0]) { 1567 case IP_VERSION(9, 0, 0): 1568 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); 1569 break; 1570 case IP_VERSION(10, 0, 0): 1571 case IP_VERSION(10, 0, 1): 1572 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); 1573 break; 1574 case IP_VERSION(11, 0, 0): 1575 case IP_VERSION(11, 0, 2): 1576 case IP_VERSION(11, 0, 4): 1577 case IP_VERSION(11, 0, 5): 1578 case IP_VERSION(11, 0, 9): 1579 case IP_VERSION(11, 0, 7): 1580 case IP_VERSION(11, 0, 11): 1581 case IP_VERSION(11, 0, 12): 1582 case IP_VERSION(11, 0, 13): 1583 case IP_VERSION(11, 5, 0): 1584 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); 1585 break; 1586 case IP_VERSION(11, 0, 8): 1587 amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block); 1588 break; 1589 case IP_VERSION(11, 0, 3): 1590 case IP_VERSION(12, 0, 1): 1591 amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block); 1592 break; 1593 case IP_VERSION(13, 0, 0): 1594 case IP_VERSION(13, 0, 1): 1595 case IP_VERSION(13, 0, 2): 1596 case IP_VERSION(13, 0, 3): 1597 case IP_VERSION(13, 0, 5): 1598 case IP_VERSION(13, 0, 8): 1599 amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); 1600 break; 1601 default: 1602 dev_err(adev->dev, 1603 "Failed to add psp ip block(MP0_HWIP:0x%x)\n", 1604 adev->ip_versions[MP0_HWIP][0]); 1605 return -EINVAL; 1606 } 1607 return 0; 1608 } 1609 1610 static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) 1611 { 1612 switch (adev->ip_versions[MP1_HWIP][0]) { 1613 case IP_VERSION(9, 0, 0): 1614 case IP_VERSION(10, 0, 0): 1615 case IP_VERSION(10, 0, 1): 1616 case IP_VERSION(11, 0, 2): 1617 if (adev->asic_type == CHIP_ARCTURUS) 1618 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); 1619 else 1620 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); 1621 break; 1622 case IP_VERSION(11, 0, 0): 1623 case IP_VERSION(11, 0, 5): 1624 case IP_VERSION(11, 0, 9): 1625 case IP_VERSION(11, 0, 7): 1626 case IP_VERSION(11, 0, 8): 1627 case IP_VERSION(11, 0, 11): 1628 case IP_VERSION(11, 0, 12): 1629 case IP_VERSION(11, 0, 13): 1630 case IP_VERSION(11, 5, 0): 1631 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); 1632 break; 1633 case IP_VERSION(12, 0, 0): 1634 case IP_VERSION(12, 0, 1): 1635 amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block); 1636 break; 1637 case IP_VERSION(13, 0, 0): 1638 case IP_VERSION(13, 0, 1): 1639 case IP_VERSION(13, 0, 2): 1640 case IP_VERSION(13, 0, 3): 1641 case IP_VERSION(13, 0, 5): 1642 case IP_VERSION(13, 0, 8): 1643 amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); 1644 break; 1645 default: 1646 dev_err(adev->dev, 1647 "Failed to add smu ip block(MP1_HWIP:0x%x)\n", 1648 adev->ip_versions[MP1_HWIP][0]); 1649 return -EINVAL; 1650 } 1651 return 0; 1652 } 1653 1654 static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) 1655 { 1656 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) { 1657 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); 1658 return 0; 1659 } 1660 1661 if (!amdgpu_device_has_dc_support(adev)) 1662 return 0; 1663 1664 #if defined(CONFIG_DRM_AMD_DC) 1665 if (adev->ip_versions[DCE_HWIP][0]) { 1666 switch (adev->ip_versions[DCE_HWIP][0]) { 1667 case IP_VERSION(1, 0, 0): 1668 case IP_VERSION(1, 0, 1): 1669 case IP_VERSION(2, 0, 2): 1670 case IP_VERSION(2, 0, 0): 1671 case IP_VERSION(2, 0, 3): 1672 case IP_VERSION(2, 1, 0): 1673 case IP_VERSION(3, 0, 0): 1674 case IP_VERSION(3, 0, 2): 1675 case IP_VERSION(3, 0, 3): 1676 case IP_VERSION(3, 0, 1): 1677 case IP_VERSION(3, 1, 2): 1678 case IP_VERSION(3, 1, 3): 1679 case IP_VERSION(3, 1, 5): 1680 case IP_VERSION(3, 1, 6): 1681 amdgpu_device_ip_block_add(adev, &dm_ip_block); 1682 break; 1683 default: 1684 dev_err(adev->dev, 1685 "Failed to add dm ip block(DCE_HWIP:0x%x)\n", 1686 adev->ip_versions[DCE_HWIP][0]); 1687 return -EINVAL; 1688 } 1689 } else if (adev->ip_versions[DCI_HWIP][0]) { 1690 switch (adev->ip_versions[DCI_HWIP][0]) { 1691 case IP_VERSION(12, 0, 0): 1692 case IP_VERSION(12, 0, 1): 1693 case IP_VERSION(12, 1, 0): 1694 amdgpu_device_ip_block_add(adev, &dm_ip_block); 1695 break; 1696 default: 1697 dev_err(adev->dev, 1698 "Failed to add dm ip block(DCI_HWIP:0x%x)\n", 1699 adev->ip_versions[DCI_HWIP][0]); 1700 return -EINVAL; 1701 } 1702 } 1703 #endif 1704 return 0; 1705 } 1706 1707 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev) 1708 { 1709 switch (adev->ip_versions[GC_HWIP][0]) { 1710 case IP_VERSION(9, 0, 1): 1711 case IP_VERSION(9, 1, 0): 1712 case IP_VERSION(9, 2, 1): 1713 case IP_VERSION(9, 2, 2): 1714 case IP_VERSION(9, 3, 0): 1715 case IP_VERSION(9, 4, 0): 1716 case IP_VERSION(9, 4, 1): 1717 case IP_VERSION(9, 4, 2): 1718 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); 1719 break; 1720 case IP_VERSION(10, 1, 10): 1721 case IP_VERSION(10, 1, 2): 1722 case IP_VERSION(10, 1, 1): 1723 case IP_VERSION(10, 1, 3): 1724 case IP_VERSION(10, 1, 4): 1725 case IP_VERSION(10, 3, 0): 1726 case IP_VERSION(10, 3, 2): 1727 case IP_VERSION(10, 3, 1): 1728 case IP_VERSION(10, 3, 4): 1729 case IP_VERSION(10, 3, 5): 1730 case IP_VERSION(10, 3, 6): 1731 case IP_VERSION(10, 3, 3): 1732 case IP_VERSION(10, 3, 7): 1733 amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); 1734 break; 1735 default: 1736 dev_err(adev->dev, 1737 "Failed to add gfx ip block(GC_HWIP:0x%x)\n", 1738 adev->ip_versions[GC_HWIP][0]); 1739 return -EINVAL; 1740 } 1741 return 0; 1742 } 1743 1744 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) 1745 { 1746 switch (adev->ip_versions[SDMA0_HWIP][0]) { 1747 case IP_VERSION(4, 0, 0): 1748 case IP_VERSION(4, 0, 1): 1749 case IP_VERSION(4, 1, 0): 1750 case IP_VERSION(4, 1, 1): 1751 case IP_VERSION(4, 1, 2): 1752 case IP_VERSION(4, 2, 0): 1753 case IP_VERSION(4, 2, 2): 1754 case IP_VERSION(4, 4, 0): 1755 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); 1756 break; 1757 case IP_VERSION(5, 0, 0): 1758 case IP_VERSION(5, 0, 1): 1759 case IP_VERSION(5, 0, 2): 1760 case IP_VERSION(5, 0, 5): 1761 amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); 1762 break; 1763 case IP_VERSION(5, 2, 0): 1764 case IP_VERSION(5, 2, 2): 1765 case IP_VERSION(5, 2, 4): 1766 case IP_VERSION(5, 2, 5): 1767 case IP_VERSION(5, 2, 6): 1768 case IP_VERSION(5, 2, 3): 1769 case IP_VERSION(5, 2, 1): 1770 case IP_VERSION(5, 2, 7): 1771 amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); 1772 break; 1773 default: 1774 dev_err(adev->dev, 1775 "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n", 1776 adev->ip_versions[SDMA0_HWIP][0]); 1777 return -EINVAL; 1778 } 1779 return 0; 1780 } 1781 1782 static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) 1783 { 1784 if (adev->ip_versions[VCE_HWIP][0]) { 1785 switch (adev->ip_versions[UVD_HWIP][0]) { 1786 case IP_VERSION(7, 0, 0): 1787 case IP_VERSION(7, 2, 0): 1788 /* UVD is not supported on vega20 SR-IOV */ 1789 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) 1790 amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); 1791 break; 1792 default: 1793 dev_err(adev->dev, 1794 "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n", 1795 adev->ip_versions[UVD_HWIP][0]); 1796 return -EINVAL; 1797 } 1798 switch (adev->ip_versions[VCE_HWIP][0]) { 1799 case IP_VERSION(4, 0, 0): 1800 case IP_VERSION(4, 1, 0): 1801 /* VCE is not supported on vega20 SR-IOV */ 1802 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) 1803 amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); 1804 break; 1805 default: 1806 dev_err(adev->dev, 1807 "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n", 1808 adev->ip_versions[VCE_HWIP][0]); 1809 return -EINVAL; 1810 } 1811 } else { 1812 switch (adev->ip_versions[UVD_HWIP][0]) { 1813 case IP_VERSION(1, 0, 0): 1814 case IP_VERSION(1, 0, 1): 1815 amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block); 1816 break; 1817 case IP_VERSION(2, 0, 0): 1818 case IP_VERSION(2, 0, 2): 1819 case IP_VERSION(2, 2, 0): 1820 amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); 1821 if (!amdgpu_sriov_vf(adev)) 1822 amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); 1823 break; 1824 case IP_VERSION(2, 0, 3): 1825 break; 1826 case IP_VERSION(2, 5, 0): 1827 amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); 1828 amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block); 1829 break; 1830 case IP_VERSION(2, 6, 0): 1831 amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block); 1832 amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block); 1833 break; 1834 case IP_VERSION(3, 0, 0): 1835 case IP_VERSION(3, 0, 16): 1836 case IP_VERSION(3, 1, 1): 1837 case IP_VERSION(3, 1, 2): 1838 case IP_VERSION(3, 0, 2): 1839 case IP_VERSION(3, 0, 192): 1840 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); 1841 if (!amdgpu_sriov_vf(adev)) 1842 amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); 1843 break; 1844 case IP_VERSION(3, 0, 33): 1845 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); 1846 break; 1847 default: 1848 dev_err(adev->dev, 1849 "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n", 1850 adev->ip_versions[UVD_HWIP][0]); 1851 return -EINVAL; 1852 } 1853 } 1854 return 0; 1855 } 1856 1857 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev) 1858 { 1859 switch (adev->ip_versions[GC_HWIP][0]) { 1860 case IP_VERSION(10, 1, 10): 1861 case IP_VERSION(10, 1, 1): 1862 case IP_VERSION(10, 1, 2): 1863 case IP_VERSION(10, 1, 3): 1864 case IP_VERSION(10, 1, 4): 1865 case IP_VERSION(10, 3, 0): 1866 case IP_VERSION(10, 3, 1): 1867 case IP_VERSION(10, 3, 2): 1868 case IP_VERSION(10, 3, 3): 1869 case IP_VERSION(10, 3, 4): 1870 case IP_VERSION(10, 3, 5): 1871 case IP_VERSION(10, 3, 6): 1872 amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block); 1873 break; 1874 default: 1875 break; 1876 } 1877 return 0; 1878 } 1879 1880 int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) 1881 { 1882 int r; 1883 1884 switch (adev->asic_type) { 1885 case CHIP_VEGA10: 1886 vega10_reg_base_init(adev); 1887 adev->sdma.num_instances = 2; 1888 adev->gmc.num_umc = 4; 1889 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0); 1890 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0); 1891 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0); 1892 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0); 1893 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0); 1894 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0); 1895 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0); 1896 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0); 1897 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0); 1898 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0); 1899 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0); 1900 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0); 1901 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0); 1902 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1); 1903 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0); 1904 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0); 1905 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0); 1906 break; 1907 case CHIP_VEGA12: 1908 vega10_reg_base_init(adev); 1909 adev->sdma.num_instances = 2; 1910 adev->gmc.num_umc = 4; 1911 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0); 1912 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0); 1913 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1); 1914 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1); 1915 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1); 1916 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1); 1917 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0); 1918 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0); 1919 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0); 1920 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0); 1921 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0); 1922 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0); 1923 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1); 1924 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1); 1925 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0); 1926 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0); 1927 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1); 1928 break; 1929 case CHIP_RAVEN: 1930 vega10_reg_base_init(adev); 1931 adev->sdma.num_instances = 1; 1932 adev->vcn.num_vcn_inst = 1; 1933 adev->gmc.num_umc = 2; 1934 if (adev->apu_flags & AMD_APU_IS_RAVEN2) { 1935 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0); 1936 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0); 1937 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1); 1938 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1); 1939 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1); 1940 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1); 1941 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1); 1942 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0); 1943 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1); 1944 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1); 1945 adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0); 1946 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1); 1947 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2); 1948 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1); 1949 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1); 1950 } else { 1951 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0); 1952 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0); 1953 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0); 1954 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0); 1955 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0); 1956 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0); 1957 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0); 1958 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0); 1959 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0); 1960 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0); 1961 adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0); 1962 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0); 1963 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0); 1964 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0); 1965 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0); 1966 } 1967 break; 1968 case CHIP_VEGA20: 1969 vega20_reg_base_init(adev); 1970 adev->sdma.num_instances = 2; 1971 adev->gmc.num_umc = 8; 1972 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0); 1973 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0); 1974 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0); 1975 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0); 1976 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0); 1977 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0); 1978 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0); 1979 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0); 1980 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1); 1981 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2); 1982 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2); 1983 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2); 1984 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2); 1985 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0); 1986 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0); 1987 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0); 1988 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0); 1989 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0); 1990 break; 1991 case CHIP_ARCTURUS: 1992 arct_reg_base_init(adev); 1993 adev->sdma.num_instances = 8; 1994 adev->vcn.num_vcn_inst = 2; 1995 adev->gmc.num_umc = 8; 1996 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1); 1997 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1); 1998 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1); 1999 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1); 2000 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2); 2001 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2); 2002 adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2); 2003 adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2); 2004 adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2); 2005 adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2); 2006 adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2); 2007 adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2); 2008 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1); 2009 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1); 2010 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2); 2011 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4); 2012 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2); 2013 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3); 2014 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3); 2015 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1); 2016 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0); 2017 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0); 2018 break; 2019 case CHIP_ALDEBARAN: 2020 aldebaran_reg_base_init(adev); 2021 adev->sdma.num_instances = 5; 2022 adev->vcn.num_vcn_inst = 2; 2023 adev->gmc.num_umc = 4; 2024 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2); 2025 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2); 2026 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0); 2027 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0); 2028 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0); 2029 adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0); 2030 adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0); 2031 adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0); 2032 adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0); 2033 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2); 2034 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4); 2035 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0); 2036 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2); 2037 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2); 2038 adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2); 2039 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2); 2040 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2); 2041 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0); 2042 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0); 2043 adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0); 2044 break; 2045 default: 2046 r = amdgpu_discovery_reg_base_init(adev); 2047 if (r) 2048 return -EINVAL; 2049 2050 amdgpu_discovery_harvest_ip(adev); 2051 amdgpu_discovery_get_gfx_info(adev); 2052 amdgpu_discovery_get_mall_info(adev); 2053 amdgpu_discovery_get_vcn_info(adev); 2054 break; 2055 } 2056 2057 switch (adev->ip_versions[GC_HWIP][0]) { 2058 case IP_VERSION(9, 0, 1): 2059 case IP_VERSION(9, 2, 1): 2060 case IP_VERSION(9, 4, 0): 2061 case IP_VERSION(9, 4, 1): 2062 case IP_VERSION(9, 4, 2): 2063 adev->family = AMDGPU_FAMILY_AI; 2064 break; 2065 case IP_VERSION(9, 1, 0): 2066 case IP_VERSION(9, 2, 2): 2067 case IP_VERSION(9, 3, 0): 2068 adev->family = AMDGPU_FAMILY_RV; 2069 break; 2070 case IP_VERSION(10, 1, 10): 2071 case IP_VERSION(10, 1, 1): 2072 case IP_VERSION(10, 1, 2): 2073 case IP_VERSION(10, 1, 3): 2074 case IP_VERSION(10, 1, 4): 2075 case IP_VERSION(10, 3, 0): 2076 case IP_VERSION(10, 3, 2): 2077 case IP_VERSION(10, 3, 4): 2078 case IP_VERSION(10, 3, 5): 2079 adev->family = AMDGPU_FAMILY_NV; 2080 break; 2081 case IP_VERSION(10, 3, 1): 2082 adev->family = AMDGPU_FAMILY_VGH; 2083 break; 2084 case IP_VERSION(10, 3, 3): 2085 adev->family = AMDGPU_FAMILY_YC; 2086 break; 2087 case IP_VERSION(10, 3, 6): 2088 adev->family = AMDGPU_FAMILY_GC_10_3_6; 2089 break; 2090 case IP_VERSION(10, 3, 7): 2091 adev->family = AMDGPU_FAMILY_GC_10_3_7; 2092 break; 2093 case IP_VERSION(11, 0, 0): 2094 adev->family = AMDGPU_FAMILY_GC_11_0_0; 2095 break; 2096 default: 2097 return -EINVAL; 2098 } 2099 2100 switch (adev->ip_versions[GC_HWIP][0]) { 2101 case IP_VERSION(9, 1, 0): 2102 case IP_VERSION(9, 2, 2): 2103 case IP_VERSION(9, 3, 0): 2104 case IP_VERSION(10, 1, 3): 2105 case IP_VERSION(10, 1, 4): 2106 case IP_VERSION(10, 3, 1): 2107 case IP_VERSION(10, 3, 3): 2108 case IP_VERSION(10, 3, 6): 2109 case IP_VERSION(10, 3, 7): 2110 adev->flags |= AMD_IS_APU; 2111 break; 2112 default: 2113 break; 2114 } 2115 2116 if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0)) 2117 adev->gmc.xgmi.supported = true; 2118 2119 /* set NBIO version */ 2120 switch (adev->ip_versions[NBIO_HWIP][0]) { 2121 case IP_VERSION(6, 1, 0): 2122 case IP_VERSION(6, 2, 0): 2123 adev->nbio.funcs = &nbio_v6_1_funcs; 2124 adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg; 2125 break; 2126 case IP_VERSION(7, 0, 0): 2127 case IP_VERSION(7, 0, 1): 2128 case IP_VERSION(2, 5, 0): 2129 adev->nbio.funcs = &nbio_v7_0_funcs; 2130 adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg; 2131 break; 2132 case IP_VERSION(7, 4, 0): 2133 case IP_VERSION(7, 4, 1): 2134 adev->nbio.funcs = &nbio_v7_4_funcs; 2135 adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg; 2136 break; 2137 case IP_VERSION(7, 4, 4): 2138 adev->nbio.funcs = &nbio_v7_4_funcs; 2139 adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg_ald; 2140 break; 2141 case IP_VERSION(7, 2, 0): 2142 case IP_VERSION(7, 2, 1): 2143 case IP_VERSION(7, 3, 0): 2144 case IP_VERSION(7, 5, 0): 2145 case IP_VERSION(7, 5, 1): 2146 adev->nbio.funcs = &nbio_v7_2_funcs; 2147 adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg; 2148 break; 2149 case IP_VERSION(2, 1, 1): 2150 case IP_VERSION(2, 3, 0): 2151 case IP_VERSION(2, 3, 1): 2152 case IP_VERSION(2, 3, 2): 2153 adev->nbio.funcs = &nbio_v2_3_funcs; 2154 adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg; 2155 break; 2156 case IP_VERSION(3, 3, 0): 2157 case IP_VERSION(3, 3, 1): 2158 case IP_VERSION(3, 3, 2): 2159 case IP_VERSION(3, 3, 3): 2160 adev->nbio.funcs = &nbio_v2_3_funcs; 2161 adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg_sc; 2162 break; 2163 case IP_VERSION(4, 3, 0): 2164 adev->nbio.funcs = &nbio_v4_3_funcs; 2165 adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg; 2166 break; 2167 default: 2168 break; 2169 } 2170 2171 switch (adev->ip_versions[HDP_HWIP][0]) { 2172 case IP_VERSION(4, 0, 0): 2173 case IP_VERSION(4, 0, 1): 2174 case IP_VERSION(4, 1, 0): 2175 case IP_VERSION(4, 1, 1): 2176 case IP_VERSION(4, 1, 2): 2177 case IP_VERSION(4, 2, 0): 2178 case IP_VERSION(4, 2, 1): 2179 case IP_VERSION(4, 4, 0): 2180 adev->hdp.funcs = &hdp_v4_0_funcs; 2181 break; 2182 case IP_VERSION(5, 0, 0): 2183 case IP_VERSION(5, 0, 1): 2184 case IP_VERSION(5, 0, 2): 2185 case IP_VERSION(5, 0, 3): 2186 case IP_VERSION(5, 0, 4): 2187 case IP_VERSION(5, 2, 0): 2188 adev->hdp.funcs = &hdp_v5_0_funcs; 2189 break; 2190 case IP_VERSION(6, 0, 0): 2191 adev->hdp.funcs = &hdp_v6_0_funcs; 2192 break; 2193 default: 2194 break; 2195 } 2196 2197 switch (adev->ip_versions[DF_HWIP][0]) { 2198 case IP_VERSION(3, 6, 0): 2199 case IP_VERSION(3, 6, 1): 2200 case IP_VERSION(3, 6, 2): 2201 adev->df.funcs = &df_v3_6_funcs; 2202 break; 2203 case IP_VERSION(2, 1, 0): 2204 case IP_VERSION(2, 1, 1): 2205 case IP_VERSION(2, 5, 0): 2206 case IP_VERSION(3, 5, 1): 2207 case IP_VERSION(3, 5, 2): 2208 adev->df.funcs = &df_v1_7_funcs; 2209 break; 2210 default: 2211 break; 2212 } 2213 2214 switch (adev->ip_versions[SMUIO_HWIP][0]) { 2215 case IP_VERSION(9, 0, 0): 2216 case IP_VERSION(9, 0, 1): 2217 case IP_VERSION(10, 0, 0): 2218 case IP_VERSION(10, 0, 1): 2219 case IP_VERSION(10, 0, 2): 2220 adev->smuio.funcs = &smuio_v9_0_funcs; 2221 break; 2222 case IP_VERSION(11, 0, 0): 2223 case IP_VERSION(11, 0, 2): 2224 case IP_VERSION(11, 0, 3): 2225 case IP_VERSION(11, 0, 4): 2226 case IP_VERSION(11, 0, 7): 2227 case IP_VERSION(11, 0, 8): 2228 adev->smuio.funcs = &smuio_v11_0_funcs; 2229 break; 2230 case IP_VERSION(11, 0, 6): 2231 case IP_VERSION(11, 0, 10): 2232 case IP_VERSION(11, 0, 11): 2233 case IP_VERSION(11, 5, 0): 2234 case IP_VERSION(13, 0, 1): 2235 case IP_VERSION(13, 0, 9): 2236 case IP_VERSION(13, 0, 10): 2237 adev->smuio.funcs = &smuio_v11_0_6_funcs; 2238 break; 2239 case IP_VERSION(13, 0, 2): 2240 adev->smuio.funcs = &smuio_v13_0_funcs; 2241 break; 2242 case IP_VERSION(13, 0, 6): 2243 adev->smuio.funcs = &smuio_v13_0_6_funcs; 2244 break; 2245 default: 2246 break; 2247 } 2248 2249 r = amdgpu_discovery_set_common_ip_blocks(adev); 2250 if (r) 2251 return r; 2252 2253 r = amdgpu_discovery_set_gmc_ip_blocks(adev); 2254 if (r) 2255 return r; 2256 2257 /* For SR-IOV, PSP needs to be initialized before IH */ 2258 if (amdgpu_sriov_vf(adev)) { 2259 r = amdgpu_discovery_set_psp_ip_blocks(adev); 2260 if (r) 2261 return r; 2262 r = amdgpu_discovery_set_ih_ip_blocks(adev); 2263 if (r) 2264 return r; 2265 } else { 2266 r = amdgpu_discovery_set_ih_ip_blocks(adev); 2267 if (r) 2268 return r; 2269 2270 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { 2271 r = amdgpu_discovery_set_psp_ip_blocks(adev); 2272 if (r) 2273 return r; 2274 } 2275 } 2276 2277 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { 2278 r = amdgpu_discovery_set_smu_ip_blocks(adev); 2279 if (r) 2280 return r; 2281 } 2282 2283 r = amdgpu_discovery_set_display_ip_blocks(adev); 2284 if (r) 2285 return r; 2286 2287 r = amdgpu_discovery_set_gc_ip_blocks(adev); 2288 if (r) 2289 return r; 2290 2291 r = amdgpu_discovery_set_sdma_ip_blocks(adev); 2292 if (r) 2293 return r; 2294 2295 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && 2296 !amdgpu_sriov_vf(adev)) { 2297 r = amdgpu_discovery_set_smu_ip_blocks(adev); 2298 if (r) 2299 return r; 2300 } 2301 2302 r = amdgpu_discovery_set_mm_ip_blocks(adev); 2303 if (r) 2304 return r; 2305 2306 if (adev->enable_mes) { 2307 r = amdgpu_discovery_set_mes_ip_blocks(adev); 2308 if (r) 2309 return r; 2310 } 2311 2312 return 0; 2313 } 2314 2315