1 /* 2 * Copyright 2018 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include <linux/firmware.h> 25 26 #include "amdgpu.h" 27 #include "amdgpu_discovery.h" 28 #include "soc15_hw_ip.h" 29 #include "discovery.h" 30 31 #include "soc15.h" 32 #include "gfx_v9_0.h" 33 #include "gmc_v9_0.h" 34 #include "df_v1_7.h" 35 #include "df_v3_6.h" 36 #include "nbio_v6_1.h" 37 #include "nbio_v7_0.h" 38 #include "nbio_v7_4.h" 39 #include "hdp_v4_0.h" 40 #include "vega10_ih.h" 41 #include "vega20_ih.h" 42 #include "sdma_v4_0.h" 43 #include "uvd_v7_0.h" 44 #include "vce_v4_0.h" 45 #include "vcn_v1_0.h" 46 #include "vcn_v2_5.h" 47 #include "jpeg_v2_5.h" 48 #include "smuio_v9_0.h" 49 #include "gmc_v10_0.h" 50 #include "gmc_v11_0.h" 51 #include "gfxhub_v2_0.h" 52 #include "mmhub_v2_0.h" 53 #include "nbio_v2_3.h" 54 #include "nbio_v4_3.h" 55 #include "nbio_v7_2.h" 56 #include "nbio_v7_7.h" 57 #include "hdp_v5_0.h" 58 #include "hdp_v5_2.h" 59 #include "hdp_v6_0.h" 60 #include "nv.h" 61 #include "soc21.h" 62 #include "navi10_ih.h" 63 #include "ih_v6_0.h" 64 #include "gfx_v10_0.h" 65 #include "gfx_v11_0.h" 66 #include "sdma_v5_0.h" 67 #include "sdma_v5_2.h" 68 #include "sdma_v6_0.h" 69 #include "lsdma_v6_0.h" 70 #include "vcn_v2_0.h" 71 #include "jpeg_v2_0.h" 72 #include "vcn_v3_0.h" 73 #include "jpeg_v3_0.h" 74 #include "vcn_v4_0.h" 75 #include "jpeg_v4_0.h" 76 #include "amdgpu_vkms.h" 77 #include "mes_v10_1.h" 78 #include "mes_v11_0.h" 79 #include "smuio_v11_0.h" 80 #include "smuio_v11_0_6.h" 81 #include "smuio_v13_0.h" 82 #include "smuio_v13_0_6.h" 83 84 #define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin" 85 MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY); 86 87 #define mmRCC_CONFIG_MEMSIZE 0xde3 88 #define mmMM_INDEX 0x0 89 #define mmMM_INDEX_HI 0x6 90 #define mmMM_DATA 0x1 91 92 static const char *hw_id_names[HW_ID_MAX] = { 93 [MP1_HWID] = "MP1", 94 [MP2_HWID] = "MP2", 95 [THM_HWID] = "THM", 96 [SMUIO_HWID] = "SMUIO", 97 [FUSE_HWID] = "FUSE", 98 [CLKA_HWID] = "CLKA", 99 [PWR_HWID] = "PWR", 100 [GC_HWID] = "GC", 101 [UVD_HWID] = "UVD", 102 [AUDIO_AZ_HWID] = "AUDIO_AZ", 103 [ACP_HWID] = "ACP", 104 [DCI_HWID] = "DCI", 105 [DMU_HWID] = "DMU", 106 [DCO_HWID] = "DCO", 107 [DIO_HWID] = "DIO", 108 [XDMA_HWID] = "XDMA", 109 [DCEAZ_HWID] = "DCEAZ", 110 [DAZ_HWID] = "DAZ", 111 [SDPMUX_HWID] = "SDPMUX", 112 [NTB_HWID] = "NTB", 113 [IOHC_HWID] = "IOHC", 114 [L2IMU_HWID] = "L2IMU", 115 [VCE_HWID] = "VCE", 116 [MMHUB_HWID] = "MMHUB", 117 [ATHUB_HWID] = "ATHUB", 118 [DBGU_NBIO_HWID] = "DBGU_NBIO", 119 [DFX_HWID] = "DFX", 120 [DBGU0_HWID] = "DBGU0", 121 [DBGU1_HWID] = "DBGU1", 122 [OSSSYS_HWID] = "OSSSYS", 123 [HDP_HWID] = "HDP", 124 [SDMA0_HWID] = "SDMA0", 125 [SDMA1_HWID] = "SDMA1", 126 [SDMA2_HWID] = "SDMA2", 127 [SDMA3_HWID] = "SDMA3", 128 [LSDMA_HWID] = "LSDMA", 129 [ISP_HWID] = "ISP", 130 [DBGU_IO_HWID] = "DBGU_IO", 131 [DF_HWID] = "DF", 132 [CLKB_HWID] = "CLKB", 133 [FCH_HWID] = "FCH", 134 [DFX_DAP_HWID] = "DFX_DAP", 135 [L1IMU_PCIE_HWID] = "L1IMU_PCIE", 136 [L1IMU_NBIF_HWID] = "L1IMU_NBIF", 137 [L1IMU_IOAGR_HWID] = "L1IMU_IOAGR", 138 [L1IMU3_HWID] = "L1IMU3", 139 [L1IMU4_HWID] = "L1IMU4", 140 [L1IMU5_HWID] = "L1IMU5", 141 [L1IMU6_HWID] = "L1IMU6", 142 [L1IMU7_HWID] = "L1IMU7", 143 [L1IMU8_HWID] = "L1IMU8", 144 [L1IMU9_HWID] = "L1IMU9", 145 [L1IMU10_HWID] = "L1IMU10", 146 [L1IMU11_HWID] = "L1IMU11", 147 [L1IMU12_HWID] = "L1IMU12", 148 [L1IMU13_HWID] = "L1IMU13", 149 [L1IMU14_HWID] = "L1IMU14", 150 [L1IMU15_HWID] = "L1IMU15", 151 [WAFLC_HWID] = "WAFLC", 152 [FCH_USB_PD_HWID] = "FCH_USB_PD", 153 [PCIE_HWID] = "PCIE", 154 [PCS_HWID] = "PCS", 155 [DDCL_HWID] = "DDCL", 156 [SST_HWID] = "SST", 157 [IOAGR_HWID] = "IOAGR", 158 [NBIF_HWID] = "NBIF", 159 [IOAPIC_HWID] = "IOAPIC", 160 [SYSTEMHUB_HWID] = "SYSTEMHUB", 161 [NTBCCP_HWID] = "NTBCCP", 162 [UMC_HWID] = "UMC", 163 [SATA_HWID] = "SATA", 164 [USB_HWID] = "USB", 165 [CCXSEC_HWID] = "CCXSEC", 166 [XGMI_HWID] = "XGMI", 167 [XGBE_HWID] = "XGBE", 168 [MP0_HWID] = "MP0", 169 }; 170 171 static int hw_id_map[MAX_HWIP] = { 172 [GC_HWIP] = GC_HWID, 173 [HDP_HWIP] = HDP_HWID, 174 [SDMA0_HWIP] = SDMA0_HWID, 175 [SDMA1_HWIP] = SDMA1_HWID, 176 [SDMA2_HWIP] = SDMA2_HWID, 177 [SDMA3_HWIP] = SDMA3_HWID, 178 [LSDMA_HWIP] = LSDMA_HWID, 179 [MMHUB_HWIP] = MMHUB_HWID, 180 [ATHUB_HWIP] = ATHUB_HWID, 181 [NBIO_HWIP] = NBIF_HWID, 182 [MP0_HWIP] = MP0_HWID, 183 [MP1_HWIP] = MP1_HWID, 184 [UVD_HWIP] = UVD_HWID, 185 [VCE_HWIP] = VCE_HWID, 186 [DF_HWIP] = DF_HWID, 187 [DCE_HWIP] = DMU_HWID, 188 [OSSSYS_HWIP] = OSSSYS_HWID, 189 [SMUIO_HWIP] = SMUIO_HWID, 190 [PWR_HWIP] = PWR_HWID, 191 [NBIF_HWIP] = NBIF_HWID, 192 [THM_HWIP] = THM_HWID, 193 [CLK_HWIP] = CLKA_HWID, 194 [UMC_HWIP] = UMC_HWID, 195 [XGMI_HWIP] = XGMI_HWID, 196 [DCI_HWIP] = DCI_HWID, 197 [PCIE_HWIP] = PCIE_HWID, 198 }; 199 200 static int amdgpu_discovery_read_binary_from_vram(struct amdgpu_device *adev, uint8_t *binary) 201 { 202 uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; 203 uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET; 204 205 amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, 206 adev->mman.discovery_tmr_size, false); 207 return 0; 208 } 209 210 static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary) 211 { 212 const struct firmware *fw; 213 const char *fw_name; 214 int r; 215 216 switch (amdgpu_discovery) { 217 case 2: 218 fw_name = FIRMWARE_IP_DISCOVERY; 219 break; 220 default: 221 dev_warn(adev->dev, "amdgpu_discovery is not set properly\n"); 222 return -EINVAL; 223 } 224 225 r = request_firmware(&fw, fw_name, adev->dev); 226 if (r) { 227 dev_err(adev->dev, "can't load firmware \"%s\"\n", 228 fw_name); 229 return r; 230 } 231 232 memcpy((u8 *)binary, (u8 *)fw->data, fw->size); 233 release_firmware(fw); 234 235 return 0; 236 } 237 238 static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size) 239 { 240 uint16_t checksum = 0; 241 int i; 242 243 for (i = 0; i < size; i++) 244 checksum += data[i]; 245 246 return checksum; 247 } 248 249 static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size, 250 uint16_t expected) 251 { 252 return !!(amdgpu_discovery_calculate_checksum(data, size) == expected); 253 } 254 255 static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary) 256 { 257 struct binary_header *bhdr; 258 bhdr = (struct binary_header *)binary; 259 260 return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE); 261 } 262 263 static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev) 264 { 265 /* 266 * So far, apply this quirk only on those Navy Flounder boards which 267 * have a bad harvest table of VCN config. 268 */ 269 if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) && 270 (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) { 271 switch (adev->pdev->revision) { 272 case 0xC1: 273 case 0xC2: 274 case 0xC3: 275 case 0xC5: 276 case 0xC7: 277 case 0xCF: 278 case 0xDF: 279 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; 280 break; 281 default: 282 break; 283 } 284 } 285 } 286 287 static int amdgpu_discovery_init(struct amdgpu_device *adev) 288 { 289 struct table_info *info; 290 struct binary_header *bhdr; 291 uint16_t offset; 292 uint16_t size; 293 uint16_t checksum; 294 int r; 295 296 adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE; 297 adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL); 298 if (!adev->mman.discovery_bin) 299 return -ENOMEM; 300 301 r = amdgpu_discovery_read_binary_from_vram(adev, adev->mman.discovery_bin); 302 if (r) { 303 dev_err(adev->dev, "failed to read ip discovery binary from vram\n"); 304 r = -EINVAL; 305 goto out; 306 } 307 308 if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin) || amdgpu_discovery == 2) { 309 /* ignore the discovery binary from vram if discovery=2 in kernel module parameter */ 310 if (amdgpu_discovery == 2) 311 dev_info(adev->dev,"force read ip discovery binary from file"); 312 else 313 dev_warn(adev->dev, "get invalid ip discovery binary signature from vram\n"); 314 315 /* retry read ip discovery binary from file */ 316 r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin); 317 if (r) { 318 dev_err(adev->dev, "failed to read ip discovery binary from file\n"); 319 r = -EINVAL; 320 goto out; 321 } 322 /* check the ip discovery binary signature */ 323 if(!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) { 324 dev_warn(adev->dev, "get invalid ip discovery binary signature from file\n"); 325 r = -EINVAL; 326 goto out; 327 } 328 } 329 330 bhdr = (struct binary_header *)adev->mman.discovery_bin; 331 332 offset = offsetof(struct binary_header, binary_checksum) + 333 sizeof(bhdr->binary_checksum); 334 size = le16_to_cpu(bhdr->binary_size) - offset; 335 checksum = le16_to_cpu(bhdr->binary_checksum); 336 337 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 338 size, checksum)) { 339 dev_err(adev->dev, "invalid ip discovery binary checksum\n"); 340 r = -EINVAL; 341 goto out; 342 } 343 344 info = &bhdr->table_list[IP_DISCOVERY]; 345 offset = le16_to_cpu(info->offset); 346 checksum = le16_to_cpu(info->checksum); 347 348 if (offset) { 349 struct ip_discovery_header *ihdr = 350 (struct ip_discovery_header *)(adev->mman.discovery_bin + offset); 351 if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) { 352 dev_err(adev->dev, "invalid ip discovery data table signature\n"); 353 r = -EINVAL; 354 goto out; 355 } 356 357 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 358 le16_to_cpu(ihdr->size), checksum)) { 359 dev_err(adev->dev, "invalid ip discovery data table checksum\n"); 360 r = -EINVAL; 361 goto out; 362 } 363 } 364 365 info = &bhdr->table_list[GC]; 366 offset = le16_to_cpu(info->offset); 367 checksum = le16_to_cpu(info->checksum); 368 369 if (offset) { 370 struct gpu_info_header *ghdr = 371 (struct gpu_info_header *)(adev->mman.discovery_bin + offset); 372 373 if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) { 374 dev_err(adev->dev, "invalid ip discovery gc table id\n"); 375 r = -EINVAL; 376 goto out; 377 } 378 379 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 380 le32_to_cpu(ghdr->size), checksum)) { 381 dev_err(adev->dev, "invalid gc data table checksum\n"); 382 r = -EINVAL; 383 goto out; 384 } 385 } 386 387 info = &bhdr->table_list[HARVEST_INFO]; 388 offset = le16_to_cpu(info->offset); 389 checksum = le16_to_cpu(info->checksum); 390 391 if (offset) { 392 struct harvest_info_header *hhdr = 393 (struct harvest_info_header *)(adev->mman.discovery_bin + offset); 394 395 if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) { 396 dev_err(adev->dev, "invalid ip discovery harvest table signature\n"); 397 r = -EINVAL; 398 goto out; 399 } 400 401 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 402 sizeof(struct harvest_table), checksum)) { 403 dev_err(adev->dev, "invalid harvest data table checksum\n"); 404 r = -EINVAL; 405 goto out; 406 } 407 } 408 409 info = &bhdr->table_list[VCN_INFO]; 410 offset = le16_to_cpu(info->offset); 411 checksum = le16_to_cpu(info->checksum); 412 413 if (offset) { 414 struct vcn_info_header *vhdr = 415 (struct vcn_info_header *)(adev->mman.discovery_bin + offset); 416 417 if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) { 418 dev_err(adev->dev, "invalid ip discovery vcn table id\n"); 419 r = -EINVAL; 420 goto out; 421 } 422 423 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 424 le32_to_cpu(vhdr->size_bytes), checksum)) { 425 dev_err(adev->dev, "invalid vcn data table checksum\n"); 426 r = -EINVAL; 427 goto out; 428 } 429 } 430 431 info = &bhdr->table_list[MALL_INFO]; 432 offset = le16_to_cpu(info->offset); 433 checksum = le16_to_cpu(info->checksum); 434 435 if (0 && offset) { 436 struct mall_info_header *mhdr = 437 (struct mall_info_header *)(adev->mman.discovery_bin + offset); 438 439 if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) { 440 dev_err(adev->dev, "invalid ip discovery mall table id\n"); 441 r = -EINVAL; 442 goto out; 443 } 444 445 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 446 le32_to_cpu(mhdr->size_bytes), checksum)) { 447 dev_err(adev->dev, "invalid mall data table checksum\n"); 448 r = -EINVAL; 449 goto out; 450 } 451 } 452 453 return 0; 454 455 out: 456 kfree(adev->mman.discovery_bin); 457 adev->mman.discovery_bin = NULL; 458 459 return r; 460 } 461 462 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev); 463 464 void amdgpu_discovery_fini(struct amdgpu_device *adev) 465 { 466 amdgpu_discovery_sysfs_fini(adev); 467 kfree(adev->mman.discovery_bin); 468 adev->mman.discovery_bin = NULL; 469 } 470 471 static int amdgpu_discovery_validate_ip(const struct ip *ip) 472 { 473 if (ip->number_instance >= HWIP_MAX_INSTANCE) { 474 DRM_ERROR("Unexpected number_instance (%d) from ip discovery blob\n", 475 ip->number_instance); 476 return -EINVAL; 477 } 478 if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) { 479 DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n", 480 le16_to_cpu(ip->hw_id)); 481 return -EINVAL; 482 } 483 484 return 0; 485 } 486 487 static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev, 488 uint32_t *vcn_harvest_count) 489 { 490 struct binary_header *bhdr; 491 struct ip_discovery_header *ihdr; 492 struct die_header *dhdr; 493 struct ip *ip; 494 uint16_t die_offset, ip_offset, num_dies, num_ips; 495 int i, j; 496 497 bhdr = (struct binary_header *)adev->mman.discovery_bin; 498 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + 499 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); 500 num_dies = le16_to_cpu(ihdr->num_dies); 501 502 /* scan harvest bit of all IP data structures */ 503 for (i = 0; i < num_dies; i++) { 504 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); 505 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); 506 num_ips = le16_to_cpu(dhdr->num_ips); 507 ip_offset = die_offset + sizeof(*dhdr); 508 509 for (j = 0; j < num_ips; j++) { 510 ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); 511 512 if (amdgpu_discovery_validate_ip(ip)) 513 goto next_ip; 514 515 if (le16_to_cpu(ip->harvest) == 1) { 516 switch (le16_to_cpu(ip->hw_id)) { 517 case VCN_HWID: 518 (*vcn_harvest_count)++; 519 if (ip->number_instance == 0) 520 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0; 521 else 522 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; 523 break; 524 case DMU_HWID: 525 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; 526 break; 527 default: 528 break; 529 } 530 } 531 next_ip: 532 ip_offset += struct_size(ip, base_address, ip->num_base_address); 533 } 534 } 535 } 536 537 static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev, 538 uint32_t *vcn_harvest_count, 539 uint32_t *umc_harvest_count) 540 { 541 struct binary_header *bhdr; 542 struct harvest_table *harvest_info; 543 u16 offset; 544 int i; 545 546 bhdr = (struct binary_header *)adev->mman.discovery_bin; 547 offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset); 548 549 if (!offset) { 550 dev_err(adev->dev, "invalid harvest table offset\n"); 551 return; 552 } 553 554 harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset); 555 556 for (i = 0; i < 32; i++) { 557 if (le16_to_cpu(harvest_info->list[i].hw_id) == 0) 558 break; 559 560 switch (le16_to_cpu(harvest_info->list[i].hw_id)) { 561 case VCN_HWID: 562 (*vcn_harvest_count)++; 563 if (harvest_info->list[i].number_instance == 0) 564 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0; 565 else 566 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; 567 break; 568 case DMU_HWID: 569 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; 570 break; 571 case UMC_HWID: 572 (*umc_harvest_count)++; 573 break; 574 default: 575 break; 576 } 577 } 578 } 579 580 /* ================================================== */ 581 582 struct ip_hw_instance { 583 struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */ 584 585 int hw_id; 586 u8 num_instance; 587 u8 major, minor, revision; 588 u8 harvest; 589 590 int num_base_addresses; 591 u32 base_addr[]; 592 }; 593 594 struct ip_hw_id { 595 struct kset hw_id_kset; /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */ 596 int hw_id; 597 }; 598 599 struct ip_die_entry { 600 struct kset ip_kset; /* ip_discovery/die/#die/, contains ip_hw_id */ 601 u16 num_ips; 602 }; 603 604 /* -------------------------------------------------- */ 605 606 struct ip_hw_instance_attr { 607 struct attribute attr; 608 ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf); 609 }; 610 611 static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf) 612 { 613 return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id); 614 } 615 616 static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf) 617 { 618 return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance); 619 } 620 621 static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf) 622 { 623 return sysfs_emit(buf, "%d\n", ip_hw_instance->major); 624 } 625 626 static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf) 627 { 628 return sysfs_emit(buf, "%d\n", ip_hw_instance->minor); 629 } 630 631 static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf) 632 { 633 return sysfs_emit(buf, "%d\n", ip_hw_instance->revision); 634 } 635 636 static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf) 637 { 638 return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest); 639 } 640 641 static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf) 642 { 643 return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses); 644 } 645 646 static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf) 647 { 648 ssize_t res, at; 649 int ii; 650 651 for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) { 652 /* Here we satisfy the condition that, at + size <= PAGE_SIZE. 653 */ 654 if (at + 12 > PAGE_SIZE) 655 break; 656 res = sysfs_emit_at(buf, at, "0x%08X\n", 657 ip_hw_instance->base_addr[ii]); 658 if (res <= 0) 659 break; 660 at += res; 661 } 662 663 return res < 0 ? res : at; 664 } 665 666 static struct ip_hw_instance_attr ip_hw_attr[] = { 667 __ATTR_RO(hw_id), 668 __ATTR_RO(num_instance), 669 __ATTR_RO(major), 670 __ATTR_RO(minor), 671 __ATTR_RO(revision), 672 __ATTR_RO(harvest), 673 __ATTR_RO(num_base_addresses), 674 __ATTR_RO(base_addr), 675 }; 676 677 static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1]; 678 ATTRIBUTE_GROUPS(ip_hw_instance); 679 680 #define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj) 681 #define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr) 682 683 static ssize_t ip_hw_instance_attr_show(struct kobject *kobj, 684 struct attribute *attr, 685 char *buf) 686 { 687 struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj); 688 struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr); 689 690 if (!ip_hw_attr->show) 691 return -EIO; 692 693 return ip_hw_attr->show(ip_hw_instance, buf); 694 } 695 696 static const struct sysfs_ops ip_hw_instance_sysfs_ops = { 697 .show = ip_hw_instance_attr_show, 698 }; 699 700 static void ip_hw_instance_release(struct kobject *kobj) 701 { 702 struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj); 703 704 kfree(ip_hw_instance); 705 } 706 707 static struct kobj_type ip_hw_instance_ktype = { 708 .release = ip_hw_instance_release, 709 .sysfs_ops = &ip_hw_instance_sysfs_ops, 710 .default_groups = ip_hw_instance_groups, 711 }; 712 713 /* -------------------------------------------------- */ 714 715 #define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset) 716 717 static void ip_hw_id_release(struct kobject *kobj) 718 { 719 struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj); 720 721 if (!list_empty(&ip_hw_id->hw_id_kset.list)) 722 DRM_ERROR("ip_hw_id->hw_id_kset is not empty"); 723 kfree(ip_hw_id); 724 } 725 726 static struct kobj_type ip_hw_id_ktype = { 727 .release = ip_hw_id_release, 728 .sysfs_ops = &kobj_sysfs_ops, 729 }; 730 731 /* -------------------------------------------------- */ 732 733 static void die_kobj_release(struct kobject *kobj); 734 static void ip_disc_release(struct kobject *kobj); 735 736 struct ip_die_entry_attribute { 737 struct attribute attr; 738 ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf); 739 }; 740 741 #define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr) 742 743 static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf) 744 { 745 return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips); 746 } 747 748 /* If there are more ip_die_entry attrs, other than the number of IPs, 749 * we can make this intro an array of attrs, and then initialize 750 * ip_die_entry_attrs in a loop. 751 */ 752 static struct ip_die_entry_attribute num_ips_attr = 753 __ATTR_RO(num_ips); 754 755 static struct attribute *ip_die_entry_attrs[] = { 756 &num_ips_attr.attr, 757 NULL, 758 }; 759 ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */ 760 761 #define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset) 762 763 static ssize_t ip_die_entry_attr_show(struct kobject *kobj, 764 struct attribute *attr, 765 char *buf) 766 { 767 struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr); 768 struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj); 769 770 if (!ip_die_entry_attr->show) 771 return -EIO; 772 773 return ip_die_entry_attr->show(ip_die_entry, buf); 774 } 775 776 static void ip_die_entry_release(struct kobject *kobj) 777 { 778 struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj); 779 780 if (!list_empty(&ip_die_entry->ip_kset.list)) 781 DRM_ERROR("ip_die_entry->ip_kset is not empty"); 782 kfree(ip_die_entry); 783 } 784 785 static const struct sysfs_ops ip_die_entry_sysfs_ops = { 786 .show = ip_die_entry_attr_show, 787 }; 788 789 static struct kobj_type ip_die_entry_ktype = { 790 .release = ip_die_entry_release, 791 .sysfs_ops = &ip_die_entry_sysfs_ops, 792 .default_groups = ip_die_entry_groups, 793 }; 794 795 static struct kobj_type die_kobj_ktype = { 796 .release = die_kobj_release, 797 .sysfs_ops = &kobj_sysfs_ops, 798 }; 799 800 static struct kobj_type ip_discovery_ktype = { 801 .release = ip_disc_release, 802 .sysfs_ops = &kobj_sysfs_ops, 803 }; 804 805 struct ip_discovery_top { 806 struct kobject kobj; /* ip_discovery/ */ 807 struct kset die_kset; /* ip_discovery/die/, contains ip_die_entry */ 808 struct amdgpu_device *adev; 809 }; 810 811 static void die_kobj_release(struct kobject *kobj) 812 { 813 struct ip_discovery_top *ip_top = container_of(to_kset(kobj), 814 struct ip_discovery_top, 815 die_kset); 816 if (!list_empty(&ip_top->die_kset.list)) 817 DRM_ERROR("ip_top->die_kset is not empty"); 818 } 819 820 static void ip_disc_release(struct kobject *kobj) 821 { 822 struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top, 823 kobj); 824 struct amdgpu_device *adev = ip_top->adev; 825 826 adev->ip_top = NULL; 827 kfree(ip_top); 828 } 829 830 static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev, 831 struct ip_die_entry *ip_die_entry, 832 const size_t _ip_offset, const int num_ips) 833 { 834 int ii, jj, kk, res; 835 836 DRM_DEBUG("num_ips:%d", num_ips); 837 838 /* Find all IPs of a given HW ID, and add their instance to 839 * #die/#hw_id/#instance/<attributes> 840 */ 841 for (ii = 0; ii < HW_ID_MAX; ii++) { 842 struct ip_hw_id *ip_hw_id = NULL; 843 size_t ip_offset = _ip_offset; 844 845 for (jj = 0; jj < num_ips; jj++) { 846 struct ip *ip; 847 struct ip_hw_instance *ip_hw_instance; 848 849 ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); 850 if (amdgpu_discovery_validate_ip(ip) || 851 le16_to_cpu(ip->hw_id) != ii) 852 goto next_ip; 853 854 DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset); 855 856 /* We have a hw_id match; register the hw 857 * block if not yet registered. 858 */ 859 if (!ip_hw_id) { 860 ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL); 861 if (!ip_hw_id) 862 return -ENOMEM; 863 ip_hw_id->hw_id = ii; 864 865 kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii); 866 ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset; 867 ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype; 868 res = kset_register(&ip_hw_id->hw_id_kset); 869 if (res) { 870 DRM_ERROR("Couldn't register ip_hw_id kset"); 871 kfree(ip_hw_id); 872 return res; 873 } 874 if (hw_id_names[ii]) { 875 res = sysfs_create_link(&ip_die_entry->ip_kset.kobj, 876 &ip_hw_id->hw_id_kset.kobj, 877 hw_id_names[ii]); 878 if (res) { 879 DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n", 880 hw_id_names[ii], 881 kobject_name(&ip_die_entry->ip_kset.kobj)); 882 } 883 } 884 } 885 886 /* Now register its instance. 887 */ 888 ip_hw_instance = kzalloc(struct_size(ip_hw_instance, 889 base_addr, 890 ip->num_base_address), 891 GFP_KERNEL); 892 if (!ip_hw_instance) { 893 DRM_ERROR("no memory for ip_hw_instance"); 894 return -ENOMEM; 895 } 896 ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */ 897 ip_hw_instance->num_instance = ip->number_instance; 898 ip_hw_instance->major = ip->major; 899 ip_hw_instance->minor = ip->minor; 900 ip_hw_instance->revision = ip->revision; 901 ip_hw_instance->harvest = ip->harvest; 902 ip_hw_instance->num_base_addresses = ip->num_base_address; 903 904 for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) 905 ip_hw_instance->base_addr[kk] = ip->base_address[kk]; 906 907 kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype); 908 ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset; 909 res = kobject_add(&ip_hw_instance->kobj, NULL, 910 "%d", ip_hw_instance->num_instance); 911 next_ip: 912 ip_offset += struct_size(ip, base_address, ip->num_base_address); 913 } 914 } 915 916 return 0; 917 } 918 919 static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev) 920 { 921 struct binary_header *bhdr; 922 struct ip_discovery_header *ihdr; 923 struct die_header *dhdr; 924 struct kset *die_kset = &adev->ip_top->die_kset; 925 u16 num_dies, die_offset, num_ips; 926 size_t ip_offset; 927 int ii, res; 928 929 bhdr = (struct binary_header *)adev->mman.discovery_bin; 930 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + 931 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); 932 num_dies = le16_to_cpu(ihdr->num_dies); 933 934 DRM_DEBUG("number of dies: %d\n", num_dies); 935 936 for (ii = 0; ii < num_dies; ii++) { 937 struct ip_die_entry *ip_die_entry; 938 939 die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset); 940 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); 941 num_ips = le16_to_cpu(dhdr->num_ips); 942 ip_offset = die_offset + sizeof(*dhdr); 943 944 /* Add the die to the kset. 945 * 946 * dhdr->die_id == ii, which was checked in 947 * amdgpu_discovery_reg_base_init(). 948 */ 949 950 ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL); 951 if (!ip_die_entry) 952 return -ENOMEM; 953 954 ip_die_entry->num_ips = num_ips; 955 956 kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id)); 957 ip_die_entry->ip_kset.kobj.kset = die_kset; 958 ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype; 959 res = kset_register(&ip_die_entry->ip_kset); 960 if (res) { 961 DRM_ERROR("Couldn't register ip_die_entry kset"); 962 kfree(ip_die_entry); 963 return res; 964 } 965 966 amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips); 967 } 968 969 return 0; 970 } 971 972 static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev) 973 { 974 struct kset *die_kset; 975 int res, ii; 976 977 adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL); 978 if (!adev->ip_top) 979 return -ENOMEM; 980 981 adev->ip_top->adev = adev; 982 983 res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype, 984 &adev->dev->kobj, "ip_discovery"); 985 if (res) { 986 DRM_ERROR("Couldn't init and add ip_discovery/"); 987 goto Err; 988 } 989 990 die_kset = &adev->ip_top->die_kset; 991 kobject_set_name(&die_kset->kobj, "%s", "die"); 992 die_kset->kobj.parent = &adev->ip_top->kobj; 993 die_kset->kobj.ktype = &die_kobj_ktype; 994 res = kset_register(&adev->ip_top->die_kset); 995 if (res) { 996 DRM_ERROR("Couldn't register die_kset"); 997 goto Err; 998 } 999 1000 for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++) 1001 ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr; 1002 ip_hw_instance_attrs[ii] = NULL; 1003 1004 res = amdgpu_discovery_sysfs_recurse(adev); 1005 1006 return res; 1007 Err: 1008 kobject_put(&adev->ip_top->kobj); 1009 return res; 1010 } 1011 1012 /* -------------------------------------------------- */ 1013 1014 #define list_to_kobj(el) container_of(el, struct kobject, entry) 1015 1016 static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id) 1017 { 1018 struct list_head *el, *tmp; 1019 struct kset *hw_id_kset; 1020 1021 hw_id_kset = &ip_hw_id->hw_id_kset; 1022 spin_lock(&hw_id_kset->list_lock); 1023 list_for_each_prev_safe(el, tmp, &hw_id_kset->list) { 1024 list_del_init(el); 1025 spin_unlock(&hw_id_kset->list_lock); 1026 /* kobject is embedded in ip_hw_instance */ 1027 kobject_put(list_to_kobj(el)); 1028 spin_lock(&hw_id_kset->list_lock); 1029 } 1030 spin_unlock(&hw_id_kset->list_lock); 1031 kobject_put(&ip_hw_id->hw_id_kset.kobj); 1032 } 1033 1034 static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry) 1035 { 1036 struct list_head *el, *tmp; 1037 struct kset *ip_kset; 1038 1039 ip_kset = &ip_die_entry->ip_kset; 1040 spin_lock(&ip_kset->list_lock); 1041 list_for_each_prev_safe(el, tmp, &ip_kset->list) { 1042 list_del_init(el); 1043 spin_unlock(&ip_kset->list_lock); 1044 amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el))); 1045 spin_lock(&ip_kset->list_lock); 1046 } 1047 spin_unlock(&ip_kset->list_lock); 1048 kobject_put(&ip_die_entry->ip_kset.kobj); 1049 } 1050 1051 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev) 1052 { 1053 struct list_head *el, *tmp; 1054 struct kset *die_kset; 1055 1056 die_kset = &adev->ip_top->die_kset; 1057 spin_lock(&die_kset->list_lock); 1058 list_for_each_prev_safe(el, tmp, &die_kset->list) { 1059 list_del_init(el); 1060 spin_unlock(&die_kset->list_lock); 1061 amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el))); 1062 spin_lock(&die_kset->list_lock); 1063 } 1064 spin_unlock(&die_kset->list_lock); 1065 kobject_put(&adev->ip_top->die_kset.kobj); 1066 kobject_put(&adev->ip_top->kobj); 1067 } 1068 1069 /* ================================================== */ 1070 1071 static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) 1072 { 1073 struct binary_header *bhdr; 1074 struct ip_discovery_header *ihdr; 1075 struct die_header *dhdr; 1076 struct ip *ip; 1077 uint16_t die_offset; 1078 uint16_t ip_offset; 1079 uint16_t num_dies; 1080 uint16_t num_ips; 1081 uint8_t num_base_address; 1082 int hw_ip; 1083 int i, j, k; 1084 int r; 1085 1086 r = amdgpu_discovery_init(adev); 1087 if (r) { 1088 DRM_ERROR("amdgpu_discovery_init failed\n"); 1089 return r; 1090 } 1091 1092 bhdr = (struct binary_header *)adev->mman.discovery_bin; 1093 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + 1094 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); 1095 num_dies = le16_to_cpu(ihdr->num_dies); 1096 1097 DRM_DEBUG("number of dies: %d\n", num_dies); 1098 1099 for (i = 0; i < num_dies; i++) { 1100 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); 1101 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); 1102 num_ips = le16_to_cpu(dhdr->num_ips); 1103 ip_offset = die_offset + sizeof(*dhdr); 1104 1105 if (le16_to_cpu(dhdr->die_id) != i) { 1106 DRM_ERROR("invalid die id %d, expected %d\n", 1107 le16_to_cpu(dhdr->die_id), i); 1108 return -EINVAL; 1109 } 1110 1111 DRM_DEBUG("number of hardware IPs on die%d: %d\n", 1112 le16_to_cpu(dhdr->die_id), num_ips); 1113 1114 for (j = 0; j < num_ips; j++) { 1115 ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); 1116 1117 if (amdgpu_discovery_validate_ip(ip)) 1118 goto next_ip; 1119 1120 num_base_address = ip->num_base_address; 1121 1122 DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n", 1123 hw_id_names[le16_to_cpu(ip->hw_id)], 1124 le16_to_cpu(ip->hw_id), 1125 ip->number_instance, 1126 ip->major, ip->minor, 1127 ip->revision); 1128 1129 if (le16_to_cpu(ip->hw_id) == VCN_HWID) { 1130 /* Bit [5:0]: original revision value 1131 * Bit [7:6]: en/decode capability: 1132 * 0b00 : VCN function normally 1133 * 0b10 : encode is disabled 1134 * 0b01 : decode is disabled 1135 */ 1136 adev->vcn.vcn_config[adev->vcn.num_vcn_inst] = 1137 ip->revision & 0xc0; 1138 ip->revision &= ~0xc0; 1139 if (adev->vcn.num_vcn_inst < AMDGPU_MAX_VCN_INSTANCES) 1140 adev->vcn.num_vcn_inst++; 1141 else 1142 dev_err(adev->dev, "Too many VCN instances: %d vs %d\n", 1143 adev->vcn.num_vcn_inst + 1, 1144 AMDGPU_MAX_VCN_INSTANCES); 1145 } 1146 if (le16_to_cpu(ip->hw_id) == SDMA0_HWID || 1147 le16_to_cpu(ip->hw_id) == SDMA1_HWID || 1148 le16_to_cpu(ip->hw_id) == SDMA2_HWID || 1149 le16_to_cpu(ip->hw_id) == SDMA3_HWID) { 1150 if (adev->sdma.num_instances < AMDGPU_MAX_SDMA_INSTANCES) 1151 adev->sdma.num_instances++; 1152 else 1153 dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n", 1154 adev->sdma.num_instances + 1, 1155 AMDGPU_MAX_SDMA_INSTANCES); 1156 } 1157 1158 if (le16_to_cpu(ip->hw_id) == UMC_HWID) 1159 adev->gmc.num_umc++; 1160 1161 for (k = 0; k < num_base_address; k++) { 1162 /* 1163 * convert the endianness of base addresses in place, 1164 * so that we don't need to convert them when accessing adev->reg_offset. 1165 */ 1166 ip->base_address[k] = le32_to_cpu(ip->base_address[k]); 1167 DRM_DEBUG("\t0x%08x\n", ip->base_address[k]); 1168 } 1169 1170 for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) { 1171 if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) { 1172 DRM_DEBUG("set register base offset for %s\n", 1173 hw_id_names[le16_to_cpu(ip->hw_id)]); 1174 adev->reg_offset[hw_ip][ip->number_instance] = 1175 ip->base_address; 1176 /* Instance support is somewhat inconsistent. 1177 * SDMA is a good example. Sienna cichlid has 4 total 1178 * SDMA instances, each enumerated separately (HWIDs 1179 * 42, 43, 68, 69). Arcturus has 8 total SDMA instances, 1180 * but they are enumerated as multiple instances of the 1181 * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another 1182 * example. On most chips there are multiple instances 1183 * with the same HWID. 1184 */ 1185 adev->ip_versions[hw_ip][ip->number_instance] = 1186 IP_VERSION(ip->major, ip->minor, ip->revision); 1187 } 1188 } 1189 1190 next_ip: 1191 ip_offset += struct_size(ip, base_address, ip->num_base_address); 1192 } 1193 } 1194 1195 amdgpu_discovery_sysfs_init(adev); 1196 1197 return 0; 1198 } 1199 1200 int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance, 1201 int *major, int *minor, int *revision) 1202 { 1203 struct binary_header *bhdr; 1204 struct ip_discovery_header *ihdr; 1205 struct die_header *dhdr; 1206 struct ip *ip; 1207 uint16_t die_offset; 1208 uint16_t ip_offset; 1209 uint16_t num_dies; 1210 uint16_t num_ips; 1211 int i, j; 1212 1213 if (!adev->mman.discovery_bin) { 1214 DRM_ERROR("ip discovery uninitialized\n"); 1215 return -EINVAL; 1216 } 1217 1218 bhdr = (struct binary_header *)adev->mman.discovery_bin; 1219 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + 1220 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); 1221 num_dies = le16_to_cpu(ihdr->num_dies); 1222 1223 for (i = 0; i < num_dies; i++) { 1224 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); 1225 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); 1226 num_ips = le16_to_cpu(dhdr->num_ips); 1227 ip_offset = die_offset + sizeof(*dhdr); 1228 1229 for (j = 0; j < num_ips; j++) { 1230 ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); 1231 1232 if ((le16_to_cpu(ip->hw_id) == hw_id) && (ip->number_instance == number_instance)) { 1233 if (major) 1234 *major = ip->major; 1235 if (minor) 1236 *minor = ip->minor; 1237 if (revision) 1238 *revision = ip->revision; 1239 return 0; 1240 } 1241 ip_offset += struct_size(ip, base_address, ip->num_base_address); 1242 } 1243 } 1244 1245 return -EINVAL; 1246 } 1247 1248 static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) 1249 { 1250 int vcn_harvest_count = 0; 1251 int umc_harvest_count = 0; 1252 1253 /* 1254 * Harvest table does not fit Navi1x and legacy GPUs, 1255 * so read harvest bit per IP data structure to set 1256 * harvest configuration. 1257 */ 1258 if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0)) { 1259 if ((adev->pdev->device == 0x731E && 1260 (adev->pdev->revision == 0xC6 || 1261 adev->pdev->revision == 0xC7)) || 1262 (adev->pdev->device == 0x7340 && 1263 adev->pdev->revision == 0xC9) || 1264 (adev->pdev->device == 0x7360 && 1265 adev->pdev->revision == 0xC7)) 1266 amdgpu_discovery_read_harvest_bit_per_ip(adev, 1267 &vcn_harvest_count); 1268 } else { 1269 amdgpu_discovery_read_from_harvest_table(adev, 1270 &vcn_harvest_count, 1271 &umc_harvest_count); 1272 } 1273 1274 amdgpu_discovery_harvest_config_quirk(adev); 1275 1276 if (vcn_harvest_count == adev->vcn.num_vcn_inst) { 1277 adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK; 1278 adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK; 1279 } 1280 1281 if (umc_harvest_count < adev->gmc.num_umc) { 1282 adev->gmc.num_umc -= umc_harvest_count; 1283 } 1284 } 1285 1286 union gc_info { 1287 struct gc_info_v1_0 v1; 1288 struct gc_info_v1_1 v1_1; 1289 struct gc_info_v1_2 v1_2; 1290 struct gc_info_v2_0 v2; 1291 }; 1292 1293 static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) 1294 { 1295 struct binary_header *bhdr; 1296 union gc_info *gc_info; 1297 u16 offset; 1298 1299 if (!adev->mman.discovery_bin) { 1300 DRM_ERROR("ip discovery uninitialized\n"); 1301 return -EINVAL; 1302 } 1303 1304 bhdr = (struct binary_header *)adev->mman.discovery_bin; 1305 offset = le16_to_cpu(bhdr->table_list[GC].offset); 1306 1307 if (!offset) 1308 return 0; 1309 1310 gc_info = (union gc_info *)(adev->mman.discovery_bin + offset); 1311 1312 switch (le16_to_cpu(gc_info->v1.header.version_major)) { 1313 case 1: 1314 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se); 1315 adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) + 1316 le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa)); 1317 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se); 1318 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se); 1319 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c); 1320 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs); 1321 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds); 1322 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth); 1323 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth); 1324 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer); 1325 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size); 1326 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd); 1327 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu); 1328 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size); 1329 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) / 1330 le32_to_cpu(gc_info->v1.gc_num_sa_per_se); 1331 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc); 1332 if (gc_info->v1.header.version_minor >= 1) { 1333 adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa); 1334 adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface); 1335 adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps); 1336 } 1337 if (gc_info->v1.header.version_minor >= 2) { 1338 adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg); 1339 adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size); 1340 adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp); 1341 adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc); 1342 adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc); 1343 adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa); 1344 adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance); 1345 adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu); 1346 } 1347 break; 1348 case 2: 1349 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se); 1350 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh); 1351 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se); 1352 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se); 1353 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs); 1354 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs); 1355 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds); 1356 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth); 1357 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth); 1358 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer); 1359 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size); 1360 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd); 1361 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu); 1362 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size); 1363 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) / 1364 le32_to_cpu(gc_info->v2.gc_num_sh_per_se); 1365 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc); 1366 break; 1367 default: 1368 dev_err(adev->dev, 1369 "Unhandled GC info table %d.%d\n", 1370 le16_to_cpu(gc_info->v1.header.version_major), 1371 le16_to_cpu(gc_info->v1.header.version_minor)); 1372 return -EINVAL; 1373 } 1374 return 0; 1375 } 1376 1377 union mall_info { 1378 struct mall_info_v1_0 v1; 1379 }; 1380 1381 static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev) 1382 { 1383 struct binary_header *bhdr; 1384 union mall_info *mall_info; 1385 u32 u, mall_size_per_umc, m_s_present, half_use; 1386 u64 mall_size; 1387 u16 offset; 1388 1389 if (!adev->mman.discovery_bin) { 1390 DRM_ERROR("ip discovery uninitialized\n"); 1391 return -EINVAL; 1392 } 1393 1394 bhdr = (struct binary_header *)adev->mman.discovery_bin; 1395 offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset); 1396 1397 if (!offset) 1398 return 0; 1399 1400 mall_info = (union mall_info *)(adev->mman.discovery_bin + offset); 1401 1402 switch (le16_to_cpu(mall_info->v1.header.version_major)) { 1403 case 1: 1404 mall_size = 0; 1405 mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m); 1406 m_s_present = le32_to_cpu(mall_info->v1.m_s_present); 1407 half_use = le32_to_cpu(mall_info->v1.m_half_use); 1408 for (u = 0; u < adev->gmc.num_umc; u++) { 1409 if (m_s_present & (1 << u)) 1410 mall_size += mall_size_per_umc * 2; 1411 else if (half_use & (1 << u)) 1412 mall_size += mall_size_per_umc / 2; 1413 else 1414 mall_size += mall_size_per_umc; 1415 } 1416 adev->gmc.mall_size = mall_size; 1417 break; 1418 default: 1419 dev_err(adev->dev, 1420 "Unhandled MALL info table %d.%d\n", 1421 le16_to_cpu(mall_info->v1.header.version_major), 1422 le16_to_cpu(mall_info->v1.header.version_minor)); 1423 return -EINVAL; 1424 } 1425 return 0; 1426 } 1427 1428 union vcn_info { 1429 struct vcn_info_v1_0 v1; 1430 }; 1431 1432 static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev) 1433 { 1434 struct binary_header *bhdr; 1435 union vcn_info *vcn_info; 1436 u16 offset; 1437 int v; 1438 1439 if (!adev->mman.discovery_bin) { 1440 DRM_ERROR("ip discovery uninitialized\n"); 1441 return -EINVAL; 1442 } 1443 1444 /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES 1445 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES 1446 * but that may change in the future with new GPUs so keep this 1447 * check for defensive purposes. 1448 */ 1449 if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) { 1450 dev_err(adev->dev, "invalid vcn instances\n"); 1451 return -EINVAL; 1452 } 1453 1454 bhdr = (struct binary_header *)adev->mman.discovery_bin; 1455 offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset); 1456 1457 if (!offset) 1458 return 0; 1459 1460 vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset); 1461 1462 switch (le16_to_cpu(vcn_info->v1.header.version_major)) { 1463 case 1: 1464 /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES 1465 * so this won't overflow. 1466 */ 1467 for (v = 0; v < adev->vcn.num_vcn_inst; v++) { 1468 adev->vcn.vcn_codec_disable_mask[v] = 1469 le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits); 1470 } 1471 break; 1472 default: 1473 dev_err(adev->dev, 1474 "Unhandled VCN info table %d.%d\n", 1475 le16_to_cpu(vcn_info->v1.header.version_major), 1476 le16_to_cpu(vcn_info->v1.header.version_minor)); 1477 return -EINVAL; 1478 } 1479 return 0; 1480 } 1481 1482 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev) 1483 { 1484 /* what IP to use for this? */ 1485 switch (adev->ip_versions[GC_HWIP][0]) { 1486 case IP_VERSION(9, 0, 1): 1487 case IP_VERSION(9, 1, 0): 1488 case IP_VERSION(9, 2, 1): 1489 case IP_VERSION(9, 2, 2): 1490 case IP_VERSION(9, 3, 0): 1491 case IP_VERSION(9, 4, 0): 1492 case IP_VERSION(9, 4, 1): 1493 case IP_VERSION(9, 4, 2): 1494 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); 1495 break; 1496 case IP_VERSION(10, 1, 10): 1497 case IP_VERSION(10, 1, 1): 1498 case IP_VERSION(10, 1, 2): 1499 case IP_VERSION(10, 1, 3): 1500 case IP_VERSION(10, 1, 4): 1501 case IP_VERSION(10, 3, 0): 1502 case IP_VERSION(10, 3, 1): 1503 case IP_VERSION(10, 3, 2): 1504 case IP_VERSION(10, 3, 3): 1505 case IP_VERSION(10, 3, 4): 1506 case IP_VERSION(10, 3, 5): 1507 case IP_VERSION(10, 3, 6): 1508 case IP_VERSION(10, 3, 7): 1509 amdgpu_device_ip_block_add(adev, &nv_common_ip_block); 1510 break; 1511 case IP_VERSION(11, 0, 0): 1512 case IP_VERSION(11, 0, 1): 1513 case IP_VERSION(11, 0, 2): 1514 case IP_VERSION(11, 0, 3): 1515 amdgpu_device_ip_block_add(adev, &soc21_common_ip_block); 1516 break; 1517 default: 1518 dev_err(adev->dev, 1519 "Failed to add common ip block(GC_HWIP:0x%x)\n", 1520 adev->ip_versions[GC_HWIP][0]); 1521 return -EINVAL; 1522 } 1523 return 0; 1524 } 1525 1526 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev) 1527 { 1528 /* use GC or MMHUB IP version */ 1529 switch (adev->ip_versions[GC_HWIP][0]) { 1530 case IP_VERSION(9, 0, 1): 1531 case IP_VERSION(9, 1, 0): 1532 case IP_VERSION(9, 2, 1): 1533 case IP_VERSION(9, 2, 2): 1534 case IP_VERSION(9, 3, 0): 1535 case IP_VERSION(9, 4, 0): 1536 case IP_VERSION(9, 4, 1): 1537 case IP_VERSION(9, 4, 2): 1538 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); 1539 break; 1540 case IP_VERSION(10, 1, 10): 1541 case IP_VERSION(10, 1, 1): 1542 case IP_VERSION(10, 1, 2): 1543 case IP_VERSION(10, 1, 3): 1544 case IP_VERSION(10, 1, 4): 1545 case IP_VERSION(10, 3, 0): 1546 case IP_VERSION(10, 3, 1): 1547 case IP_VERSION(10, 3, 2): 1548 case IP_VERSION(10, 3, 3): 1549 case IP_VERSION(10, 3, 4): 1550 case IP_VERSION(10, 3, 5): 1551 case IP_VERSION(10, 3, 6): 1552 case IP_VERSION(10, 3, 7): 1553 amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); 1554 break; 1555 case IP_VERSION(11, 0, 0): 1556 case IP_VERSION(11, 0, 1): 1557 case IP_VERSION(11, 0, 2): 1558 case IP_VERSION(11, 0, 3): 1559 amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block); 1560 break; 1561 default: 1562 dev_err(adev->dev, 1563 "Failed to add gmc ip block(GC_HWIP:0x%x)\n", 1564 adev->ip_versions[GC_HWIP][0]); 1565 return -EINVAL; 1566 } 1567 return 0; 1568 } 1569 1570 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev) 1571 { 1572 switch (adev->ip_versions[OSSSYS_HWIP][0]) { 1573 case IP_VERSION(4, 0, 0): 1574 case IP_VERSION(4, 0, 1): 1575 case IP_VERSION(4, 1, 0): 1576 case IP_VERSION(4, 1, 1): 1577 case IP_VERSION(4, 3, 0): 1578 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); 1579 break; 1580 case IP_VERSION(4, 2, 0): 1581 case IP_VERSION(4, 2, 1): 1582 case IP_VERSION(4, 4, 0): 1583 amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); 1584 break; 1585 case IP_VERSION(5, 0, 0): 1586 case IP_VERSION(5, 0, 1): 1587 case IP_VERSION(5, 0, 2): 1588 case IP_VERSION(5, 0, 3): 1589 case IP_VERSION(5, 2, 0): 1590 case IP_VERSION(5, 2, 1): 1591 amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); 1592 break; 1593 case IP_VERSION(6, 0, 0): 1594 case IP_VERSION(6, 0, 1): 1595 case IP_VERSION(6, 0, 2): 1596 amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block); 1597 break; 1598 default: 1599 dev_err(adev->dev, 1600 "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n", 1601 adev->ip_versions[OSSSYS_HWIP][0]); 1602 return -EINVAL; 1603 } 1604 return 0; 1605 } 1606 1607 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) 1608 { 1609 switch (adev->ip_versions[MP0_HWIP][0]) { 1610 case IP_VERSION(9, 0, 0): 1611 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); 1612 break; 1613 case IP_VERSION(10, 0, 0): 1614 case IP_VERSION(10, 0, 1): 1615 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); 1616 break; 1617 case IP_VERSION(11, 0, 0): 1618 case IP_VERSION(11, 0, 2): 1619 case IP_VERSION(11, 0, 4): 1620 case IP_VERSION(11, 0, 5): 1621 case IP_VERSION(11, 0, 9): 1622 case IP_VERSION(11, 0, 7): 1623 case IP_VERSION(11, 0, 11): 1624 case IP_VERSION(11, 0, 12): 1625 case IP_VERSION(11, 0, 13): 1626 case IP_VERSION(11, 5, 0): 1627 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); 1628 break; 1629 case IP_VERSION(11, 0, 8): 1630 amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block); 1631 break; 1632 case IP_VERSION(11, 0, 3): 1633 case IP_VERSION(12, 0, 1): 1634 amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block); 1635 break; 1636 case IP_VERSION(13, 0, 0): 1637 case IP_VERSION(13, 0, 1): 1638 case IP_VERSION(13, 0, 2): 1639 case IP_VERSION(13, 0, 3): 1640 case IP_VERSION(13, 0, 5): 1641 case IP_VERSION(13, 0, 7): 1642 case IP_VERSION(13, 0, 8): 1643 case IP_VERSION(13, 0, 10): 1644 amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); 1645 break; 1646 case IP_VERSION(13, 0, 4): 1647 amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block); 1648 break; 1649 default: 1650 dev_err(adev->dev, 1651 "Failed to add psp ip block(MP0_HWIP:0x%x)\n", 1652 adev->ip_versions[MP0_HWIP][0]); 1653 return -EINVAL; 1654 } 1655 return 0; 1656 } 1657 1658 static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) 1659 { 1660 switch (adev->ip_versions[MP1_HWIP][0]) { 1661 case IP_VERSION(9, 0, 0): 1662 case IP_VERSION(10, 0, 0): 1663 case IP_VERSION(10, 0, 1): 1664 case IP_VERSION(11, 0, 2): 1665 if (adev->asic_type == CHIP_ARCTURUS) 1666 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); 1667 else 1668 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); 1669 break; 1670 case IP_VERSION(11, 0, 0): 1671 case IP_VERSION(11, 0, 5): 1672 case IP_VERSION(11, 0, 9): 1673 case IP_VERSION(11, 0, 7): 1674 case IP_VERSION(11, 0, 8): 1675 case IP_VERSION(11, 0, 11): 1676 case IP_VERSION(11, 0, 12): 1677 case IP_VERSION(11, 0, 13): 1678 case IP_VERSION(11, 5, 0): 1679 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); 1680 break; 1681 case IP_VERSION(12, 0, 0): 1682 case IP_VERSION(12, 0, 1): 1683 amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block); 1684 break; 1685 case IP_VERSION(13, 0, 0): 1686 case IP_VERSION(13, 0, 1): 1687 case IP_VERSION(13, 0, 2): 1688 case IP_VERSION(13, 0, 3): 1689 case IP_VERSION(13, 0, 4): 1690 case IP_VERSION(13, 0, 5): 1691 case IP_VERSION(13, 0, 7): 1692 case IP_VERSION(13, 0, 8): 1693 case IP_VERSION(13, 0, 10): 1694 amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); 1695 break; 1696 default: 1697 dev_err(adev->dev, 1698 "Failed to add smu ip block(MP1_HWIP:0x%x)\n", 1699 adev->ip_versions[MP1_HWIP][0]); 1700 return -EINVAL; 1701 } 1702 return 0; 1703 } 1704 1705 #if defined(CONFIG_DRM_AMD_DC) 1706 static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev) 1707 { 1708 amdgpu_device_set_sriov_virtual_display(adev); 1709 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); 1710 } 1711 #endif 1712 1713 static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) 1714 { 1715 if (adev->enable_virtual_display) { 1716 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); 1717 return 0; 1718 } 1719 1720 if (!amdgpu_device_has_dc_support(adev)) 1721 return 0; 1722 1723 #if defined(CONFIG_DRM_AMD_DC) 1724 if (adev->ip_versions[DCE_HWIP][0]) { 1725 switch (adev->ip_versions[DCE_HWIP][0]) { 1726 case IP_VERSION(1, 0, 0): 1727 case IP_VERSION(1, 0, 1): 1728 case IP_VERSION(2, 0, 2): 1729 case IP_VERSION(2, 0, 0): 1730 case IP_VERSION(2, 0, 3): 1731 case IP_VERSION(2, 1, 0): 1732 case IP_VERSION(3, 0, 0): 1733 case IP_VERSION(3, 0, 2): 1734 case IP_VERSION(3, 0, 3): 1735 case IP_VERSION(3, 0, 1): 1736 case IP_VERSION(3, 1, 2): 1737 case IP_VERSION(3, 1, 3): 1738 case IP_VERSION(3, 1, 4): 1739 case IP_VERSION(3, 1, 5): 1740 case IP_VERSION(3, 1, 6): 1741 case IP_VERSION(3, 2, 0): 1742 case IP_VERSION(3, 2, 1): 1743 if (amdgpu_sriov_vf(adev)) 1744 amdgpu_discovery_set_sriov_display(adev); 1745 else 1746 amdgpu_device_ip_block_add(adev, &dm_ip_block); 1747 break; 1748 default: 1749 dev_err(adev->dev, 1750 "Failed to add dm ip block(DCE_HWIP:0x%x)\n", 1751 adev->ip_versions[DCE_HWIP][0]); 1752 return -EINVAL; 1753 } 1754 } else if (adev->ip_versions[DCI_HWIP][0]) { 1755 switch (adev->ip_versions[DCI_HWIP][0]) { 1756 case IP_VERSION(12, 0, 0): 1757 case IP_VERSION(12, 0, 1): 1758 case IP_VERSION(12, 1, 0): 1759 if (amdgpu_sriov_vf(adev)) 1760 amdgpu_discovery_set_sriov_display(adev); 1761 else 1762 amdgpu_device_ip_block_add(adev, &dm_ip_block); 1763 break; 1764 default: 1765 dev_err(adev->dev, 1766 "Failed to add dm ip block(DCI_HWIP:0x%x)\n", 1767 adev->ip_versions[DCI_HWIP][0]); 1768 return -EINVAL; 1769 } 1770 } 1771 #endif 1772 return 0; 1773 } 1774 1775 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev) 1776 { 1777 switch (adev->ip_versions[GC_HWIP][0]) { 1778 case IP_VERSION(9, 0, 1): 1779 case IP_VERSION(9, 1, 0): 1780 case IP_VERSION(9, 2, 1): 1781 case IP_VERSION(9, 2, 2): 1782 case IP_VERSION(9, 3, 0): 1783 case IP_VERSION(9, 4, 0): 1784 case IP_VERSION(9, 4, 1): 1785 case IP_VERSION(9, 4, 2): 1786 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); 1787 break; 1788 case IP_VERSION(10, 1, 10): 1789 case IP_VERSION(10, 1, 2): 1790 case IP_VERSION(10, 1, 1): 1791 case IP_VERSION(10, 1, 3): 1792 case IP_VERSION(10, 1, 4): 1793 case IP_VERSION(10, 3, 0): 1794 case IP_VERSION(10, 3, 2): 1795 case IP_VERSION(10, 3, 1): 1796 case IP_VERSION(10, 3, 4): 1797 case IP_VERSION(10, 3, 5): 1798 case IP_VERSION(10, 3, 6): 1799 case IP_VERSION(10, 3, 3): 1800 case IP_VERSION(10, 3, 7): 1801 amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); 1802 break; 1803 case IP_VERSION(11, 0, 0): 1804 case IP_VERSION(11, 0, 1): 1805 case IP_VERSION(11, 0, 2): 1806 case IP_VERSION(11, 0, 3): 1807 amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block); 1808 break; 1809 default: 1810 dev_err(adev->dev, 1811 "Failed to add gfx ip block(GC_HWIP:0x%x)\n", 1812 adev->ip_versions[GC_HWIP][0]); 1813 return -EINVAL; 1814 } 1815 return 0; 1816 } 1817 1818 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) 1819 { 1820 switch (adev->ip_versions[SDMA0_HWIP][0]) { 1821 case IP_VERSION(4, 0, 0): 1822 case IP_VERSION(4, 0, 1): 1823 case IP_VERSION(4, 1, 0): 1824 case IP_VERSION(4, 1, 1): 1825 case IP_VERSION(4, 1, 2): 1826 case IP_VERSION(4, 2, 0): 1827 case IP_VERSION(4, 2, 2): 1828 case IP_VERSION(4, 4, 0): 1829 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); 1830 break; 1831 case IP_VERSION(5, 0, 0): 1832 case IP_VERSION(5, 0, 1): 1833 case IP_VERSION(5, 0, 2): 1834 case IP_VERSION(5, 0, 5): 1835 amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); 1836 break; 1837 case IP_VERSION(5, 2, 0): 1838 case IP_VERSION(5, 2, 2): 1839 case IP_VERSION(5, 2, 4): 1840 case IP_VERSION(5, 2, 5): 1841 case IP_VERSION(5, 2, 6): 1842 case IP_VERSION(5, 2, 3): 1843 case IP_VERSION(5, 2, 1): 1844 case IP_VERSION(5, 2, 7): 1845 amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); 1846 break; 1847 case IP_VERSION(6, 0, 0): 1848 case IP_VERSION(6, 0, 1): 1849 case IP_VERSION(6, 0, 2): 1850 case IP_VERSION(6, 0, 3): 1851 amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block); 1852 break; 1853 default: 1854 dev_err(adev->dev, 1855 "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n", 1856 adev->ip_versions[SDMA0_HWIP][0]); 1857 return -EINVAL; 1858 } 1859 return 0; 1860 } 1861 1862 static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) 1863 { 1864 if (adev->ip_versions[VCE_HWIP][0]) { 1865 switch (adev->ip_versions[UVD_HWIP][0]) { 1866 case IP_VERSION(7, 0, 0): 1867 case IP_VERSION(7, 2, 0): 1868 /* UVD is not supported on vega20 SR-IOV */ 1869 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) 1870 amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); 1871 break; 1872 default: 1873 dev_err(adev->dev, 1874 "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n", 1875 adev->ip_versions[UVD_HWIP][0]); 1876 return -EINVAL; 1877 } 1878 switch (adev->ip_versions[VCE_HWIP][0]) { 1879 case IP_VERSION(4, 0, 0): 1880 case IP_VERSION(4, 1, 0): 1881 /* VCE is not supported on vega20 SR-IOV */ 1882 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) 1883 amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); 1884 break; 1885 default: 1886 dev_err(adev->dev, 1887 "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n", 1888 adev->ip_versions[VCE_HWIP][0]); 1889 return -EINVAL; 1890 } 1891 } else { 1892 switch (adev->ip_versions[UVD_HWIP][0]) { 1893 case IP_VERSION(1, 0, 0): 1894 case IP_VERSION(1, 0, 1): 1895 amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block); 1896 break; 1897 case IP_VERSION(2, 0, 0): 1898 case IP_VERSION(2, 0, 2): 1899 case IP_VERSION(2, 2, 0): 1900 amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); 1901 if (!amdgpu_sriov_vf(adev)) 1902 amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); 1903 break; 1904 case IP_VERSION(2, 0, 3): 1905 break; 1906 case IP_VERSION(2, 5, 0): 1907 amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); 1908 amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block); 1909 break; 1910 case IP_VERSION(2, 6, 0): 1911 amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block); 1912 amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block); 1913 break; 1914 case IP_VERSION(3, 0, 0): 1915 case IP_VERSION(3, 0, 16): 1916 case IP_VERSION(3, 1, 1): 1917 case IP_VERSION(3, 1, 2): 1918 case IP_VERSION(3, 0, 2): 1919 case IP_VERSION(3, 0, 192): 1920 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); 1921 if (!amdgpu_sriov_vf(adev)) 1922 amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); 1923 break; 1924 case IP_VERSION(3, 0, 33): 1925 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); 1926 break; 1927 case IP_VERSION(4, 0, 0): 1928 case IP_VERSION(4, 0, 2): 1929 case IP_VERSION(4, 0, 4): 1930 amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block); 1931 if (!amdgpu_sriov_vf(adev)) 1932 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block); 1933 break; 1934 default: 1935 dev_err(adev->dev, 1936 "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n", 1937 adev->ip_versions[UVD_HWIP][0]); 1938 return -EINVAL; 1939 } 1940 } 1941 return 0; 1942 } 1943 1944 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev) 1945 { 1946 switch (adev->ip_versions[GC_HWIP][0]) { 1947 case IP_VERSION(10, 1, 10): 1948 case IP_VERSION(10, 1, 1): 1949 case IP_VERSION(10, 1, 2): 1950 case IP_VERSION(10, 1, 3): 1951 case IP_VERSION(10, 1, 4): 1952 case IP_VERSION(10, 3, 0): 1953 case IP_VERSION(10, 3, 1): 1954 case IP_VERSION(10, 3, 2): 1955 case IP_VERSION(10, 3, 3): 1956 case IP_VERSION(10, 3, 4): 1957 case IP_VERSION(10, 3, 5): 1958 case IP_VERSION(10, 3, 6): 1959 if (amdgpu_mes) { 1960 amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block); 1961 adev->enable_mes = true; 1962 if (amdgpu_mes_kiq) 1963 adev->enable_mes_kiq = true; 1964 } 1965 break; 1966 case IP_VERSION(11, 0, 0): 1967 case IP_VERSION(11, 0, 1): 1968 case IP_VERSION(11, 0, 2): 1969 case IP_VERSION(11, 0, 3): 1970 amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block); 1971 adev->enable_mes = true; 1972 adev->enable_mes_kiq = true; 1973 break; 1974 default: 1975 break; 1976 } 1977 return 0; 1978 } 1979 1980 int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) 1981 { 1982 int r; 1983 1984 switch (adev->asic_type) { 1985 case CHIP_VEGA10: 1986 vega10_reg_base_init(adev); 1987 adev->sdma.num_instances = 2; 1988 adev->gmc.num_umc = 4; 1989 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0); 1990 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0); 1991 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0); 1992 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0); 1993 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0); 1994 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0); 1995 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0); 1996 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0); 1997 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0); 1998 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0); 1999 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0); 2000 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0); 2001 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0); 2002 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1); 2003 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0); 2004 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0); 2005 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0); 2006 break; 2007 case CHIP_VEGA12: 2008 vega10_reg_base_init(adev); 2009 adev->sdma.num_instances = 2; 2010 adev->gmc.num_umc = 4; 2011 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0); 2012 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0); 2013 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1); 2014 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1); 2015 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1); 2016 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1); 2017 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0); 2018 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0); 2019 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0); 2020 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0); 2021 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0); 2022 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0); 2023 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1); 2024 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1); 2025 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0); 2026 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0); 2027 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1); 2028 break; 2029 case CHIP_RAVEN: 2030 vega10_reg_base_init(adev); 2031 adev->sdma.num_instances = 1; 2032 adev->vcn.num_vcn_inst = 1; 2033 adev->gmc.num_umc = 2; 2034 if (adev->apu_flags & AMD_APU_IS_RAVEN2) { 2035 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0); 2036 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0); 2037 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1); 2038 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1); 2039 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1); 2040 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1); 2041 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1); 2042 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0); 2043 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1); 2044 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1); 2045 adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0); 2046 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1); 2047 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2); 2048 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1); 2049 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1); 2050 } else { 2051 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0); 2052 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0); 2053 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0); 2054 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0); 2055 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0); 2056 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0); 2057 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0); 2058 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0); 2059 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0); 2060 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0); 2061 adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0); 2062 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0); 2063 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0); 2064 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0); 2065 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0); 2066 } 2067 break; 2068 case CHIP_VEGA20: 2069 vega20_reg_base_init(adev); 2070 adev->sdma.num_instances = 2; 2071 adev->gmc.num_umc = 8; 2072 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0); 2073 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0); 2074 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0); 2075 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0); 2076 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0); 2077 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0); 2078 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0); 2079 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0); 2080 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1); 2081 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2); 2082 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2); 2083 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2); 2084 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2); 2085 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0); 2086 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0); 2087 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0); 2088 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0); 2089 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0); 2090 break; 2091 case CHIP_ARCTURUS: 2092 arct_reg_base_init(adev); 2093 adev->sdma.num_instances = 8; 2094 adev->vcn.num_vcn_inst = 2; 2095 adev->gmc.num_umc = 8; 2096 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1); 2097 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1); 2098 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1); 2099 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1); 2100 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2); 2101 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2); 2102 adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2); 2103 adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2); 2104 adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2); 2105 adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2); 2106 adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2); 2107 adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2); 2108 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1); 2109 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1); 2110 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2); 2111 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4); 2112 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2); 2113 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3); 2114 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3); 2115 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1); 2116 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0); 2117 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0); 2118 break; 2119 case CHIP_ALDEBARAN: 2120 aldebaran_reg_base_init(adev); 2121 adev->sdma.num_instances = 5; 2122 adev->vcn.num_vcn_inst = 2; 2123 adev->gmc.num_umc = 4; 2124 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2); 2125 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2); 2126 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0); 2127 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0); 2128 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0); 2129 adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0); 2130 adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0); 2131 adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0); 2132 adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0); 2133 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2); 2134 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4); 2135 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0); 2136 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2); 2137 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2); 2138 adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2); 2139 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2); 2140 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2); 2141 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0); 2142 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0); 2143 adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0); 2144 break; 2145 default: 2146 r = amdgpu_discovery_reg_base_init(adev); 2147 if (r) 2148 return -EINVAL; 2149 2150 amdgpu_discovery_harvest_ip(adev); 2151 amdgpu_discovery_get_gfx_info(adev); 2152 amdgpu_discovery_get_mall_info(adev); 2153 amdgpu_discovery_get_vcn_info(adev); 2154 break; 2155 } 2156 2157 switch (adev->ip_versions[GC_HWIP][0]) { 2158 case IP_VERSION(9, 0, 1): 2159 case IP_VERSION(9, 2, 1): 2160 case IP_VERSION(9, 4, 0): 2161 case IP_VERSION(9, 4, 1): 2162 case IP_VERSION(9, 4, 2): 2163 adev->family = AMDGPU_FAMILY_AI; 2164 break; 2165 case IP_VERSION(9, 1, 0): 2166 case IP_VERSION(9, 2, 2): 2167 case IP_VERSION(9, 3, 0): 2168 adev->family = AMDGPU_FAMILY_RV; 2169 break; 2170 case IP_VERSION(10, 1, 10): 2171 case IP_VERSION(10, 1, 1): 2172 case IP_VERSION(10, 1, 2): 2173 case IP_VERSION(10, 1, 3): 2174 case IP_VERSION(10, 1, 4): 2175 case IP_VERSION(10, 3, 0): 2176 case IP_VERSION(10, 3, 2): 2177 case IP_VERSION(10, 3, 4): 2178 case IP_VERSION(10, 3, 5): 2179 adev->family = AMDGPU_FAMILY_NV; 2180 break; 2181 case IP_VERSION(10, 3, 1): 2182 adev->family = AMDGPU_FAMILY_VGH; 2183 adev->apu_flags |= AMD_APU_IS_VANGOGH; 2184 break; 2185 case IP_VERSION(10, 3, 3): 2186 adev->family = AMDGPU_FAMILY_YC; 2187 break; 2188 case IP_VERSION(10, 3, 6): 2189 adev->family = AMDGPU_FAMILY_GC_10_3_6; 2190 break; 2191 case IP_VERSION(10, 3, 7): 2192 adev->family = AMDGPU_FAMILY_GC_10_3_7; 2193 break; 2194 case IP_VERSION(11, 0, 0): 2195 case IP_VERSION(11, 0, 2): 2196 case IP_VERSION(11, 0, 3): 2197 adev->family = AMDGPU_FAMILY_GC_11_0_0; 2198 break; 2199 case IP_VERSION(11, 0, 1): 2200 adev->family = AMDGPU_FAMILY_GC_11_0_1; 2201 break; 2202 default: 2203 return -EINVAL; 2204 } 2205 2206 switch (adev->ip_versions[GC_HWIP][0]) { 2207 case IP_VERSION(9, 1, 0): 2208 case IP_VERSION(9, 2, 2): 2209 case IP_VERSION(9, 3, 0): 2210 case IP_VERSION(10, 1, 3): 2211 case IP_VERSION(10, 1, 4): 2212 case IP_VERSION(10, 3, 1): 2213 case IP_VERSION(10, 3, 3): 2214 case IP_VERSION(10, 3, 6): 2215 case IP_VERSION(10, 3, 7): 2216 case IP_VERSION(11, 0, 1): 2217 adev->flags |= AMD_IS_APU; 2218 break; 2219 default: 2220 break; 2221 } 2222 2223 if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0)) 2224 adev->gmc.xgmi.supported = true; 2225 2226 /* set NBIO version */ 2227 switch (adev->ip_versions[NBIO_HWIP][0]) { 2228 case IP_VERSION(6, 1, 0): 2229 case IP_VERSION(6, 2, 0): 2230 adev->nbio.funcs = &nbio_v6_1_funcs; 2231 adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg; 2232 break; 2233 case IP_VERSION(7, 0, 0): 2234 case IP_VERSION(7, 0, 1): 2235 case IP_VERSION(2, 5, 0): 2236 adev->nbio.funcs = &nbio_v7_0_funcs; 2237 adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg; 2238 break; 2239 case IP_VERSION(7, 4, 0): 2240 case IP_VERSION(7, 4, 1): 2241 case IP_VERSION(7, 4, 4): 2242 adev->nbio.funcs = &nbio_v7_4_funcs; 2243 adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg; 2244 break; 2245 case IP_VERSION(7, 2, 0): 2246 case IP_VERSION(7, 2, 1): 2247 case IP_VERSION(7, 3, 0): 2248 case IP_VERSION(7, 5, 0): 2249 case IP_VERSION(7, 5, 1): 2250 adev->nbio.funcs = &nbio_v7_2_funcs; 2251 adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg; 2252 break; 2253 case IP_VERSION(2, 1, 1): 2254 case IP_VERSION(2, 3, 0): 2255 case IP_VERSION(2, 3, 1): 2256 case IP_VERSION(2, 3, 2): 2257 case IP_VERSION(3, 3, 0): 2258 case IP_VERSION(3, 3, 1): 2259 case IP_VERSION(3, 3, 2): 2260 case IP_VERSION(3, 3, 3): 2261 adev->nbio.funcs = &nbio_v2_3_funcs; 2262 adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg; 2263 break; 2264 case IP_VERSION(4, 3, 0): 2265 case IP_VERSION(4, 3, 1): 2266 if (amdgpu_sriov_vf(adev)) 2267 adev->nbio.funcs = &nbio_v4_3_sriov_funcs; 2268 else 2269 adev->nbio.funcs = &nbio_v4_3_funcs; 2270 adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg; 2271 break; 2272 case IP_VERSION(7, 7, 0): 2273 adev->nbio.funcs = &nbio_v7_7_funcs; 2274 adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg; 2275 break; 2276 default: 2277 break; 2278 } 2279 2280 switch (adev->ip_versions[HDP_HWIP][0]) { 2281 case IP_VERSION(4, 0, 0): 2282 case IP_VERSION(4, 0, 1): 2283 case IP_VERSION(4, 1, 0): 2284 case IP_VERSION(4, 1, 1): 2285 case IP_VERSION(4, 1, 2): 2286 case IP_VERSION(4, 2, 0): 2287 case IP_VERSION(4, 2, 1): 2288 case IP_VERSION(4, 4, 0): 2289 adev->hdp.funcs = &hdp_v4_0_funcs; 2290 break; 2291 case IP_VERSION(5, 0, 0): 2292 case IP_VERSION(5, 0, 1): 2293 case IP_VERSION(5, 0, 2): 2294 case IP_VERSION(5, 0, 3): 2295 case IP_VERSION(5, 0, 4): 2296 case IP_VERSION(5, 2, 0): 2297 adev->hdp.funcs = &hdp_v5_0_funcs; 2298 break; 2299 case IP_VERSION(5, 2, 1): 2300 adev->hdp.funcs = &hdp_v5_2_funcs; 2301 break; 2302 case IP_VERSION(6, 0, 0): 2303 case IP_VERSION(6, 0, 1): 2304 adev->hdp.funcs = &hdp_v6_0_funcs; 2305 break; 2306 default: 2307 break; 2308 } 2309 2310 switch (adev->ip_versions[DF_HWIP][0]) { 2311 case IP_VERSION(3, 6, 0): 2312 case IP_VERSION(3, 6, 1): 2313 case IP_VERSION(3, 6, 2): 2314 adev->df.funcs = &df_v3_6_funcs; 2315 break; 2316 case IP_VERSION(2, 1, 0): 2317 case IP_VERSION(2, 1, 1): 2318 case IP_VERSION(2, 5, 0): 2319 case IP_VERSION(3, 5, 1): 2320 case IP_VERSION(3, 5, 2): 2321 adev->df.funcs = &df_v1_7_funcs; 2322 break; 2323 default: 2324 break; 2325 } 2326 2327 switch (adev->ip_versions[SMUIO_HWIP][0]) { 2328 case IP_VERSION(9, 0, 0): 2329 case IP_VERSION(9, 0, 1): 2330 case IP_VERSION(10, 0, 0): 2331 case IP_VERSION(10, 0, 1): 2332 case IP_VERSION(10, 0, 2): 2333 adev->smuio.funcs = &smuio_v9_0_funcs; 2334 break; 2335 case IP_VERSION(11, 0, 0): 2336 case IP_VERSION(11, 0, 2): 2337 case IP_VERSION(11, 0, 3): 2338 case IP_VERSION(11, 0, 4): 2339 case IP_VERSION(11, 0, 7): 2340 case IP_VERSION(11, 0, 8): 2341 adev->smuio.funcs = &smuio_v11_0_funcs; 2342 break; 2343 case IP_VERSION(11, 0, 6): 2344 case IP_VERSION(11, 0, 10): 2345 case IP_VERSION(11, 0, 11): 2346 case IP_VERSION(11, 5, 0): 2347 case IP_VERSION(13, 0, 1): 2348 case IP_VERSION(13, 0, 9): 2349 case IP_VERSION(13, 0, 10): 2350 adev->smuio.funcs = &smuio_v11_0_6_funcs; 2351 break; 2352 case IP_VERSION(13, 0, 2): 2353 adev->smuio.funcs = &smuio_v13_0_funcs; 2354 break; 2355 case IP_VERSION(13, 0, 6): 2356 case IP_VERSION(13, 0, 8): 2357 adev->smuio.funcs = &smuio_v13_0_6_funcs; 2358 break; 2359 default: 2360 break; 2361 } 2362 2363 switch (adev->ip_versions[LSDMA_HWIP][0]) { 2364 case IP_VERSION(6, 0, 0): 2365 case IP_VERSION(6, 0, 1): 2366 case IP_VERSION(6, 0, 2): 2367 case IP_VERSION(6, 0, 3): 2368 adev->lsdma.funcs = &lsdma_v6_0_funcs; 2369 break; 2370 default: 2371 break; 2372 } 2373 2374 r = amdgpu_discovery_set_common_ip_blocks(adev); 2375 if (r) 2376 return r; 2377 2378 r = amdgpu_discovery_set_gmc_ip_blocks(adev); 2379 if (r) 2380 return r; 2381 2382 /* For SR-IOV, PSP needs to be initialized before IH */ 2383 if (amdgpu_sriov_vf(adev)) { 2384 r = amdgpu_discovery_set_psp_ip_blocks(adev); 2385 if (r) 2386 return r; 2387 r = amdgpu_discovery_set_ih_ip_blocks(adev); 2388 if (r) 2389 return r; 2390 } else { 2391 r = amdgpu_discovery_set_ih_ip_blocks(adev); 2392 if (r) 2393 return r; 2394 2395 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { 2396 r = amdgpu_discovery_set_psp_ip_blocks(adev); 2397 if (r) 2398 return r; 2399 } 2400 } 2401 2402 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { 2403 r = amdgpu_discovery_set_smu_ip_blocks(adev); 2404 if (r) 2405 return r; 2406 } 2407 2408 r = amdgpu_discovery_set_display_ip_blocks(adev); 2409 if (r) 2410 return r; 2411 2412 r = amdgpu_discovery_set_gc_ip_blocks(adev); 2413 if (r) 2414 return r; 2415 2416 r = amdgpu_discovery_set_sdma_ip_blocks(adev); 2417 if (r) 2418 return r; 2419 2420 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && 2421 !amdgpu_sriov_vf(adev)) || 2422 (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) { 2423 r = amdgpu_discovery_set_smu_ip_blocks(adev); 2424 if (r) 2425 return r; 2426 } 2427 2428 r = amdgpu_discovery_set_mm_ip_blocks(adev); 2429 if (r) 2430 return r; 2431 2432 r = amdgpu_discovery_set_mes_ip_blocks(adev); 2433 if (r) 2434 return r; 2435 2436 return 0; 2437 } 2438 2439