1 /* 2 * Copyright 2018 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include <linux/firmware.h> 25 26 #include "amdgpu.h" 27 #include "amdgpu_discovery.h" 28 #include "soc15_hw_ip.h" 29 #include "discovery.h" 30 31 #include "soc15.h" 32 #include "gfx_v9_0.h" 33 #include "gmc_v9_0.h" 34 #include "df_v1_7.h" 35 #include "df_v3_6.h" 36 #include "df_v4_3.h" 37 #include "nbio_v6_1.h" 38 #include "nbio_v7_0.h" 39 #include "nbio_v7_4.h" 40 #include "hdp_v4_0.h" 41 #include "vega10_ih.h" 42 #include "vega20_ih.h" 43 #include "sdma_v4_0.h" 44 #include "uvd_v7_0.h" 45 #include "vce_v4_0.h" 46 #include "vcn_v1_0.h" 47 #include "vcn_v2_5.h" 48 #include "jpeg_v2_5.h" 49 #include "smuio_v9_0.h" 50 #include "gmc_v10_0.h" 51 #include "gmc_v11_0.h" 52 #include "gfxhub_v2_0.h" 53 #include "mmhub_v2_0.h" 54 #include "nbio_v2_3.h" 55 #include "nbio_v4_3.h" 56 #include "nbio_v7_2.h" 57 #include "nbio_v7_7.h" 58 #include "hdp_v5_0.h" 59 #include "hdp_v5_2.h" 60 #include "hdp_v6_0.h" 61 #include "nv.h" 62 #include "soc21.h" 63 #include "navi10_ih.h" 64 #include "ih_v6_0.h" 65 #include "gfx_v10_0.h" 66 #include "gfx_v11_0.h" 67 #include "sdma_v5_0.h" 68 #include "sdma_v5_2.h" 69 #include "sdma_v6_0.h" 70 #include "lsdma_v6_0.h" 71 #include "vcn_v2_0.h" 72 #include "jpeg_v2_0.h" 73 #include "vcn_v3_0.h" 74 #include "jpeg_v3_0.h" 75 #include "vcn_v4_0.h" 76 #include "jpeg_v4_0.h" 77 #include "amdgpu_vkms.h" 78 #include "mes_v10_1.h" 79 #include "mes_v11_0.h" 80 #include "smuio_v11_0.h" 81 #include "smuio_v11_0_6.h" 82 #include "smuio_v13_0.h" 83 #include "smuio_v13_0_6.h" 84 85 #define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin" 86 MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY); 87 88 #define mmRCC_CONFIG_MEMSIZE 0xde3 89 #define mmMM_INDEX 0x0 90 #define mmMM_INDEX_HI 0x6 91 #define mmMM_DATA 0x1 92 93 static const char *hw_id_names[HW_ID_MAX] = { 94 [MP1_HWID] = "MP1", 95 [MP2_HWID] = "MP2", 96 [THM_HWID] = "THM", 97 [SMUIO_HWID] = "SMUIO", 98 [FUSE_HWID] = "FUSE", 99 [CLKA_HWID] = "CLKA", 100 [PWR_HWID] = "PWR", 101 [GC_HWID] = "GC", 102 [UVD_HWID] = "UVD", 103 [AUDIO_AZ_HWID] = "AUDIO_AZ", 104 [ACP_HWID] = "ACP", 105 [DCI_HWID] = "DCI", 106 [DMU_HWID] = "DMU", 107 [DCO_HWID] = "DCO", 108 [DIO_HWID] = "DIO", 109 [XDMA_HWID] = "XDMA", 110 [DCEAZ_HWID] = "DCEAZ", 111 [DAZ_HWID] = "DAZ", 112 [SDPMUX_HWID] = "SDPMUX", 113 [NTB_HWID] = "NTB", 114 [IOHC_HWID] = "IOHC", 115 [L2IMU_HWID] = "L2IMU", 116 [VCE_HWID] = "VCE", 117 [MMHUB_HWID] = "MMHUB", 118 [ATHUB_HWID] = "ATHUB", 119 [DBGU_NBIO_HWID] = "DBGU_NBIO", 120 [DFX_HWID] = "DFX", 121 [DBGU0_HWID] = "DBGU0", 122 [DBGU1_HWID] = "DBGU1", 123 [OSSSYS_HWID] = "OSSSYS", 124 [HDP_HWID] = "HDP", 125 [SDMA0_HWID] = "SDMA0", 126 [SDMA1_HWID] = "SDMA1", 127 [SDMA2_HWID] = "SDMA2", 128 [SDMA3_HWID] = "SDMA3", 129 [LSDMA_HWID] = "LSDMA", 130 [ISP_HWID] = "ISP", 131 [DBGU_IO_HWID] = "DBGU_IO", 132 [DF_HWID] = "DF", 133 [CLKB_HWID] = "CLKB", 134 [FCH_HWID] = "FCH", 135 [DFX_DAP_HWID] = "DFX_DAP", 136 [L1IMU_PCIE_HWID] = "L1IMU_PCIE", 137 [L1IMU_NBIF_HWID] = "L1IMU_NBIF", 138 [L1IMU_IOAGR_HWID] = "L1IMU_IOAGR", 139 [L1IMU3_HWID] = "L1IMU3", 140 [L1IMU4_HWID] = "L1IMU4", 141 [L1IMU5_HWID] = "L1IMU5", 142 [L1IMU6_HWID] = "L1IMU6", 143 [L1IMU7_HWID] = "L1IMU7", 144 [L1IMU8_HWID] = "L1IMU8", 145 [L1IMU9_HWID] = "L1IMU9", 146 [L1IMU10_HWID] = "L1IMU10", 147 [L1IMU11_HWID] = "L1IMU11", 148 [L1IMU12_HWID] = "L1IMU12", 149 [L1IMU13_HWID] = "L1IMU13", 150 [L1IMU14_HWID] = "L1IMU14", 151 [L1IMU15_HWID] = "L1IMU15", 152 [WAFLC_HWID] = "WAFLC", 153 [FCH_USB_PD_HWID] = "FCH_USB_PD", 154 [PCIE_HWID] = "PCIE", 155 [PCS_HWID] = "PCS", 156 [DDCL_HWID] = "DDCL", 157 [SST_HWID] = "SST", 158 [IOAGR_HWID] = "IOAGR", 159 [NBIF_HWID] = "NBIF", 160 [IOAPIC_HWID] = "IOAPIC", 161 [SYSTEMHUB_HWID] = "SYSTEMHUB", 162 [NTBCCP_HWID] = "NTBCCP", 163 [UMC_HWID] = "UMC", 164 [SATA_HWID] = "SATA", 165 [USB_HWID] = "USB", 166 [CCXSEC_HWID] = "CCXSEC", 167 [XGMI_HWID] = "XGMI", 168 [XGBE_HWID] = "XGBE", 169 [MP0_HWID] = "MP0", 170 }; 171 172 static int hw_id_map[MAX_HWIP] = { 173 [GC_HWIP] = GC_HWID, 174 [HDP_HWIP] = HDP_HWID, 175 [SDMA0_HWIP] = SDMA0_HWID, 176 [SDMA1_HWIP] = SDMA1_HWID, 177 [SDMA2_HWIP] = SDMA2_HWID, 178 [SDMA3_HWIP] = SDMA3_HWID, 179 [LSDMA_HWIP] = LSDMA_HWID, 180 [MMHUB_HWIP] = MMHUB_HWID, 181 [ATHUB_HWIP] = ATHUB_HWID, 182 [NBIO_HWIP] = NBIF_HWID, 183 [MP0_HWIP] = MP0_HWID, 184 [MP1_HWIP] = MP1_HWID, 185 [UVD_HWIP] = UVD_HWID, 186 [VCE_HWIP] = VCE_HWID, 187 [DF_HWIP] = DF_HWID, 188 [DCE_HWIP] = DMU_HWID, 189 [OSSSYS_HWIP] = OSSSYS_HWID, 190 [SMUIO_HWIP] = SMUIO_HWID, 191 [PWR_HWIP] = PWR_HWID, 192 [NBIF_HWIP] = NBIF_HWID, 193 [THM_HWIP] = THM_HWID, 194 [CLK_HWIP] = CLKA_HWID, 195 [UMC_HWIP] = UMC_HWID, 196 [XGMI_HWIP] = XGMI_HWID, 197 [DCI_HWIP] = DCI_HWID, 198 [PCIE_HWIP] = PCIE_HWID, 199 }; 200 201 static int amdgpu_discovery_read_binary_from_vram(struct amdgpu_device *adev, uint8_t *binary) 202 { 203 uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; 204 uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET; 205 206 amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, 207 adev->mman.discovery_tmr_size, false); 208 return 0; 209 } 210 211 static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary) 212 { 213 const struct firmware *fw; 214 const char *fw_name; 215 int r; 216 217 switch (amdgpu_discovery) { 218 case 2: 219 fw_name = FIRMWARE_IP_DISCOVERY; 220 break; 221 default: 222 dev_warn(adev->dev, "amdgpu_discovery is not set properly\n"); 223 return -EINVAL; 224 } 225 226 r = request_firmware(&fw, fw_name, adev->dev); 227 if (r) { 228 dev_err(adev->dev, "can't load firmware \"%s\"\n", 229 fw_name); 230 return r; 231 } 232 233 memcpy((u8 *)binary, (u8 *)fw->data, fw->size); 234 release_firmware(fw); 235 236 return 0; 237 } 238 239 static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size) 240 { 241 uint16_t checksum = 0; 242 int i; 243 244 for (i = 0; i < size; i++) 245 checksum += data[i]; 246 247 return checksum; 248 } 249 250 static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size, 251 uint16_t expected) 252 { 253 return !!(amdgpu_discovery_calculate_checksum(data, size) == expected); 254 } 255 256 static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary) 257 { 258 struct binary_header *bhdr; 259 bhdr = (struct binary_header *)binary; 260 261 return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE); 262 } 263 264 static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev) 265 { 266 /* 267 * So far, apply this quirk only on those Navy Flounder boards which 268 * have a bad harvest table of VCN config. 269 */ 270 if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) && 271 (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) { 272 switch (adev->pdev->revision) { 273 case 0xC1: 274 case 0xC2: 275 case 0xC3: 276 case 0xC5: 277 case 0xC7: 278 case 0xCF: 279 case 0xDF: 280 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; 281 break; 282 default: 283 break; 284 } 285 } 286 } 287 288 static int amdgpu_discovery_init(struct amdgpu_device *adev) 289 { 290 struct table_info *info; 291 struct binary_header *bhdr; 292 uint16_t offset; 293 uint16_t size; 294 uint16_t checksum; 295 int r; 296 297 adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE; 298 adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL); 299 if (!adev->mman.discovery_bin) 300 return -ENOMEM; 301 302 r = amdgpu_discovery_read_binary_from_vram(adev, adev->mman.discovery_bin); 303 if (r) { 304 dev_err(adev->dev, "failed to read ip discovery binary from vram\n"); 305 r = -EINVAL; 306 goto out; 307 } 308 309 if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin) || amdgpu_discovery == 2) { 310 /* ignore the discovery binary from vram if discovery=2 in kernel module parameter */ 311 if (amdgpu_discovery == 2) 312 dev_info(adev->dev,"force read ip discovery binary from file"); 313 else 314 dev_warn(adev->dev, "get invalid ip discovery binary signature from vram\n"); 315 316 /* retry read ip discovery binary from file */ 317 r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin); 318 if (r) { 319 dev_err(adev->dev, "failed to read ip discovery binary from file\n"); 320 r = -EINVAL; 321 goto out; 322 } 323 /* check the ip discovery binary signature */ 324 if(!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) { 325 dev_warn(adev->dev, "get invalid ip discovery binary signature from file\n"); 326 r = -EINVAL; 327 goto out; 328 } 329 } 330 331 bhdr = (struct binary_header *)adev->mman.discovery_bin; 332 333 offset = offsetof(struct binary_header, binary_checksum) + 334 sizeof(bhdr->binary_checksum); 335 size = le16_to_cpu(bhdr->binary_size) - offset; 336 checksum = le16_to_cpu(bhdr->binary_checksum); 337 338 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 339 size, checksum)) { 340 dev_err(adev->dev, "invalid ip discovery binary checksum\n"); 341 r = -EINVAL; 342 goto out; 343 } 344 345 info = &bhdr->table_list[IP_DISCOVERY]; 346 offset = le16_to_cpu(info->offset); 347 checksum = le16_to_cpu(info->checksum); 348 349 if (offset) { 350 struct ip_discovery_header *ihdr = 351 (struct ip_discovery_header *)(adev->mman.discovery_bin + offset); 352 if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) { 353 dev_err(adev->dev, "invalid ip discovery data table signature\n"); 354 r = -EINVAL; 355 goto out; 356 } 357 358 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 359 le16_to_cpu(ihdr->size), checksum)) { 360 dev_err(adev->dev, "invalid ip discovery data table checksum\n"); 361 r = -EINVAL; 362 goto out; 363 } 364 } 365 366 info = &bhdr->table_list[GC]; 367 offset = le16_to_cpu(info->offset); 368 checksum = le16_to_cpu(info->checksum); 369 370 if (offset) { 371 struct gpu_info_header *ghdr = 372 (struct gpu_info_header *)(adev->mman.discovery_bin + offset); 373 374 if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) { 375 dev_err(adev->dev, "invalid ip discovery gc table id\n"); 376 r = -EINVAL; 377 goto out; 378 } 379 380 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 381 le32_to_cpu(ghdr->size), checksum)) { 382 dev_err(adev->dev, "invalid gc data table checksum\n"); 383 r = -EINVAL; 384 goto out; 385 } 386 } 387 388 info = &bhdr->table_list[HARVEST_INFO]; 389 offset = le16_to_cpu(info->offset); 390 checksum = le16_to_cpu(info->checksum); 391 392 if (offset) { 393 struct harvest_info_header *hhdr = 394 (struct harvest_info_header *)(adev->mman.discovery_bin + offset); 395 396 if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) { 397 dev_err(adev->dev, "invalid ip discovery harvest table signature\n"); 398 r = -EINVAL; 399 goto out; 400 } 401 402 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 403 sizeof(struct harvest_table), checksum)) { 404 dev_err(adev->dev, "invalid harvest data table checksum\n"); 405 r = -EINVAL; 406 goto out; 407 } 408 } 409 410 info = &bhdr->table_list[VCN_INFO]; 411 offset = le16_to_cpu(info->offset); 412 checksum = le16_to_cpu(info->checksum); 413 414 if (offset) { 415 struct vcn_info_header *vhdr = 416 (struct vcn_info_header *)(adev->mman.discovery_bin + offset); 417 418 if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) { 419 dev_err(adev->dev, "invalid ip discovery vcn table id\n"); 420 r = -EINVAL; 421 goto out; 422 } 423 424 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 425 le32_to_cpu(vhdr->size_bytes), checksum)) { 426 dev_err(adev->dev, "invalid vcn data table checksum\n"); 427 r = -EINVAL; 428 goto out; 429 } 430 } 431 432 info = &bhdr->table_list[MALL_INFO]; 433 offset = le16_to_cpu(info->offset); 434 checksum = le16_to_cpu(info->checksum); 435 436 if (0 && offset) { 437 struct mall_info_header *mhdr = 438 (struct mall_info_header *)(adev->mman.discovery_bin + offset); 439 440 if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) { 441 dev_err(adev->dev, "invalid ip discovery mall table id\n"); 442 r = -EINVAL; 443 goto out; 444 } 445 446 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, 447 le32_to_cpu(mhdr->size_bytes), checksum)) { 448 dev_err(adev->dev, "invalid mall data table checksum\n"); 449 r = -EINVAL; 450 goto out; 451 } 452 } 453 454 return 0; 455 456 out: 457 kfree(adev->mman.discovery_bin); 458 adev->mman.discovery_bin = NULL; 459 460 return r; 461 } 462 463 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev); 464 465 void amdgpu_discovery_fini(struct amdgpu_device *adev) 466 { 467 amdgpu_discovery_sysfs_fini(adev); 468 kfree(adev->mman.discovery_bin); 469 adev->mman.discovery_bin = NULL; 470 } 471 472 static int amdgpu_discovery_validate_ip(const struct ip *ip) 473 { 474 if (ip->number_instance >= HWIP_MAX_INSTANCE) { 475 DRM_ERROR("Unexpected number_instance (%d) from ip discovery blob\n", 476 ip->number_instance); 477 return -EINVAL; 478 } 479 if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) { 480 DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n", 481 le16_to_cpu(ip->hw_id)); 482 return -EINVAL; 483 } 484 485 return 0; 486 } 487 488 static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev, 489 uint32_t *vcn_harvest_count) 490 { 491 struct binary_header *bhdr; 492 struct ip_discovery_header *ihdr; 493 struct die_header *dhdr; 494 struct ip *ip; 495 uint16_t die_offset, ip_offset, num_dies, num_ips; 496 int i, j; 497 498 bhdr = (struct binary_header *)adev->mman.discovery_bin; 499 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + 500 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); 501 num_dies = le16_to_cpu(ihdr->num_dies); 502 503 /* scan harvest bit of all IP data structures */ 504 for (i = 0; i < num_dies; i++) { 505 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); 506 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); 507 num_ips = le16_to_cpu(dhdr->num_ips); 508 ip_offset = die_offset + sizeof(*dhdr); 509 510 for (j = 0; j < num_ips; j++) { 511 ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); 512 513 if (amdgpu_discovery_validate_ip(ip)) 514 goto next_ip; 515 516 if (le16_to_cpu(ip->harvest) == 1) { 517 switch (le16_to_cpu(ip->hw_id)) { 518 case VCN_HWID: 519 (*vcn_harvest_count)++; 520 if (ip->number_instance == 0) 521 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0; 522 else 523 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; 524 break; 525 case DMU_HWID: 526 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; 527 break; 528 default: 529 break; 530 } 531 } 532 next_ip: 533 ip_offset += struct_size(ip, base_address, ip->num_base_address); 534 } 535 } 536 } 537 538 static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev, 539 uint32_t *vcn_harvest_count, 540 uint32_t *umc_harvest_count) 541 { 542 struct binary_header *bhdr; 543 struct harvest_table *harvest_info; 544 u16 offset; 545 int i; 546 547 bhdr = (struct binary_header *)adev->mman.discovery_bin; 548 offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset); 549 550 if (!offset) { 551 dev_err(adev->dev, "invalid harvest table offset\n"); 552 return; 553 } 554 555 harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset); 556 557 for (i = 0; i < 32; i++) { 558 if (le16_to_cpu(harvest_info->list[i].hw_id) == 0) 559 break; 560 561 switch (le16_to_cpu(harvest_info->list[i].hw_id)) { 562 case VCN_HWID: 563 (*vcn_harvest_count)++; 564 if (harvest_info->list[i].number_instance == 0) 565 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0; 566 else 567 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; 568 break; 569 case DMU_HWID: 570 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; 571 break; 572 case UMC_HWID: 573 (*umc_harvest_count)++; 574 break; 575 default: 576 break; 577 } 578 } 579 } 580 581 /* ================================================== */ 582 583 struct ip_hw_instance { 584 struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */ 585 586 int hw_id; 587 u8 num_instance; 588 u8 major, minor, revision; 589 u8 harvest; 590 591 int num_base_addresses; 592 u32 base_addr[]; 593 }; 594 595 struct ip_hw_id { 596 struct kset hw_id_kset; /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */ 597 int hw_id; 598 }; 599 600 struct ip_die_entry { 601 struct kset ip_kset; /* ip_discovery/die/#die/, contains ip_hw_id */ 602 u16 num_ips; 603 }; 604 605 /* -------------------------------------------------- */ 606 607 struct ip_hw_instance_attr { 608 struct attribute attr; 609 ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf); 610 }; 611 612 static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf) 613 { 614 return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id); 615 } 616 617 static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf) 618 { 619 return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance); 620 } 621 622 static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf) 623 { 624 return sysfs_emit(buf, "%d\n", ip_hw_instance->major); 625 } 626 627 static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf) 628 { 629 return sysfs_emit(buf, "%d\n", ip_hw_instance->minor); 630 } 631 632 static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf) 633 { 634 return sysfs_emit(buf, "%d\n", ip_hw_instance->revision); 635 } 636 637 static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf) 638 { 639 return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest); 640 } 641 642 static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf) 643 { 644 return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses); 645 } 646 647 static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf) 648 { 649 ssize_t res, at; 650 int ii; 651 652 for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) { 653 /* Here we satisfy the condition that, at + size <= PAGE_SIZE. 654 */ 655 if (at + 12 > PAGE_SIZE) 656 break; 657 res = sysfs_emit_at(buf, at, "0x%08X\n", 658 ip_hw_instance->base_addr[ii]); 659 if (res <= 0) 660 break; 661 at += res; 662 } 663 664 return res < 0 ? res : at; 665 } 666 667 static struct ip_hw_instance_attr ip_hw_attr[] = { 668 __ATTR_RO(hw_id), 669 __ATTR_RO(num_instance), 670 __ATTR_RO(major), 671 __ATTR_RO(minor), 672 __ATTR_RO(revision), 673 __ATTR_RO(harvest), 674 __ATTR_RO(num_base_addresses), 675 __ATTR_RO(base_addr), 676 }; 677 678 static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1]; 679 ATTRIBUTE_GROUPS(ip_hw_instance); 680 681 #define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj) 682 #define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr) 683 684 static ssize_t ip_hw_instance_attr_show(struct kobject *kobj, 685 struct attribute *attr, 686 char *buf) 687 { 688 struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj); 689 struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr); 690 691 if (!ip_hw_attr->show) 692 return -EIO; 693 694 return ip_hw_attr->show(ip_hw_instance, buf); 695 } 696 697 static const struct sysfs_ops ip_hw_instance_sysfs_ops = { 698 .show = ip_hw_instance_attr_show, 699 }; 700 701 static void ip_hw_instance_release(struct kobject *kobj) 702 { 703 struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj); 704 705 kfree(ip_hw_instance); 706 } 707 708 static struct kobj_type ip_hw_instance_ktype = { 709 .release = ip_hw_instance_release, 710 .sysfs_ops = &ip_hw_instance_sysfs_ops, 711 .default_groups = ip_hw_instance_groups, 712 }; 713 714 /* -------------------------------------------------- */ 715 716 #define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset) 717 718 static void ip_hw_id_release(struct kobject *kobj) 719 { 720 struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj); 721 722 if (!list_empty(&ip_hw_id->hw_id_kset.list)) 723 DRM_ERROR("ip_hw_id->hw_id_kset is not empty"); 724 kfree(ip_hw_id); 725 } 726 727 static struct kobj_type ip_hw_id_ktype = { 728 .release = ip_hw_id_release, 729 .sysfs_ops = &kobj_sysfs_ops, 730 }; 731 732 /* -------------------------------------------------- */ 733 734 static void die_kobj_release(struct kobject *kobj); 735 static void ip_disc_release(struct kobject *kobj); 736 737 struct ip_die_entry_attribute { 738 struct attribute attr; 739 ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf); 740 }; 741 742 #define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr) 743 744 static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf) 745 { 746 return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips); 747 } 748 749 /* If there are more ip_die_entry attrs, other than the number of IPs, 750 * we can make this intro an array of attrs, and then initialize 751 * ip_die_entry_attrs in a loop. 752 */ 753 static struct ip_die_entry_attribute num_ips_attr = 754 __ATTR_RO(num_ips); 755 756 static struct attribute *ip_die_entry_attrs[] = { 757 &num_ips_attr.attr, 758 NULL, 759 }; 760 ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */ 761 762 #define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset) 763 764 static ssize_t ip_die_entry_attr_show(struct kobject *kobj, 765 struct attribute *attr, 766 char *buf) 767 { 768 struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr); 769 struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj); 770 771 if (!ip_die_entry_attr->show) 772 return -EIO; 773 774 return ip_die_entry_attr->show(ip_die_entry, buf); 775 } 776 777 static void ip_die_entry_release(struct kobject *kobj) 778 { 779 struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj); 780 781 if (!list_empty(&ip_die_entry->ip_kset.list)) 782 DRM_ERROR("ip_die_entry->ip_kset is not empty"); 783 kfree(ip_die_entry); 784 } 785 786 static const struct sysfs_ops ip_die_entry_sysfs_ops = { 787 .show = ip_die_entry_attr_show, 788 }; 789 790 static struct kobj_type ip_die_entry_ktype = { 791 .release = ip_die_entry_release, 792 .sysfs_ops = &ip_die_entry_sysfs_ops, 793 .default_groups = ip_die_entry_groups, 794 }; 795 796 static struct kobj_type die_kobj_ktype = { 797 .release = die_kobj_release, 798 .sysfs_ops = &kobj_sysfs_ops, 799 }; 800 801 static struct kobj_type ip_discovery_ktype = { 802 .release = ip_disc_release, 803 .sysfs_ops = &kobj_sysfs_ops, 804 }; 805 806 struct ip_discovery_top { 807 struct kobject kobj; /* ip_discovery/ */ 808 struct kset die_kset; /* ip_discovery/die/, contains ip_die_entry */ 809 struct amdgpu_device *adev; 810 }; 811 812 static void die_kobj_release(struct kobject *kobj) 813 { 814 struct ip_discovery_top *ip_top = container_of(to_kset(kobj), 815 struct ip_discovery_top, 816 die_kset); 817 if (!list_empty(&ip_top->die_kset.list)) 818 DRM_ERROR("ip_top->die_kset is not empty"); 819 } 820 821 static void ip_disc_release(struct kobject *kobj) 822 { 823 struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top, 824 kobj); 825 struct amdgpu_device *adev = ip_top->adev; 826 827 adev->ip_top = NULL; 828 kfree(ip_top); 829 } 830 831 static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev, 832 struct ip_die_entry *ip_die_entry, 833 const size_t _ip_offset, const int num_ips) 834 { 835 int ii, jj, kk, res; 836 837 DRM_DEBUG("num_ips:%d", num_ips); 838 839 /* Find all IPs of a given HW ID, and add their instance to 840 * #die/#hw_id/#instance/<attributes> 841 */ 842 for (ii = 0; ii < HW_ID_MAX; ii++) { 843 struct ip_hw_id *ip_hw_id = NULL; 844 size_t ip_offset = _ip_offset; 845 846 for (jj = 0; jj < num_ips; jj++) { 847 struct ip *ip; 848 struct ip_hw_instance *ip_hw_instance; 849 850 ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); 851 if (amdgpu_discovery_validate_ip(ip) || 852 le16_to_cpu(ip->hw_id) != ii) 853 goto next_ip; 854 855 DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset); 856 857 /* We have a hw_id match; register the hw 858 * block if not yet registered. 859 */ 860 if (!ip_hw_id) { 861 ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL); 862 if (!ip_hw_id) 863 return -ENOMEM; 864 ip_hw_id->hw_id = ii; 865 866 kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii); 867 ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset; 868 ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype; 869 res = kset_register(&ip_hw_id->hw_id_kset); 870 if (res) { 871 DRM_ERROR("Couldn't register ip_hw_id kset"); 872 kfree(ip_hw_id); 873 return res; 874 } 875 if (hw_id_names[ii]) { 876 res = sysfs_create_link(&ip_die_entry->ip_kset.kobj, 877 &ip_hw_id->hw_id_kset.kobj, 878 hw_id_names[ii]); 879 if (res) { 880 DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n", 881 hw_id_names[ii], 882 kobject_name(&ip_die_entry->ip_kset.kobj)); 883 } 884 } 885 } 886 887 /* Now register its instance. 888 */ 889 ip_hw_instance = kzalloc(struct_size(ip_hw_instance, 890 base_addr, 891 ip->num_base_address), 892 GFP_KERNEL); 893 if (!ip_hw_instance) { 894 DRM_ERROR("no memory for ip_hw_instance"); 895 return -ENOMEM; 896 } 897 ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */ 898 ip_hw_instance->num_instance = ip->number_instance; 899 ip_hw_instance->major = ip->major; 900 ip_hw_instance->minor = ip->minor; 901 ip_hw_instance->revision = ip->revision; 902 ip_hw_instance->harvest = ip->harvest; 903 ip_hw_instance->num_base_addresses = ip->num_base_address; 904 905 for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) 906 ip_hw_instance->base_addr[kk] = ip->base_address[kk]; 907 908 kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype); 909 ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset; 910 res = kobject_add(&ip_hw_instance->kobj, NULL, 911 "%d", ip_hw_instance->num_instance); 912 next_ip: 913 ip_offset += struct_size(ip, base_address, ip->num_base_address); 914 } 915 } 916 917 return 0; 918 } 919 920 static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev) 921 { 922 struct binary_header *bhdr; 923 struct ip_discovery_header *ihdr; 924 struct die_header *dhdr; 925 struct kset *die_kset = &adev->ip_top->die_kset; 926 u16 num_dies, die_offset, num_ips; 927 size_t ip_offset; 928 int ii, res; 929 930 bhdr = (struct binary_header *)adev->mman.discovery_bin; 931 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + 932 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); 933 num_dies = le16_to_cpu(ihdr->num_dies); 934 935 DRM_DEBUG("number of dies: %d\n", num_dies); 936 937 for (ii = 0; ii < num_dies; ii++) { 938 struct ip_die_entry *ip_die_entry; 939 940 die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset); 941 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); 942 num_ips = le16_to_cpu(dhdr->num_ips); 943 ip_offset = die_offset + sizeof(*dhdr); 944 945 /* Add the die to the kset. 946 * 947 * dhdr->die_id == ii, which was checked in 948 * amdgpu_discovery_reg_base_init(). 949 */ 950 951 ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL); 952 if (!ip_die_entry) 953 return -ENOMEM; 954 955 ip_die_entry->num_ips = num_ips; 956 957 kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id)); 958 ip_die_entry->ip_kset.kobj.kset = die_kset; 959 ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype; 960 res = kset_register(&ip_die_entry->ip_kset); 961 if (res) { 962 DRM_ERROR("Couldn't register ip_die_entry kset"); 963 kfree(ip_die_entry); 964 return res; 965 } 966 967 amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips); 968 } 969 970 return 0; 971 } 972 973 static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev) 974 { 975 struct kset *die_kset; 976 int res, ii; 977 978 adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL); 979 if (!adev->ip_top) 980 return -ENOMEM; 981 982 adev->ip_top->adev = adev; 983 984 res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype, 985 &adev->dev->kobj, "ip_discovery"); 986 if (res) { 987 DRM_ERROR("Couldn't init and add ip_discovery/"); 988 goto Err; 989 } 990 991 die_kset = &adev->ip_top->die_kset; 992 kobject_set_name(&die_kset->kobj, "%s", "die"); 993 die_kset->kobj.parent = &adev->ip_top->kobj; 994 die_kset->kobj.ktype = &die_kobj_ktype; 995 res = kset_register(&adev->ip_top->die_kset); 996 if (res) { 997 DRM_ERROR("Couldn't register die_kset"); 998 goto Err; 999 } 1000 1001 for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++) 1002 ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr; 1003 ip_hw_instance_attrs[ii] = NULL; 1004 1005 res = amdgpu_discovery_sysfs_recurse(adev); 1006 1007 return res; 1008 Err: 1009 kobject_put(&adev->ip_top->kobj); 1010 return res; 1011 } 1012 1013 /* -------------------------------------------------- */ 1014 1015 #define list_to_kobj(el) container_of(el, struct kobject, entry) 1016 1017 static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id) 1018 { 1019 struct list_head *el, *tmp; 1020 struct kset *hw_id_kset; 1021 1022 hw_id_kset = &ip_hw_id->hw_id_kset; 1023 spin_lock(&hw_id_kset->list_lock); 1024 list_for_each_prev_safe(el, tmp, &hw_id_kset->list) { 1025 list_del_init(el); 1026 spin_unlock(&hw_id_kset->list_lock); 1027 /* kobject is embedded in ip_hw_instance */ 1028 kobject_put(list_to_kobj(el)); 1029 spin_lock(&hw_id_kset->list_lock); 1030 } 1031 spin_unlock(&hw_id_kset->list_lock); 1032 kobject_put(&ip_hw_id->hw_id_kset.kobj); 1033 } 1034 1035 static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry) 1036 { 1037 struct list_head *el, *tmp; 1038 struct kset *ip_kset; 1039 1040 ip_kset = &ip_die_entry->ip_kset; 1041 spin_lock(&ip_kset->list_lock); 1042 list_for_each_prev_safe(el, tmp, &ip_kset->list) { 1043 list_del_init(el); 1044 spin_unlock(&ip_kset->list_lock); 1045 amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el))); 1046 spin_lock(&ip_kset->list_lock); 1047 } 1048 spin_unlock(&ip_kset->list_lock); 1049 kobject_put(&ip_die_entry->ip_kset.kobj); 1050 } 1051 1052 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev) 1053 { 1054 struct list_head *el, *tmp; 1055 struct kset *die_kset; 1056 1057 die_kset = &adev->ip_top->die_kset; 1058 spin_lock(&die_kset->list_lock); 1059 list_for_each_prev_safe(el, tmp, &die_kset->list) { 1060 list_del_init(el); 1061 spin_unlock(&die_kset->list_lock); 1062 amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el))); 1063 spin_lock(&die_kset->list_lock); 1064 } 1065 spin_unlock(&die_kset->list_lock); 1066 kobject_put(&adev->ip_top->die_kset.kobj); 1067 kobject_put(&adev->ip_top->kobj); 1068 } 1069 1070 /* ================================================== */ 1071 1072 static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) 1073 { 1074 struct binary_header *bhdr; 1075 struct ip_discovery_header *ihdr; 1076 struct die_header *dhdr; 1077 struct ip *ip; 1078 uint16_t die_offset; 1079 uint16_t ip_offset; 1080 uint16_t num_dies; 1081 uint16_t num_ips; 1082 uint8_t num_base_address; 1083 int hw_ip; 1084 int i, j, k; 1085 int r; 1086 1087 r = amdgpu_discovery_init(adev); 1088 if (r) { 1089 DRM_ERROR("amdgpu_discovery_init failed\n"); 1090 return r; 1091 } 1092 1093 bhdr = (struct binary_header *)adev->mman.discovery_bin; 1094 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + 1095 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); 1096 num_dies = le16_to_cpu(ihdr->num_dies); 1097 1098 DRM_DEBUG("number of dies: %d\n", num_dies); 1099 1100 for (i = 0; i < num_dies; i++) { 1101 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); 1102 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); 1103 num_ips = le16_to_cpu(dhdr->num_ips); 1104 ip_offset = die_offset + sizeof(*dhdr); 1105 1106 if (le16_to_cpu(dhdr->die_id) != i) { 1107 DRM_ERROR("invalid die id %d, expected %d\n", 1108 le16_to_cpu(dhdr->die_id), i); 1109 return -EINVAL; 1110 } 1111 1112 DRM_DEBUG("number of hardware IPs on die%d: %d\n", 1113 le16_to_cpu(dhdr->die_id), num_ips); 1114 1115 for (j = 0; j < num_ips; j++) { 1116 ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); 1117 1118 if (amdgpu_discovery_validate_ip(ip)) 1119 goto next_ip; 1120 1121 num_base_address = ip->num_base_address; 1122 1123 DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n", 1124 hw_id_names[le16_to_cpu(ip->hw_id)], 1125 le16_to_cpu(ip->hw_id), 1126 ip->number_instance, 1127 ip->major, ip->minor, 1128 ip->revision); 1129 1130 if (le16_to_cpu(ip->hw_id) == VCN_HWID) { 1131 /* Bit [5:0]: original revision value 1132 * Bit [7:6]: en/decode capability: 1133 * 0b00 : VCN function normally 1134 * 0b10 : encode is disabled 1135 * 0b01 : decode is disabled 1136 */ 1137 adev->vcn.vcn_config[adev->vcn.num_vcn_inst] = 1138 ip->revision & 0xc0; 1139 ip->revision &= ~0xc0; 1140 if (adev->vcn.num_vcn_inst < AMDGPU_MAX_VCN_INSTANCES) 1141 adev->vcn.num_vcn_inst++; 1142 else 1143 dev_err(adev->dev, "Too many VCN instances: %d vs %d\n", 1144 adev->vcn.num_vcn_inst + 1, 1145 AMDGPU_MAX_VCN_INSTANCES); 1146 } 1147 if (le16_to_cpu(ip->hw_id) == SDMA0_HWID || 1148 le16_to_cpu(ip->hw_id) == SDMA1_HWID || 1149 le16_to_cpu(ip->hw_id) == SDMA2_HWID || 1150 le16_to_cpu(ip->hw_id) == SDMA3_HWID) { 1151 if (adev->sdma.num_instances < AMDGPU_MAX_SDMA_INSTANCES) 1152 adev->sdma.num_instances++; 1153 else 1154 dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n", 1155 adev->sdma.num_instances + 1, 1156 AMDGPU_MAX_SDMA_INSTANCES); 1157 } 1158 1159 if (le16_to_cpu(ip->hw_id) == UMC_HWID) 1160 adev->gmc.num_umc++; 1161 1162 for (k = 0; k < num_base_address; k++) { 1163 /* 1164 * convert the endianness of base addresses in place, 1165 * so that we don't need to convert them when accessing adev->reg_offset. 1166 */ 1167 ip->base_address[k] = le32_to_cpu(ip->base_address[k]); 1168 DRM_DEBUG("\t0x%08x\n", ip->base_address[k]); 1169 } 1170 1171 for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) { 1172 if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) { 1173 DRM_DEBUG("set register base offset for %s\n", 1174 hw_id_names[le16_to_cpu(ip->hw_id)]); 1175 adev->reg_offset[hw_ip][ip->number_instance] = 1176 ip->base_address; 1177 /* Instance support is somewhat inconsistent. 1178 * SDMA is a good example. Sienna cichlid has 4 total 1179 * SDMA instances, each enumerated separately (HWIDs 1180 * 42, 43, 68, 69). Arcturus has 8 total SDMA instances, 1181 * but they are enumerated as multiple instances of the 1182 * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another 1183 * example. On most chips there are multiple instances 1184 * with the same HWID. 1185 */ 1186 adev->ip_versions[hw_ip][ip->number_instance] = 1187 IP_VERSION(ip->major, ip->minor, ip->revision); 1188 } 1189 } 1190 1191 next_ip: 1192 ip_offset += struct_size(ip, base_address, ip->num_base_address); 1193 } 1194 } 1195 1196 amdgpu_discovery_sysfs_init(adev); 1197 1198 return 0; 1199 } 1200 1201 int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance, 1202 int *major, int *minor, int *revision) 1203 { 1204 struct binary_header *bhdr; 1205 struct ip_discovery_header *ihdr; 1206 struct die_header *dhdr; 1207 struct ip *ip; 1208 uint16_t die_offset; 1209 uint16_t ip_offset; 1210 uint16_t num_dies; 1211 uint16_t num_ips; 1212 int i, j; 1213 1214 if (!adev->mman.discovery_bin) { 1215 DRM_ERROR("ip discovery uninitialized\n"); 1216 return -EINVAL; 1217 } 1218 1219 bhdr = (struct binary_header *)adev->mman.discovery_bin; 1220 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + 1221 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); 1222 num_dies = le16_to_cpu(ihdr->num_dies); 1223 1224 for (i = 0; i < num_dies; i++) { 1225 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); 1226 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); 1227 num_ips = le16_to_cpu(dhdr->num_ips); 1228 ip_offset = die_offset + sizeof(*dhdr); 1229 1230 for (j = 0; j < num_ips; j++) { 1231 ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); 1232 1233 if ((le16_to_cpu(ip->hw_id) == hw_id) && (ip->number_instance == number_instance)) { 1234 if (major) 1235 *major = ip->major; 1236 if (minor) 1237 *minor = ip->minor; 1238 if (revision) 1239 *revision = ip->revision; 1240 return 0; 1241 } 1242 ip_offset += struct_size(ip, base_address, ip->num_base_address); 1243 } 1244 } 1245 1246 return -EINVAL; 1247 } 1248 1249 static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) 1250 { 1251 int vcn_harvest_count = 0; 1252 int umc_harvest_count = 0; 1253 1254 /* 1255 * Harvest table does not fit Navi1x and legacy GPUs, 1256 * so read harvest bit per IP data structure to set 1257 * harvest configuration. 1258 */ 1259 if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0)) { 1260 if ((adev->pdev->device == 0x731E && 1261 (adev->pdev->revision == 0xC6 || 1262 adev->pdev->revision == 0xC7)) || 1263 (adev->pdev->device == 0x7340 && 1264 adev->pdev->revision == 0xC9) || 1265 (adev->pdev->device == 0x7360 && 1266 adev->pdev->revision == 0xC7)) 1267 amdgpu_discovery_read_harvest_bit_per_ip(adev, 1268 &vcn_harvest_count); 1269 } else { 1270 amdgpu_discovery_read_from_harvest_table(adev, 1271 &vcn_harvest_count, 1272 &umc_harvest_count); 1273 } 1274 1275 amdgpu_discovery_harvest_config_quirk(adev); 1276 1277 if (vcn_harvest_count == adev->vcn.num_vcn_inst) { 1278 adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK; 1279 adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK; 1280 } 1281 1282 if (umc_harvest_count < adev->gmc.num_umc) { 1283 adev->gmc.num_umc -= umc_harvest_count; 1284 } 1285 } 1286 1287 union gc_info { 1288 struct gc_info_v1_0 v1; 1289 struct gc_info_v1_1 v1_1; 1290 struct gc_info_v1_2 v1_2; 1291 struct gc_info_v2_0 v2; 1292 }; 1293 1294 static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) 1295 { 1296 struct binary_header *bhdr; 1297 union gc_info *gc_info; 1298 u16 offset; 1299 1300 if (!adev->mman.discovery_bin) { 1301 DRM_ERROR("ip discovery uninitialized\n"); 1302 return -EINVAL; 1303 } 1304 1305 bhdr = (struct binary_header *)adev->mman.discovery_bin; 1306 offset = le16_to_cpu(bhdr->table_list[GC].offset); 1307 1308 if (!offset) 1309 return 0; 1310 1311 gc_info = (union gc_info *)(adev->mman.discovery_bin + offset); 1312 1313 switch (le16_to_cpu(gc_info->v1.header.version_major)) { 1314 case 1: 1315 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se); 1316 adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) + 1317 le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa)); 1318 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se); 1319 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se); 1320 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c); 1321 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs); 1322 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds); 1323 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth); 1324 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth); 1325 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer); 1326 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size); 1327 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd); 1328 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu); 1329 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size); 1330 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) / 1331 le32_to_cpu(gc_info->v1.gc_num_sa_per_se); 1332 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc); 1333 if (gc_info->v1.header.version_minor >= 1) { 1334 adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa); 1335 adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface); 1336 adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps); 1337 } 1338 if (gc_info->v1.header.version_minor >= 2) { 1339 adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg); 1340 adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size); 1341 adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp); 1342 adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc); 1343 adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc); 1344 adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa); 1345 adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance); 1346 adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu); 1347 } 1348 break; 1349 case 2: 1350 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se); 1351 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh); 1352 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se); 1353 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se); 1354 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs); 1355 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs); 1356 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds); 1357 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth); 1358 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth); 1359 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer); 1360 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size); 1361 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd); 1362 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu); 1363 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size); 1364 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) / 1365 le32_to_cpu(gc_info->v2.gc_num_sh_per_se); 1366 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc); 1367 break; 1368 default: 1369 dev_err(adev->dev, 1370 "Unhandled GC info table %d.%d\n", 1371 le16_to_cpu(gc_info->v1.header.version_major), 1372 le16_to_cpu(gc_info->v1.header.version_minor)); 1373 return -EINVAL; 1374 } 1375 return 0; 1376 } 1377 1378 union mall_info { 1379 struct mall_info_v1_0 v1; 1380 }; 1381 1382 static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev) 1383 { 1384 struct binary_header *bhdr; 1385 union mall_info *mall_info; 1386 u32 u, mall_size_per_umc, m_s_present, half_use; 1387 u64 mall_size; 1388 u16 offset; 1389 1390 if (!adev->mman.discovery_bin) { 1391 DRM_ERROR("ip discovery uninitialized\n"); 1392 return -EINVAL; 1393 } 1394 1395 bhdr = (struct binary_header *)adev->mman.discovery_bin; 1396 offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset); 1397 1398 if (!offset) 1399 return 0; 1400 1401 mall_info = (union mall_info *)(adev->mman.discovery_bin + offset); 1402 1403 switch (le16_to_cpu(mall_info->v1.header.version_major)) { 1404 case 1: 1405 mall_size = 0; 1406 mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m); 1407 m_s_present = le32_to_cpu(mall_info->v1.m_s_present); 1408 half_use = le32_to_cpu(mall_info->v1.m_half_use); 1409 for (u = 0; u < adev->gmc.num_umc; u++) { 1410 if (m_s_present & (1 << u)) 1411 mall_size += mall_size_per_umc * 2; 1412 else if (half_use & (1 << u)) 1413 mall_size += mall_size_per_umc / 2; 1414 else 1415 mall_size += mall_size_per_umc; 1416 } 1417 adev->gmc.mall_size = mall_size; 1418 break; 1419 default: 1420 dev_err(adev->dev, 1421 "Unhandled MALL info table %d.%d\n", 1422 le16_to_cpu(mall_info->v1.header.version_major), 1423 le16_to_cpu(mall_info->v1.header.version_minor)); 1424 return -EINVAL; 1425 } 1426 return 0; 1427 } 1428 1429 union vcn_info { 1430 struct vcn_info_v1_0 v1; 1431 }; 1432 1433 static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev) 1434 { 1435 struct binary_header *bhdr; 1436 union vcn_info *vcn_info; 1437 u16 offset; 1438 int v; 1439 1440 if (!adev->mman.discovery_bin) { 1441 DRM_ERROR("ip discovery uninitialized\n"); 1442 return -EINVAL; 1443 } 1444 1445 /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES 1446 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES 1447 * but that may change in the future with new GPUs so keep this 1448 * check for defensive purposes. 1449 */ 1450 if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) { 1451 dev_err(adev->dev, "invalid vcn instances\n"); 1452 return -EINVAL; 1453 } 1454 1455 bhdr = (struct binary_header *)adev->mman.discovery_bin; 1456 offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset); 1457 1458 if (!offset) 1459 return 0; 1460 1461 vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset); 1462 1463 switch (le16_to_cpu(vcn_info->v1.header.version_major)) { 1464 case 1: 1465 /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES 1466 * so this won't overflow. 1467 */ 1468 for (v = 0; v < adev->vcn.num_vcn_inst; v++) { 1469 adev->vcn.vcn_codec_disable_mask[v] = 1470 le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits); 1471 } 1472 break; 1473 default: 1474 dev_err(adev->dev, 1475 "Unhandled VCN info table %d.%d\n", 1476 le16_to_cpu(vcn_info->v1.header.version_major), 1477 le16_to_cpu(vcn_info->v1.header.version_minor)); 1478 return -EINVAL; 1479 } 1480 return 0; 1481 } 1482 1483 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev) 1484 { 1485 /* what IP to use for this? */ 1486 switch (adev->ip_versions[GC_HWIP][0]) { 1487 case IP_VERSION(9, 0, 1): 1488 case IP_VERSION(9, 1, 0): 1489 case IP_VERSION(9, 2, 1): 1490 case IP_VERSION(9, 2, 2): 1491 case IP_VERSION(9, 3, 0): 1492 case IP_VERSION(9, 4, 0): 1493 case IP_VERSION(9, 4, 1): 1494 case IP_VERSION(9, 4, 2): 1495 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); 1496 break; 1497 case IP_VERSION(10, 1, 10): 1498 case IP_VERSION(10, 1, 1): 1499 case IP_VERSION(10, 1, 2): 1500 case IP_VERSION(10, 1, 3): 1501 case IP_VERSION(10, 1, 4): 1502 case IP_VERSION(10, 3, 0): 1503 case IP_VERSION(10, 3, 1): 1504 case IP_VERSION(10, 3, 2): 1505 case IP_VERSION(10, 3, 3): 1506 case IP_VERSION(10, 3, 4): 1507 case IP_VERSION(10, 3, 5): 1508 case IP_VERSION(10, 3, 6): 1509 case IP_VERSION(10, 3, 7): 1510 amdgpu_device_ip_block_add(adev, &nv_common_ip_block); 1511 break; 1512 case IP_VERSION(11, 0, 0): 1513 case IP_VERSION(11, 0, 1): 1514 case IP_VERSION(11, 0, 2): 1515 case IP_VERSION(11, 0, 3): 1516 case IP_VERSION(11, 0, 4): 1517 amdgpu_device_ip_block_add(adev, &soc21_common_ip_block); 1518 break; 1519 default: 1520 dev_err(adev->dev, 1521 "Failed to add common ip block(GC_HWIP:0x%x)\n", 1522 adev->ip_versions[GC_HWIP][0]); 1523 return -EINVAL; 1524 } 1525 return 0; 1526 } 1527 1528 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev) 1529 { 1530 /* use GC or MMHUB IP version */ 1531 switch (adev->ip_versions[GC_HWIP][0]) { 1532 case IP_VERSION(9, 0, 1): 1533 case IP_VERSION(9, 1, 0): 1534 case IP_VERSION(9, 2, 1): 1535 case IP_VERSION(9, 2, 2): 1536 case IP_VERSION(9, 3, 0): 1537 case IP_VERSION(9, 4, 0): 1538 case IP_VERSION(9, 4, 1): 1539 case IP_VERSION(9, 4, 2): 1540 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); 1541 break; 1542 case IP_VERSION(10, 1, 10): 1543 case IP_VERSION(10, 1, 1): 1544 case IP_VERSION(10, 1, 2): 1545 case IP_VERSION(10, 1, 3): 1546 case IP_VERSION(10, 1, 4): 1547 case IP_VERSION(10, 3, 0): 1548 case IP_VERSION(10, 3, 1): 1549 case IP_VERSION(10, 3, 2): 1550 case IP_VERSION(10, 3, 3): 1551 case IP_VERSION(10, 3, 4): 1552 case IP_VERSION(10, 3, 5): 1553 case IP_VERSION(10, 3, 6): 1554 case IP_VERSION(10, 3, 7): 1555 amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); 1556 break; 1557 case IP_VERSION(11, 0, 0): 1558 case IP_VERSION(11, 0, 1): 1559 case IP_VERSION(11, 0, 2): 1560 case IP_VERSION(11, 0, 3): 1561 case IP_VERSION(11, 0, 4): 1562 amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block); 1563 break; 1564 default: 1565 dev_err(adev->dev, 1566 "Failed to add gmc ip block(GC_HWIP:0x%x)\n", 1567 adev->ip_versions[GC_HWIP][0]); 1568 return -EINVAL; 1569 } 1570 return 0; 1571 } 1572 1573 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev) 1574 { 1575 switch (adev->ip_versions[OSSSYS_HWIP][0]) { 1576 case IP_VERSION(4, 0, 0): 1577 case IP_VERSION(4, 0, 1): 1578 case IP_VERSION(4, 1, 0): 1579 case IP_VERSION(4, 1, 1): 1580 case IP_VERSION(4, 3, 0): 1581 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); 1582 break; 1583 case IP_VERSION(4, 2, 0): 1584 case IP_VERSION(4, 2, 1): 1585 case IP_VERSION(4, 4, 0): 1586 amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); 1587 break; 1588 case IP_VERSION(5, 0, 0): 1589 case IP_VERSION(5, 0, 1): 1590 case IP_VERSION(5, 0, 2): 1591 case IP_VERSION(5, 0, 3): 1592 case IP_VERSION(5, 2, 0): 1593 case IP_VERSION(5, 2, 1): 1594 amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); 1595 break; 1596 case IP_VERSION(6, 0, 0): 1597 case IP_VERSION(6, 0, 1): 1598 case IP_VERSION(6, 0, 2): 1599 amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block); 1600 break; 1601 default: 1602 dev_err(adev->dev, 1603 "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n", 1604 adev->ip_versions[OSSSYS_HWIP][0]); 1605 return -EINVAL; 1606 } 1607 return 0; 1608 } 1609 1610 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) 1611 { 1612 switch (adev->ip_versions[MP0_HWIP][0]) { 1613 case IP_VERSION(9, 0, 0): 1614 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); 1615 break; 1616 case IP_VERSION(10, 0, 0): 1617 case IP_VERSION(10, 0, 1): 1618 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); 1619 break; 1620 case IP_VERSION(11, 0, 0): 1621 case IP_VERSION(11, 0, 2): 1622 case IP_VERSION(11, 0, 4): 1623 case IP_VERSION(11, 0, 5): 1624 case IP_VERSION(11, 0, 9): 1625 case IP_VERSION(11, 0, 7): 1626 case IP_VERSION(11, 0, 11): 1627 case IP_VERSION(11, 0, 12): 1628 case IP_VERSION(11, 0, 13): 1629 case IP_VERSION(11, 5, 0): 1630 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); 1631 break; 1632 case IP_VERSION(11, 0, 8): 1633 amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block); 1634 break; 1635 case IP_VERSION(11, 0, 3): 1636 case IP_VERSION(12, 0, 1): 1637 amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block); 1638 break; 1639 case IP_VERSION(13, 0, 0): 1640 case IP_VERSION(13, 0, 1): 1641 case IP_VERSION(13, 0, 2): 1642 case IP_VERSION(13, 0, 3): 1643 case IP_VERSION(13, 0, 5): 1644 case IP_VERSION(13, 0, 7): 1645 case IP_VERSION(13, 0, 8): 1646 case IP_VERSION(13, 0, 10): 1647 case IP_VERSION(13, 0, 11): 1648 amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); 1649 break; 1650 case IP_VERSION(13, 0, 4): 1651 amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block); 1652 break; 1653 default: 1654 dev_err(adev->dev, 1655 "Failed to add psp ip block(MP0_HWIP:0x%x)\n", 1656 adev->ip_versions[MP0_HWIP][0]); 1657 return -EINVAL; 1658 } 1659 return 0; 1660 } 1661 1662 static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) 1663 { 1664 switch (adev->ip_versions[MP1_HWIP][0]) { 1665 case IP_VERSION(9, 0, 0): 1666 case IP_VERSION(10, 0, 0): 1667 case IP_VERSION(10, 0, 1): 1668 case IP_VERSION(11, 0, 2): 1669 if (adev->asic_type == CHIP_ARCTURUS) 1670 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); 1671 else 1672 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); 1673 break; 1674 case IP_VERSION(11, 0, 0): 1675 case IP_VERSION(11, 0, 5): 1676 case IP_VERSION(11, 0, 9): 1677 case IP_VERSION(11, 0, 7): 1678 case IP_VERSION(11, 0, 8): 1679 case IP_VERSION(11, 0, 11): 1680 case IP_VERSION(11, 0, 12): 1681 case IP_VERSION(11, 0, 13): 1682 case IP_VERSION(11, 5, 0): 1683 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); 1684 break; 1685 case IP_VERSION(12, 0, 0): 1686 case IP_VERSION(12, 0, 1): 1687 amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block); 1688 break; 1689 case IP_VERSION(13, 0, 0): 1690 case IP_VERSION(13, 0, 1): 1691 case IP_VERSION(13, 0, 2): 1692 case IP_VERSION(13, 0, 3): 1693 case IP_VERSION(13, 0, 4): 1694 case IP_VERSION(13, 0, 5): 1695 case IP_VERSION(13, 0, 7): 1696 case IP_VERSION(13, 0, 8): 1697 case IP_VERSION(13, 0, 10): 1698 case IP_VERSION(13, 0, 11): 1699 amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); 1700 break; 1701 default: 1702 dev_err(adev->dev, 1703 "Failed to add smu ip block(MP1_HWIP:0x%x)\n", 1704 adev->ip_versions[MP1_HWIP][0]); 1705 return -EINVAL; 1706 } 1707 return 0; 1708 } 1709 1710 #if defined(CONFIG_DRM_AMD_DC) 1711 static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev) 1712 { 1713 amdgpu_device_set_sriov_virtual_display(adev); 1714 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); 1715 } 1716 #endif 1717 1718 static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) 1719 { 1720 if (adev->enable_virtual_display) { 1721 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); 1722 return 0; 1723 } 1724 1725 if (!amdgpu_device_has_dc_support(adev)) 1726 return 0; 1727 1728 #if defined(CONFIG_DRM_AMD_DC) 1729 if (adev->ip_versions[DCE_HWIP][0]) { 1730 switch (adev->ip_versions[DCE_HWIP][0]) { 1731 case IP_VERSION(1, 0, 0): 1732 case IP_VERSION(1, 0, 1): 1733 case IP_VERSION(2, 0, 2): 1734 case IP_VERSION(2, 0, 0): 1735 case IP_VERSION(2, 0, 3): 1736 case IP_VERSION(2, 1, 0): 1737 case IP_VERSION(3, 0, 0): 1738 case IP_VERSION(3, 0, 2): 1739 case IP_VERSION(3, 0, 3): 1740 case IP_VERSION(3, 0, 1): 1741 case IP_VERSION(3, 1, 2): 1742 case IP_VERSION(3, 1, 3): 1743 case IP_VERSION(3, 1, 4): 1744 case IP_VERSION(3, 1, 5): 1745 case IP_VERSION(3, 1, 6): 1746 case IP_VERSION(3, 2, 0): 1747 case IP_VERSION(3, 2, 1): 1748 if (amdgpu_sriov_vf(adev)) 1749 amdgpu_discovery_set_sriov_display(adev); 1750 else 1751 amdgpu_device_ip_block_add(adev, &dm_ip_block); 1752 break; 1753 default: 1754 dev_err(adev->dev, 1755 "Failed to add dm ip block(DCE_HWIP:0x%x)\n", 1756 adev->ip_versions[DCE_HWIP][0]); 1757 return -EINVAL; 1758 } 1759 } else if (adev->ip_versions[DCI_HWIP][0]) { 1760 switch (adev->ip_versions[DCI_HWIP][0]) { 1761 case IP_VERSION(12, 0, 0): 1762 case IP_VERSION(12, 0, 1): 1763 case IP_VERSION(12, 1, 0): 1764 if (amdgpu_sriov_vf(adev)) 1765 amdgpu_discovery_set_sriov_display(adev); 1766 else 1767 amdgpu_device_ip_block_add(adev, &dm_ip_block); 1768 break; 1769 default: 1770 dev_err(adev->dev, 1771 "Failed to add dm ip block(DCI_HWIP:0x%x)\n", 1772 adev->ip_versions[DCI_HWIP][0]); 1773 return -EINVAL; 1774 } 1775 } 1776 #endif 1777 return 0; 1778 } 1779 1780 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev) 1781 { 1782 switch (adev->ip_versions[GC_HWIP][0]) { 1783 case IP_VERSION(9, 0, 1): 1784 case IP_VERSION(9, 1, 0): 1785 case IP_VERSION(9, 2, 1): 1786 case IP_VERSION(9, 2, 2): 1787 case IP_VERSION(9, 3, 0): 1788 case IP_VERSION(9, 4, 0): 1789 case IP_VERSION(9, 4, 1): 1790 case IP_VERSION(9, 4, 2): 1791 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); 1792 break; 1793 case IP_VERSION(10, 1, 10): 1794 case IP_VERSION(10, 1, 2): 1795 case IP_VERSION(10, 1, 1): 1796 case IP_VERSION(10, 1, 3): 1797 case IP_VERSION(10, 1, 4): 1798 case IP_VERSION(10, 3, 0): 1799 case IP_VERSION(10, 3, 2): 1800 case IP_VERSION(10, 3, 1): 1801 case IP_VERSION(10, 3, 4): 1802 case IP_VERSION(10, 3, 5): 1803 case IP_VERSION(10, 3, 6): 1804 case IP_VERSION(10, 3, 3): 1805 case IP_VERSION(10, 3, 7): 1806 amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); 1807 break; 1808 case IP_VERSION(11, 0, 0): 1809 case IP_VERSION(11, 0, 1): 1810 case IP_VERSION(11, 0, 2): 1811 case IP_VERSION(11, 0, 3): 1812 case IP_VERSION(11, 0, 4): 1813 amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block); 1814 break; 1815 default: 1816 dev_err(adev->dev, 1817 "Failed to add gfx ip block(GC_HWIP:0x%x)\n", 1818 adev->ip_versions[GC_HWIP][0]); 1819 return -EINVAL; 1820 } 1821 return 0; 1822 } 1823 1824 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) 1825 { 1826 switch (adev->ip_versions[SDMA0_HWIP][0]) { 1827 case IP_VERSION(4, 0, 0): 1828 case IP_VERSION(4, 0, 1): 1829 case IP_VERSION(4, 1, 0): 1830 case IP_VERSION(4, 1, 1): 1831 case IP_VERSION(4, 1, 2): 1832 case IP_VERSION(4, 2, 0): 1833 case IP_VERSION(4, 2, 2): 1834 case IP_VERSION(4, 4, 0): 1835 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); 1836 break; 1837 case IP_VERSION(5, 0, 0): 1838 case IP_VERSION(5, 0, 1): 1839 case IP_VERSION(5, 0, 2): 1840 case IP_VERSION(5, 0, 5): 1841 amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); 1842 break; 1843 case IP_VERSION(5, 2, 0): 1844 case IP_VERSION(5, 2, 2): 1845 case IP_VERSION(5, 2, 4): 1846 case IP_VERSION(5, 2, 5): 1847 case IP_VERSION(5, 2, 6): 1848 case IP_VERSION(5, 2, 3): 1849 case IP_VERSION(5, 2, 1): 1850 case IP_VERSION(5, 2, 7): 1851 amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); 1852 break; 1853 case IP_VERSION(6, 0, 0): 1854 case IP_VERSION(6, 0, 1): 1855 case IP_VERSION(6, 0, 2): 1856 case IP_VERSION(6, 0, 3): 1857 amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block); 1858 break; 1859 default: 1860 dev_err(adev->dev, 1861 "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n", 1862 adev->ip_versions[SDMA0_HWIP][0]); 1863 return -EINVAL; 1864 } 1865 return 0; 1866 } 1867 1868 static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) 1869 { 1870 if (adev->ip_versions[VCE_HWIP][0]) { 1871 switch (adev->ip_versions[UVD_HWIP][0]) { 1872 case IP_VERSION(7, 0, 0): 1873 case IP_VERSION(7, 2, 0): 1874 /* UVD is not supported on vega20 SR-IOV */ 1875 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) 1876 amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); 1877 break; 1878 default: 1879 dev_err(adev->dev, 1880 "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n", 1881 adev->ip_versions[UVD_HWIP][0]); 1882 return -EINVAL; 1883 } 1884 switch (adev->ip_versions[VCE_HWIP][0]) { 1885 case IP_VERSION(4, 0, 0): 1886 case IP_VERSION(4, 1, 0): 1887 /* VCE is not supported on vega20 SR-IOV */ 1888 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) 1889 amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); 1890 break; 1891 default: 1892 dev_err(adev->dev, 1893 "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n", 1894 adev->ip_versions[VCE_HWIP][0]); 1895 return -EINVAL; 1896 } 1897 } else { 1898 switch (adev->ip_versions[UVD_HWIP][0]) { 1899 case IP_VERSION(1, 0, 0): 1900 case IP_VERSION(1, 0, 1): 1901 amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block); 1902 break; 1903 case IP_VERSION(2, 0, 0): 1904 case IP_VERSION(2, 0, 2): 1905 case IP_VERSION(2, 2, 0): 1906 amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); 1907 if (!amdgpu_sriov_vf(adev)) 1908 amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); 1909 break; 1910 case IP_VERSION(2, 0, 3): 1911 break; 1912 case IP_VERSION(2, 5, 0): 1913 amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); 1914 amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block); 1915 break; 1916 case IP_VERSION(2, 6, 0): 1917 amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block); 1918 amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block); 1919 break; 1920 case IP_VERSION(3, 0, 0): 1921 case IP_VERSION(3, 0, 16): 1922 case IP_VERSION(3, 1, 1): 1923 case IP_VERSION(3, 1, 2): 1924 case IP_VERSION(3, 0, 2): 1925 case IP_VERSION(3, 0, 192): 1926 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); 1927 if (!amdgpu_sriov_vf(adev)) 1928 amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); 1929 break; 1930 case IP_VERSION(3, 0, 33): 1931 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); 1932 break; 1933 case IP_VERSION(4, 0, 0): 1934 case IP_VERSION(4, 0, 2): 1935 case IP_VERSION(4, 0, 4): 1936 amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block); 1937 if (!amdgpu_sriov_vf(adev)) 1938 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block); 1939 break; 1940 default: 1941 dev_err(adev->dev, 1942 "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n", 1943 adev->ip_versions[UVD_HWIP][0]); 1944 return -EINVAL; 1945 } 1946 } 1947 return 0; 1948 } 1949 1950 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev) 1951 { 1952 switch (adev->ip_versions[GC_HWIP][0]) { 1953 case IP_VERSION(10, 1, 10): 1954 case IP_VERSION(10, 1, 1): 1955 case IP_VERSION(10, 1, 2): 1956 case IP_VERSION(10, 1, 3): 1957 case IP_VERSION(10, 1, 4): 1958 case IP_VERSION(10, 3, 0): 1959 case IP_VERSION(10, 3, 1): 1960 case IP_VERSION(10, 3, 2): 1961 case IP_VERSION(10, 3, 3): 1962 case IP_VERSION(10, 3, 4): 1963 case IP_VERSION(10, 3, 5): 1964 case IP_VERSION(10, 3, 6): 1965 if (amdgpu_mes) { 1966 amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block); 1967 adev->enable_mes = true; 1968 if (amdgpu_mes_kiq) 1969 adev->enable_mes_kiq = true; 1970 } 1971 break; 1972 case IP_VERSION(11, 0, 0): 1973 case IP_VERSION(11, 0, 1): 1974 case IP_VERSION(11, 0, 2): 1975 case IP_VERSION(11, 0, 3): 1976 case IP_VERSION(11, 0, 4): 1977 amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block); 1978 adev->enable_mes = true; 1979 adev->enable_mes_kiq = true; 1980 break; 1981 default: 1982 break; 1983 } 1984 return 0; 1985 } 1986 1987 int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) 1988 { 1989 int r; 1990 1991 switch (adev->asic_type) { 1992 case CHIP_VEGA10: 1993 vega10_reg_base_init(adev); 1994 adev->sdma.num_instances = 2; 1995 adev->gmc.num_umc = 4; 1996 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0); 1997 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0); 1998 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0); 1999 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0); 2000 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0); 2001 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0); 2002 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0); 2003 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0); 2004 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0); 2005 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0); 2006 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0); 2007 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0); 2008 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0); 2009 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1); 2010 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0); 2011 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0); 2012 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0); 2013 break; 2014 case CHIP_VEGA12: 2015 vega10_reg_base_init(adev); 2016 adev->sdma.num_instances = 2; 2017 adev->gmc.num_umc = 4; 2018 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0); 2019 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0); 2020 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1); 2021 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1); 2022 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1); 2023 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1); 2024 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0); 2025 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0); 2026 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0); 2027 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0); 2028 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0); 2029 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0); 2030 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1); 2031 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1); 2032 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0); 2033 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0); 2034 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1); 2035 break; 2036 case CHIP_RAVEN: 2037 vega10_reg_base_init(adev); 2038 adev->sdma.num_instances = 1; 2039 adev->vcn.num_vcn_inst = 1; 2040 adev->gmc.num_umc = 2; 2041 if (adev->apu_flags & AMD_APU_IS_RAVEN2) { 2042 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0); 2043 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0); 2044 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1); 2045 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1); 2046 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1); 2047 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1); 2048 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1); 2049 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0); 2050 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1); 2051 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1); 2052 adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0); 2053 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1); 2054 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2); 2055 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1); 2056 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1); 2057 } else { 2058 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0); 2059 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0); 2060 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0); 2061 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0); 2062 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0); 2063 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0); 2064 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0); 2065 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0); 2066 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0); 2067 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0); 2068 adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0); 2069 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0); 2070 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0); 2071 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0); 2072 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0); 2073 } 2074 break; 2075 case CHIP_VEGA20: 2076 vega20_reg_base_init(adev); 2077 adev->sdma.num_instances = 2; 2078 adev->gmc.num_umc = 8; 2079 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0); 2080 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0); 2081 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0); 2082 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0); 2083 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0); 2084 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0); 2085 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0); 2086 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0); 2087 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1); 2088 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2); 2089 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2); 2090 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2); 2091 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2); 2092 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0); 2093 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0); 2094 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0); 2095 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0); 2096 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0); 2097 break; 2098 case CHIP_ARCTURUS: 2099 arct_reg_base_init(adev); 2100 adev->sdma.num_instances = 8; 2101 adev->vcn.num_vcn_inst = 2; 2102 adev->gmc.num_umc = 8; 2103 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1); 2104 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1); 2105 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1); 2106 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1); 2107 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2); 2108 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2); 2109 adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2); 2110 adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2); 2111 adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2); 2112 adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2); 2113 adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2); 2114 adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2); 2115 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1); 2116 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1); 2117 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2); 2118 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4); 2119 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2); 2120 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3); 2121 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3); 2122 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1); 2123 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0); 2124 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0); 2125 break; 2126 case CHIP_ALDEBARAN: 2127 aldebaran_reg_base_init(adev); 2128 adev->sdma.num_instances = 5; 2129 adev->vcn.num_vcn_inst = 2; 2130 adev->gmc.num_umc = 4; 2131 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2); 2132 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2); 2133 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0); 2134 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0); 2135 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0); 2136 adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0); 2137 adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0); 2138 adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0); 2139 adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0); 2140 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2); 2141 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4); 2142 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0); 2143 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2); 2144 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2); 2145 adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2); 2146 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2); 2147 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2); 2148 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0); 2149 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0); 2150 adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0); 2151 break; 2152 default: 2153 r = amdgpu_discovery_reg_base_init(adev); 2154 if (r) 2155 return -EINVAL; 2156 2157 amdgpu_discovery_harvest_ip(adev); 2158 amdgpu_discovery_get_gfx_info(adev); 2159 amdgpu_discovery_get_mall_info(adev); 2160 amdgpu_discovery_get_vcn_info(adev); 2161 break; 2162 } 2163 2164 switch (adev->ip_versions[GC_HWIP][0]) { 2165 case IP_VERSION(9, 0, 1): 2166 case IP_VERSION(9, 2, 1): 2167 case IP_VERSION(9, 4, 0): 2168 case IP_VERSION(9, 4, 1): 2169 case IP_VERSION(9, 4, 2): 2170 adev->family = AMDGPU_FAMILY_AI; 2171 break; 2172 case IP_VERSION(9, 1, 0): 2173 case IP_VERSION(9, 2, 2): 2174 case IP_VERSION(9, 3, 0): 2175 adev->family = AMDGPU_FAMILY_RV; 2176 break; 2177 case IP_VERSION(10, 1, 10): 2178 case IP_VERSION(10, 1, 1): 2179 case IP_VERSION(10, 1, 2): 2180 case IP_VERSION(10, 1, 3): 2181 case IP_VERSION(10, 1, 4): 2182 case IP_VERSION(10, 3, 0): 2183 case IP_VERSION(10, 3, 2): 2184 case IP_VERSION(10, 3, 4): 2185 case IP_VERSION(10, 3, 5): 2186 adev->family = AMDGPU_FAMILY_NV; 2187 break; 2188 case IP_VERSION(10, 3, 1): 2189 adev->family = AMDGPU_FAMILY_VGH; 2190 adev->apu_flags |= AMD_APU_IS_VANGOGH; 2191 break; 2192 case IP_VERSION(10, 3, 3): 2193 adev->family = AMDGPU_FAMILY_YC; 2194 break; 2195 case IP_VERSION(10, 3, 6): 2196 adev->family = AMDGPU_FAMILY_GC_10_3_6; 2197 break; 2198 case IP_VERSION(10, 3, 7): 2199 adev->family = AMDGPU_FAMILY_GC_10_3_7; 2200 break; 2201 case IP_VERSION(11, 0, 0): 2202 case IP_VERSION(11, 0, 2): 2203 case IP_VERSION(11, 0, 3): 2204 adev->family = AMDGPU_FAMILY_GC_11_0_0; 2205 break; 2206 case IP_VERSION(11, 0, 1): 2207 case IP_VERSION(11, 0, 4): 2208 adev->family = AMDGPU_FAMILY_GC_11_0_1; 2209 break; 2210 default: 2211 return -EINVAL; 2212 } 2213 2214 switch (adev->ip_versions[GC_HWIP][0]) { 2215 case IP_VERSION(9, 1, 0): 2216 case IP_VERSION(9, 2, 2): 2217 case IP_VERSION(9, 3, 0): 2218 case IP_VERSION(10, 1, 3): 2219 case IP_VERSION(10, 1, 4): 2220 case IP_VERSION(10, 3, 1): 2221 case IP_VERSION(10, 3, 3): 2222 case IP_VERSION(10, 3, 6): 2223 case IP_VERSION(10, 3, 7): 2224 case IP_VERSION(11, 0, 1): 2225 case IP_VERSION(11, 0, 4): 2226 adev->flags |= AMD_IS_APU; 2227 break; 2228 default: 2229 break; 2230 } 2231 2232 if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0)) 2233 adev->gmc.xgmi.supported = true; 2234 2235 /* set NBIO version */ 2236 switch (adev->ip_versions[NBIO_HWIP][0]) { 2237 case IP_VERSION(6, 1, 0): 2238 case IP_VERSION(6, 2, 0): 2239 adev->nbio.funcs = &nbio_v6_1_funcs; 2240 adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg; 2241 break; 2242 case IP_VERSION(7, 0, 0): 2243 case IP_VERSION(7, 0, 1): 2244 case IP_VERSION(2, 5, 0): 2245 adev->nbio.funcs = &nbio_v7_0_funcs; 2246 adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg; 2247 break; 2248 case IP_VERSION(7, 4, 0): 2249 case IP_VERSION(7, 4, 1): 2250 case IP_VERSION(7, 4, 4): 2251 adev->nbio.funcs = &nbio_v7_4_funcs; 2252 adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg; 2253 break; 2254 case IP_VERSION(7, 2, 0): 2255 case IP_VERSION(7, 2, 1): 2256 case IP_VERSION(7, 3, 0): 2257 case IP_VERSION(7, 5, 0): 2258 case IP_VERSION(7, 5, 1): 2259 adev->nbio.funcs = &nbio_v7_2_funcs; 2260 adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg; 2261 break; 2262 case IP_VERSION(2, 1, 1): 2263 case IP_VERSION(2, 3, 0): 2264 case IP_VERSION(2, 3, 1): 2265 case IP_VERSION(2, 3, 2): 2266 case IP_VERSION(3, 3, 0): 2267 case IP_VERSION(3, 3, 1): 2268 case IP_VERSION(3, 3, 2): 2269 case IP_VERSION(3, 3, 3): 2270 adev->nbio.funcs = &nbio_v2_3_funcs; 2271 adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg; 2272 break; 2273 case IP_VERSION(4, 3, 0): 2274 case IP_VERSION(4, 3, 1): 2275 if (amdgpu_sriov_vf(adev)) 2276 adev->nbio.funcs = &nbio_v4_3_sriov_funcs; 2277 else 2278 adev->nbio.funcs = &nbio_v4_3_funcs; 2279 adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg; 2280 break; 2281 case IP_VERSION(7, 7, 0): 2282 case IP_VERSION(7, 7, 1): 2283 adev->nbio.funcs = &nbio_v7_7_funcs; 2284 adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg; 2285 break; 2286 default: 2287 break; 2288 } 2289 2290 switch (adev->ip_versions[HDP_HWIP][0]) { 2291 case IP_VERSION(4, 0, 0): 2292 case IP_VERSION(4, 0, 1): 2293 case IP_VERSION(4, 1, 0): 2294 case IP_VERSION(4, 1, 1): 2295 case IP_VERSION(4, 1, 2): 2296 case IP_VERSION(4, 2, 0): 2297 case IP_VERSION(4, 2, 1): 2298 case IP_VERSION(4, 4, 0): 2299 adev->hdp.funcs = &hdp_v4_0_funcs; 2300 break; 2301 case IP_VERSION(5, 0, 0): 2302 case IP_VERSION(5, 0, 1): 2303 case IP_VERSION(5, 0, 2): 2304 case IP_VERSION(5, 0, 3): 2305 case IP_VERSION(5, 0, 4): 2306 case IP_VERSION(5, 2, 0): 2307 adev->hdp.funcs = &hdp_v5_0_funcs; 2308 break; 2309 case IP_VERSION(5, 2, 1): 2310 adev->hdp.funcs = &hdp_v5_2_funcs; 2311 break; 2312 case IP_VERSION(6, 0, 0): 2313 case IP_VERSION(6, 0, 1): 2314 adev->hdp.funcs = &hdp_v6_0_funcs; 2315 break; 2316 default: 2317 break; 2318 } 2319 2320 switch (adev->ip_versions[DF_HWIP][0]) { 2321 case IP_VERSION(3, 6, 0): 2322 case IP_VERSION(3, 6, 1): 2323 case IP_VERSION(3, 6, 2): 2324 adev->df.funcs = &df_v3_6_funcs; 2325 break; 2326 case IP_VERSION(2, 1, 0): 2327 case IP_VERSION(2, 1, 1): 2328 case IP_VERSION(2, 5, 0): 2329 case IP_VERSION(3, 5, 1): 2330 case IP_VERSION(3, 5, 2): 2331 adev->df.funcs = &df_v1_7_funcs; 2332 break; 2333 case IP_VERSION(4, 3, 0): 2334 adev->df.funcs = &df_v4_3_funcs; 2335 break; 2336 default: 2337 break; 2338 } 2339 2340 switch (adev->ip_versions[SMUIO_HWIP][0]) { 2341 case IP_VERSION(9, 0, 0): 2342 case IP_VERSION(9, 0, 1): 2343 case IP_VERSION(10, 0, 0): 2344 case IP_VERSION(10, 0, 1): 2345 case IP_VERSION(10, 0, 2): 2346 adev->smuio.funcs = &smuio_v9_0_funcs; 2347 break; 2348 case IP_VERSION(11, 0, 0): 2349 case IP_VERSION(11, 0, 2): 2350 case IP_VERSION(11, 0, 3): 2351 case IP_VERSION(11, 0, 4): 2352 case IP_VERSION(11, 0, 7): 2353 case IP_VERSION(11, 0, 8): 2354 adev->smuio.funcs = &smuio_v11_0_funcs; 2355 break; 2356 case IP_VERSION(11, 0, 6): 2357 case IP_VERSION(11, 0, 10): 2358 case IP_VERSION(11, 0, 11): 2359 case IP_VERSION(11, 5, 0): 2360 case IP_VERSION(13, 0, 1): 2361 case IP_VERSION(13, 0, 9): 2362 case IP_VERSION(13, 0, 10): 2363 adev->smuio.funcs = &smuio_v11_0_6_funcs; 2364 break; 2365 case IP_VERSION(13, 0, 2): 2366 adev->smuio.funcs = &smuio_v13_0_funcs; 2367 break; 2368 case IP_VERSION(13, 0, 6): 2369 case IP_VERSION(13, 0, 8): 2370 adev->smuio.funcs = &smuio_v13_0_6_funcs; 2371 break; 2372 default: 2373 break; 2374 } 2375 2376 switch (adev->ip_versions[LSDMA_HWIP][0]) { 2377 case IP_VERSION(6, 0, 0): 2378 case IP_VERSION(6, 0, 1): 2379 case IP_VERSION(6, 0, 2): 2380 case IP_VERSION(6, 0, 3): 2381 adev->lsdma.funcs = &lsdma_v6_0_funcs; 2382 break; 2383 default: 2384 break; 2385 } 2386 2387 r = amdgpu_discovery_set_common_ip_blocks(adev); 2388 if (r) 2389 return r; 2390 2391 r = amdgpu_discovery_set_gmc_ip_blocks(adev); 2392 if (r) 2393 return r; 2394 2395 /* For SR-IOV, PSP needs to be initialized before IH */ 2396 if (amdgpu_sriov_vf(adev)) { 2397 r = amdgpu_discovery_set_psp_ip_blocks(adev); 2398 if (r) 2399 return r; 2400 r = amdgpu_discovery_set_ih_ip_blocks(adev); 2401 if (r) 2402 return r; 2403 } else { 2404 r = amdgpu_discovery_set_ih_ip_blocks(adev); 2405 if (r) 2406 return r; 2407 2408 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { 2409 r = amdgpu_discovery_set_psp_ip_blocks(adev); 2410 if (r) 2411 return r; 2412 } 2413 } 2414 2415 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { 2416 r = amdgpu_discovery_set_smu_ip_blocks(adev); 2417 if (r) 2418 return r; 2419 } 2420 2421 r = amdgpu_discovery_set_display_ip_blocks(adev); 2422 if (r) 2423 return r; 2424 2425 r = amdgpu_discovery_set_gc_ip_blocks(adev); 2426 if (r) 2427 return r; 2428 2429 r = amdgpu_discovery_set_sdma_ip_blocks(adev); 2430 if (r) 2431 return r; 2432 2433 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && 2434 !amdgpu_sriov_vf(adev)) || 2435 (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) { 2436 r = amdgpu_discovery_set_smu_ip_blocks(adev); 2437 if (r) 2438 return r; 2439 } 2440 2441 r = amdgpu_discovery_set_mm_ip_blocks(adev); 2442 if (r) 2443 return r; 2444 2445 r = amdgpu_discovery_set_mes_ip_blocks(adev); 2446 if (r) 2447 return r; 2448 2449 return 0; 2450 } 2451 2452