1 /*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #include <linux/firmware.h>
25
26 #include "amdgpu.h"
27 #include "amdgpu_discovery.h"
28 #include "soc15_hw_ip.h"
29 #include "discovery.h"
30
31 #include "soc15.h"
32 #include "gfx_v9_0.h"
33 #include "gfx_v9_4_3.h"
34 #include "gmc_v9_0.h"
35 #include "df_v1_7.h"
36 #include "df_v3_6.h"
37 #include "df_v4_3.h"
38 #include "nbio_v6_1.h"
39 #include "nbio_v7_0.h"
40 #include "nbio_v7_4.h"
41 #include "nbio_v7_9.h"
42 #include "hdp_v4_0.h"
43 #include "vega10_ih.h"
44 #include "vega20_ih.h"
45 #include "sdma_v4_0.h"
46 #include "sdma_v4_4_2.h"
47 #include "uvd_v7_0.h"
48 #include "vce_v4_0.h"
49 #include "vcn_v1_0.h"
50 #include "vcn_v2_5.h"
51 #include "jpeg_v2_5.h"
52 #include "smuio_v9_0.h"
53 #include "gmc_v10_0.h"
54 #include "gmc_v11_0.h"
55 #include "gfxhub_v2_0.h"
56 #include "mmhub_v2_0.h"
57 #include "nbio_v2_3.h"
58 #include "nbio_v4_3.h"
59 #include "nbio_v7_2.h"
60 #include "nbio_v7_7.h"
61 #include "hdp_v5_0.h"
62 #include "hdp_v5_2.h"
63 #include "hdp_v6_0.h"
64 #include "nv.h"
65 #include "soc21.h"
66 #include "navi10_ih.h"
67 #include "ih_v6_0.h"
68 #include "ih_v6_1.h"
69 #include "gfx_v10_0.h"
70 #include "gfx_v11_0.h"
71 #include "sdma_v5_0.h"
72 #include "sdma_v5_2.h"
73 #include "sdma_v6_0.h"
74 #include "lsdma_v6_0.h"
75 #include "vcn_v2_0.h"
76 #include "jpeg_v2_0.h"
77 #include "vcn_v3_0.h"
78 #include "jpeg_v3_0.h"
79 #include "vcn_v4_0.h"
80 #include "jpeg_v4_0.h"
81 #include "vcn_v4_0_3.h"
82 #include "jpeg_v4_0_3.h"
83 #include "amdgpu_vkms.h"
84 #include "mes_v10_1.h"
85 #include "mes_v11_0.h"
86 #include "smuio_v11_0.h"
87 #include "smuio_v11_0_6.h"
88 #include "smuio_v13_0.h"
89 #include "smuio_v13_0_3.h"
90 #include "smuio_v13_0_6.h"
91
92 #define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
93 MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
94
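/* Register offsets (in dwords) needed before the discovered register map is
 * available: RCC_CONFIG_MEMSIZE reports the VRAM size in MB,
 * MP0_SMN_C2PMSG_33 carries the IFWI init status polled below, and
 * MM_INDEX/MM_INDEX_HI/MM_DATA are the usual indirect MMIO access pair.
 */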
95 #define mmRCC_CONFIG_MEMSIZE 0xde3
96 #define mmMP0_SMN_C2PMSG_33 0x16061
97 #define mmMM_INDEX 0x0
98 #define mmMM_INDEX_HI 0x6
99 #define mmMM_DATA 0x1
100
101 static const char *hw_id_names[HW_ID_MAX] = {
102 [MP1_HWID] = "MP1",
103 [MP2_HWID] = "MP2",
104 [THM_HWID] = "THM",
105 [SMUIO_HWID] = "SMUIO",
106 [FUSE_HWID] = "FUSE",
107 [CLKA_HWID] = "CLKA",
108 [PWR_HWID] = "PWR",
109 [GC_HWID] = "GC",
110 [UVD_HWID] = "UVD",
111 [AUDIO_AZ_HWID] = "AUDIO_AZ",
112 [ACP_HWID] = "ACP",
113 [DCI_HWID] = "DCI",
114 [DMU_HWID] = "DMU",
115 [DCO_HWID] = "DCO",
116 [DIO_HWID] = "DIO",
117 [XDMA_HWID] = "XDMA",
118 [DCEAZ_HWID] = "DCEAZ",
119 [DAZ_HWID] = "DAZ",
120 [SDPMUX_HWID] = "SDPMUX",
121 [NTB_HWID] = "NTB",
122 [IOHC_HWID] = "IOHC",
123 [L2IMU_HWID] = "L2IMU",
124 [VCE_HWID] = "VCE",
125 [MMHUB_HWID] = "MMHUB",
126 [ATHUB_HWID] = "ATHUB",
127 [DBGU_NBIO_HWID] = "DBGU_NBIO",
128 [DFX_HWID] = "DFX",
129 [DBGU0_HWID] = "DBGU0",
130 [DBGU1_HWID] = "DBGU1",
131 [OSSSYS_HWID] = "OSSSYS",
132 [HDP_HWID] = "HDP",
133 [SDMA0_HWID] = "SDMA0",
134 [SDMA1_HWID] = "SDMA1",
135 [SDMA2_HWID] = "SDMA2",
136 [SDMA3_HWID] = "SDMA3",
137 [LSDMA_HWID] = "LSDMA",
138 [ISP_HWID] = "ISP",
139 [DBGU_IO_HWID] = "DBGU_IO",
140 [DF_HWID] = "DF",
141 [CLKB_HWID] = "CLKB",
142 [FCH_HWID] = "FCH",
143 [DFX_DAP_HWID] = "DFX_DAP",
144 [L1IMU_PCIE_HWID] = "L1IMU_PCIE",
145 [L1IMU_NBIF_HWID] = "L1IMU_NBIF",
146 [L1IMU_IOAGR_HWID] = "L1IMU_IOAGR",
147 [L1IMU3_HWID] = "L1IMU3",
148 [L1IMU4_HWID] = "L1IMU4",
149 [L1IMU5_HWID] = "L1IMU5",
150 [L1IMU6_HWID] = "L1IMU6",
151 [L1IMU7_HWID] = "L1IMU7",
152 [L1IMU8_HWID] = "L1IMU8",
153 [L1IMU9_HWID] = "L1IMU9",
154 [L1IMU10_HWID] = "L1IMU10",
155 [L1IMU11_HWID] = "L1IMU11",
156 [L1IMU12_HWID] = "L1IMU12",
157 [L1IMU13_HWID] = "L1IMU13",
158 [L1IMU14_HWID] = "L1IMU14",
159 [L1IMU15_HWID] = "L1IMU15",
160 [WAFLC_HWID] = "WAFLC",
161 [FCH_USB_PD_HWID] = "FCH_USB_PD",
162 [PCIE_HWID] = "PCIE",
163 [PCS_HWID] = "PCS",
164 [DDCL_HWID] = "DDCL",
165 [SST_HWID] = "SST",
166 [IOAGR_HWID] = "IOAGR",
167 [NBIF_HWID] = "NBIF",
168 [IOAPIC_HWID] = "IOAPIC",
169 [SYSTEMHUB_HWID] = "SYSTEMHUB",
170 [NTBCCP_HWID] = "NTBCCP",
171 [UMC_HWID] = "UMC",
172 [SATA_HWID] = "SATA",
173 [USB_HWID] = "USB",
174 [CCXSEC_HWID] = "CCXSEC",
175 [XGMI_HWID] = "XGMI",
176 [XGBE_HWID] = "XGBE",
177 [MP0_HWID] = "MP0",
178 };
179
180 static int hw_id_map[MAX_HWIP] = {
181 [GC_HWIP] = GC_HWID,
182 [HDP_HWIP] = HDP_HWID,
183 [SDMA0_HWIP] = SDMA0_HWID,
184 [SDMA1_HWIP] = SDMA1_HWID,
185 [SDMA2_HWIP] = SDMA2_HWID,
186 [SDMA3_HWIP] = SDMA3_HWID,
187 [LSDMA_HWIP] = LSDMA_HWID,
188 [MMHUB_HWIP] = MMHUB_HWID,
189 [ATHUB_HWIP] = ATHUB_HWID,
190 [NBIO_HWIP] = NBIF_HWID,
191 [MP0_HWIP] = MP0_HWID,
192 [MP1_HWIP] = MP1_HWID,
193 [UVD_HWIP] = UVD_HWID,
194 [VCE_HWIP] = VCE_HWID,
195 [DF_HWIP] = DF_HWID,
196 [DCE_HWIP] = DMU_HWID,
197 [OSSSYS_HWIP] = OSSSYS_HWID,
198 [SMUIO_HWIP] = SMUIO_HWID,
199 [PWR_HWIP] = PWR_HWID,
200 [NBIF_HWIP] = NBIF_HWID,
201 [THM_HWIP] = THM_HWID,
202 [CLK_HWIP] = CLKA_HWID,
203 [UMC_HWIP] = UMC_HWID,
204 [XGMI_HWIP] = XGMI_HWID,
205 [DCI_HWIP] = DCI_HWID,
206 [PCIE_HWIP] = PCIE_HWID,
207 };
208
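/* Fallback path: read the discovery binary out of the reserved TMR region in
 * system memory. The TMR location and size come from ACPI; the binary sits
 * DISCOVERY_TMR_OFFSET bytes before the end of that region.
 */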
static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
210 {
211 u64 tmr_offset, tmr_size, pos;
212 void *discv_regn;
213 int ret;
214
215 ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
216 if (ret)
217 return ret;
218
219 pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
220
221 /* This region is read-only and reserved from system use */
222 discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
223 if (discv_regn) {
224 memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
225 memunmap(discv_regn);
226 return 0;
227 }
228
229 return -ENOENT;
230 }
231
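/* Read the discovery binary from the top of VRAM (DISCOVERY_TMR_OFFSET bytes
 * below the end), optionally waiting for IFWI init on removable devices.
 * Falls back to the system memory copy when the reported VRAM size is 0.
 */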
static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
						 uint8_t *binary)
234 {
235 uint64_t vram_size;
236 u32 msg;
237 int i, ret = 0;
238
	/* It can take up to a second for IFWI init to complete on some dGPUs,
	 * but generally it should be in the 60-100ms range. Normally this starts
	 * as soon as the device gets power, so by the time the OS loads it has
	 * long since completed. However, when a card is hotplugged (e.g. via
	 * USB4), we need to wait for the init to complete. Once the C2PMSG is
	 * updated, we can continue.
	 */
246 if (dev_is_removable(&adev->pdev->dev)) {
247 for (i = 0; i < 1000; i++) {
248 msg = RREG32(mmMP0_SMN_C2PMSG_33);
249 if (msg & 0x80000000)
250 break;
251 msleep(1);
252 }
253 }
254 vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
255
256 if (vram_size) {
257 uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
258 amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
259 adev->mman.discovery_tmr_size, false);
260 } else {
261 ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
262 }
263
264 return ret;
265 }
266
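/* Load the discovery binary from the amdgpu/ip_discovery.bin firmware file
 * instead of the device; only used when amdgpu_discovery=2 is requested.
 */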
static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
268 {
269 const struct firmware *fw;
270 const char *fw_name;
271 int r;
272
273 switch (amdgpu_discovery) {
274 case 2:
275 fw_name = FIRMWARE_IP_DISCOVERY;
276 break;
277 default:
278 dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
279 return -EINVAL;
280 }
281
282 r = request_firmware(&fw, fw_name, adev->dev);
283 if (r) {
284 dev_err(adev->dev, "can't load firmware \"%s\"\n",
285 fw_name);
286 return r;
287 }
288
289 memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
290 release_firmware(fw);
291
292 return 0;
293 }
294
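/* The discovery tables use a simple 16-bit byte-sum checksum. Illustrative
 * example (not from the binary): the bytes { 0x01, 0x02, 0xFF } sum to 0x0102.
 */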
static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
296 {
297 uint16_t checksum = 0;
298 int i;
299
300 for (i = 0; i < size; i++)
301 checksum += data[i];
302
303 return checksum;
304 }
305
static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
						    uint16_t expected)
308 {
309 return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
310 }
311
static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
313 {
314 struct binary_header *bhdr;
315 bhdr = (struct binary_header *)binary;
316
317 return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
318 }
319
static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
321 {
322 /*
323 * So far, apply this quirk only on those Navy Flounder boards which
324 * have a bad harvest table of VCN config.
325 */
326 if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
327 (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) {
328 switch (adev->pdev->revision) {
329 case 0xC1:
330 case 0xC2:
331 case 0xC3:
332 case 0xC5:
333 case 0xC7:
334 case 0xCF:
335 case 0xDF:
336 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
337 adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
338 break;
339 default:
340 break;
341 }
342 }
343 }
344
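/* Copy the discovery binary into adev->mman.discovery_bin (from file or from
 * the device) and sanity check it: verify the overall binary signature and
 * checksum, then the per-table signatures/checksums for the IP discovery, GC,
 * harvest and VCN tables that are present.
 */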
static int amdgpu_discovery_init(struct amdgpu_device *adev)
346 {
347 struct table_info *info;
348 struct binary_header *bhdr;
349 uint16_t offset;
350 uint16_t size;
351 uint16_t checksum;
352 int r;
353
354 adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
355 adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
356 if (!adev->mman.discovery_bin)
357 return -ENOMEM;
358
359 /* Read from file if it is the preferred option */
360 if (amdgpu_discovery == 2) {
361 dev_info(adev->dev, "use ip discovery information from file");
362 r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
363
364 if (r) {
365 dev_err(adev->dev, "failed to read ip discovery binary from file\n");
366 r = -EINVAL;
367 goto out;
368 }
369
370 } else {
371 r = amdgpu_discovery_read_binary_from_mem(
372 adev, adev->mman.discovery_bin);
373 if (r)
374 goto out;
375 }
376
377 /* check the ip discovery binary signature */
378 if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
379 dev_err(adev->dev,
380 "get invalid ip discovery binary signature\n");
381 r = -EINVAL;
382 goto out;
383 }
384
385 bhdr = (struct binary_header *)adev->mman.discovery_bin;
386
387 offset = offsetof(struct binary_header, binary_checksum) +
388 sizeof(bhdr->binary_checksum);
389 size = le16_to_cpu(bhdr->binary_size) - offset;
390 checksum = le16_to_cpu(bhdr->binary_checksum);
391
392 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
393 size, checksum)) {
394 dev_err(adev->dev, "invalid ip discovery binary checksum\n");
395 r = -EINVAL;
396 goto out;
397 }
398
399 info = &bhdr->table_list[IP_DISCOVERY];
400 offset = le16_to_cpu(info->offset);
401 checksum = le16_to_cpu(info->checksum);
402
403 if (offset) {
404 struct ip_discovery_header *ihdr =
405 (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
406 if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
407 dev_err(adev->dev, "invalid ip discovery data table signature\n");
408 r = -EINVAL;
409 goto out;
410 }
411
412 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
413 le16_to_cpu(ihdr->size), checksum)) {
414 dev_err(adev->dev, "invalid ip discovery data table checksum\n");
415 r = -EINVAL;
416 goto out;
417 }
418 }
419
420 info = &bhdr->table_list[GC];
421 offset = le16_to_cpu(info->offset);
422 checksum = le16_to_cpu(info->checksum);
423
424 if (offset) {
425 struct gpu_info_header *ghdr =
426 (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
427
428 if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
429 dev_err(adev->dev, "invalid ip discovery gc table id\n");
430 r = -EINVAL;
431 goto out;
432 }
433
434 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
435 le32_to_cpu(ghdr->size), checksum)) {
436 dev_err(adev->dev, "invalid gc data table checksum\n");
437 r = -EINVAL;
438 goto out;
439 }
440 }
441
442 info = &bhdr->table_list[HARVEST_INFO];
443 offset = le16_to_cpu(info->offset);
444 checksum = le16_to_cpu(info->checksum);
445
446 if (offset) {
447 struct harvest_info_header *hhdr =
448 (struct harvest_info_header *)(adev->mman.discovery_bin + offset);
449
450 if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
451 dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
452 r = -EINVAL;
453 goto out;
454 }
455
456 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
457 sizeof(struct harvest_table), checksum)) {
458 dev_err(adev->dev, "invalid harvest data table checksum\n");
459 r = -EINVAL;
460 goto out;
461 }
462 }
463
464 info = &bhdr->table_list[VCN_INFO];
465 offset = le16_to_cpu(info->offset);
466 checksum = le16_to_cpu(info->checksum);
467
468 if (offset) {
469 struct vcn_info_header *vhdr =
470 (struct vcn_info_header *)(adev->mman.discovery_bin + offset);
471
472 if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
473 dev_err(adev->dev, "invalid ip discovery vcn table id\n");
474 r = -EINVAL;
475 goto out;
476 }
477
478 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
479 le32_to_cpu(vhdr->size_bytes), checksum)) {
480 dev_err(adev->dev, "invalid vcn data table checksum\n");
481 r = -EINVAL;
482 goto out;
483 }
484 }
485
486 info = &bhdr->table_list[MALL_INFO];
487 offset = le16_to_cpu(info->offset);
488 checksum = le16_to_cpu(info->checksum);
489
490 if (0 && offset) {
491 struct mall_info_header *mhdr =
492 (struct mall_info_header *)(adev->mman.discovery_bin + offset);
493
494 if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
495 dev_err(adev->dev, "invalid ip discovery mall table id\n");
496 r = -EINVAL;
497 goto out;
498 }
499
500 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
501 le32_to_cpu(mhdr->size_bytes), checksum)) {
502 dev_err(adev->dev, "invalid mall data table checksum\n");
503 r = -EINVAL;
504 goto out;
505 }
506 }
507
508 return 0;
509
510 out:
511 kfree(adev->mman.discovery_bin);
512 adev->mman.discovery_bin = NULL;
513
514 return r;
515 }
516
517 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
518
void amdgpu_discovery_fini(struct amdgpu_device *adev)
520 {
521 amdgpu_discovery_sysfs_fini(adev);
522 kfree(adev->mman.discovery_bin);
523 adev->mman.discovery_bin = NULL;
524 }
525
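/* Basic bounds check on an IP entry before its hw_id/instance_number are used
 * to index driver arrays.
 */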
static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
527 {
528 if (ip->instance_number >= HWIP_MAX_INSTANCE) {
529 DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
530 ip->instance_number);
531 return -EINVAL;
532 }
533 if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
534 DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
535 le16_to_cpu(ip->hw_id));
536 return -EINVAL;
537 }
538
539 return 0;
540 }
541
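/* Legacy harvesting path for ASICs without a harvest table: walk every IP
 * instance in the discovery data and treat a variant value of 1 as "instance
 * harvested", updating the VCN/JPEG instance masks and the DMU harvest bit.
 */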
static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count)
544 {
545 struct binary_header *bhdr;
546 struct ip_discovery_header *ihdr;
547 struct die_header *dhdr;
548 struct ip_v4 *ip;
549 uint16_t die_offset, ip_offset, num_dies, num_ips;
550 int i, j;
551
552 bhdr = (struct binary_header *)adev->mman.discovery_bin;
553 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
554 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
555 num_dies = le16_to_cpu(ihdr->num_dies);
556
557 /* scan harvest bit of all IP data structures */
558 for (i = 0; i < num_dies; i++) {
559 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
560 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
561 num_ips = le16_to_cpu(dhdr->num_ips);
562 ip_offset = die_offset + sizeof(*dhdr);
563
564 for (j = 0; j < num_ips; j++) {
565 ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
566
567 if (amdgpu_discovery_validate_ip(ip))
568 goto next_ip;
569
570 if (le16_to_cpu(ip->variant) == 1) {
571 switch (le16_to_cpu(ip->hw_id)) {
572 case VCN_HWID:
573 (*vcn_harvest_count)++;
574 if (ip->instance_number == 0) {
575 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
576 adev->vcn.inst_mask &=
577 ~AMDGPU_VCN_HARVEST_VCN0;
578 adev->jpeg.inst_mask &=
579 ~AMDGPU_VCN_HARVEST_VCN0;
580 } else {
581 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
582 adev->vcn.inst_mask &=
583 ~AMDGPU_VCN_HARVEST_VCN1;
584 adev->jpeg.inst_mask &=
585 ~AMDGPU_VCN_HARVEST_VCN1;
586 }
587 break;
588 case DMU_HWID:
589 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
590 break;
591 default:
592 break;
593 }
594 }
595 next_ip:
596 if (ihdr->base_addr_64_bit)
597 ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
598 else
599 ip_offset += struct_size(ip, base_address, ip->num_base_address);
600 }
601 }
602 }
603
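/* Preferred harvesting path: consume the dedicated harvest table and clear
 * the corresponding VCN/JPEG/GC/SDMA instance masks and UMC channels.
 */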
static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count,
						     uint32_t *umc_harvest_count)
607 {
608 struct binary_header *bhdr;
609 struct harvest_table *harvest_info;
610 u16 offset;
611 int i;
612 uint32_t umc_harvest_config = 0;
613
614 bhdr = (struct binary_header *)adev->mman.discovery_bin;
615 offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
616
617 if (!offset) {
618 dev_err(adev->dev, "invalid harvest table offset\n");
619 return;
620 }
621
622 harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
623
624 for (i = 0; i < 32; i++) {
625 if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
626 break;
627
628 switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
629 case VCN_HWID:
630 (*vcn_harvest_count)++;
631 adev->vcn.harvest_config |=
632 (1 << harvest_info->list[i].number_instance);
633 adev->jpeg.harvest_config |=
634 (1 << harvest_info->list[i].number_instance);
635
636 adev->vcn.inst_mask &=
637 ~(1U << harvest_info->list[i].number_instance);
638 adev->jpeg.inst_mask &=
639 ~(1U << harvest_info->list[i].number_instance);
640 break;
641 case DMU_HWID:
642 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
643 break;
644 case UMC_HWID:
645 umc_harvest_config |=
646 1 << (le16_to_cpu(harvest_info->list[i].number_instance));
647 (*umc_harvest_count)++;
648 break;
649 case GC_HWID:
650 adev->gfx.xcc_mask &=
651 ~(1U << harvest_info->list[i].number_instance);
652 break;
653 case SDMA0_HWID:
654 adev->sdma.sdma_mask &=
655 ~(1U << harvest_info->list[i].number_instance);
656 break;
657 default:
658 break;
659 }
660 }
661
662 adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
663 ~umc_harvest_config;
664 }
665
666 /* ================================================== */
667
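/* sysfs representation of the IP discovery data. The hierarchy mirrors the
 * binary: ip_discovery/die/<die>/<hw_id>/<instance>/<attribute>. For example
 * (path relative to the device's sysfs directory; the exact location depends
 * on the bus, and named hw_id directories are symlinks to the numeric ksets):
 *
 *   ip_discovery/die/0/GC/0/major
 *
 * would report the GC major version of instance 0 on die 0.
 */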
668 struct ip_hw_instance {
669 struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */
670
671 int hw_id;
672 u8 num_instance;
673 u8 major, minor, revision;
674 u8 harvest;
675
676 int num_base_addresses;
677 u32 base_addr[];
678 };
679
680 struct ip_hw_id {
681 struct kset hw_id_kset; /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
682 int hw_id;
683 };
684
685 struct ip_die_entry {
686 struct kset ip_kset; /* ip_discovery/die/#die/, contains ip_hw_id */
687 u16 num_ips;
688 };
689
690 /* -------------------------------------------------- */
691
692 struct ip_hw_instance_attr {
693 struct attribute attr;
694 ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
695 };
696
static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
698 {
699 return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
700 }
701
static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
703 {
704 return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
705 }
706
static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
708 {
709 return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
710 }
711
static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
713 {
714 return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
715 }
716
static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
718 {
719 return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
720 }
721
static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
723 {
724 return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
725 }
726
static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
728 {
729 return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
730 }
731
static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
733 {
734 ssize_t res, at;
735 int ii;
736
737 for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
		/* Here we satisfy the condition that at + size <= PAGE_SIZE. */
740 if (at + 12 > PAGE_SIZE)
741 break;
742 res = sysfs_emit_at(buf, at, "0x%08X\n",
743 ip_hw_instance->base_addr[ii]);
744 if (res <= 0)
745 break;
746 at += res;
747 }
748
749 return res < 0 ? res : at;
750 }
751
752 static struct ip_hw_instance_attr ip_hw_attr[] = {
753 __ATTR_RO(hw_id),
754 __ATTR_RO(num_instance),
755 __ATTR_RO(major),
756 __ATTR_RO(minor),
757 __ATTR_RO(revision),
758 __ATTR_RO(harvest),
759 __ATTR_RO(num_base_addresses),
760 __ATTR_RO(base_addr),
761 };
762
763 static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
764 ATTRIBUTE_GROUPS(ip_hw_instance);
765
766 #define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
767 #define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
768
static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
					struct attribute *attr,
					char *buf)
772 {
773 struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
774 struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
775
776 if (!ip_hw_attr->show)
777 return -EIO;
778
779 return ip_hw_attr->show(ip_hw_instance, buf);
780 }
781
782 static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
783 .show = ip_hw_instance_attr_show,
784 };
785
static void ip_hw_instance_release(struct kobject *kobj)
787 {
788 struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
789
790 kfree(ip_hw_instance);
791 }
792
793 static const struct kobj_type ip_hw_instance_ktype = {
794 .release = ip_hw_instance_release,
795 .sysfs_ops = &ip_hw_instance_sysfs_ops,
796 .default_groups = ip_hw_instance_groups,
797 };
798
799 /* -------------------------------------------------- */
800
801 #define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset)
802
static void ip_hw_id_release(struct kobject *kobj)
804 {
805 struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);
806
807 if (!list_empty(&ip_hw_id->hw_id_kset.list))
808 DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
809 kfree(ip_hw_id);
810 }
811
812 static const struct kobj_type ip_hw_id_ktype = {
813 .release = ip_hw_id_release,
814 .sysfs_ops = &kobj_sysfs_ops,
815 };
816
817 /* -------------------------------------------------- */
818
819 static void die_kobj_release(struct kobject *kobj);
820 static void ip_disc_release(struct kobject *kobj);
821
822 struct ip_die_entry_attribute {
823 struct attribute attr;
824 ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
825 };
826
827 #define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr)
828
static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
830 {
831 return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
832 }
833
/* If there are more ip_die_entry attrs, other than the number of IPs,
 * we can make this into an array of attrs, and then initialize
 * ip_die_entry_attrs in a loop.
 */
838 static struct ip_die_entry_attribute num_ips_attr =
839 __ATTR_RO(num_ips);
840
841 static struct attribute *ip_die_entry_attrs[] = {
842 &num_ips_attr.attr,
843 NULL,
844 };
845 ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */
846
847 #define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
848
static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
				      struct attribute *attr,
				      char *buf)
852 {
853 struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
854 struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
855
856 if (!ip_die_entry_attr->show)
857 return -EIO;
858
859 return ip_die_entry_attr->show(ip_die_entry, buf);
860 }
861
static void ip_die_entry_release(struct kobject *kobj)
863 {
864 struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
865
866 if (!list_empty(&ip_die_entry->ip_kset.list))
867 DRM_ERROR("ip_die_entry->ip_kset is not empty");
868 kfree(ip_die_entry);
869 }
870
871 static const struct sysfs_ops ip_die_entry_sysfs_ops = {
872 .show = ip_die_entry_attr_show,
873 };
874
875 static const struct kobj_type ip_die_entry_ktype = {
876 .release = ip_die_entry_release,
877 .sysfs_ops = &ip_die_entry_sysfs_ops,
878 .default_groups = ip_die_entry_groups,
879 };
880
881 static const struct kobj_type die_kobj_ktype = {
882 .release = die_kobj_release,
883 .sysfs_ops = &kobj_sysfs_ops,
884 };
885
886 static const struct kobj_type ip_discovery_ktype = {
887 .release = ip_disc_release,
888 .sysfs_ops = &kobj_sysfs_ops,
889 };
890
891 struct ip_discovery_top {
892 struct kobject kobj; /* ip_discovery/ */
893 struct kset die_kset; /* ip_discovery/die/, contains ip_die_entry */
894 struct amdgpu_device *adev;
895 };
896
static void die_kobj_release(struct kobject *kobj)
898 {
899 struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
900 struct ip_discovery_top,
901 die_kset);
902 if (!list_empty(&ip_top->die_kset.list))
903 DRM_ERROR("ip_top->die_kset is not empty");
904 }
905
static void ip_disc_release(struct kobject *kobj)
907 {
908 struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
909 kobj);
910 struct amdgpu_device *adev = ip_top->adev;
911
912 adev->ip_top = NULL;
913 kfree(ip_top);
914 }
915
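/* Derive the per-instance harvest flag exposed in sysfs from the instance
 * masks that were populated while parsing the harvest information.
 */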
static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
						 uint16_t hw_id, uint8_t inst)
918 {
919 uint8_t harvest = 0;
920
	/* Until a uniform way is figured out, get the mask based on hwid */
922 switch (hw_id) {
923 case VCN_HWID:
924 harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
925 break;
926 case DMU_HWID:
927 if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
928 harvest = 0x1;
929 break;
930 case UMC_HWID:
931 /* TODO: It needs another parsing; for now, ignore.*/
932 break;
933 case GC_HWID:
934 harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
935 break;
936 case SDMA0_HWID:
937 harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
938 break;
939 default:
940 break;
941 }
942
943 return harvest;
944 }
945
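/* For one die, group IP instances by hw_id and create a kobject per instance
 * under ip_discovery/die/<die>/<hw_id>/<instance>.
 */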
static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
				      struct ip_die_entry *ip_die_entry,
				      const size_t _ip_offset, const int num_ips,
				      bool reg_base_64)
950 {
951 int ii, jj, kk, res;
952
953 DRM_DEBUG("num_ips:%d", num_ips);
954
955 /* Find all IPs of a given HW ID, and add their instance to
956 * #die/#hw_id/#instance/<attributes>
957 */
958 for (ii = 0; ii < HW_ID_MAX; ii++) {
959 struct ip_hw_id *ip_hw_id = NULL;
960 size_t ip_offset = _ip_offset;
961
962 for (jj = 0; jj < num_ips; jj++) {
963 struct ip_v4 *ip;
964 struct ip_hw_instance *ip_hw_instance;
965
966 ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
967 if (amdgpu_discovery_validate_ip(ip) ||
968 le16_to_cpu(ip->hw_id) != ii)
969 goto next_ip;
970
971 DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
972
973 /* We have a hw_id match; register the hw
974 * block if not yet registered.
975 */
976 if (!ip_hw_id) {
977 ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
978 if (!ip_hw_id)
979 return -ENOMEM;
980 ip_hw_id->hw_id = ii;
981
982 kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
983 ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
984 ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
985 res = kset_register(&ip_hw_id->hw_id_kset);
986 if (res) {
987 DRM_ERROR("Couldn't register ip_hw_id kset");
988 kfree(ip_hw_id);
989 return res;
990 }
991 if (hw_id_names[ii]) {
992 res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
993 &ip_hw_id->hw_id_kset.kobj,
994 hw_id_names[ii]);
995 if (res) {
996 DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
997 hw_id_names[ii],
998 kobject_name(&ip_die_entry->ip_kset.kobj));
999 }
1000 }
1001 }
1002
1003 /* Now register its instance.
1004 */
1005 ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
1006 base_addr,
1007 ip->num_base_address),
1008 GFP_KERNEL);
1009 if (!ip_hw_instance) {
1010 DRM_ERROR("no memory for ip_hw_instance");
1011 return -ENOMEM;
1012 }
1013 ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
1014 ip_hw_instance->num_instance = ip->instance_number;
1015 ip_hw_instance->major = ip->major;
1016 ip_hw_instance->minor = ip->minor;
1017 ip_hw_instance->revision = ip->revision;
1018 ip_hw_instance->harvest =
1019 amdgpu_discovery_get_harvest_info(
1020 adev, ip_hw_instance->hw_id,
1021 ip_hw_instance->num_instance);
1022 ip_hw_instance->num_base_addresses = ip->num_base_address;
1023
1024 for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
1025 if (reg_base_64)
1026 ip_hw_instance->base_addr[kk] =
1027 lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
1028 else
1029 ip_hw_instance->base_addr[kk] = ip->base_address[kk];
1030 }
1031
1032 kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
1033 ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
1034 res = kobject_add(&ip_hw_instance->kobj, NULL,
1035 "%d", ip_hw_instance->num_instance);
1036 next_ip:
1037 if (reg_base_64)
1038 ip_offset += struct_size(ip, base_address_64,
1039 ip->num_base_address);
1040 else
1041 ip_offset += struct_size(ip, base_address,
1042 ip->num_base_address);
1043 }
1044 }
1045
1046 return 0;
1047 }
1048
static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
1050 {
1051 struct binary_header *bhdr;
1052 struct ip_discovery_header *ihdr;
1053 struct die_header *dhdr;
1054 struct kset *die_kset = &adev->ip_top->die_kset;
1055 u16 num_dies, die_offset, num_ips;
1056 size_t ip_offset;
1057 int ii, res;
1058
1059 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1060 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1061 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1062 num_dies = le16_to_cpu(ihdr->num_dies);
1063
1064 DRM_DEBUG("number of dies: %d\n", num_dies);
1065
1066 for (ii = 0; ii < num_dies; ii++) {
1067 struct ip_die_entry *ip_die_entry;
1068
1069 die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
1070 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1071 num_ips = le16_to_cpu(dhdr->num_ips);
1072 ip_offset = die_offset + sizeof(*dhdr);
1073
1074 /* Add the die to the kset.
1075 *
1076 * dhdr->die_id == ii, which was checked in
1077 * amdgpu_discovery_reg_base_init().
1078 */
1079
1080 ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
1081 if (!ip_die_entry)
1082 return -ENOMEM;
1083
1084 ip_die_entry->num_ips = num_ips;
1085
1086 kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
1087 ip_die_entry->ip_kset.kobj.kset = die_kset;
1088 ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
1089 res = kset_register(&ip_die_entry->ip_kset);
1090 if (res) {
1091 DRM_ERROR("Couldn't register ip_die_entry kset");
1092 kfree(ip_die_entry);
1093 return res;
1094 }
1095
1096 amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
1097 }
1098
1099 return 0;
1100 }
1101
static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
1103 {
1104 struct kset *die_kset;
1105 int res, ii;
1106
1107 if (!adev->mman.discovery_bin)
1108 return -EINVAL;
1109
1110 adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
1111 if (!adev->ip_top)
1112 return -ENOMEM;
1113
1114 adev->ip_top->adev = adev;
1115
1116 res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
1117 &adev->dev->kobj, "ip_discovery");
1118 if (res) {
1119 DRM_ERROR("Couldn't init and add ip_discovery/");
1120 goto Err;
1121 }
1122
1123 die_kset = &adev->ip_top->die_kset;
1124 kobject_set_name(&die_kset->kobj, "%s", "die");
1125 die_kset->kobj.parent = &adev->ip_top->kobj;
1126 die_kset->kobj.ktype = &die_kobj_ktype;
1127 res = kset_register(&adev->ip_top->die_kset);
1128 if (res) {
1129 DRM_ERROR("Couldn't register die_kset");
1130 goto Err;
1131 }
1132
1133 for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
1134 ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
1135 ip_hw_instance_attrs[ii] = NULL;
1136
1137 res = amdgpu_discovery_sysfs_recurse(adev);
1138
1139 return res;
1140 Err:
1141 kobject_put(&adev->ip_top->kobj);
1142 return res;
1143 }
1144
1145 /* -------------------------------------------------- */
1146
1147 #define list_to_kobj(el) container_of(el, struct kobject, entry)
1148
static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
1150 {
1151 struct list_head *el, *tmp;
1152 struct kset *hw_id_kset;
1153
1154 hw_id_kset = &ip_hw_id->hw_id_kset;
1155 spin_lock(&hw_id_kset->list_lock);
1156 list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
1157 list_del_init(el);
1158 spin_unlock(&hw_id_kset->list_lock);
1159 /* kobject is embedded in ip_hw_instance */
1160 kobject_put(list_to_kobj(el));
1161 spin_lock(&hw_id_kset->list_lock);
1162 }
1163 spin_unlock(&hw_id_kset->list_lock);
1164 kobject_put(&ip_hw_id->hw_id_kset.kobj);
1165 }
1166
static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
1168 {
1169 struct list_head *el, *tmp;
1170 struct kset *ip_kset;
1171
1172 ip_kset = &ip_die_entry->ip_kset;
1173 spin_lock(&ip_kset->list_lock);
1174 list_for_each_prev_safe(el, tmp, &ip_kset->list) {
1175 list_del_init(el);
1176 spin_unlock(&ip_kset->list_lock);
1177 amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
1178 spin_lock(&ip_kset->list_lock);
1179 }
1180 spin_unlock(&ip_kset->list_lock);
1181 kobject_put(&ip_die_entry->ip_kset.kobj);
1182 }
1183
static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
1185 {
1186 struct list_head *el, *tmp;
1187 struct kset *die_kset;
1188
1189 die_kset = &adev->ip_top->die_kset;
1190 spin_lock(&die_kset->list_lock);
1191 list_for_each_prev_safe(el, tmp, &die_kset->list) {
1192 list_del_init(el);
1193 spin_unlock(&die_kset->list_lock);
1194 amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
1195 spin_lock(&die_kset->list_lock);
1196 }
1197 spin_unlock(&die_kset->list_lock);
1198 kobject_put(&adev->ip_top->die_kset.kobj);
1199 kobject_put(&adev->ip_top->kobj);
1200 }
1201
1202 /* ================================================== */
1203
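/* Core parser: walk every die and IP instance in the discovery binary,
 * populate the instance masks and counts (VCN, JPEG, SDMA, UMC, GC), and
 * record register base offsets in adev->reg_offset[][] and IP versions in
 * adev->ip_versions[][].
 *
 * Illustrative only (the helpers live elsewhere in the driver): the per-IP
 * register macros consume this data roughly as
 *
 *   offset = adev->reg_offset[GC_HWIP][inst][seg] + reg;
 *
 * which is why only the low 32 bits of 64-bit base addresses are kept here.
 */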
static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
1205 {
1206 struct binary_header *bhdr;
1207 struct ip_discovery_header *ihdr;
1208 struct die_header *dhdr;
1209 struct ip_v4 *ip;
1210 uint16_t die_offset;
1211 uint16_t ip_offset;
1212 uint16_t num_dies;
1213 uint16_t num_ips;
1214 uint8_t num_base_address;
1215 int hw_ip;
1216 int i, j, k;
1217 int r;
1218
1219 r = amdgpu_discovery_init(adev);
1220 if (r) {
1221 DRM_ERROR("amdgpu_discovery_init failed\n");
1222 return r;
1223 }
1224
1225 adev->gfx.xcc_mask = 0;
1226 adev->sdma.sdma_mask = 0;
1227 adev->vcn.inst_mask = 0;
1228 adev->jpeg.inst_mask = 0;
1229 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1230 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1231 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1232 num_dies = le16_to_cpu(ihdr->num_dies);
1233
1234 DRM_DEBUG("number of dies: %d\n", num_dies);
1235
1236 for (i = 0; i < num_dies; i++) {
1237 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
1238 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1239 num_ips = le16_to_cpu(dhdr->num_ips);
1240 ip_offset = die_offset + sizeof(*dhdr);
1241
1242 if (le16_to_cpu(dhdr->die_id) != i) {
1243 DRM_ERROR("invalid die id %d, expected %d\n",
1244 le16_to_cpu(dhdr->die_id), i);
1245 return -EINVAL;
1246 }
1247
1248 DRM_DEBUG("number of hardware IPs on die%d: %d\n",
1249 le16_to_cpu(dhdr->die_id), num_ips);
1250
1251 for (j = 0; j < num_ips; j++) {
1252 ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
1253
1254 if (amdgpu_discovery_validate_ip(ip))
1255 goto next_ip;
1256
1257 num_base_address = ip->num_base_address;
1258
1259 DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
1260 hw_id_names[le16_to_cpu(ip->hw_id)],
1261 le16_to_cpu(ip->hw_id),
1262 ip->instance_number,
1263 ip->major, ip->minor,
1264 ip->revision);
1265
1266 if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
1267 /* Bit [5:0]: original revision value
1268 * Bit [7:6]: en/decode capability:
1269 * 0b00 : VCN function normally
1270 * 0b10 : encode is disabled
1271 * 0b01 : decode is disabled
1272 */
1273 if (adev->vcn.num_vcn_inst <
1274 AMDGPU_MAX_VCN_INSTANCES) {
1275 adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
1276 ip->revision & 0xc0;
1277 adev->vcn.num_vcn_inst++;
1278 adev->vcn.inst_mask |=
1279 (1U << ip->instance_number);
1280 adev->jpeg.inst_mask |=
1281 (1U << ip->instance_number);
1282 } else {
1283 dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
1284 adev->vcn.num_vcn_inst + 1,
1285 AMDGPU_MAX_VCN_INSTANCES);
1286 }
1287 ip->revision &= ~0xc0;
1288 }
1289 if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
1290 le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
1291 le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
1292 le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
1293 if (adev->sdma.num_instances <
1294 AMDGPU_MAX_SDMA_INSTANCES) {
1295 adev->sdma.num_instances++;
1296 adev->sdma.sdma_mask |=
1297 (1U << ip->instance_number);
1298 } else {
1299 dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
1300 adev->sdma.num_instances + 1,
1301 AMDGPU_MAX_SDMA_INSTANCES);
1302 }
1303 }
1304
1305 if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
1306 adev->gmc.num_umc++;
1307 adev->umc.node_inst_num++;
1308 }
1309
1310 if (le16_to_cpu(ip->hw_id) == GC_HWID)
1311 adev->gfx.xcc_mask |=
1312 (1U << ip->instance_number);
1313
1314 for (k = 0; k < num_base_address; k++) {
1315 /*
1316 * convert the endianness of base addresses in place,
1317 * so that we don't need to convert them when accessing adev->reg_offset.
1318 */
1319 if (ihdr->base_addr_64_bit)
1320 /* Truncate the 64bit base address from ip discovery
1321 * and only store lower 32bit ip base in reg_offset[].
1322 * Bits > 32 follows ASIC specific format, thus just
1323 * discard them and handle it within specific ASIC.
1324 * By this way reg_offset[] and related helpers can
1325 * stay unchanged.
1326 * The base address is in dwords, thus clear the
1327 * highest 2 bits to store.
1328 */
1329 ip->base_address[k] =
1330 lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
1331 else
1332 ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
1333 DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
1334 }
1335
1336 for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
1337 if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
1338 hw_id_map[hw_ip] != 0) {
1339 DRM_DEBUG("set register base offset for %s\n",
1340 hw_id_names[le16_to_cpu(ip->hw_id)]);
1341 adev->reg_offset[hw_ip][ip->instance_number] =
1342 ip->base_address;
1343 /* Instance support is somewhat inconsistent.
1344 * SDMA is a good example. Sienna cichlid has 4 total
1345 * SDMA instances, each enumerated separately (HWIDs
1346 * 42, 43, 68, 69). Arcturus has 8 total SDMA instances,
1347 * but they are enumerated as multiple instances of the
1348 * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another
1349 * example. On most chips there are multiple instances
1350 * with the same HWID.
1351 */
1352 adev->ip_versions[hw_ip][ip->instance_number] =
1353 IP_VERSION(ip->major, ip->minor, ip->revision);
1354 }
1355 }
1356
1357 next_ip:
1358 if (ihdr->base_addr_64_bit)
1359 ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
1360 else
1361 ip_offset += struct_size(ip, base_address, ip->num_base_address);
1362 }
1363 }
1364
1365 return 0;
1366 }
1367
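/* Apply harvesting once the IP versions are known: the pre-Navi2x SKUs listed
 * below use the per-IP harvest bits, everything else uses the harvest table,
 * and the Navy Flounder quirk is applied on top.
 */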
static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
1369 {
1370 int vcn_harvest_count = 0;
1371 int umc_harvest_count = 0;
1372
1373 /*
1374 * Harvest table does not fit Navi1x and legacy GPUs,
1375 * so read harvest bit per IP data structure to set
1376 * harvest configuration.
1377 */
1378 if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0) &&
1379 adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3)) {
1380 if ((adev->pdev->device == 0x731E &&
1381 (adev->pdev->revision == 0xC6 ||
1382 adev->pdev->revision == 0xC7)) ||
1383 (adev->pdev->device == 0x7340 &&
1384 adev->pdev->revision == 0xC9) ||
1385 (adev->pdev->device == 0x7360 &&
1386 adev->pdev->revision == 0xC7))
1387 amdgpu_discovery_read_harvest_bit_per_ip(adev,
1388 &vcn_harvest_count);
1389 } else {
1390 amdgpu_discovery_read_from_harvest_table(adev,
1391 &vcn_harvest_count,
1392 &umc_harvest_count);
1393 }
1394
1395 amdgpu_discovery_harvest_config_quirk(adev);
1396
1397 if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
1398 adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
1399 adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
1400 }
1401
1402 if (umc_harvest_count < adev->gmc.num_umc) {
1403 adev->gmc.num_umc -= umc_harvest_count;
1404 }
1405 }
1406
1407 union gc_info {
1408 struct gc_info_v1_0 v1;
1409 struct gc_info_v1_1 v1_1;
1410 struct gc_info_v1_2 v1_2;
1411 struct gc_info_v2_0 v2;
1412 struct gc_info_v2_1 v2_1;
1413 };
1414
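/* Fill adev->gfx.config and adev->gfx.cu_info from the GC info table. The
 * v1.x layout describes WGP-based parts (CU count derived as 2 CUs per WGP),
 * while v2.x reports CU counts directly.
 */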
static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
1416 {
1417 struct binary_header *bhdr;
1418 union gc_info *gc_info;
1419 u16 offset;
1420
1421 if (!adev->mman.discovery_bin) {
1422 DRM_ERROR("ip discovery uninitialized\n");
1423 return -EINVAL;
1424 }
1425
1426 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1427 offset = le16_to_cpu(bhdr->table_list[GC].offset);
1428
1429 if (!offset)
1430 return 0;
1431
1432 gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
1433
1434 switch (le16_to_cpu(gc_info->v1.header.version_major)) {
1435 case 1:
1436 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
1437 adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
1438 le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
1439 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1440 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
1441 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
1442 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
1443 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
1444 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
1445 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
1446 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
1447 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
1448 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
1449 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
1450 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
1451 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
1452 le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1453 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
1454 if (gc_info->v1.header.version_minor >= 1) {
1455 adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
1456 adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
1457 adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
1458 }
1459 if (gc_info->v1.header.version_minor >= 2) {
1460 adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
1461 adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
1462 adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
1463 adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
1464 adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
1465 adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
1466 adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
1467 adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
1468 }
1469 break;
1470 case 2:
1471 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
1472 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
1473 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1474 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
1475 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
1476 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
1477 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
1478 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
1479 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
1480 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
1481 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
1482 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
1483 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
1484 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
1485 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
1486 le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1487 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
1488 if (gc_info->v2.header.version_minor == 1) {
1489 adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
1490 adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
1491 adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
1492 adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
1493 adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
1494 adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
1495 adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
1496 }
1497 break;
1498 default:
1499 dev_err(adev->dev,
1500 "Unhandled GC info table %d.%d\n",
1501 le16_to_cpu(gc_info->v1.header.version_major),
1502 le16_to_cpu(gc_info->v1.header.version_minor));
1503 return -EINVAL;
1504 }
1505 return 0;
1506 }
1507
1508 union mall_info {
1509 struct mall_info_v1_0 v1;
1510 struct mall_info_v2_0 v2;
1511 };
1512
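/* Parse the MALL info table and compute the total MALL size: v1 sums the
 * per-UMC size (doubled or halved according to the present/half-use masks),
 * v2 simply multiplies the per-UMC size by the number of UMCs.
 */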
static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
1514 {
1515 struct binary_header *bhdr;
1516 union mall_info *mall_info;
1517 u32 u, mall_size_per_umc, m_s_present, half_use;
1518 u64 mall_size;
1519 u16 offset;
1520
1521 if (!adev->mman.discovery_bin) {
1522 DRM_ERROR("ip discovery uninitialized\n");
1523 return -EINVAL;
1524 }
1525
1526 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1527 offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
1528
1529 if (!offset)
1530 return 0;
1531
1532 mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
1533
1534 switch (le16_to_cpu(mall_info->v1.header.version_major)) {
1535 case 1:
1536 mall_size = 0;
1537 mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
1538 m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
1539 half_use = le32_to_cpu(mall_info->v1.m_half_use);
1540 for (u = 0; u < adev->gmc.num_umc; u++) {
1541 if (m_s_present & (1 << u))
1542 mall_size += mall_size_per_umc * 2;
1543 else if (half_use & (1 << u))
1544 mall_size += mall_size_per_umc / 2;
1545 else
1546 mall_size += mall_size_per_umc;
1547 }
1548 adev->gmc.mall_size = mall_size;
1549 adev->gmc.m_half_use = half_use;
1550 break;
1551 case 2:
1552 mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
1553 adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc;
1554 break;
1555 default:
1556 dev_err(adev->dev,
1557 "Unhandled MALL info table %d.%d\n",
1558 le16_to_cpu(mall_info->v1.header.version_major),
1559 le16_to_cpu(mall_info->v1.header.version_minor));
1560 return -EINVAL;
1561 }
1562 return 0;
1563 }
1564
1565 union vcn_info {
1566 struct vcn_info_v1_0 v1;
1567 };
1568
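/* Read the per-instance VCN fuse data (codec disable bits, per the field
 * name) into adev->vcn.vcn_codec_disable_mask.
 */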
static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
1570 {
1571 struct binary_header *bhdr;
1572 union vcn_info *vcn_info;
1573 u16 offset;
1574 int v;
1575
1576 if (!adev->mman.discovery_bin) {
1577 DRM_ERROR("ip discovery uninitialized\n");
1578 return -EINVAL;
1579 }
1580
1581 /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1582 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
1583 * but that may change in the future with new GPUs so keep this
1584 * check for defensive purposes.
1585 */
1586 if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
1587 dev_err(adev->dev, "invalid vcn instances\n");
1588 return -EINVAL;
1589 }
1590
1591 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1592 offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
1593
1594 if (!offset)
1595 return 0;
1596
1597 vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
1598
1599 switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
1600 case 1:
1601 /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1602 * so this won't overflow.
1603 */
1604 for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
1605 adev->vcn.vcn_codec_disable_mask[v] =
1606 le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
1607 }
1608 break;
1609 default:
1610 dev_err(adev->dev,
1611 "Unhandled VCN info table %d.%d\n",
1612 le16_to_cpu(vcn_info->v1.header.version_major),
1613 le16_to_cpu(vcn_info->v1.header.version_minor));
1614 return -EINVAL;
1615 }
1616 return 0;
1617 }
1618
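/* The remaining helpers translate discovered IP versions into concrete IP
 * block implementations. The pattern is the same for each block type: switch
 * on the relevant HWIP version and register the matching ip_block.
 */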
static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
1620 {
1621 /* what IP to use for this? */
1622 switch (adev->ip_versions[GC_HWIP][0]) {
1623 case IP_VERSION(9, 0, 1):
1624 case IP_VERSION(9, 1, 0):
1625 case IP_VERSION(9, 2, 1):
1626 case IP_VERSION(9, 2, 2):
1627 case IP_VERSION(9, 3, 0):
1628 case IP_VERSION(9, 4, 0):
1629 case IP_VERSION(9, 4, 1):
1630 case IP_VERSION(9, 4, 2):
1631 case IP_VERSION(9, 4, 3):
1632 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
1633 break;
1634 case IP_VERSION(10, 1, 10):
1635 case IP_VERSION(10, 1, 1):
1636 case IP_VERSION(10, 1, 2):
1637 case IP_VERSION(10, 1, 3):
1638 case IP_VERSION(10, 1, 4):
1639 case IP_VERSION(10, 3, 0):
1640 case IP_VERSION(10, 3, 1):
1641 case IP_VERSION(10, 3, 2):
1642 case IP_VERSION(10, 3, 3):
1643 case IP_VERSION(10, 3, 4):
1644 case IP_VERSION(10, 3, 5):
1645 case IP_VERSION(10, 3, 6):
1646 case IP_VERSION(10, 3, 7):
1647 amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
1648 break;
1649 case IP_VERSION(11, 0, 0):
1650 case IP_VERSION(11, 0, 1):
1651 case IP_VERSION(11, 0, 2):
1652 case IP_VERSION(11, 0, 3):
1653 case IP_VERSION(11, 0, 4):
1654 amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
1655 break;
1656 default:
1657 dev_err(adev->dev,
1658 "Failed to add common ip block(GC_HWIP:0x%x)\n",
1659 adev->ip_versions[GC_HWIP][0]);
1660 return -EINVAL;
1661 }
1662 return 0;
1663 }
1664
static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
1666 {
1667 /* use GC or MMHUB IP version */
1668 switch (adev->ip_versions[GC_HWIP][0]) {
1669 case IP_VERSION(9, 0, 1):
1670 case IP_VERSION(9, 1, 0):
1671 case IP_VERSION(9, 2, 1):
1672 case IP_VERSION(9, 2, 2):
1673 case IP_VERSION(9, 3, 0):
1674 case IP_VERSION(9, 4, 0):
1675 case IP_VERSION(9, 4, 1):
1676 case IP_VERSION(9, 4, 2):
1677 case IP_VERSION(9, 4, 3):
1678 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
1679 break;
1680 case IP_VERSION(10, 1, 10):
1681 case IP_VERSION(10, 1, 1):
1682 case IP_VERSION(10, 1, 2):
1683 case IP_VERSION(10, 1, 3):
1684 case IP_VERSION(10, 1, 4):
1685 case IP_VERSION(10, 3, 0):
1686 case IP_VERSION(10, 3, 1):
1687 case IP_VERSION(10, 3, 2):
1688 case IP_VERSION(10, 3, 3):
1689 case IP_VERSION(10, 3, 4):
1690 case IP_VERSION(10, 3, 5):
1691 case IP_VERSION(10, 3, 6):
1692 case IP_VERSION(10, 3, 7):
1693 amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
1694 break;
1695 case IP_VERSION(11, 0, 0):
1696 case IP_VERSION(11, 0, 1):
1697 case IP_VERSION(11, 0, 2):
1698 case IP_VERSION(11, 0, 3):
1699 case IP_VERSION(11, 0, 4):
1700 amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
1701 break;
1702 default:
1703 dev_err(adev->dev,
1704 "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
1705 adev->ip_versions[GC_HWIP][0]);
1706 return -EINVAL;
1707 }
1708 return 0;
1709 }
1710
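/* Register the interrupt handler (IH) block based on the OSSSYS IP version. */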
1711 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
1712 {
1713 switch (adev->ip_versions[OSSSYS_HWIP][0]) {
1714 case IP_VERSION(4, 0, 0):
1715 case IP_VERSION(4, 0, 1):
1716 case IP_VERSION(4, 1, 0):
1717 case IP_VERSION(4, 1, 1):
1718 case IP_VERSION(4, 3, 0):
1719 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
1720 break;
1721 case IP_VERSION(4, 2, 0):
1722 case IP_VERSION(4, 2, 1):
1723 case IP_VERSION(4, 4, 0):
1724 case IP_VERSION(4, 4, 2):
1725 amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
1726 break;
1727 case IP_VERSION(5, 0, 0):
1728 case IP_VERSION(5, 0, 1):
1729 case IP_VERSION(5, 0, 2):
1730 case IP_VERSION(5, 0, 3):
1731 case IP_VERSION(5, 2, 0):
1732 case IP_VERSION(5, 2, 1):
1733 amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
1734 break;
1735 case IP_VERSION(6, 0, 0):
1736 case IP_VERSION(6, 0, 1):
1737 case IP_VERSION(6, 0, 2):
1738 amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
1739 break;
1740 case IP_VERSION(6, 1, 0):
1741 amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
1742 break;
1743 default:
1744 dev_err(adev->dev,
1745 "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
1746 adev->ip_versions[OSSSYS_HWIP][0]);
1747 return -EINVAL;
1748 }
1749 return 0;
1750 }
1751
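/* Register the PSP (platform security processor) block based on the MP0 IP version. */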
1752 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
1753 {
1754 switch (adev->ip_versions[MP0_HWIP][0]) {
1755 case IP_VERSION(9, 0, 0):
1756 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
1757 break;
1758 case IP_VERSION(10, 0, 0):
1759 case IP_VERSION(10, 0, 1):
1760 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
1761 break;
1762 case IP_VERSION(11, 0, 0):
1763 case IP_VERSION(11, 0, 2):
1764 case IP_VERSION(11, 0, 4):
1765 case IP_VERSION(11, 0, 5):
1766 case IP_VERSION(11, 0, 9):
1767 case IP_VERSION(11, 0, 7):
1768 case IP_VERSION(11, 0, 11):
1769 case IP_VERSION(11, 0, 12):
1770 case IP_VERSION(11, 0, 13):
1771 case IP_VERSION(11, 5, 0):
1772 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
1773 break;
1774 case IP_VERSION(11, 0, 8):
1775 amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
1776 break;
1777 case IP_VERSION(11, 0, 3):
1778 case IP_VERSION(12, 0, 1):
1779 amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
1780 break;
1781 case IP_VERSION(13, 0, 0):
1782 case IP_VERSION(13, 0, 1):
1783 case IP_VERSION(13, 0, 2):
1784 case IP_VERSION(13, 0, 3):
1785 case IP_VERSION(13, 0, 5):
1786 case IP_VERSION(13, 0, 6):
1787 case IP_VERSION(13, 0, 7):
1788 case IP_VERSION(13, 0, 8):
1789 case IP_VERSION(13, 0, 10):
1790 case IP_VERSION(13, 0, 11):
1791 case IP_VERSION(14, 0, 0):
1792 amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
1793 break;
1794 case IP_VERSION(13, 0, 4):
1795 amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
1796 break;
1797 default:
1798 dev_err(adev->dev,
1799 "Failed to add psp ip block(MP0_HWIP:0x%x)\n",
1800 adev->ip_versions[MP0_HWIP][0]);
1801 return -EINVAL;
1802 }
1803 return 0;
1804 }
1805
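/*
 * Register the SMU block based on the MP1 IP version.  Older MP1
 * versions (9.0, 10.0, 11.0.2) use the legacy powerplay block, except
 * Arcturus, which shares MP1 11.0.2 with Vega20 but uses smu_v11_0.
 */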
1806 static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
1807 {
1808 switch (adev->ip_versions[MP1_HWIP][0]) {
1809 case IP_VERSION(9, 0, 0):
1810 case IP_VERSION(10, 0, 0):
1811 case IP_VERSION(10, 0, 1):
1812 case IP_VERSION(11, 0, 2):
1813 if (adev->asic_type == CHIP_ARCTURUS)
1814 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
1815 else
1816 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1817 break;
1818 case IP_VERSION(11, 0, 0):
1819 case IP_VERSION(11, 0, 5):
1820 case IP_VERSION(11, 0, 9):
1821 case IP_VERSION(11, 0, 7):
1822 case IP_VERSION(11, 0, 8):
1823 case IP_VERSION(11, 0, 11):
1824 case IP_VERSION(11, 0, 12):
1825 case IP_VERSION(11, 0, 13):
1826 case IP_VERSION(11, 5, 0):
1827 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
1828 break;
1829 case IP_VERSION(12, 0, 0):
1830 case IP_VERSION(12, 0, 1):
1831 amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
1832 break;
1833 case IP_VERSION(13, 0, 0):
1834 case IP_VERSION(13, 0, 1):
1835 case IP_VERSION(13, 0, 2):
1836 case IP_VERSION(13, 0, 3):
1837 case IP_VERSION(13, 0, 4):
1838 case IP_VERSION(13, 0, 5):
1839 case IP_VERSION(13, 0, 6):
1840 case IP_VERSION(13, 0, 7):
1841 case IP_VERSION(13, 0, 8):
1842 case IP_VERSION(13, 0, 10):
1843 case IP_VERSION(13, 0, 11):
1844 amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
1845 break;
1846 default:
1847 dev_err(adev->dev,
1848 "Failed to add smu ip block(MP1_HWIP:0x%x)\n",
1849 adev->ip_versions[MP1_HWIP][0]);
1850 return -EINVAL;
1851 }
1852 return 0;
1853 }
1854
1855 #if defined(CONFIG_DRM_AMD_DC)
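/* Under SR-IOV, display is virtualized and backed by the vkms IP block. */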
1856 static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
1857 {
1858 amdgpu_device_set_sriov_virtual_display(adev);
1859 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
1860 }
1861 #endif
1862
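/*
 * Register the display block: vkms when virtual display is enabled,
 * otherwise the DC-based dm block (or the SR-IOV virtual display)
 * selected by the DCE or DCI IP version.  Without DC support this is
 * a no-op.
 */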
1863 static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
1864 {
1865 if (adev->enable_virtual_display) {
1866 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
1867 return 0;
1868 }
1869
1870 if (!amdgpu_device_has_dc_support(adev))
1871 return 0;
1872
1873 #if defined(CONFIG_DRM_AMD_DC)
1874 if (adev->ip_versions[DCE_HWIP][0]) {
1875 switch (adev->ip_versions[DCE_HWIP][0]) {
1876 case IP_VERSION(1, 0, 0):
1877 case IP_VERSION(1, 0, 1):
1878 case IP_VERSION(2, 0, 2):
1879 case IP_VERSION(2, 0, 0):
1880 case IP_VERSION(2, 0, 3):
1881 case IP_VERSION(2, 1, 0):
1882 case IP_VERSION(3, 0, 0):
1883 case IP_VERSION(3, 0, 2):
1884 case IP_VERSION(3, 0, 3):
1885 case IP_VERSION(3, 0, 1):
1886 case IP_VERSION(3, 1, 2):
1887 case IP_VERSION(3, 1, 3):
1888 case IP_VERSION(3, 1, 4):
1889 case IP_VERSION(3, 1, 5):
1890 case IP_VERSION(3, 1, 6):
1891 case IP_VERSION(3, 2, 0):
1892 case IP_VERSION(3, 2, 1):
1893 if (amdgpu_sriov_vf(adev))
1894 amdgpu_discovery_set_sriov_display(adev);
1895 else
1896 amdgpu_device_ip_block_add(adev, &dm_ip_block);
1897 break;
1898 default:
1899 dev_err(adev->dev,
1900 "Failed to add dm ip block(DCE_HWIP:0x%x)\n",
1901 adev->ip_versions[DCE_HWIP][0]);
1902 return -EINVAL;
1903 }
1904 } else if (adev->ip_versions[DCI_HWIP][0]) {
1905 switch (adev->ip_versions[DCI_HWIP][0]) {
1906 case IP_VERSION(12, 0, 0):
1907 case IP_VERSION(12, 0, 1):
1908 case IP_VERSION(12, 1, 0):
1909 if (amdgpu_sriov_vf(adev))
1910 amdgpu_discovery_set_sriov_display(adev);
1911 else
1912 amdgpu_device_ip_block_add(adev, &dm_ip_block);
1913 break;
1914 default:
1915 dev_err(adev->dev,
1916 "Failed to add dm ip block(DCI_HWIP:0x%x)\n",
1917 adev->ip_versions[DCI_HWIP][0]);
1918 return -EINVAL;
1919 }
1920 }
1921 #endif
1922 return 0;
1923 }
1924
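/*
 * Register the GFX (GC) block.  GC 9.4.3 is gated behind the
 * amdgpu_exp_hw_support module parameter.
 */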
1925 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
1926 {
1927 switch (adev->ip_versions[GC_HWIP][0]) {
1928 case IP_VERSION(9, 0, 1):
1929 case IP_VERSION(9, 1, 0):
1930 case IP_VERSION(9, 2, 1):
1931 case IP_VERSION(9, 2, 2):
1932 case IP_VERSION(9, 3, 0):
1933 case IP_VERSION(9, 4, 0):
1934 case IP_VERSION(9, 4, 1):
1935 case IP_VERSION(9, 4, 2):
1936 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
1937 break;
1938 case IP_VERSION(9, 4, 3):
1939 if (!amdgpu_exp_hw_support)
1940 return -EINVAL;
1941 amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
1942 break;
1943 case IP_VERSION(10, 1, 10):
1944 case IP_VERSION(10, 1, 2):
1945 case IP_VERSION(10, 1, 1):
1946 case IP_VERSION(10, 1, 3):
1947 case IP_VERSION(10, 1, 4):
1948 case IP_VERSION(10, 3, 0):
1949 case IP_VERSION(10, 3, 2):
1950 case IP_VERSION(10, 3, 1):
1951 case IP_VERSION(10, 3, 4):
1952 case IP_VERSION(10, 3, 5):
1953 case IP_VERSION(10, 3, 6):
1954 case IP_VERSION(10, 3, 3):
1955 case IP_VERSION(10, 3, 7):
1956 amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
1957 break;
1958 case IP_VERSION(11, 0, 0):
1959 case IP_VERSION(11, 0, 1):
1960 case IP_VERSION(11, 0, 2):
1961 case IP_VERSION(11, 0, 3):
1962 case IP_VERSION(11, 0, 4):
1963 amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
1964 break;
1965 default:
1966 dev_err(adev->dev,
1967 "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
1968 adev->ip_versions[GC_HWIP][0]);
1969 return -EINVAL;
1970 }
1971 return 0;
1972 }
1973
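/* Register the SDMA block based on the SDMA0 IP version. */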
1974 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
1975 {
1976 switch (adev->ip_versions[SDMA0_HWIP][0]) {
1977 case IP_VERSION(4, 0, 0):
1978 case IP_VERSION(4, 0, 1):
1979 case IP_VERSION(4, 1, 0):
1980 case IP_VERSION(4, 1, 1):
1981 case IP_VERSION(4, 1, 2):
1982 case IP_VERSION(4, 2, 0):
1983 case IP_VERSION(4, 2, 2):
1984 case IP_VERSION(4, 4, 0):
1985 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
1986 break;
1987 case IP_VERSION(4, 4, 2):
1988 amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
1989 break;
1990 case IP_VERSION(5, 0, 0):
1991 case IP_VERSION(5, 0, 1):
1992 case IP_VERSION(5, 0, 2):
1993 case IP_VERSION(5, 0, 5):
1994 amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
1995 break;
1996 case IP_VERSION(5, 2, 0):
1997 case IP_VERSION(5, 2, 2):
1998 case IP_VERSION(5, 2, 4):
1999 case IP_VERSION(5, 2, 5):
2000 case IP_VERSION(5, 2, 6):
2001 case IP_VERSION(5, 2, 3):
2002 case IP_VERSION(5, 2, 1):
2003 case IP_VERSION(5, 2, 7):
2004 amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
2005 break;
2006 case IP_VERSION(6, 0, 0):
2007 case IP_VERSION(6, 0, 1):
2008 case IP_VERSION(6, 0, 2):
2009 case IP_VERSION(6, 0, 3):
2010 case IP_VERSION(6, 1, 0):
2011 amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
2012 break;
2013 default:
2014 dev_err(adev->dev,
2015 "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
2016 adev->ip_versions[SDMA0_HWIP][0]);
2017 return -EINVAL;
2018 }
2019 return 0;
2020 }
2021
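/*
 * Register the multimedia blocks.  ASICs that expose a VCE IP entry use
 * the legacy UVD/VCE pair (skipped on Vega20 SR-IOV); everything else
 * uses VCN, plus JPEG where the version supports it.
 */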
2022 static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
2023 {
2024 if (adev->ip_versions[VCE_HWIP][0]) {
2025 switch (adev->ip_versions[UVD_HWIP][0]) {
2026 case IP_VERSION(7, 0, 0):
2027 case IP_VERSION(7, 2, 0):
2028 /* UVD is not supported on vega20 SR-IOV */
2029 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2030 amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
2031 break;
2032 default:
2033 dev_err(adev->dev,
2034 "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
2035 adev->ip_versions[UVD_HWIP][0]);
2036 return -EINVAL;
2037 }
2038 switch (adev->ip_versions[VCE_HWIP][0]) {
2039 case IP_VERSION(4, 0, 0):
2040 case IP_VERSION(4, 1, 0):
2041 /* VCE is not supported on vega20 SR-IOV */
2042 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2043 amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
2044 break;
2045 default:
2046 dev_err(adev->dev,
2047 "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
2048 adev->ip_versions[VCE_HWIP][0]);
2049 return -EINVAL;
2050 }
2051 } else {
2052 switch (adev->ip_versions[UVD_HWIP][0]) {
2053 case IP_VERSION(1, 0, 0):
2054 case IP_VERSION(1, 0, 1):
2055 amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
2056 break;
2057 case IP_VERSION(2, 0, 0):
2058 case IP_VERSION(2, 0, 2):
2059 case IP_VERSION(2, 2, 0):
2060 amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
2061 if (!amdgpu_sriov_vf(adev))
2062 amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
2063 break;
2064 case IP_VERSION(2, 0, 3):
2065 break;
2066 case IP_VERSION(2, 5, 0):
2067 amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
2068 amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
2069 break;
2070 case IP_VERSION(2, 6, 0):
2071 amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
2072 amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
2073 break;
2074 case IP_VERSION(3, 0, 0):
2075 case IP_VERSION(3, 0, 16):
2076 case IP_VERSION(3, 1, 1):
2077 case IP_VERSION(3, 1, 2):
2078 case IP_VERSION(3, 0, 2):
2079 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2080 if (!amdgpu_sriov_vf(adev))
2081 amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
2082 break;
2083 case IP_VERSION(3, 0, 33):
2084 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2085 break;
2086 case IP_VERSION(4, 0, 0):
2087 case IP_VERSION(4, 0, 2):
2088 case IP_VERSION(4, 0, 4):
2089 amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
2090 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
2091 break;
2092 case IP_VERSION(4, 0, 3):
2093 amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
2094 amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
2095 break;
2096 default:
2097 dev_err(adev->dev,
2098 "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
2099 adev->ip_versions[UVD_HWIP][0]);
2100 return -EINVAL;
2101 }
2102 }
2103 return 0;
2104 }
2105
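/*
 * Register the MES (micro engine scheduler) block.  On GC 10.x it is
 * opt-in via the amdgpu_mes module parameter; on GC 11.x MES and the
 * MES KIQ are always enabled.
 */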
2106 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
2107 {
2108 switch (adev->ip_versions[GC_HWIP][0]) {
2109 case IP_VERSION(10, 1, 10):
2110 case IP_VERSION(10, 1, 1):
2111 case IP_VERSION(10, 1, 2):
2112 case IP_VERSION(10, 1, 3):
2113 case IP_VERSION(10, 1, 4):
2114 case IP_VERSION(10, 3, 0):
2115 case IP_VERSION(10, 3, 1):
2116 case IP_VERSION(10, 3, 2):
2117 case IP_VERSION(10, 3, 3):
2118 case IP_VERSION(10, 3, 4):
2119 case IP_VERSION(10, 3, 5):
2120 case IP_VERSION(10, 3, 6):
2121 if (amdgpu_mes) {
2122 amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
2123 adev->enable_mes = true;
2124 if (amdgpu_mes_kiq)
2125 adev->enable_mes_kiq = true;
2126 }
2127 break;
2128 case IP_VERSION(11, 0, 0):
2129 case IP_VERSION(11, 0, 1):
2130 case IP_VERSION(11, 0, 2):
2131 case IP_VERSION(11, 0, 3):
2132 case IP_VERSION(11, 0, 4):
2133 amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
2134 adev->enable_mes = true;
2135 adev->enable_mes_kiq = true;
2136 break;
2137 default:
2138 break;
2139 }
2140 return 0;
2141 }
2142
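/* Early per-SoC configuration; currently only needed for GC 9.4.3. */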
2143 static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
2144 {
2145 switch (adev->ip_versions[GC_HWIP][0]) {
2146 case IP_VERSION(9, 4, 3):
2147 aqua_vanjaram_init_soc_config(adev);
2148 break;
2149 default:
2150 break;
2151 }
2152 }
2153
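/**
 * amdgpu_discovery_set_ip_blocks - build the IP block list for an ASIC
 * @adev: amdgpu device pointer
 *
 * Legacy ASICs (Vega10/12/20, Raven, Arcturus, Aldebaran) have their IP
 * versions hardcoded here; newer ASICs read them from the IP discovery
 * table.  The versions are then used to set the chip family, APU flag
 * and the NBIO/HDP/DF/SMUIO/LSDMA callbacks, and finally to register
 * the individual IP blocks.
 *
 * Returns 0 on success, negative error code on failure.
 */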
2154 int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
2155 {
2156 int r;
2157
2158 switch (adev->asic_type) {
2159 case CHIP_VEGA10:
2160 vega10_reg_base_init(adev);
2161 adev->sdma.num_instances = 2;
2162 adev->gmc.num_umc = 4;
2163 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2164 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2165 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
2166 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
2167 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
2168 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
2169 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2170 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
2171 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
2172 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2173 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2174 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2175 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
2176 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
2177 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2178 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2179 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
2180 break;
2181 case CHIP_VEGA12:
2182 vega10_reg_base_init(adev);
2183 adev->sdma.num_instances = 2;
2184 adev->gmc.num_umc = 4;
2185 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2186 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2187 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
2188 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
2189 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
2190 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
2191 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
2192 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
2193 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
2194 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2195 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2196 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2197 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
2198 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
2199 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2200 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2201 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
2202 break;
2203 case CHIP_RAVEN:
2204 vega10_reg_base_init(adev);
2205 adev->sdma.num_instances = 1;
2206 adev->vcn.num_vcn_inst = 1;
2207 adev->gmc.num_umc = 2;
2208 if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
2209 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2210 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2211 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
2212 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
2213 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
2214 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
2215 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
2216 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
2217 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
2218 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
2219 adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
2220 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
2221 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
2222 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
2223 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
2224 } else {
2225 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2226 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2227 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
2228 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
2229 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
2230 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2231 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
2232 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
2233 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
2234 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
2235 adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
2236 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
2237 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
2238 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
2239 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
2240 }
2241 break;
2242 case CHIP_VEGA20:
2243 vega20_reg_base_init(adev);
2244 adev->sdma.num_instances = 2;
2245 adev->gmc.num_umc = 8;
2246 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2247 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2248 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
2249 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
2250 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
2251 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
2252 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
2253 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
2254 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
2255 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
2256 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2257 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
2258 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
2259 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
2260 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
2261 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
2262 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
2263 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
2264 break;
2265 case CHIP_ARCTURUS:
2266 arct_reg_base_init(adev);
2267 adev->sdma.num_instances = 8;
2268 adev->vcn.num_vcn_inst = 2;
2269 adev->gmc.num_umc = 8;
2270 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2271 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2272 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
2273 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
2274 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
2275 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
2276 adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
2277 adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
2278 adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
2279 adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
2280 adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
2281 adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
2282 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
2283 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
2284 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
2285 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
2286 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2287 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
2288 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
2289 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
2290 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
2291 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
2292 break;
2293 case CHIP_ALDEBARAN:
2294 aldebaran_reg_base_init(adev);
2295 adev->sdma.num_instances = 5;
2296 adev->vcn.num_vcn_inst = 2;
2297 adev->gmc.num_umc = 4;
2298 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2299 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2300 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
2301 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
2302 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
2303 adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
2304 adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
2305 adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
2306 adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
2307 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
2308 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
2309 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
2310 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
2311 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
2312 adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
2313 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
2314 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
2315 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
2316 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
2317 adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
2318 break;
2319 default:
2320 r = amdgpu_discovery_reg_base_init(adev);
2321 if (r)
2322 return -EINVAL;
2323
2324 amdgpu_discovery_harvest_ip(adev);
2325 amdgpu_discovery_get_gfx_info(adev);
2326 amdgpu_discovery_get_mall_info(adev);
2327 amdgpu_discovery_get_vcn_info(adev);
2328 break;
2329 }
2330
2331 amdgpu_discovery_init_soc_config(adev);
2332 amdgpu_discovery_sysfs_init(adev);
2333
2334 switch (adev->ip_versions[GC_HWIP][0]) {
2335 case IP_VERSION(9, 0, 1):
2336 case IP_VERSION(9, 2, 1):
2337 case IP_VERSION(9, 4, 0):
2338 case IP_VERSION(9, 4, 1):
2339 case IP_VERSION(9, 4, 2):
2340 case IP_VERSION(9, 4, 3):
2341 adev->family = AMDGPU_FAMILY_AI;
2342 break;
2343 case IP_VERSION(9, 1, 0):
2344 case IP_VERSION(9, 2, 2):
2345 case IP_VERSION(9, 3, 0):
2346 adev->family = AMDGPU_FAMILY_RV;
2347 break;
2348 case IP_VERSION(10, 1, 10):
2349 case IP_VERSION(10, 1, 1):
2350 case IP_VERSION(10, 1, 2):
2351 case IP_VERSION(10, 1, 3):
2352 case IP_VERSION(10, 1, 4):
2353 case IP_VERSION(10, 3, 0):
2354 case IP_VERSION(10, 3, 2):
2355 case IP_VERSION(10, 3, 4):
2356 case IP_VERSION(10, 3, 5):
2357 adev->family = AMDGPU_FAMILY_NV;
2358 break;
2359 case IP_VERSION(10, 3, 1):
2360 adev->family = AMDGPU_FAMILY_VGH;
2361 adev->apu_flags |= AMD_APU_IS_VANGOGH;
2362 break;
2363 case IP_VERSION(10, 3, 3):
2364 adev->family = AMDGPU_FAMILY_YC;
2365 break;
2366 case IP_VERSION(10, 3, 6):
2367 adev->family = AMDGPU_FAMILY_GC_10_3_6;
2368 break;
2369 case IP_VERSION(10, 3, 7):
2370 adev->family = AMDGPU_FAMILY_GC_10_3_7;
2371 break;
2372 case IP_VERSION(11, 0, 0):
2373 case IP_VERSION(11, 0, 2):
2374 case IP_VERSION(11, 0, 3):
2375 adev->family = AMDGPU_FAMILY_GC_11_0_0;
2376 break;
2377 case IP_VERSION(11, 0, 1):
2378 case IP_VERSION(11, 0, 4):
2379 adev->family = AMDGPU_FAMILY_GC_11_0_1;
2380 break;
2381 default:
2382 return -EINVAL;
2383 }
2384
2385 switch (adev->ip_versions[GC_HWIP][0]) {
2386 case IP_VERSION(9, 1, 0):
2387 case IP_VERSION(9, 2, 2):
2388 case IP_VERSION(9, 3, 0):
2389 case IP_VERSION(10, 1, 3):
2390 case IP_VERSION(10, 1, 4):
2391 case IP_VERSION(10, 3, 1):
2392 case IP_VERSION(10, 3, 3):
2393 case IP_VERSION(10, 3, 6):
2394 case IP_VERSION(10, 3, 7):
2395 case IP_VERSION(11, 0, 1):
2396 case IP_VERSION(11, 0, 4):
2397 adev->flags |= AMD_IS_APU;
2398 break;
2399 default:
2400 break;
2401 }
2402
2403 if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0))
2404 adev->gmc.xgmi.supported = true;
2405
2406 /* set NBIO version */
2407 switch (adev->ip_versions[NBIO_HWIP][0]) {
2408 case IP_VERSION(6, 1, 0):
2409 case IP_VERSION(6, 2, 0):
2410 adev->nbio.funcs = &nbio_v6_1_funcs;
2411 adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
2412 break;
2413 case IP_VERSION(7, 0, 0):
2414 case IP_VERSION(7, 0, 1):
2415 case IP_VERSION(2, 5, 0):
2416 adev->nbio.funcs = &nbio_v7_0_funcs;
2417 adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
2418 break;
2419 case IP_VERSION(7, 4, 0):
2420 case IP_VERSION(7, 4, 1):
2421 case IP_VERSION(7, 4, 4):
2422 adev->nbio.funcs = &nbio_v7_4_funcs;
2423 adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
2424 break;
2425 case IP_VERSION(7, 9, 0):
2426 adev->nbio.funcs = &nbio_v7_9_funcs;
2427 adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
2428 break;
2429 case IP_VERSION(7, 2, 0):
2430 case IP_VERSION(7, 2, 1):
2431 case IP_VERSION(7, 3, 0):
2432 case IP_VERSION(7, 5, 0):
2433 case IP_VERSION(7, 5, 1):
2434 adev->nbio.funcs = &nbio_v7_2_funcs;
2435 adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
2436 break;
2437 case IP_VERSION(2, 1, 1):
2438 case IP_VERSION(2, 3, 0):
2439 case IP_VERSION(2, 3, 1):
2440 case IP_VERSION(2, 3, 2):
2441 case IP_VERSION(3, 3, 0):
2442 case IP_VERSION(3, 3, 1):
2443 case IP_VERSION(3, 3, 2):
2444 case IP_VERSION(3, 3, 3):
2445 adev->nbio.funcs = &nbio_v2_3_funcs;
2446 adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
2447 break;
2448 case IP_VERSION(4, 3, 0):
2449 case IP_VERSION(4, 3, 1):
2450 if (amdgpu_sriov_vf(adev))
2451 adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
2452 else
2453 adev->nbio.funcs = &nbio_v4_3_funcs;
2454 adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
2455 break;
2456 case IP_VERSION(7, 7, 0):
2457 case IP_VERSION(7, 7, 1):
2458 adev->nbio.funcs = &nbio_v7_7_funcs;
2459 adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
2460 break;
2461 default:
2462 break;
2463 }
2464
2465 switch (adev->ip_versions[HDP_HWIP][0]) {
2466 case IP_VERSION(4, 0, 0):
2467 case IP_VERSION(4, 0, 1):
2468 case IP_VERSION(4, 1, 0):
2469 case IP_VERSION(4, 1, 1):
2470 case IP_VERSION(4, 1, 2):
2471 case IP_VERSION(4, 2, 0):
2472 case IP_VERSION(4, 2, 1):
2473 case IP_VERSION(4, 4, 0):
2474 case IP_VERSION(4, 4, 2):
2475 adev->hdp.funcs = &hdp_v4_0_funcs;
2476 break;
2477 case IP_VERSION(5, 0, 0):
2478 case IP_VERSION(5, 0, 1):
2479 case IP_VERSION(5, 0, 2):
2480 case IP_VERSION(5, 0, 3):
2481 case IP_VERSION(5, 0, 4):
2482 case IP_VERSION(5, 2, 0):
2483 adev->hdp.funcs = &hdp_v5_0_funcs;
2484 break;
2485 case IP_VERSION(5, 2, 1):
2486 adev->hdp.funcs = &hdp_v5_2_funcs;
2487 break;
2488 case IP_VERSION(6, 0, 0):
2489 case IP_VERSION(6, 0, 1):
2490 case IP_VERSION(6, 1, 0):
2491 adev->hdp.funcs = &hdp_v6_0_funcs;
2492 break;
2493 default:
2494 break;
2495 }
2496
2497 switch (adev->ip_versions[DF_HWIP][0]) {
2498 case IP_VERSION(3, 6, 0):
2499 case IP_VERSION(3, 6, 1):
2500 case IP_VERSION(3, 6, 2):
2501 adev->df.funcs = &df_v3_6_funcs;
2502 break;
2503 case IP_VERSION(2, 1, 0):
2504 case IP_VERSION(2, 1, 1):
2505 case IP_VERSION(2, 5, 0):
2506 case IP_VERSION(3, 5, 1):
2507 case IP_VERSION(3, 5, 2):
2508 adev->df.funcs = &df_v1_7_funcs;
2509 break;
2510 case IP_VERSION(4, 3, 0):
2511 adev->df.funcs = &df_v4_3_funcs;
2512 break;
2513 default:
2514 break;
2515 }
2516
2517 switch (adev->ip_versions[SMUIO_HWIP][0]) {
2518 case IP_VERSION(9, 0, 0):
2519 case IP_VERSION(9, 0, 1):
2520 case IP_VERSION(10, 0, 0):
2521 case IP_VERSION(10, 0, 1):
2522 case IP_VERSION(10, 0, 2):
2523 adev->smuio.funcs = &smuio_v9_0_funcs;
2524 break;
2525 case IP_VERSION(11, 0, 0):
2526 case IP_VERSION(11, 0, 2):
2527 case IP_VERSION(11, 0, 3):
2528 case IP_VERSION(11, 0, 4):
2529 case IP_VERSION(11, 0, 7):
2530 case IP_VERSION(11, 0, 8):
2531 adev->smuio.funcs = &smuio_v11_0_funcs;
2532 break;
2533 case IP_VERSION(11, 0, 6):
2534 case IP_VERSION(11, 0, 10):
2535 case IP_VERSION(11, 0, 11):
2536 case IP_VERSION(11, 5, 0):
2537 case IP_VERSION(13, 0, 1):
2538 case IP_VERSION(13, 0, 9):
2539 case IP_VERSION(13, 0, 10):
2540 adev->smuio.funcs = &smuio_v11_0_6_funcs;
2541 break;
2542 case IP_VERSION(13, 0, 2):
2543 adev->smuio.funcs = &smuio_v13_0_funcs;
2544 break;
2545 case IP_VERSION(13, 0, 3):
2546 adev->smuio.funcs = &smuio_v13_0_3_funcs;
2547 if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) {
2548 adev->flags |= AMD_IS_APU;
2549 }
2550 break;
2551 case IP_VERSION(13, 0, 6):
2552 case IP_VERSION(13, 0, 8):
2553 case IP_VERSION(14, 0, 0):
2554 adev->smuio.funcs = &smuio_v13_0_6_funcs;
2555 break;
2556 default:
2557 break;
2558 }
2559
2560 switch (adev->ip_versions[LSDMA_HWIP][0]) {
2561 case IP_VERSION(6, 0, 0):
2562 case IP_VERSION(6, 0, 1):
2563 case IP_VERSION(6, 0, 2):
2564 case IP_VERSION(6, 0, 3):
2565 adev->lsdma.funcs = &lsdma_v6_0_funcs;
2566 break;
2567 default:
2568 break;
2569 }
2570
2571 r = amdgpu_discovery_set_common_ip_blocks(adev);
2572 if (r)
2573 return r;
2574
2575 r = amdgpu_discovery_set_gmc_ip_blocks(adev);
2576 if (r)
2577 return r;
2578
2579 /* For SR-IOV, PSP needs to be initialized before IH */
2580 if (amdgpu_sriov_vf(adev)) {
2581 r = amdgpu_discovery_set_psp_ip_blocks(adev);
2582 if (r)
2583 return r;
2584 r = amdgpu_discovery_set_ih_ip_blocks(adev);
2585 if (r)
2586 return r;
2587 } else {
2588 r = amdgpu_discovery_set_ih_ip_blocks(adev);
2589 if (r)
2590 return r;
2591
2592 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2593 r = amdgpu_discovery_set_psp_ip_blocks(adev);
2594 if (r)
2595 return r;
2596 }
2597 }
2598
2599 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2600 r = amdgpu_discovery_set_smu_ip_blocks(adev);
2601 if (r)
2602 return r;
2603 }
2604
2605 r = amdgpu_discovery_set_display_ip_blocks(adev);
2606 if (r)
2607 return r;
2608
2609 r = amdgpu_discovery_set_gc_ip_blocks(adev);
2610 if (r)
2611 return r;
2612
2613 r = amdgpu_discovery_set_sdma_ip_blocks(adev);
2614 if (r)
2615 return r;
2616
2617 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
2618 !amdgpu_sriov_vf(adev)) ||
2619 (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
2620 r = amdgpu_discovery_set_smu_ip_blocks(adev);
2621 if (r)
2622 return r;
2623 }
2624
2625 r = amdgpu_discovery_set_mm_ip_blocks(adev);
2626 if (r)
2627 return r;
2628
2629 r = amdgpu_discovery_set_mes_ip_blocks(adev);
2630 if (r)
2631 return r;
2632
2633 return 0;
2634 }
2635
2636