/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_discovery.h"
#include "soc15_hw_ip.h"
#include "discovery.h"

#include "soc15.h"
#include "gfx_v9_0.h"
#include "gfx_v9_4_3.h"
#include "gmc_v9_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "df_v4_3.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "nbio_v7_9.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "sdma_v4_0.h"
#include "sdma_v4_4_2.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "gmc_v10_0.h"
#include "gmc_v11_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v4_3.h"
#include "nbio_v7_2.h"
#include "nbio_v7_7.h"
#include "hdp_v5_0.h"
#include "hdp_v5_2.h"
#include "hdp_v6_0.h"
#include "nv.h"
#include "soc21.h"
#include "navi10_ih.h"
#include "ih_v6_0.h"
#include "gfx_v10_0.h"
#include "gfx_v11_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "sdma_v6_0.h"
#include "lsdma_v6_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "vcn_v4_0.h"
#include "jpeg_v4_0.h"
#include "vcn_v4_0_3.h"
#include "jpeg_v4_0_3.h"
#include "amdgpu_vkms.h"
#include "mes_v10_1.h"
#include "mes_v11_0.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"
#include "smuio_v13_0.h"
#include "smuio_v13_0_3.h"
#include "smuio_v13_0_6.h"

#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);

#define mmRCC_CONFIG_MEMSIZE	0xde3
#define mmMM_INDEX		0x0
#define mmMM_INDEX_HI		0x6
#define mmMM_DATA		0x1

static const char *hw_id_names[HW_ID_MAX] = {
	[MP1_HWID]		= "MP1",
	[MP2_HWID]		= "MP2",
	[THM_HWID]		= "THM",
	[SMUIO_HWID]		= "SMUIO",
	[FUSE_HWID]		= "FUSE",
	[CLKA_HWID]		= "CLKA",
	[PWR_HWID]		= "PWR",
	[GC_HWID]		= "GC",
	[UVD_HWID]		= "UVD",
	[AUDIO_AZ_HWID]		= "AUDIO_AZ",
	[ACP_HWID]		= "ACP",
	[DCI_HWID]		= "DCI",
	[DMU_HWID]		= "DMU",
	[DCO_HWID]		= "DCO",
	[DIO_HWID]		= "DIO",
	[XDMA_HWID]		= "XDMA",
	[DCEAZ_HWID]		= "DCEAZ",
	[DAZ_HWID]		= "DAZ",
	[SDPMUX_HWID]		= "SDPMUX",
	[NTB_HWID]		= "NTB",
	[IOHC_HWID]		= "IOHC",
	[L2IMU_HWID]		= "L2IMU",
	[VCE_HWID]		= "VCE",
	[MMHUB_HWID]		= "MMHUB",
	[ATHUB_HWID]		= "ATHUB",
	[DBGU_NBIO_HWID]	= "DBGU_NBIO",
	[DFX_HWID]		= "DFX",
	[DBGU0_HWID]		= "DBGU0",
	[DBGU1_HWID]		= "DBGU1",
	[OSSSYS_HWID]		= "OSSSYS",
	[HDP_HWID]		= "HDP",
	[SDMA0_HWID]		= "SDMA0",
	[SDMA1_HWID]		= "SDMA1",
	[SDMA2_HWID]		= "SDMA2",
	[SDMA3_HWID]		= "SDMA3",
	[LSDMA_HWID]		= "LSDMA",
	[ISP_HWID]		= "ISP",
	[DBGU_IO_HWID]		= "DBGU_IO",
	[DF_HWID]		= "DF",
	[CLKB_HWID]		= "CLKB",
	[FCH_HWID]		= "FCH",
	[DFX_DAP_HWID]		= "DFX_DAP",
	[L1IMU_PCIE_HWID]	= "L1IMU_PCIE",
	[L1IMU_NBIF_HWID]	= "L1IMU_NBIF",
	[L1IMU_IOAGR_HWID]	= "L1IMU_IOAGR",
	[L1IMU3_HWID]		= "L1IMU3",
	[L1IMU4_HWID]		= "L1IMU4",
	[L1IMU5_HWID]		= "L1IMU5",
	[L1IMU6_HWID]		= "L1IMU6",
	[L1IMU7_HWID]		= "L1IMU7",
	[L1IMU8_HWID]		= "L1IMU8",
	[L1IMU9_HWID]		= "L1IMU9",
	[L1IMU10_HWID]		= "L1IMU10",
	[L1IMU11_HWID]		= "L1IMU11",
	[L1IMU12_HWID]		= "L1IMU12",
	[L1IMU13_HWID]		= "L1IMU13",
	[L1IMU14_HWID]		= "L1IMU14",
	[L1IMU15_HWID]		= "L1IMU15",
	[WAFLC_HWID]		= "WAFLC",
	[FCH_USB_PD_HWID]	= "FCH_USB_PD",
	[PCIE_HWID]		= "PCIE",
	[PCS_HWID]		= "PCS",
	[DDCL_HWID]		= "DDCL",
	[SST_HWID]		= "SST",
	[IOAGR_HWID]		= "IOAGR",
	[NBIF_HWID]		= "NBIF",
	[IOAPIC_HWID]		= "IOAPIC",
	[SYSTEMHUB_HWID]	= "SYSTEMHUB",
	[NTBCCP_HWID]		= "NTBCCP",
	[UMC_HWID]		= "UMC",
	[SATA_HWID]		= "SATA",
	[USB_HWID]		= "USB",
	[CCXSEC_HWID]		= "CCXSEC",
	[XGMI_HWID]		= "XGMI",
	[XGBE_HWID]		= "XGBE",
	[MP0_HWID]		= "MP0",
};

static int hw_id_map[MAX_HWIP] = {
	[GC_HWIP]	= GC_HWID,
	[HDP_HWIP]	= HDP_HWID,
	[SDMA0_HWIP]	= SDMA0_HWID,
	[SDMA1_HWIP]	= SDMA1_HWID,
	[SDMA2_HWIP]    = SDMA2_HWID,
	[SDMA3_HWIP]    = SDMA3_HWID,
	[LSDMA_HWIP]    = LSDMA_HWID,
	[MMHUB_HWIP]	= MMHUB_HWID,
	[ATHUB_HWIP]	= ATHUB_HWID,
	[NBIO_HWIP]	= NBIF_HWID,
	[MP0_HWIP]	= MP0_HWID,
	[MP1_HWIP]	= MP1_HWID,
	[UVD_HWIP]	= UVD_HWID,
	[VCE_HWIP]	= VCE_HWID,
	[DF_HWIP]	= DF_HWID,
	[DCE_HWIP]	= DMU_HWID,
	[OSSSYS_HWIP]	= OSSSYS_HWID,
	[SMUIO_HWIP]	= SMUIO_HWID,
	[PWR_HWIP]	= PWR_HWID,
	[NBIF_HWIP]	= NBIF_HWID,
	[THM_HWIP]	= THM_HWID,
	[CLK_HWIP]	= CLKA_HWID,
	[UMC_HWIP]	= UMC_HWID,
	[XGMI_HWIP]	= XGMI_HWID,
	[DCI_HWIP]	= DCI_HWID,
	[PCIE_HWIP]	= PCIE_HWID,
};

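/*
 * On parts where the discovery data lives in system memory rather than
 * VRAM (e.g. some APUs), the ACPI tables report the reserved TMR region;
 * the binary sits DISCOVERY_TMR_OFFSET bytes before the end of that region.
 */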
static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
{
	u64 tmr_offset, tmr_size, pos;
	void *discv_regn;
	int ret;

	ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
	if (ret)
		return ret;

	pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;

	/* This region is read-only and reserved from system use */
	discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
	if (discv_regn) {
		memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
		memunmap(discv_regn);
		return 0;
	}

	return -ENOENT;
}

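/*
 * Prefer the copy near the top of VRAM; when RCC_CONFIG_MEMSIZE reads zero
 * (no dedicated VRAM), fall back to the system-memory path above.
 */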
static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
						 uint8_t *binary)
{
	uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
	int ret = 0;

	if (vram_size) {
		uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
		amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
					  adev->mman.discovery_tmr_size, false);
	} else {
		ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
	}

	return ret;
}

static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
{
	const struct firmware *fw;
	const char *fw_name;
	int r;

	switch (amdgpu_discovery) {
	case 2:
		fw_name = FIRMWARE_IP_DISCOVERY;
		break;
	default:
		dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
		return -EINVAL;
	}

	r = request_firmware(&fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
	release_firmware(fw);

	return 0;
}

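/* Discovery tables carry a simple 16-bit sum-of-bytes checksum. */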
static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
{
	uint16_t checksum = 0;
	int i;

	for (i = 0; i < size; i++)
		checksum += data[i];

	return checksum;
}

static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
						    uint16_t expected)
{
	return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
}

static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
{
	struct binary_header *bhdr;
	bhdr = (struct binary_header *)binary;

	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
}

static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
{
	/*
	 * So far, apply this quirk only on those Navy Flounder boards which
	 * have a bad harvest table of VCN config.
	 */
	if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
		(adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) {
		switch (adev->pdev->revision) {
		case 0xC1:
		case 0xC2:
		case 0xC3:
		case 0xC5:
		case 0xC7:
		case 0xCF:
		case 0xDF:
			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
			adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
			break;
		default:
			break;
		}
	}
}

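/*
 * Fetch and validate the discovery binary: allocate a TMR-sized buffer,
 * fill it from file or from memory, then check the binary signature, the
 * whole-binary checksum, and each table's signature/ID and checksum before
 * the rest of the driver consumes the data.
 */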
static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
	struct table_info *info;
	struct binary_header *bhdr;
	uint16_t offset;
	uint16_t size;
	uint16_t checksum;
	int r;

	adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
	adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
	if (!adev->mman.discovery_bin)
		return -ENOMEM;

	/* Read from file if it is the preferred option */
	if (amdgpu_discovery == 2) {
		dev_info(adev->dev, "use ip discovery information from file");
		r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);

		if (r) {
			dev_err(adev->dev, "failed to read ip discovery binary from file\n");
			r = -EINVAL;
			goto out;
		}

	} else {
		r = amdgpu_discovery_read_binary_from_mem(
			adev, adev->mman.discovery_bin);
		if (r)
			goto out;
	}

	/* check the ip discovery binary signature */
	if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
		dev_err(adev->dev,
			"get invalid ip discovery binary signature\n");
		r = -EINVAL;
		goto out;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;

	offset = offsetof(struct binary_header, binary_checksum) +
		sizeof(bhdr->binary_checksum);
	size = le16_to_cpu(bhdr->binary_size) - offset;
	checksum = le16_to_cpu(bhdr->binary_checksum);

	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
					      size, checksum)) {
		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
		r = -EINVAL;
		goto out;
	}

	info = &bhdr->table_list[IP_DISCOVERY];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct ip_discovery_header *ihdr =
			(struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery data table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le16_to_cpu(ihdr->size), checksum)) {
			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[GC];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct gpu_info_header *ghdr =
			(struct gpu_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery gc table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(ghdr->size), checksum)) {
			dev_err(adev->dev, "invalid gc data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[HARVEST_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct harvest_info_header *hhdr =
			(struct harvest_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      sizeof(struct harvest_table), checksum)) {
			dev_err(adev->dev, "invalid harvest data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[VCN_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct vcn_info_header *vhdr =
			(struct vcn_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(vhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid vcn data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[MALL_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct mall_info_header *mhdr =
			(struct mall_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery mall table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(mhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid mall data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	return 0;

out:
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;

	return r;
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);

void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
	amdgpu_discovery_sysfs_fini(adev);
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;
}

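/* Reject IP entries whose instance number or hw_id is out of range before
 * anything indexes arrays with them.
 */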
static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
{
	if (ip->instance_number >= HWIP_MAX_INSTANCE) {
		DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
			  ip->instance_number);
		return -EINVAL;
	}
	if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
		DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
			  le16_to_cpu(ip->hw_id));
		return -EINVAL;
	}

	return 0;
}

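/*
 * Navi1x and older parts have no dedicated harvest table, so harvesting is
 * recovered by walking every die's IP list and checking the per-IP harvest
 * bit (the variant field) of the VCN and display (DMU) blocks.
 */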
static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
						uint32_t *vcn_harvest_count)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip_v4 *ip;
	uint16_t die_offset, ip_offset, num_dies, num_ips;
	int i, j;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	/* scan harvest bit of all IP data structures */
	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);

			if (amdgpu_discovery_validate_ip(ip))
				goto next_ip;

			if (le16_to_cpu(ip->variant) == 1) {
				switch (le16_to_cpu(ip->hw_id)) {
				case VCN_HWID:
					(*vcn_harvest_count)++;
					if (ip->instance_number == 0) {
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
						adev->vcn.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN0;
						adev->jpeg.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN0;
					} else {
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
						adev->vcn.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN1;
						adev->jpeg.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN1;
					}
					break;
				case DMU_HWID:
					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
					break;
				default:
					break;
				}
			}
next_ip:
			if (ihdr->base_addr_64_bit)
				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}
}

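/*
 * Newer ASICs carry a dedicated harvest table: a zero hw_id terminates the
 * list, and each entry names one harvested instance of VCN, DMU, UMC, GC
 * or SDMA.
 */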
static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count,
						     uint32_t *umc_harvest_count)
{
	struct binary_header *bhdr;
	struct harvest_table *harvest_info;
	u16 offset;
	int i;
	uint32_t umc_harvest_config = 0;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);

	if (!offset) {
		dev_err(adev->dev, "invalid harvest table offset\n");
		return;
	}

	harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);

	for (i = 0; i < 32; i++) {
		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
			break;

		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
		case VCN_HWID:
			(*vcn_harvest_count)++;
			adev->vcn.harvest_config |=
				(1 << harvest_info->list[i].number_instance);
			adev->jpeg.harvest_config |=
				(1 << harvest_info->list[i].number_instance);

			adev->vcn.inst_mask &=
				~(1U << harvest_info->list[i].number_instance);
			adev->jpeg.inst_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		case DMU_HWID:
			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
			break;
		case UMC_HWID:
			umc_harvest_config |=
				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
			(*umc_harvest_count)++;
			break;
		case GC_HWID:
			adev->gfx.xcc_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		case SDMA0_HWID:
			adev->sdma.sdma_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		default:
			break;
		}
	}

	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
				~umc_harvest_config;
}

/* ================================================== */

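/*
 * The structures below back a sysfs tree of this shape:
 *
 *   ip_discovery/
 *     die/
 *       <die>/            die number
 *         <hw_id>/        numeric hw_id, plus a named symlink (e.g. "GC")
 *           <instance>/   instance number, holding the attributes
 *             hw_id, num_instance, major, minor, revision,
 *             harvest, num_base_addresses, base_addr
 */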
struct ip_hw_instance {
	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */

	int hw_id;
	u8  num_instance;
	u8  major, minor, revision;
	u8  harvest;

	int num_base_addresses;
	u32 base_addr[];
};

struct ip_hw_id {
	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
	int hw_id;
};

struct ip_die_entry {
	struct kset ip_kset;     /* ip_discovery/die/#die/, contains ip_hw_id  */
	u16 num_ips;
};

/* -------------------------------------------------- */

struct ip_hw_instance_attr {
	struct attribute attr;
	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
};

static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
}

static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
}

static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
}

static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
}

static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
}

static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
}

static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
}

static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	ssize_t res, at;
	int ii;

	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
		/* Here we satisfy the condition that at + size <= PAGE_SIZE.
		 */
		if (at + 12 > PAGE_SIZE)
			break;
		res = sysfs_emit_at(buf, at, "0x%08X\n",
				    ip_hw_instance->base_addr[ii]);
		if (res <= 0)
			break;
		at += res;
	}

	return res < 0 ? res : at;
}

static struct ip_hw_instance_attr ip_hw_attr[] = {
	__ATTR_RO(hw_id),
	__ATTR_RO(num_instance),
	__ATTR_RO(major),
	__ATTR_RO(minor),
	__ATTR_RO(revision),
	__ATTR_RO(harvest),
	__ATTR_RO(num_base_addresses),
	__ATTR_RO(base_addr),
};

static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
ATTRIBUTE_GROUPS(ip_hw_instance);

#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)

static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
					struct attribute *attr,
					char *buf)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);

	if (!ip_hw_attr->show)
		return -EIO;

	return ip_hw_attr->show(ip_hw_instance, buf);
}

static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
	.show = ip_hw_instance_attr_show,
};

static void ip_hw_instance_release(struct kobject *kobj)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);

	kfree(ip_hw_instance);
}

static const struct kobj_type ip_hw_instance_ktype = {
	.release = ip_hw_instance_release,
	.sysfs_ops = &ip_hw_instance_sysfs_ops,
	.default_groups = ip_hw_instance_groups,
};

/* -------------------------------------------------- */

#define to_ip_hw_id(x)  container_of(to_kset(x), struct ip_hw_id, hw_id_kset)

static void ip_hw_id_release(struct kobject *kobj)
{
	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);

	if (!list_empty(&ip_hw_id->hw_id_kset.list))
		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
	kfree(ip_hw_id);
}

static const struct kobj_type ip_hw_id_ktype = {
	.release = ip_hw_id_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

/* -------------------------------------------------- */

static void die_kobj_release(struct kobject *kobj);
static void ip_disc_release(struct kobject *kobj);

struct ip_die_entry_attribute {
	struct attribute attr;
	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
};

#define to_ip_die_entry_attr(x)  container_of(x, struct ip_die_entry_attribute, attr)

static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
}

/* If there are more ip_die_entry attrs, other than the number of IPs,
 * we can make this into an array of attrs, and then initialize
 * ip_die_entry_attrs in a loop.
 */
static struct ip_die_entry_attribute num_ips_attr =
	__ATTR_RO(num_ips);

static struct attribute *ip_die_entry_attrs[] = {
	&num_ips_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */

#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)

static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
				      struct attribute *attr,
				      char *buf)
{
	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!ip_die_entry_attr->show)
		return -EIO;

	return ip_die_entry_attr->show(ip_die_entry, buf);
}

static void ip_die_entry_release(struct kobject *kobj)
{
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!list_empty(&ip_die_entry->ip_kset.list))
		DRM_ERROR("ip_die_entry->ip_kset is not empty");
	kfree(ip_die_entry);
}

static const struct sysfs_ops ip_die_entry_sysfs_ops = {
	.show = ip_die_entry_attr_show,
};

static const struct kobj_type ip_die_entry_ktype = {
	.release = ip_die_entry_release,
	.sysfs_ops = &ip_die_entry_sysfs_ops,
	.default_groups = ip_die_entry_groups,
};

static const struct kobj_type die_kobj_ktype = {
	.release = die_kobj_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static const struct kobj_type ip_discovery_ktype = {
	.release = ip_disc_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

struct ip_discovery_top {
	struct kobject kobj;    /* ip_discovery/ */
	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
	struct amdgpu_device *adev;
};

static void die_kobj_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
						       struct ip_discovery_top,
						       die_kset);
	if (!list_empty(&ip_top->die_kset.list))
		DRM_ERROR("ip_top->die_kset is not empty");
}

static void ip_disc_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
						       kobj);
	struct amdgpu_device *adev = ip_top->adev;

	adev->ip_top = NULL;
	kfree(ip_top);
}

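/* Map the harvest state collected during harvest parsing back to a single
 * flag, as exposed through the per-instance sysfs "harvest" attribute.
 */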
static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
						 uint16_t hw_id, uint8_t inst)
{
	uint8_t harvest = 0;

	/* Until a uniform way is figured out, get mask based on hwid */
	switch (hw_id) {
	case VCN_HWID:
		harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
		break;
	case DMU_HWID:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
			harvest = 0x1;
		break;
	case UMC_HWID:
		/* TODO: It needs another parsing; for now, ignore.*/
		break;
	case GC_HWID:
		harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
		break;
	case SDMA0_HWID:
		harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
		break;
	default:
		break;
	}

	return harvest;
}

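/* Build the #hw_id/#instance levels of the tree: for each possible hw_id,
 * rescan the die's IP list; the first matching instance lazily creates the
 * #hw_id kset (and its named symlink), and every match adds one instance
 * kobject beneath it.
 */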
static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
				      struct ip_die_entry *ip_die_entry,
				      const size_t _ip_offset, const int num_ips,
				      bool reg_base_64)
{
	int ii, jj, kk, res;

	DRM_DEBUG("num_ips:%d", num_ips);

	/* Find all IPs of a given HW ID, and add their instance to
	 * #die/#hw_id/#instance/<attributes>
	 */
	for (ii = 0; ii < HW_ID_MAX; ii++) {
		struct ip_hw_id *ip_hw_id = NULL;
		size_t ip_offset = _ip_offset;

		for (jj = 0; jj < num_ips; jj++) {
			struct ip_v4 *ip;
			struct ip_hw_instance *ip_hw_instance;

			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
			if (amdgpu_discovery_validate_ip(ip) ||
			    le16_to_cpu(ip->hw_id) != ii)
				goto next_ip;

			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);

			/* We have a hw_id match; register the hw
			 * block if not yet registered.
			 */
			if (!ip_hw_id) {
				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
				if (!ip_hw_id)
					return -ENOMEM;
				ip_hw_id->hw_id = ii;

				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
				res = kset_register(&ip_hw_id->hw_id_kset);
				if (res) {
					DRM_ERROR("Couldn't register ip_hw_id kset");
					kfree(ip_hw_id);
					return res;
				}
				if (hw_id_names[ii]) {
					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
								&ip_hw_id->hw_id_kset.kobj,
								hw_id_names[ii]);
					if (res) {
						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
							  hw_id_names[ii],
							  kobject_name(&ip_die_entry->ip_kset.kobj));
					}
				}
			}

			/* Now register its instance.
			 */
			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
							     base_addr,
							     ip->num_base_address),
						 GFP_KERNEL);
			if (!ip_hw_instance) {
				DRM_ERROR("no memory for ip_hw_instance");
				return -ENOMEM;
			}
			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
			ip_hw_instance->num_instance = ip->instance_number;
			ip_hw_instance->major = ip->major;
			ip_hw_instance->minor = ip->minor;
			ip_hw_instance->revision = ip->revision;
			ip_hw_instance->harvest =
				amdgpu_discovery_get_harvest_info(
					adev, ip_hw_instance->hw_id,
					ip_hw_instance->num_instance);
			ip_hw_instance->num_base_addresses = ip->num_base_address;

			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
				if (reg_base_64)
					ip_hw_instance->base_addr[kk] =
						lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
				else
					ip_hw_instance->base_addr[kk] = ip->base_address[kk];
			}

			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
			res = kobject_add(&ip_hw_instance->kobj, NULL,
					  "%d", ip_hw_instance->num_instance);
next_ip:
			if (reg_base_64)
				ip_offset += struct_size(ip, base_address_64,
							 ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address,
							 ip->num_base_address);
		}
	}

	return 0;
}

static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct kset *die_kset = &adev->ip_top->die_kset;
	u16 num_dies, die_offset, num_ips;
	size_t ip_offset;
	int ii, res;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
					      le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (ii = 0; ii < num_dies; ii++) {
		struct ip_die_entry *ip_die_entry;

		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		/* Add the die to the kset.
		 *
		 * dhdr->die_id == ii, which was checked in
		 * amdgpu_discovery_reg_base_init().
		 */

		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
		if (!ip_die_entry)
			return -ENOMEM;

		ip_die_entry->num_ips = num_ips;

		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
		ip_die_entry->ip_kset.kobj.kset = die_kset;
		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
		res = kset_register(&ip_die_entry->ip_kset);
		if (res) {
			DRM_ERROR("Couldn't register ip_die_entry kset");
			kfree(ip_die_entry);
			return res;
		}

		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
	}

	return 0;
}

static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
	struct kset *die_kset;
	int res, ii;

	if (!adev->mman.discovery_bin)
		return -EINVAL;

	adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
	if (!adev->ip_top)
		return -ENOMEM;

	adev->ip_top->adev = adev;

	res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
				   &adev->dev->kobj, "ip_discovery");
	if (res) {
		DRM_ERROR("Couldn't init and add ip_discovery/");
		goto Err;
	}

	die_kset = &adev->ip_top->die_kset;
	kobject_set_name(&die_kset->kobj, "%s", "die");
	die_kset->kobj.parent = &adev->ip_top->kobj;
	die_kset->kobj.ktype = &die_kobj_ktype;
	res = kset_register(&adev->ip_top->die_kset);
	if (res) {
		DRM_ERROR("Couldn't register die_kset");
		goto Err;
	}

	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
	ip_hw_instance_attrs[ii] = NULL;

	res = amdgpu_discovery_sysfs_recurse(adev);

	return res;
Err:
	kobject_put(&adev->ip_top->kobj);
	return res;
}

/* -------------------------------------------------- */

#define list_to_kobj(el) container_of(el, struct kobject, entry)

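/* Tear-down helpers.  Entries are popped off each kset list with the list
 * lock dropped around the kobject_put() calls, since the final put may
 * sleep while releasing the object and must not run under a spinlock.
 */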
static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
{
	struct list_head *el, *tmp;
	struct kset *hw_id_kset;

	hw_id_kset = &ip_hw_id->hw_id_kset;
	spin_lock(&hw_id_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
		list_del_init(el);
		spin_unlock(&hw_id_kset->list_lock);
		/* kobject is embedded in ip_hw_instance */
		kobject_put(list_to_kobj(el));
		spin_lock(&hw_id_kset->list_lock);
	}
	spin_unlock(&hw_id_kset->list_lock);
	kobject_put(&ip_hw_id->hw_id_kset.kobj);
}

static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
{
	struct list_head *el, *tmp;
	struct kset *ip_kset;

	ip_kset = &ip_die_entry->ip_kset;
	spin_lock(&ip_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
		list_del_init(el);
		spin_unlock(&ip_kset->list_lock);
		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
		spin_lock(&ip_kset->list_lock);
	}
	spin_unlock(&ip_kset->list_lock);
	kobject_put(&ip_die_entry->ip_kset.kobj);
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
	struct list_head *el, *tmp;
	struct kset *die_kset;

	die_kset = &adev->ip_top->die_kset;
	spin_lock(&die_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &die_kset->list) {
		list_del_init(el);
		spin_unlock(&die_kset->list_lock);
		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
		spin_lock(&die_kset->list_lock);
	}
	spin_unlock(&die_kset->list_lock);
	kobject_put(&adev->ip_top->die_kset.kobj);
	kobject_put(&adev->ip_top->kobj);
}

/* ================================================== */

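/*
 * Parse the IP discovery table proper: for every die, walk its IP list,
 * count VCN/SDMA/UMC/GC instances, convert base addresses to CPU byte
 * order in place, and point adev->reg_offset[] / adev->ip_versions[] at
 * the result.
 */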
static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip_v4 *ip;
	uint16_t die_offset;
	uint16_t ip_offset;
	uint16_t num_dies;
	uint16_t num_ips;
	uint8_t num_base_address;
	int hw_ip;
	int i, j, k;
	int r;

	r = amdgpu_discovery_init(adev);
	if (r) {
		DRM_ERROR("amdgpu_discovery_init failed\n");
		return r;
	}

	adev->gfx.xcc_mask = 0;
	adev->sdma.sdma_mask = 0;
	adev->vcn.inst_mask = 0;
	adev->jpeg.inst_mask = 0;
	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		if (le16_to_cpu(dhdr->die_id) != i) {
			DRM_ERROR("invalid die id %d, expected %d\n",
					le16_to_cpu(dhdr->die_id), i);
			return -EINVAL;
		}

		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
				le16_to_cpu(dhdr->die_id), num_ips);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);

			if (amdgpu_discovery_validate_ip(ip))
				goto next_ip;

			num_base_address = ip->num_base_address;

			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
				  hw_id_names[le16_to_cpu(ip->hw_id)],
				  le16_to_cpu(ip->hw_id),
				  ip->instance_number,
				  ip->major, ip->minor,
				  ip->revision);

			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
				/* Bit [5:0]: original revision value
				 * Bit [7:6]: en/decode capability:
				 *     0b00 : VCN function normally
				 *     0b10 : encode is disabled
				 *     0b01 : decode is disabled
				 */
				adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
					ip->revision & 0xc0;
				ip->revision &= ~0xc0;
				if (adev->vcn.num_vcn_inst <
				    AMDGPU_MAX_VCN_INSTANCES) {
					adev->vcn.num_vcn_inst++;
					adev->vcn.inst_mask |=
						(1U << ip->instance_number);
					adev->jpeg.inst_mask |=
						(1U << ip->instance_number);
				} else {
					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
						adev->vcn.num_vcn_inst + 1,
						AMDGPU_MAX_VCN_INSTANCES);
				}
			}
			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
				if (adev->sdma.num_instances <
				    AMDGPU_MAX_SDMA_INSTANCES) {
					adev->sdma.num_instances++;
					adev->sdma.sdma_mask |=
						(1U << ip->instance_number);
				} else {
					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
						adev->sdma.num_instances + 1,
						AMDGPU_MAX_SDMA_INSTANCES);
				}
			}

			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
				adev->gmc.num_umc++;
				adev->umc.node_inst_num++;
			}

			if (le16_to_cpu(ip->hw_id) == GC_HWID)
				adev->gfx.xcc_mask |=
					(1U << ip->instance_number);

			for (k = 0; k < num_base_address; k++) {
				/*
				 * convert the endianness of base addresses in place,
				 * so that we don't need to convert them when accessing adev->reg_offset.
				 */
				if (ihdr->base_addr_64_bit)
					/* Truncate the 64bit base address from ip discovery
					 * and only store lower 32bit ip base in reg_offset[].
					 * Bits > 32 follows ASIC specific format, thus just
					 * discard them and handle it within specific ASIC.
					 * By this way reg_offset[] and related helpers can
					 * stay unchanged.
					 * The base address is in dwords, thus clear the
					 * highest 2 bits to store.
					 */
					ip->base_address[k] =
						lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
				else
					ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
			}

			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
				    hw_id_map[hw_ip] != 0) {
					DRM_DEBUG("set register base offset for %s\n",
							hw_id_names[le16_to_cpu(ip->hw_id)]);
					adev->reg_offset[hw_ip][ip->instance_number] =
						ip->base_address;
					/* Instance support is somewhat inconsistent.
					 * SDMA is a good example.  Sienna cichlid has 4 total
					 * SDMA instances, each enumerated separately (HWIDs
					 * 42, 43, 68, 69).  Arcturus has 8 total SDMA instances,
					 * but they are enumerated as multiple instances of the
					 * same HWIDs (4x HWID 42, 4x HWID 43).  UMC is another
					 * example.  On most chips there are multiple instances
					 * with the same HWID.
					 */
					adev->ip_versions[hw_ip][ip->instance_number] =
						IP_VERSION(ip->major, ip->minor, ip->revision);
				}
			}

next_ip:
			if (ihdr->base_addr_64_bit)
				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	return 0;
}

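/* Select the harvest parsing method for this ASIC and fold the result into
 * the device's harvest masks.
 */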
static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
	int vcn_harvest_count = 0;
	int umc_harvest_count = 0;

	/*
	 * Harvest table does not fit Navi1x and legacy GPUs,
	 * so read harvest bit per IP data structure to set
	 * harvest configuration.
	 */
	if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0) &&
	    adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3)) {
		if ((adev->pdev->device == 0x731E &&
			(adev->pdev->revision == 0xC6 ||
			 adev->pdev->revision == 0xC7)) ||
			(adev->pdev->device == 0x7340 &&
			 adev->pdev->revision == 0xC9) ||
			(adev->pdev->device == 0x7360 &&
			 adev->pdev->revision == 0xC7))
			amdgpu_discovery_read_harvest_bit_per_ip(adev,
				&vcn_harvest_count);
	} else {
		amdgpu_discovery_read_from_harvest_table(adev,
							 &vcn_harvest_count,
							 &umc_harvest_count);
	}

	amdgpu_discovery_harvest_config_quirk(adev);

	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
	}

	if (umc_harvest_count < adev->gmc.num_umc) {
		adev->gmc.num_umc -= umc_harvest_count;
	}
}

union gc_info {
	struct gc_info_v1_0 v1;
	struct gc_info_v1_1 v1_1;
	struct gc_info_v1_2 v1_2;
	struct gc_info_v2_0 v2;
};

static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union gc_info *gc_info;
	u16 offset;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[GC].offset);

	if (!offset)
		return 0;

	gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
	case 1:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
			le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
		if (gc_info->v1.header.version_minor >= 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
		}
		if (gc_info->v1.header.version_minor >= 2) {
			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
		}
		break;
	case 2:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
			le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
		break;
	default:
		dev_err(adev->dev,
			"Unhandled GC info table %d.%d\n",
			le16_to_cpu(gc_info->v1.header.version_major),
			le16_to_cpu(gc_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union mall_info {
	struct mall_info_v1_0 v1;
};

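/* Accumulate the total MALL size per UMC: entries flagged in m_s_present
 * count double, entries flagged in m_half_use count half.
 */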
static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union mall_info *mall_info;
	u32 u, mall_size_per_umc, m_s_present, half_use;
	u64 mall_size;
	u16 offset;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);

	if (!offset)
		return 0;

	mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
	case 1:
		mall_size = 0;
		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
		half_use = le32_to_cpu(mall_info->v1.m_half_use);
		for (u = 0; u < adev->gmc.num_umc; u++) {
			if (m_s_present & (1 << u))
				mall_size += mall_size_per_umc * 2;
			else if (half_use & (1 << u))
				mall_size += mall_size_per_umc / 2;
			else
				mall_size += mall_size_per_umc;
		}
		adev->gmc.mall_size = mall_size;
		adev->gmc.m_half_use = half_use;
		break;
	default:
		dev_err(adev->dev,
			"Unhandled MALL info table %d.%d\n",
			le16_to_cpu(mall_info->v1.header.version_major),
			le16_to_cpu(mall_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union vcn_info {
	struct vcn_info_v1_0 v1;
};

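/* Read the per-instance fuse data from the VCN info table; it encodes
 * which en/decode capabilities are disabled on each VCN instance.
 */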
static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union vcn_info *vcn_info;
	u16 offset;
	int v;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
	 * but that may change in the future with new GPUs so keep this
	 * check for defensive purposes.
	 */
	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
		dev_err(adev->dev, "invalid vcn instances\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);

	if (!offset)
		return 0;

	vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
	case 1:
		/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
		 * so this won't overflow.
		 */
		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
			adev->vcn.vcn_codec_disable_mask[v] =
				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
		}
		break;
	default:
		dev_err(adev->dev,
			"Unhandled VCN info table %d.%d\n",
			le16_to_cpu(vcn_info->v1.header.version_major),
			le16_to_cpu(vcn_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

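/*
 * The helpers below each register one category of IP block, keyed off the
 * IP versions parsed above.  Where a block has no discoverable version of
 * its own, a related IP (usually GC) stands in as the selector.
 */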
1584 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
1585 {
1586 	/* what IP to use for this? */
1587 	switch (adev->ip_versions[GC_HWIP][0]) {
1588 	case IP_VERSION(9, 0, 1):
1589 	case IP_VERSION(9, 1, 0):
1590 	case IP_VERSION(9, 2, 1):
1591 	case IP_VERSION(9, 2, 2):
1592 	case IP_VERSION(9, 3, 0):
1593 	case IP_VERSION(9, 4, 0):
1594 	case IP_VERSION(9, 4, 1):
1595 	case IP_VERSION(9, 4, 2):
1596 	case IP_VERSION(9, 4, 3):
1597 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
1598 		break;
1599 	case IP_VERSION(10, 1, 10):
1600 	case IP_VERSION(10, 1, 1):
1601 	case IP_VERSION(10, 1, 2):
1602 	case IP_VERSION(10, 1, 3):
1603 	case IP_VERSION(10, 1, 4):
1604 	case IP_VERSION(10, 3, 0):
1605 	case IP_VERSION(10, 3, 1):
1606 	case IP_VERSION(10, 3, 2):
1607 	case IP_VERSION(10, 3, 3):
1608 	case IP_VERSION(10, 3, 4):
1609 	case IP_VERSION(10, 3, 5):
1610 	case IP_VERSION(10, 3, 6):
1611 	case IP_VERSION(10, 3, 7):
1612 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
1613 		break;
1614 	case IP_VERSION(11, 0, 0):
1615 	case IP_VERSION(11, 0, 1):
1616 	case IP_VERSION(11, 0, 2):
1617 	case IP_VERSION(11, 0, 3):
1618 	case IP_VERSION(11, 0, 4):
1619 		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
1620 		break;
1621 	default:
1622 		dev_err(adev->dev,
1623 			"Failed to add common ip block(GC_HWIP:0x%x)\n",
1624 			adev->ip_versions[GC_HWIP][0]);
1625 		return -EINVAL;
1626 	}
1627 	return 0;
1628 }
1629 
1630 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
1631 {
1632 	/* use GC or MMHUB IP version */
1633 	switch (adev->ip_versions[GC_HWIP][0]) {
1634 	case IP_VERSION(9, 0, 1):
1635 	case IP_VERSION(9, 1, 0):
1636 	case IP_VERSION(9, 2, 1):
1637 	case IP_VERSION(9, 2, 2):
1638 	case IP_VERSION(9, 3, 0):
1639 	case IP_VERSION(9, 4, 0):
1640 	case IP_VERSION(9, 4, 1):
1641 	case IP_VERSION(9, 4, 2):
1642 	case IP_VERSION(9, 4, 3):
1643 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
1644 		break;
1645 	case IP_VERSION(10, 1, 10):
1646 	case IP_VERSION(10, 1, 1):
1647 	case IP_VERSION(10, 1, 2):
1648 	case IP_VERSION(10, 1, 3):
1649 	case IP_VERSION(10, 1, 4):
1650 	case IP_VERSION(10, 3, 0):
1651 	case IP_VERSION(10, 3, 1):
1652 	case IP_VERSION(10, 3, 2):
1653 	case IP_VERSION(10, 3, 3):
1654 	case IP_VERSION(10, 3, 4):
1655 	case IP_VERSION(10, 3, 5):
1656 	case IP_VERSION(10, 3, 6):
1657 	case IP_VERSION(10, 3, 7):
1658 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
1659 		break;
1660 	case IP_VERSION(11, 0, 0):
1661 	case IP_VERSION(11, 0, 1):
1662 	case IP_VERSION(11, 0, 2):
1663 	case IP_VERSION(11, 0, 3):
1664 	case IP_VERSION(11, 0, 4):
1665 		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
1666 		break;
1667 	default:
1668 		dev_err(adev->dev,
1669 			"Failed to add gmc ip block(GC_HWIP:0x%x)\n",
1670 			adev->ip_versions[GC_HWIP][0]);
1671 		return -EINVAL;
1672 	}
1673 	return 0;
1674 }
1675 
1676 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
1677 {
1678 	switch (adev->ip_versions[OSSSYS_HWIP][0]) {
1679 	case IP_VERSION(4, 0, 0):
1680 	case IP_VERSION(4, 0, 1):
1681 	case IP_VERSION(4, 1, 0):
1682 	case IP_VERSION(4, 1, 1):
1683 	case IP_VERSION(4, 3, 0):
1684 		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
1685 		break;
1686 	case IP_VERSION(4, 2, 0):
1687 	case IP_VERSION(4, 2, 1):
1688 	case IP_VERSION(4, 4, 0):
1689 	case IP_VERSION(4, 4, 2):
1690 		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
1691 		break;
1692 	case IP_VERSION(5, 0, 0):
1693 	case IP_VERSION(5, 0, 1):
1694 	case IP_VERSION(5, 0, 2):
1695 	case IP_VERSION(5, 0, 3):
1696 	case IP_VERSION(5, 2, 0):
1697 	case IP_VERSION(5, 2, 1):
1698 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
1699 		break;
1700 	case IP_VERSION(6, 0, 0):
1701 	case IP_VERSION(6, 0, 1):
1702 	case IP_VERSION(6, 0, 2):
1703 		amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
1704 		break;
1705 	default:
1706 		dev_err(adev->dev,
1707 			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
1708 			adev->ip_versions[OSSSYS_HWIP][0]);
1709 		return -EINVAL;
1710 	}
1711 	return 0;
1712 }
1713 
static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MP0_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
		break;
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		break;
	case IP_VERSION(11, 0, 8):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
		break;
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(14, 0, 0):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
		break;
	case IP_VERSION(13, 0, 4):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
			adev->ip_versions[MP0_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

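/* Pick the SMU/powerplay block from the MP1 IP version. */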
static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(11, 0, 2):
		if (adev->asic_type == CHIP_ARCTURUS)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		else
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
			adev->ip_versions[MP1_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

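/*
 * Under SR-IOV the host typically owns the real display hardware, so
 * expose a virtual display to the guest via the VKMS IP block instead.
 */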
#if defined(CONFIG_DRM_AMD_DC)
static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
{
	amdgpu_device_set_sriov_virtual_display(adev);
	amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
}
#endif

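/*
 * Pick the display block: VKMS when virtual display is requested,
 * otherwise the DC display manager keyed off the DCE or DCI IP version.
 */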
static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->enable_virtual_display) {
		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
		return 0;
	}

	if (!amdgpu_device_has_dc_support(adev))
		return 0;

#if defined(CONFIG_DRM_AMD_DC)
	if (adev->ip_versions[DCE_HWIP][0]) {
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
			if (amdgpu_sriov_vf(adev))
				amdgpu_discovery_set_sriov_display(adev);
			else
				amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
				adev->ip_versions[DCE_HWIP][0]);
			return -EINVAL;
		}
	} else if (adev->ip_versions[DCI_HWIP][0]) {
		switch (adev->ip_versions[DCI_HWIP][0]) {
		case IP_VERSION(12, 0, 0):
		case IP_VERSION(12, 0, 1):
		case IP_VERSION(12, 1, 0):
			if (amdgpu_sriov_vf(adev))
				amdgpu_discovery_set_sriov_display(adev);
			else
				amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
				adev->ip_versions[DCI_HWIP][0]);
			return -EINVAL;
		}
	}
#endif
	return 0;
}

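/* Pick the GFX (graphics engine) block from the GC IP version. */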
static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		break;
	case IP_VERSION(9, 4, 3):
		if (!amdgpu_exp_hw_support)
			return -EINVAL;
		amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add gfx ip block(GC_HWIP:0x%x)\n",
			adev->ip_versions[GC_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

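/* Pick the SDMA (system DMA engine) block from the SDMA0 IP version. */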
static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[SDMA0_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 2):
	case IP_VERSION(4, 4, 0):
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		break;
	case IP_VERSION(4, 4, 2):
		amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 5):
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		break;
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 2):
	case IP_VERSION(5, 2, 4):
	case IP_VERSION(5, 2, 5):
	case IP_VERSION(5, 2, 6):
	case IP_VERSION(5, 2, 3):
	case IP_VERSION(5, 2, 1):
	case IP_VERSION(5, 2, 7):
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
			adev->ip_versions[SDMA0_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

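/*
 * Pick the multimedia blocks: UVD/VCE on ASICs that report a VCE IP
 * version, VCN (plus JPEG where applicable) on everything newer.
 */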
static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->ip_versions[VCE_HWIP][0]) {
		switch (adev->ip_versions[UVD_HWIP][0]) {
		case IP_VERSION(7, 0, 0):
		case IP_VERSION(7, 2, 0):
			/* UVD is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
				adev->ip_versions[UVD_HWIP][0]);
			return -EINVAL;
		}
		switch (adev->ip_versions[VCE_HWIP][0]) {
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 1, 0):
			/* VCE is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
				adev->ip_versions[VCE_HWIP][0]);
			return -EINVAL;
		}
	} else {
		switch (adev->ip_versions[UVD_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 2, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
			break;
		case IP_VERSION(2, 0, 3):
			break;
		case IP_VERSION(2, 5, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
			break;
		case IP_VERSION(2, 6, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
			break;
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 16):
		case IP_VERSION(3, 1, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 0, 2):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
			break;
		case IP_VERSION(3, 0, 33):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			break;
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 0, 2):
		case IP_VERSION(4, 0, 4):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
			break;
		case IP_VERSION(4, 0, 3):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
				adev->ip_versions[UVD_HWIP][0]);
			return -EINVAL;
		}
	}
	return 0;
}

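/*
 * Pick the MES (micro engine scheduler) block: opt-in via the amdgpu_mes
 * module parameter on GC 10.x, always enabled on GC 11.x.
 */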
static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
		if (amdgpu_mes) {
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
			adev->enable_mes = true;
			if (amdgpu_mes_kiq)
				adev->enable_mes_kiq = true;
		}
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
		adev->enable_mes = true;
		adev->enable_mes_kiq = true;
		break;
	default:
		break;
	}
	return 0;
}

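/* Early per-SoC configuration hooks, keyed off the GC IP version. */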
static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 4, 3):
		aqua_vanjaram_init_soc_config(adev);
		break;
	default:
		break;
	}
}

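/*
 * Entry point: fill the per-IP version table (hardcoded for ASICs that
 * predate IP discovery, parsed from the discovery binary otherwise),
 * derive the family/APU flags and low-level function pointers, then
 * register all IP blocks in initialization order.
 */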
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
		break;
	case CHIP_VEGA12:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
		break;
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 1;
		adev->vcn.num_vcn_inst = 1;
		adev->gmc.num_umc = 2;
		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
		} else {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
		}
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 8;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		adev->sdma.num_instances = 8;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 8;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
		break;
	case CHIP_ALDEBARAN:
		aldebaran_reg_base_init(adev);
		adev->sdma.num_instances = 5;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
		break;
	default:
		r = amdgpu_discovery_reg_base_init(adev);
		if (r)
			return -EINVAL;

		amdgpu_discovery_harvest_ip(adev);
		amdgpu_discovery_get_gfx_info(adev);
		amdgpu_discovery_get_mall_info(adev);
		amdgpu_discovery_get_vcn_info(adev);
		break;
	}

	amdgpu_discovery_init_soc_config(adev);
	amdgpu_discovery_sysfs_init(adev);

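	/* set the family based on the GC IP version */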
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
	case IP_VERSION(9, 4, 3):
		adev->family = AMDGPU_FAMILY_AI;
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
		adev->family = AMDGPU_FAMILY_RV;
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
		adev->family = AMDGPU_FAMILY_NV;
		break;
	case IP_VERSION(10, 3, 1):
		adev->family = AMDGPU_FAMILY_VGH;
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case IP_VERSION(10, 3, 3):
		adev->family = AMDGPU_FAMILY_YC;
		break;
	case IP_VERSION(10, 3, 6):
		adev->family = AMDGPU_FAMILY_GC_10_3_6;
		break;
	case IP_VERSION(10, 3, 7):
		adev->family = AMDGPU_FAMILY_GC_10_3_7;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		adev->family = AMDGPU_FAMILY_GC_11_0_0;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->family = AMDGPU_FAMILY_GC_11_0_1;
		break;
	default:
		return -EINVAL;
	}

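	/* these GC IP versions are found in APUs */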
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->flags |= AMD_IS_APU;
		break;
	default:
		break;
	}

	if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0))
		adev->gmc.xgmi.supported = true;

	/* set NBIO version */
	switch (adev->ip_versions[NBIO_HWIP][0]) {
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 2, 0):
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
	case IP_VERSION(2, 5, 0):
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
		break;
	case IP_VERSION(7, 4, 0):
	case IP_VERSION(7, 4, 1):
	case IP_VERSION(7, 4, 4):
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
		break;
	case IP_VERSION(7, 9, 0):
		adev->nbio.funcs = &nbio_v7_9_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
		break;
	case IP_VERSION(7, 2, 0):
	case IP_VERSION(7, 2, 1):
	case IP_VERSION(7, 3, 0):
	case IP_VERSION(7, 5, 0):
	case IP_VERSION(7, 5, 1):
		adev->nbio.funcs = &nbio_v7_2_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
		break;
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 3, 1):
	case IP_VERSION(2, 3, 2):
	case IP_VERSION(3, 3, 0):
	case IP_VERSION(3, 3, 1):
	case IP_VERSION(3, 3, 2):
	case IP_VERSION(3, 3, 3):
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
		break;
	case IP_VERSION(4, 3, 0):
	case IP_VERSION(4, 3, 1):
		if (amdgpu_sriov_vf(adev))
			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
		else
			adev->nbio.funcs = &nbio_v4_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
		break;
	case IP_VERSION(7, 7, 0):
	case IP_VERSION(7, 7, 1):
		adev->nbio.funcs = &nbio_v7_7_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
		break;
	default:
		break;
	}

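	/* set HDP version */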
	switch (adev->ip_versions[HDP_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
	case IP_VERSION(4, 4, 2):
		adev->hdp.funcs = &hdp_v4_0_funcs;
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 0, 4):
	case IP_VERSION(5, 2, 0):
		adev->hdp.funcs = &hdp_v5_0_funcs;
		break;
	case IP_VERSION(5, 2, 1):
		adev->hdp.funcs = &hdp_v5_2_funcs;
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
		adev->hdp.funcs = &hdp_v6_0_funcs;
		break;
	default:
		break;
	}

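	/* set DF version */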
	switch (adev->ip_versions[DF_HWIP][0]) {
	case IP_VERSION(3, 6, 0):
	case IP_VERSION(3, 6, 1):
	case IP_VERSION(3, 6, 2):
		adev->df.funcs = &df_v3_6_funcs;
		break;
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 5, 0):
	case IP_VERSION(3, 5, 1):
	case IP_VERSION(3, 5, 2):
		adev->df.funcs = &df_v1_7_funcs;
		break;
	case IP_VERSION(4, 3, 0):
		adev->df.funcs = &df_v4_3_funcs;
		break;
	default:
		break;
	}

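	/* set SMUIO version */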
	switch (adev->ip_versions[SMUIO_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(10, 0, 2):
		adev->smuio.funcs = &smuio_v9_0_funcs;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
		adev->smuio.funcs = &smuio_v11_0_funcs;
		break;
	case IP_VERSION(11, 0, 6):
	case IP_VERSION(11, 0, 10):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 9):
	case IP_VERSION(13, 0, 10):
		adev->smuio.funcs = &smuio_v11_0_6_funcs;
		break;
	case IP_VERSION(13, 0, 2):
		adev->smuio.funcs = &smuio_v13_0_funcs;
		break;
	case IP_VERSION(13, 0, 3):
		adev->smuio.funcs = &smuio_v13_0_3_funcs;
		if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU)
			adev->flags |= AMD_IS_APU;
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 8):
		adev->smuio.funcs = &smuio_v13_0_6_funcs;
		break;
	default:
		break;
	}

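	/* set LSDMA version */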
	switch (adev->ip_versions[LSDMA_HWIP][0]) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		adev->lsdma.funcs = &lsdma_v6_0_funcs;
		break;
	default:
		break;
	}

	r = amdgpu_discovery_set_common_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
	if (r)
		return r;

	/* For SR-IOV, PSP needs to be initialized before IH */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_discovery_set_psp_ip_blocks(adev);
		if (r)
			return r;
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;
	} else {
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;

		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
			r = amdgpu_discovery_set_psp_ip_blocks(adev);
			if (r)
				return r;
		}
	}

	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_display_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gc_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
	if (r)
		return r;

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
	     !amdgpu_sriov_vf(adev)) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_mm_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_mes_ip_blocks(adev);
	if (r)
		return r;

	return 0;
}