/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_discovery.h"
#include "soc15_hw_ip.h"
#include "discovery.h"

#include "soc15.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "gmc_v10_0.h"
#include "gmc_v11_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v4_3.h"
#include "nbio_v7_2.h"
#include "nbio_v7_7.h"
#include "hdp_v5_0.h"
#include "hdp_v5_2.h"
#include "hdp_v6_0.h"
#include "nv.h"
#include "soc21.h"
#include "navi10_ih.h"
#include "ih_v6_0.h"
#include "gfx_v10_0.h"
#include "gfx_v11_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "sdma_v6_0.h"
#include "lsdma_v6_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "vcn_v4_0.h"
#include "jpeg_v4_0.h"
#include "amdgpu_vkms.h"
#include "mes_v10_1.h"
#include "mes_v11_0.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"
#include "smuio_v13_0.h"
#include "smuio_v13_0_6.h"

#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);

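/*
 * Register offsets are in dwords: RCC_CONFIG_MEMSIZE reports the VRAM size
 * in MB, and MM_INDEX/MM_INDEX_HI/MM_DATA form the indirect MMIO access
 * aperture used elsewhere in the driver.
 */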
#define mmRCC_CONFIG_MEMSIZE	0xde3
#define mmMM_INDEX		0x0
#define mmMM_INDEX_HI		0x6
#define mmMM_DATA		0x1

static const char *hw_id_names[HW_ID_MAX] = {
	[MP1_HWID]		= "MP1",
	[MP2_HWID]		= "MP2",
	[THM_HWID]		= "THM",
	[SMUIO_HWID]		= "SMUIO",
	[FUSE_HWID]		= "FUSE",
	[CLKA_HWID]		= "CLKA",
	[PWR_HWID]		= "PWR",
	[GC_HWID]		= "GC",
	[UVD_HWID]		= "UVD",
	[AUDIO_AZ_HWID]		= "AUDIO_AZ",
	[ACP_HWID]		= "ACP",
	[DCI_HWID]		= "DCI",
	[DMU_HWID]		= "DMU",
	[DCO_HWID]		= "DCO",
	[DIO_HWID]		= "DIO",
	[XDMA_HWID]		= "XDMA",
	[DCEAZ_HWID]		= "DCEAZ",
	[DAZ_HWID]		= "DAZ",
	[SDPMUX_HWID]		= "SDPMUX",
	[NTB_HWID]		= "NTB",
	[IOHC_HWID]		= "IOHC",
	[L2IMU_HWID]		= "L2IMU",
	[VCE_HWID]		= "VCE",
	[MMHUB_HWID]		= "MMHUB",
	[ATHUB_HWID]		= "ATHUB",
	[DBGU_NBIO_HWID]	= "DBGU_NBIO",
	[DFX_HWID]		= "DFX",
	[DBGU0_HWID]		= "DBGU0",
	[DBGU1_HWID]		= "DBGU1",
	[OSSSYS_HWID]		= "OSSSYS",
	[HDP_HWID]		= "HDP",
	[SDMA0_HWID]		= "SDMA0",
	[SDMA1_HWID]		= "SDMA1",
	[SDMA2_HWID]		= "SDMA2",
	[SDMA3_HWID]		= "SDMA3",
	[LSDMA_HWID]		= "LSDMA",
	[ISP_HWID]		= "ISP",
	[DBGU_IO_HWID]		= "DBGU_IO",
	[DF_HWID]		= "DF",
	[CLKB_HWID]		= "CLKB",
	[FCH_HWID]		= "FCH",
	[DFX_DAP_HWID]		= "DFX_DAP",
	[L1IMU_PCIE_HWID]	= "L1IMU_PCIE",
	[L1IMU_NBIF_HWID]	= "L1IMU_NBIF",
	[L1IMU_IOAGR_HWID]	= "L1IMU_IOAGR",
	[L1IMU3_HWID]		= "L1IMU3",
	[L1IMU4_HWID]		= "L1IMU4",
	[L1IMU5_HWID]		= "L1IMU5",
	[L1IMU6_HWID]		= "L1IMU6",
	[L1IMU7_HWID]		= "L1IMU7",
	[L1IMU8_HWID]		= "L1IMU8",
	[L1IMU9_HWID]		= "L1IMU9",
	[L1IMU10_HWID]		= "L1IMU10",
	[L1IMU11_HWID]		= "L1IMU11",
	[L1IMU12_HWID]		= "L1IMU12",
	[L1IMU13_HWID]		= "L1IMU13",
	[L1IMU14_HWID]		= "L1IMU14",
	[L1IMU15_HWID]		= "L1IMU15",
	[WAFLC_HWID]		= "WAFLC",
	[FCH_USB_PD_HWID]	= "FCH_USB_PD",
	[PCIE_HWID]		= "PCIE",
	[PCS_HWID]		= "PCS",
	[DDCL_HWID]		= "DDCL",
	[SST_HWID]		= "SST",
	[IOAGR_HWID]		= "IOAGR",
	[NBIF_HWID]		= "NBIF",
	[IOAPIC_HWID]		= "IOAPIC",
	[SYSTEMHUB_HWID]	= "SYSTEMHUB",
	[NTBCCP_HWID]		= "NTBCCP",
	[UMC_HWID]		= "UMC",
	[SATA_HWID]		= "SATA",
	[USB_HWID]		= "USB",
	[CCXSEC_HWID]		= "CCXSEC",
	[XGMI_HWID]		= "XGMI",
	[XGBE_HWID]		= "XGBE",
	[MP0_HWID]		= "MP0",
};

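/*
 * Map the driver's logical HWIP indices onto the HW_ID values used by the
 * discovery table, so register bases can be looked up per IP block.
 */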
static int hw_id_map[MAX_HWIP] = {
	[GC_HWIP]	= GC_HWID,
	[HDP_HWIP]	= HDP_HWID,
	[SDMA0_HWIP]	= SDMA0_HWID,
	[SDMA1_HWIP]	= SDMA1_HWID,
	[SDMA2_HWIP]	= SDMA2_HWID,
	[SDMA3_HWIP]	= SDMA3_HWID,
	[LSDMA_HWIP]	= LSDMA_HWID,
	[MMHUB_HWIP]	= MMHUB_HWID,
	[ATHUB_HWIP]	= ATHUB_HWID,
	[NBIO_HWIP]	= NBIF_HWID,
	[MP0_HWIP]	= MP0_HWID,
	[MP1_HWIP]	= MP1_HWID,
	[UVD_HWIP]	= UVD_HWID,
	[VCE_HWIP]	= VCE_HWID,
	[DF_HWIP]	= DF_HWID,
	[DCE_HWIP]	= DMU_HWID,
	[OSSSYS_HWIP]	= OSSSYS_HWID,
	[SMUIO_HWIP]	= SMUIO_HWID,
	[PWR_HWIP]	= PWR_HWID,
	[NBIF_HWIP]	= NBIF_HWID,
	[THM_HWIP]	= THM_HWID,
	[CLK_HWIP]	= CLKA_HWID,
	[UMC_HWIP]	= UMC_HWID,
	[XGMI_HWIP]	= XGMI_HWID,
	[DCI_HWIP]	= DCI_HWID,
	[PCIE_HWIP]	= PCIE_HWID,
};

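/*
 * The IP discovery binary is placed by platform firmware in a reserved
 * region (TMR) near the top of VRAM: the VRAM size comes from
 * RCC_CONFIG_MEMSIZE (in MB, hence the << 20), and the table starts
 * DISCOVERY_TMR_OFFSET bytes below the end of VRAM.
 */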
static int amdgpu_discovery_read_binary_from_vram(struct amdgpu_device *adev, uint8_t *binary)
{
	uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
	uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;

	amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
				  adev->mman.discovery_tmr_size, false);
	return 0;
}

static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
{
	const struct firmware *fw;
	const char *fw_name;
	int r;

	switch (amdgpu_discovery) {
	case 2:
		fw_name = FIRMWARE_IP_DISCOVERY;
		break;
	default:
		dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
		return -EINVAL;
	}

	r = request_firmware(&fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
	release_firmware(fw);

	return 0;
}

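/* The discovery checksum is a plain byte-wise sum truncated to 16 bits. */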
static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
{
	uint16_t checksum = 0;
	int i;

	for (i = 0; i < size; i++)
		checksum += data[i];

	return checksum;
}

static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
						    uint16_t expected)
{
	return amdgpu_discovery_calculate_checksum(data, size) == expected;
}

static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
{
	struct binary_header *bhdr;
	bhdr = (struct binary_header *)binary;

	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
}

static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
{
	/*
	 * So far, apply this quirk only on those Navy Flounder boards which
	 * have a bad harvest table of VCN config.
	 */
	if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
		(adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) {
		switch (adev->pdev->revision) {
		case 0xC1:
		case 0xC2:
		case 0xC3:
		case 0xC5:
		case 0xC7:
		case 0xCF:
		case 0xDF:
			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
			break;
		default:
			break;
		}
	}
}

static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
	struct table_info *info;
	struct binary_header *bhdr;
	uint16_t offset;
	uint16_t size;
	uint16_t checksum;
	int r;

	adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
	adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
	if (!adev->mman.discovery_bin)
		return -ENOMEM;

	r = amdgpu_discovery_read_binary_from_vram(adev, adev->mman.discovery_bin);
	if (r) {
		dev_err(adev->dev, "failed to read ip discovery binary from vram\n");
		r = -EINVAL;
		goto out;
	}

	if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin) || amdgpu_discovery == 2) {
		/* ignore the discovery binary from vram if discovery=2 in kernel module parameter */
		if (amdgpu_discovery == 2)
			dev_info(adev->dev, "force read ip discovery binary from file");
		else
			dev_warn(adev->dev, "get invalid ip discovery binary signature from vram\n");

		/* retry read ip discovery binary from file */
		r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
		if (r) {
			dev_err(adev->dev, "failed to read ip discovery binary from file\n");
			r = -EINVAL;
			goto out;
		}
		/* check the ip discovery binary signature */
		if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
			dev_warn(adev->dev, "get invalid ip discovery binary signature from file\n");
			r = -EINVAL;
			goto out;
		}
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;

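	/*
	 * The binary checksum covers every byte after the binary_checksum
	 * field itself, up to binary_size.
	 */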
	offset = offsetof(struct binary_header, binary_checksum) +
		sizeof(bhdr->binary_checksum);
	size = le16_to_cpu(bhdr->binary_size) - offset;
	checksum = le16_to_cpu(bhdr->binary_checksum);

	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
					      size, checksum)) {
		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
		r = -EINVAL;
		goto out;
	}

	info = &bhdr->table_list[IP_DISCOVERY];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct ip_discovery_header *ihdr =
			(struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery data table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le16_to_cpu(ihdr->size), checksum)) {
			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[GC];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct gpu_info_header *ghdr =
			(struct gpu_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery gc table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(ghdr->size), checksum)) {
			dev_err(adev->dev, "invalid gc data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[HARVEST_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct harvest_info_header *hhdr =
			(struct harvest_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      sizeof(struct harvest_table), checksum)) {
			dev_err(adev->dev, "invalid harvest data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[VCN_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct vcn_info_header *vhdr =
			(struct vcn_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(vhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid vcn data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[MALL_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct mall_info_header *mhdr =
			(struct mall_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery mall table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(mhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid mall data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	return 0;

out:
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;

	return r;
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);

void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
	amdgpu_discovery_sysfs_fini(adev);
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;
}

static int amdgpu_discovery_validate_ip(const struct ip *ip)
{
	if (ip->number_instance >= HWIP_MAX_INSTANCE) {
		DRM_ERROR("Unexpected number_instance (%d) from ip discovery blob\n",
			  ip->number_instance);
		return -EINVAL;
	}
	if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
		DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
			  le16_to_cpu(ip->hw_id));
		return -EINVAL;
	}

	return 0;
}

static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
						uint32_t *vcn_harvest_count)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip *ip;
	uint16_t die_offset, ip_offset, num_dies, num_ips;
	int i, j;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	/* scan harvest bit of all IP data structures */
	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);

			if (amdgpu_discovery_validate_ip(ip))
				goto next_ip;

			if (le16_to_cpu(ip->harvest) == 1) {
				switch (le16_to_cpu(ip->hw_id)) {
				case VCN_HWID:
					(*vcn_harvest_count)++;
					if (ip->number_instance == 0)
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
					else
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
					break;
				case DMU_HWID:
					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
					break;
				default:
					break;
				}
			}
next_ip:
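			/*
			 * IP entries are variable length: a fixed header
			 * followed by num_base_address 32-bit base addresses,
			 * hence the struct_size() advance.
			 */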
			ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}
}

static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count,
						     uint32_t *umc_harvest_count)
{
	struct binary_header *bhdr;
	struct harvest_table *harvest_info;
	u16 offset;
	int i;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);

	if (!offset) {
		dev_err(adev->dev, "invalid harvest table offset\n");
		return;
	}

	harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);

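	/*
	 * The harvest table holds up to 32 entries; an entry with hw_id 0
	 * terminates the list.
	 */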
	for (i = 0; i < 32; i++) {
		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
			break;

		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
		case VCN_HWID:
			(*vcn_harvest_count)++;
			if (harvest_info->list[i].number_instance == 0)
				adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
			else
				adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
			break;
		case DMU_HWID:
			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
			break;
		case UMC_HWID:
			(*umc_harvest_count)++;
			break;
		default:
			break;
		}
	}
}

/* ================================================== */

struct ip_hw_instance {
	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */

	int hw_id;
	u8  num_instance;
	u8  major, minor, revision;
	u8  harvest;

	int num_base_addresses;
	u32 base_addr[];
};

struct ip_hw_id {
	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
	int hw_id;
};

struct ip_die_entry {
	struct kset ip_kset;     /* ip_discovery/die/#die/, contains ip_hw_id  */
	u16 num_ips;
};

/* -------------------------------------------------- */

struct ip_hw_instance_attr {
	struct attribute attr;
	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
};

static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
}

static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
}

static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
}

static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
}

static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
}

static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
}

static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
}

static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	ssize_t res, at;
	int ii;

	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
		/* Each line is 11 bytes ("0x%08X\n"); with the terminating
		 * NUL this keeps at + size <= PAGE_SIZE.
		 */
		if (at + 12 > PAGE_SIZE)
			break;
		res = sysfs_emit_at(buf, at, "0x%08X\n",
				    ip_hw_instance->base_addr[ii]);
		if (res <= 0)
			break;
		at += res;
	}

	return res < 0 ? res : at;
}

static struct ip_hw_instance_attr ip_hw_attr[] = {
	__ATTR_RO(hw_id),
	__ATTR_RO(num_instance),
	__ATTR_RO(major),
	__ATTR_RO(minor),
	__ATTR_RO(revision),
	__ATTR_RO(harvest),
	__ATTR_RO(num_base_addresses),
	__ATTR_RO(base_addr),
};

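/*
 * Filled in at init time by amdgpu_discovery_sysfs_init(); the extra slot
 * holds the NULL terminator sysfs expects.
 */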
static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
ATTRIBUTE_GROUPS(ip_hw_instance);

#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)

static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
					struct attribute *attr,
					char *buf)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);

	if (!ip_hw_attr->show)
		return -EIO;

	return ip_hw_attr->show(ip_hw_instance, buf);
}

static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
	.show = ip_hw_instance_attr_show,
};

static void ip_hw_instance_release(struct kobject *kobj)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);

	kfree(ip_hw_instance);
}

static struct kobj_type ip_hw_instance_ktype = {
	.release = ip_hw_instance_release,
	.sysfs_ops = &ip_hw_instance_sysfs_ops,
	.default_groups = ip_hw_instance_groups,
};

/* -------------------------------------------------- */

#define to_ip_hw_id(x)  container_of(to_kset(x), struct ip_hw_id, hw_id_kset)

static void ip_hw_id_release(struct kobject *kobj)
{
	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);

	if (!list_empty(&ip_hw_id->hw_id_kset.list))
		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
	kfree(ip_hw_id);
}

static struct kobj_type ip_hw_id_ktype = {
	.release = ip_hw_id_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

/* -------------------------------------------------- */

static void die_kobj_release(struct kobject *kobj);
static void ip_disc_release(struct kobject *kobj);

struct ip_die_entry_attribute {
	struct attribute attr;
	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
};

#define to_ip_die_entry_attr(x)  container_of(x, struct ip_die_entry_attribute, attr)

static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
}

/* If there are more ip_die_entry attrs, other than the number of IPs,
 * we can make this into an array of attrs, and then initialize
 * ip_die_entry_attrs in a loop.
 */
static struct ip_die_entry_attribute num_ips_attr =
	__ATTR_RO(num_ips);

static struct attribute *ip_die_entry_attrs[] = {
	&num_ips_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */

#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)

static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
				      struct attribute *attr,
				      char *buf)
{
	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!ip_die_entry_attr->show)
		return -EIO;

	return ip_die_entry_attr->show(ip_die_entry, buf);
}

static void ip_die_entry_release(struct kobject *kobj)
{
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!list_empty(&ip_die_entry->ip_kset.list))
		DRM_ERROR("ip_die_entry->ip_kset is not empty");
	kfree(ip_die_entry);
}

static const struct sysfs_ops ip_die_entry_sysfs_ops = {
	.show = ip_die_entry_attr_show,
};

static struct kobj_type ip_die_entry_ktype = {
	.release = ip_die_entry_release,
	.sysfs_ops = &ip_die_entry_sysfs_ops,
	.default_groups = ip_die_entry_groups,
};

static struct kobj_type die_kobj_ktype = {
	.release = die_kobj_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static struct kobj_type ip_discovery_ktype = {
	.release = ip_disc_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

struct ip_discovery_top {
	struct kobject kobj;    /* ip_discovery/ */
	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
	struct amdgpu_device *adev;
};

static void die_kobj_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
						       struct ip_discovery_top,
						       die_kset);
	if (!list_empty(&ip_top->die_kset.list))
		DRM_ERROR("ip_top->die_kset is not empty");
}

static void ip_disc_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
						       kobj);
	struct amdgpu_device *adev = ip_top->adev;

	adev->ip_top = NULL;
	kfree(ip_top);
}

static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
				      struct ip_die_entry *ip_die_entry,
				      const size_t _ip_offset, const int num_ips)
{
	int ii, jj, kk, res;

	DRM_DEBUG("num_ips:%d", num_ips);

	/* Find all IPs of a given HW ID, and add their instance to
	 * #die/#hw_id/#instance/<attributes>
	 */
	for (ii = 0; ii < HW_ID_MAX; ii++) {
		struct ip_hw_id *ip_hw_id = NULL;
		size_t ip_offset = _ip_offset;

		for (jj = 0; jj < num_ips; jj++) {
			struct ip *ip;
			struct ip_hw_instance *ip_hw_instance;

			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
			if (amdgpu_discovery_validate_ip(ip) ||
			    le16_to_cpu(ip->hw_id) != ii)
				goto next_ip;

			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);

			/* We have a hw_id match; register the hw
			 * block if not yet registered.
			 */
			if (!ip_hw_id) {
				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
				if (!ip_hw_id)
					return -ENOMEM;
				ip_hw_id->hw_id = ii;

				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
				res = kset_register(&ip_hw_id->hw_id_kset);
				if (res) {
					DRM_ERROR("Couldn't register ip_hw_id kset");
					kfree(ip_hw_id);
					return res;
				}
				if (hw_id_names[ii]) {
					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
								&ip_hw_id->hw_id_kset.kobj,
								hw_id_names[ii]);
					if (res) {
						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
							  hw_id_names[ii],
							  kobject_name(&ip_die_entry->ip_kset.kobj));
					}
				}
			}

			/* Now register its instance.
			 */
			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
							     base_addr,
							     ip->num_base_address),
						 GFP_KERNEL);
			if (!ip_hw_instance) {
				DRM_ERROR("no memory for ip_hw_instance");
				return -ENOMEM;
			}
			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
			ip_hw_instance->num_instance = ip->number_instance;
			ip_hw_instance->major = ip->major;
			ip_hw_instance->minor = ip->minor;
			ip_hw_instance->revision = ip->revision;
			ip_hw_instance->harvest = ip->harvest;
			ip_hw_instance->num_base_addresses = ip->num_base_address;

			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++)
				ip_hw_instance->base_addr[kk] = ip->base_address[kk];

			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
			res = kobject_add(&ip_hw_instance->kobj, NULL,
					  "%d", ip_hw_instance->num_instance);
next_ip:
			ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	return 0;
}

static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct kset *die_kset = &adev->ip_top->die_kset;
	u16 num_dies, die_offset, num_ips;
	size_t ip_offset;
	int ii, res;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
					      le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (ii = 0; ii < num_dies; ii++) {
		struct ip_die_entry *ip_die_entry;

		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		/* Add the die to the kset.
		 *
		 * dhdr->die_id == ii, which was checked in
		 * amdgpu_discovery_reg_base_init().
		 */

		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
		if (!ip_die_entry)
			return -ENOMEM;

		ip_die_entry->num_ips = num_ips;

		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
		ip_die_entry->ip_kset.kobj.kset = die_kset;
		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
		res = kset_register(&ip_die_entry->ip_kset);
		if (res) {
			DRM_ERROR("Couldn't register ip_die_entry kset");
			kfree(ip_die_entry);
			return res;
		}

		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips);
	}

	return 0;
}

static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
	struct kset *die_kset;
	int res, ii;

	adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
	if (!adev->ip_top)
		return -ENOMEM;

	adev->ip_top->adev = adev;

	res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
				   &adev->dev->kobj, "ip_discovery");
	if (res) {
		DRM_ERROR("Couldn't init and add ip_discovery/");
		goto Err;
	}

	die_kset = &adev->ip_top->die_kset;
	kobject_set_name(&die_kset->kobj, "%s", "die");
	die_kset->kobj.parent = &adev->ip_top->kobj;
	die_kset->kobj.ktype = &die_kobj_ktype;
	res = kset_register(&adev->ip_top->die_kset);
	if (res) {
		DRM_ERROR("Couldn't register die_kset");
		goto Err;
	}

	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
	ip_hw_instance_attrs[ii] = NULL;

	res = amdgpu_discovery_sysfs_recurse(adev);

	return res;
Err:
	kobject_put(&adev->ip_top->kobj);
	return res;
}

/* -------------------------------------------------- */

#define list_to_kobj(el) container_of(el, struct kobject, entry)

static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
{
	struct list_head *el, *tmp;
	struct kset *hw_id_kset;

	hw_id_kset = &ip_hw_id->hw_id_kset;
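	/*
	 * Drop the kset list lock around kobject_put(): the final put tears
	 * down sysfs state and may sleep, so it must not run under the
	 * spinlock.
	 */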
	spin_lock(&hw_id_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
		list_del_init(el);
		spin_unlock(&hw_id_kset->list_lock);
		/* kobject is embedded in ip_hw_instance */
		kobject_put(list_to_kobj(el));
		spin_lock(&hw_id_kset->list_lock);
	}
	spin_unlock(&hw_id_kset->list_lock);
	kobject_put(&ip_hw_id->hw_id_kset.kobj);
}

static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
{
	struct list_head *el, *tmp;
	struct kset *ip_kset;

	ip_kset = &ip_die_entry->ip_kset;
	spin_lock(&ip_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
		list_del_init(el);
		spin_unlock(&ip_kset->list_lock);
		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
		spin_lock(&ip_kset->list_lock);
	}
	spin_unlock(&ip_kset->list_lock);
	kobject_put(&ip_die_entry->ip_kset.kobj);
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
	struct list_head *el, *tmp;
	struct kset *die_kset;

	die_kset = &adev->ip_top->die_kset;
	spin_lock(&die_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &die_kset->list) {
		list_del_init(el);
		spin_unlock(&die_kset->list_lock);
		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
		spin_lock(&die_kset->list_lock);
	}
	spin_unlock(&die_kset->list_lock);
	kobject_put(&adev->ip_top->die_kset.kobj);
	kobject_put(&adev->ip_top->kobj);
}

/* ================================================== */

static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip *ip;
	uint16_t die_offset;
	uint16_t ip_offset;
	uint16_t num_dies;
	uint16_t num_ips;
	uint8_t num_base_address;
	int hw_ip;
	int i, j, k;
	int r;

	r = amdgpu_discovery_init(adev);
	if (r) {
		DRM_ERROR("amdgpu_discovery_init failed\n");
		return r;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		if (le16_to_cpu(dhdr->die_id) != i) {
			DRM_ERROR("invalid die id %d, expected %d\n",
					le16_to_cpu(dhdr->die_id), i);
			return -EINVAL;
		}

		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
				le16_to_cpu(dhdr->die_id), num_ips);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);

			if (amdgpu_discovery_validate_ip(ip))
				goto next_ip;

			num_base_address = ip->num_base_address;

			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
				  hw_id_names[le16_to_cpu(ip->hw_id)],
				  le16_to_cpu(ip->hw_id),
				  ip->number_instance,
				  ip->major, ip->minor,
				  ip->revision);

			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
				/* Bit [5:0]: original revision value
				 * Bit [7:6]: en/decode capability:
				 *     0b00 : VCN function normally
				 *     0b10 : encode is disabled
				 *     0b01 : decode is disabled
				 */
				adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
					ip->revision & 0xc0;
				ip->revision &= ~0xc0;
				if (adev->vcn.num_vcn_inst < AMDGPU_MAX_VCN_INSTANCES)
					adev->vcn.num_vcn_inst++;
				else
					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
						adev->vcn.num_vcn_inst + 1,
						AMDGPU_MAX_VCN_INSTANCES);
			}
			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
				if (adev->sdma.num_instances < AMDGPU_MAX_SDMA_INSTANCES)
					adev->sdma.num_instances++;
				else
					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
						adev->sdma.num_instances + 1,
						AMDGPU_MAX_SDMA_INSTANCES);
			}

			if (le16_to_cpu(ip->hw_id) == UMC_HWID)
				adev->gmc.num_umc++;

			for (k = 0; k < num_base_address; k++) {
				/*
				 * convert the endianness of base addresses in place,
				 * so that we don't need to convert them when accessing adev->reg_offset.
				 */
				ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
			}

			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) {
					DRM_DEBUG("set register base offset for %s\n",
							hw_id_names[le16_to_cpu(ip->hw_id)]);
					adev->reg_offset[hw_ip][ip->number_instance] =
						ip->base_address;
					/* Instance support is somewhat inconsistent.
					 * SDMA is a good example.  Sienna cichlid has 4 total
					 * SDMA instances, each enumerated separately (HWIDs
					 * 42, 43, 68, 69).  Arcturus has 8 total SDMA instances,
					 * but they are enumerated as multiple instances of the
					 * same HWIDs (4x HWID 42, 4x HWID 43).  UMC is another
					 * example.  On most chips there are multiple instances
					 * with the same HWID.
					 */
					adev->ip_versions[hw_ip][ip->number_instance] =
						IP_VERSION(ip->major, ip->minor, ip->revision);
				}
			}

next_ip:
			ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	amdgpu_discovery_sysfs_init(adev);

	return 0;
}

int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance,
				    int *major, int *minor, int *revision)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip *ip;
	uint16_t die_offset;
	uint16_t ip_offset;
	uint16_t num_dies;
	uint16_t num_ips;
	int i, j;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);

			if ((le16_to_cpu(ip->hw_id) == hw_id) && (ip->number_instance == number_instance)) {
				if (major)
					*major = ip->major;
				if (minor)
					*minor = ip->minor;
				if (revision)
					*revision = ip->revision;
				return 0;
			}
			ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	return -EINVAL;
}

static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
	int vcn_harvest_count = 0;
	int umc_harvest_count = 0;

	/*
	 * Harvest table does not fit Navi1x and legacy GPUs,
	 * so read harvest bit per IP data structure to set
	 * harvest configuration.
	 */
	if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0)) {
		if ((adev->pdev->device == 0x731E &&
			(adev->pdev->revision == 0xC6 ||
			 adev->pdev->revision == 0xC7)) ||
			(adev->pdev->device == 0x7340 &&
			 adev->pdev->revision == 0xC9) ||
			(adev->pdev->device == 0x7360 &&
			 adev->pdev->revision == 0xC7))
			amdgpu_discovery_read_harvest_bit_per_ip(adev,
				&vcn_harvest_count);
	} else {
		amdgpu_discovery_read_from_harvest_table(adev,
							 &vcn_harvest_count,
							 &umc_harvest_count);
	}

	amdgpu_discovery_harvest_config_quirk(adev);

	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
	}

	if (umc_harvest_count < adev->gmc.num_umc) {
		adev->gmc.num_umc -= umc_harvest_count;
	}
}

union gc_info {
	struct gc_info_v1_0 v1;
	struct gc_info_v1_1 v1_1;
	struct gc_info_v1_2 v1_2;
	struct gc_info_v2_0 v2;
};

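/*
 * All gc_info revisions share a common header, so the table version can be
 * read through the v1 view before choosing which layout to parse.
 */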
static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union gc_info *gc_info;
	u16 offset;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[GC].offset);

	if (!offset)
		return 0;

	gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
	case 1:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
			le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
		}
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
		}
		break;
	case 2:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
			le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
		break;
	default:
		dev_err(adev->dev,
			"Unhandled GC info table %d.%d\n",
			le16_to_cpu(gc_info->v1.header.version_major),
			le16_to_cpu(gc_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union mall_info {
	struct mall_info_v1_0 v1;
};

static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union mall_info *mall_info;
	u32 u, mall_size_per_umc, m_s_present, half_use;
	u64 mall_size;
	u16 offset;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);

	if (!offset)
		return 0;

	mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
	case 1:
		mall_size = 0;
		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
		half_use = le32_to_cpu(mall_info->v1.m_half_use);
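		/*
		 * Each UMC contributes mall_size_per_umc; UMCs flagged in
		 * m_s_present contribute double that, and those in
		 * m_half_use only half.
		 */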
		for (u = 0; u < adev->gmc.num_umc; u++) {
			if (m_s_present & (1 << u))
				mall_size += mall_size_per_umc * 2;
			else if (half_use & (1 << u))
				mall_size += mall_size_per_umc / 2;
			else
				mall_size += mall_size_per_umc;
		}
		adev->gmc.mall_size = mall_size;
		break;
	default:
		dev_err(adev->dev,
			"Unhandled MALL info table %d.%d\n",
			le16_to_cpu(mall_info->v1.header.version_major),
			le16_to_cpu(mall_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union vcn_info {
	struct vcn_info_v1_0 v1;
};

static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union vcn_info *vcn_info;
	u16 offset;
	int v;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
	 * but that may change in the future with new GPUs so keep this
	 * check for defensive purposes.
	 */
	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
		dev_err(adev->dev, "invalid vcn instances\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);

	if (!offset)
		return 0;

	vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
	case 1:
		/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
		 * so this won't overflow.
		 */
		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
			adev->vcn.vcn_codec_disable_mask[v] =
				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
		}
		break;
	default:
		dev_err(adev->dev,
			"Unhandled VCN info table %d.%d\n",
			le16_to_cpu(vcn_info->v1.header.version_major),
			le16_to_cpu(vcn_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
{
	/* what IP to use for this? */
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add common ip block(GC_HWIP:0x%x)\n",
			adev->ip_versions[GC_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
{
	/* use GC or MMHUB IP version */
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add gmc ip block(GC_HWIP:0x%x)\n",
			adev->ip_versions[GC_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[OSSSYS_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 3, 0):
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		break;
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 1):
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
		amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
		break;
	default:
1601 		dev_err(adev->dev,
1602 			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
1603 			adev->ip_versions[OSSSYS_HWIP][0]);
1604 		return -EINVAL;
1605 	}
1606 	return 0;
1607 }
1608 
1609 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
1610 {
1611 	switch (adev->ip_versions[MP0_HWIP][0]) {
1612 	case IP_VERSION(9, 0, 0):
1613 		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
1614 		break;
1615 	case IP_VERSION(10, 0, 0):
1616 	case IP_VERSION(10, 0, 1):
1617 		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
1618 		break;
1619 	case IP_VERSION(11, 0, 0):
1620 	case IP_VERSION(11, 0, 2):
1621 	case IP_VERSION(11, 0, 4):
1622 	case IP_VERSION(11, 0, 5):
1623 	case IP_VERSION(11, 0, 9):
1624 	case IP_VERSION(11, 0, 7):
1625 	case IP_VERSION(11, 0, 11):
1626 	case IP_VERSION(11, 0, 12):
1627 	case IP_VERSION(11, 0, 13):
1628 	case IP_VERSION(11, 5, 0):
1629 		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
1630 		break;
1631 	case IP_VERSION(11, 0, 8):
1632 		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
1633 		break;
1634 	case IP_VERSION(11, 0, 3):
1635 	case IP_VERSION(12, 0, 1):
1636 		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
1637 		break;
1638 	case IP_VERSION(13, 0, 0):
1639 	case IP_VERSION(13, 0, 1):
1640 	case IP_VERSION(13, 0, 2):
1641 	case IP_VERSION(13, 0, 3):
1642 	case IP_VERSION(13, 0, 5):
1643 	case IP_VERSION(13, 0, 7):
1644 	case IP_VERSION(13, 0, 8):
1645 	case IP_VERSION(13, 0, 10):
1646 	case IP_VERSION(13, 0, 11):
1647 		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
1648 		break;
1649 	case IP_VERSION(13, 0, 4):
1650 		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
1651 		break;
1652 	default:
1653 		dev_err(adev->dev,
1654 			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
1655 			adev->ip_versions[MP0_HWIP][0]);
1656 		return -EINVAL;
1657 	}
1658 	return 0;
1659 }
1660 
static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(11, 0, 2):
		if (adev->asic_type == CHIP_ARCTURUS)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		else
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
			adev->ip_versions[MP1_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

#if defined(CONFIG_DRM_AMD_DC)
static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
{
	amdgpu_device_set_sriov_virtual_display(adev);
	amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
}
#endif

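/*
 * A user-requested virtual display always takes precedence, and DC support
 * is required for everything else.  DCN-based parts report their version
 * under DCE_HWIP; the older DCE 12 based Vega parts report it under
 * DCI_HWIP.
 */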
static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->enable_virtual_display) {
		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
		return 0;
	}

	if (!amdgpu_device_has_dc_support(adev))
		return 0;

#if defined(CONFIG_DRM_AMD_DC)
	if (adev->ip_versions[DCE_HWIP][0]) {
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
			if (amdgpu_sriov_vf(adev))
				amdgpu_discovery_set_sriov_display(adev);
			else
				amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
				adev->ip_versions[DCE_HWIP][0]);
			return -EINVAL;
		}
	} else if (adev->ip_versions[DCI_HWIP][0]) {
		switch (adev->ip_versions[DCI_HWIP][0]) {
		case IP_VERSION(12, 0, 0):
		case IP_VERSION(12, 0, 1):
		case IP_VERSION(12, 1, 0):
			if (amdgpu_sriov_vf(adev))
				amdgpu_discovery_set_sriov_display(adev);
			else
				amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
				adev->ip_versions[DCI_HWIP][0]);
			return -EINVAL;
		}
	}
#endif
	return 0;
}

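/* the GC major version selects the gfx block: v9, v10 or v11 */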
static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add gfx ip block(GC_HWIP:0x%x)\n",
			adev->ip_versions[GC_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

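/* the version of the first SDMA instance selects the block for all of them */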
static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[SDMA0_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 2):
	case IP_VERSION(4, 4, 0):
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 5):
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		break;
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 2):
	case IP_VERSION(5, 2, 4):
	case IP_VERSION(5, 2, 5):
	case IP_VERSION(5, 2, 6):
	case IP_VERSION(5, 2, 3):
	case IP_VERSION(5, 2, 1):
	case IP_VERSION(5, 2, 7):
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
			adev->ip_versions[SDMA0_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

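/*
 * Media engines: a non-zero VCE version indicates a pre-VCN part with
 * separate UVD and VCE blocks; otherwise the UVD slot carries the VCN
 * version and the matching VCN (and, where present, JPEG) blocks are added.
 */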
static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->ip_versions[VCE_HWIP][0]) {
		switch (adev->ip_versions[UVD_HWIP][0]) {
		case IP_VERSION(7, 0, 0):
		case IP_VERSION(7, 2, 0):
			/* UVD is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
				adev->ip_versions[UVD_HWIP][0]);
			return -EINVAL;
		}
		switch (adev->ip_versions[VCE_HWIP][0]) {
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 1, 0):
			/* VCE is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
				adev->ip_versions[VCE_HWIP][0]);
			return -EINVAL;
		}
	} else {
		switch (adev->ip_versions[UVD_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 2, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
			break;
		case IP_VERSION(2, 0, 3):
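			/* no VCN or JPEG block is added for this revision */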
			break;
		case IP_VERSION(2, 5, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
			break;
		case IP_VERSION(2, 6, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
			break;
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 16):
		case IP_VERSION(3, 1, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 192):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
			break;
		case IP_VERSION(3, 0, 33):
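			/* VCN only; this revision gets no JPEG block */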
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			break;
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 0, 2):
		case IP_VERSION(4, 0, 4):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
				adev->ip_versions[UVD_HWIP][0]);
			return -EINVAL;
		}
	}
	return 0;
}

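/*
 * MES (MicroEngine Scheduler): optional on GC 10.x behind the amdgpu_mes
 * module parameter, always enabled on GC 11.x.
 */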
static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
		if (amdgpu_mes) {
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
			adev->enable_mes = true;
			if (amdgpu_mes_kiq)
				adev->enable_mes_kiq = true;
		}
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
		adev->enable_mes = true;
		adev->enable_mes_kiq = true;
		break;
	default:
		break;
	}
	return 0;
}

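/*
 * Entry point for IP block selection.  ASICs that predate IP discovery
 * (Vega10 through Aldebaran) get their IP version tables hardcoded below;
 * newer ASICs read them from the discovery binary.  IP_VERSION() packs
 * major/minor/revision into a single integer, so versions can be compared
 * and switched on directly.
 */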
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
		break;
	case CHIP_VEGA12:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
		break;
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 1;
		adev->vcn.num_vcn_inst = 1;
		adev->gmc.num_umc = 2;
		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
		} else {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
		}
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 8;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		adev->sdma.num_instances = 8;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 8;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
		break;
	case CHIP_ALDEBARAN:
		aldebaran_reg_base_init(adev);
		adev->sdma.num_instances = 5;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
		break;
	default:
		r = amdgpu_discovery_reg_base_init(adev);
		if (r)
			return -EINVAL;

		amdgpu_discovery_harvest_ip(adev);
		amdgpu_discovery_get_gfx_info(adev);
		amdgpu_discovery_get_mall_info(adev);
		amdgpu_discovery_get_vcn_info(adev);
		break;
	}

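	/* derive the driver family from the GC IP version */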
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		adev->family = AMDGPU_FAMILY_AI;
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
		adev->family = AMDGPU_FAMILY_RV;
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
		adev->family = AMDGPU_FAMILY_NV;
		break;
	case IP_VERSION(10, 3, 1):
		adev->family = AMDGPU_FAMILY_VGH;
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case IP_VERSION(10, 3, 3):
		adev->family = AMDGPU_FAMILY_YC;
		break;
	case IP_VERSION(10, 3, 6):
		adev->family = AMDGPU_FAMILY_GC_10_3_6;
		break;
	case IP_VERSION(10, 3, 7):
		adev->family = AMDGPU_FAMILY_GC_10_3_7;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		adev->family = AMDGPU_FAMILY_GC_11_0_0;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->family = AMDGPU_FAMILY_GC_11_0_1;
		break;
	default:
		return -EINVAL;
	}

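	/* GC versions that identify APUs */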
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->flags |= AMD_IS_APU;
		break;
	default:
		break;
	}

	if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0))
		adev->gmc.xgmi.supported = true;

	/* set NBIO callbacks and HDP flush registers by NBIO version */
	switch (adev->ip_versions[NBIO_HWIP][0]) {
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 2, 0):
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
	case IP_VERSION(2, 5, 0):
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
		break;
	case IP_VERSION(7, 4, 0):
	case IP_VERSION(7, 4, 1):
	case IP_VERSION(7, 4, 4):
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
		break;
	case IP_VERSION(7, 2, 0):
	case IP_VERSION(7, 2, 1):
	case IP_VERSION(7, 3, 0):
	case IP_VERSION(7, 5, 0):
	case IP_VERSION(7, 5, 1):
		adev->nbio.funcs = &nbio_v7_2_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
		break;
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 3, 1):
	case IP_VERSION(2, 3, 2):
	case IP_VERSION(3, 3, 0):
	case IP_VERSION(3, 3, 1):
	case IP_VERSION(3, 3, 2):
	case IP_VERSION(3, 3, 3):
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
		break;
	case IP_VERSION(4, 3, 0):
	case IP_VERSION(4, 3, 1):
		if (amdgpu_sriov_vf(adev))
			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
		else
			adev->nbio.funcs = &nbio_v4_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
		break;
	case IP_VERSION(7, 7, 0):
	case IP_VERSION(7, 7, 1):
		adev->nbio.funcs = &nbio_v7_7_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
		break;
	default:
		break;
	}

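	/* HDP (Host Data Path) callbacks */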
	switch (adev->ip_versions[HDP_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
		adev->hdp.funcs = &hdp_v4_0_funcs;
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 0, 4):
	case IP_VERSION(5, 2, 0):
		adev->hdp.funcs = &hdp_v5_0_funcs;
		break;
	case IP_VERSION(5, 2, 1):
		adev->hdp.funcs = &hdp_v5_2_funcs;
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
		adev->hdp.funcs = &hdp_v6_0_funcs;
		break;
	default:
		break;
	}

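	/* DF (Data Fabric) callbacks */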
	switch (adev->ip_versions[DF_HWIP][0]) {
	case IP_VERSION(3, 6, 0):
	case IP_VERSION(3, 6, 1):
	case IP_VERSION(3, 6, 2):
		adev->df.funcs = &df_v3_6_funcs;
		break;
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 5, 0):
	case IP_VERSION(3, 5, 1):
	case IP_VERSION(3, 5, 2):
		adev->df.funcs = &df_v1_7_funcs;
		break;
	default:
		break;
	}

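	/* SMUIO callbacks */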
	switch (adev->ip_versions[SMUIO_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(10, 0, 2):
		adev->smuio.funcs = &smuio_v9_0_funcs;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
		adev->smuio.funcs = &smuio_v11_0_funcs;
		break;
	case IP_VERSION(11, 0, 6):
	case IP_VERSION(11, 0, 10):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 9):
	case IP_VERSION(13, 0, 10):
		adev->smuio.funcs = &smuio_v11_0_6_funcs;
		break;
	case IP_VERSION(13, 0, 2):
		adev->smuio.funcs = &smuio_v13_0_funcs;
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 8):
		adev->smuio.funcs = &smuio_v13_0_6_funcs;
		break;
	default:
		break;
	}

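	/* LSDMA callbacks */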
	switch (adev->ip_versions[LSDMA_HWIP][0]) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		adev->lsdma.funcs = &lsdma_v6_0_funcs;
		break;
	default:
		break;
	}

	r = amdgpu_discovery_set_common_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
	if (r)
		return r;

	/* For SR-IOV, PSP needs to be initialized before IH */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_discovery_set_psp_ip_blocks(adev);
		if (r)
			return r;
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;
	} else {
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;

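		/* on bare metal, PSP is only needed when it loads the firmware */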
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
			r = amdgpu_discovery_set_psp_ip_blocks(adev);
			if (r)
				return r;
		}
	}

	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_display_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gc_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
	if (r)
		return r;

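	/* SMU for the non-PSP firmware loading paths */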
	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
	     !amdgpu_sriov_vf(adev)) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_mm_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_mes_ip_blocks(adev);
	if (r)
		return r;

	return 0;
}