/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "atombios.h"
#include "soc15_hw_ip.h"

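/**
 * amdgpu_atomfirmware_gpu_supports_virtualization - query virtualization support
 * @adev: amdgpu device pointer
 *
 * Checks the firmware capability flags in the firmwareinfo data table
 * for ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION.
 *
 * Returns true if the vbios reports virtualization support, false
 * otherwise (including when the table cannot be parsed).
 */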
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		if (le32_to_cpu(firmware_info->firmware_capability) &
		    ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION)
			return true;
	}
	return false;
}

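/**
 * amdgpu_atomfirmware_scratch_regs_init - cache the bios scratch register base
 * @adev: amdgpu device pointer
 *
 * Reads the bios scratch register start address from the firmwareinfo
 * data table so later code can access the scratch registers.
 */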
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		adev->bios_scratch_reg_offset =
			le32_to_cpu(firmware_info->bios_scratch_reg_startaddr);
	}
}

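/**
 * amdgpu_atomfirmware_allocate_fb_scratch - allocate atom interpreter scratch
 * @adev: amdgpu device pointer
 *
 * Queries the vram_usagebyfirmware table for the driver scratch size and
 * allocates the scratch buffer used by the atom bios interpreter.  When
 * the vbios instead requests an SR-IOV VRAM reservation (or the table is
 * absent), a default 20 KB scratch buffer is used.
 *
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */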
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						vram_usagebyfirmware);
	struct vram_usagebyfirmware_v2_1 *firmware_usage;
	uint32_t start_addr, size;
	uint16_t data_offset;
	int usage_bytes = 0;

	if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
		firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
		DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
			  le32_to_cpu(firmware_usage->start_address_in_kb),
			  le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
			  le16_to_cpu(firmware_usage->used_by_driver_in_kb));

		start_addr = le32_to_cpu(firmware_usage->start_address_in_kb);
		size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb);

		if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
			(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
			ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
			/* Firmware requests a VRAM reservation for SR-IOV */
			adev->mman.fw_vram_usage_start_offset = (start_addr &
				(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
			adev->mman.fw_vram_usage_size = size << 10;
			/* Use the default scratch size */
			usage_bytes = 0;
		} else {
			usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10;
		}
	}
	ctx->scratch_size_bytes = 0;
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
	/* allocate some scratch memory */
	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
	if (!ctx->scratch)
		return -ENOMEM;
	ctx->scratch_size_bytes = usage_bytes;
	return 0;
}

union igp_info {
	struct atom_integrated_system_info_v1_11 v11;
	struct atom_integrated_system_info_v1_12 v12;
	struct atom_integrated_system_info_v2_1 v21;
};

union umc_info {
	struct atom_umc_info_v3_1 v31;
	struct atom_umc_info_v3_2 v32;
	struct atom_umc_info_v3_3 v33;
};

union vram_info {
	struct atom_vram_info_header_v2_3 v23;
	struct atom_vram_info_header_v2_4 v24;
	struct atom_vram_info_header_v2_5 v25;
	struct atom_vram_info_header_v2_6 v26;
};

union vram_module {
	struct atom_vram_module_v9 v9;
	struct atom_vram_module_v10 v10;
	struct atom_vram_module_v11 v11;
};

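/*
 * Map an atom memory type id from the vbios tables to the driver's
 * AMDGPU_VRAM_TYPE_* enum.  APUs report DDR/LPDDR types while dGPUs
 * report GDDR/HBM types; anything unrecognized becomes
 * AMDGPU_VRAM_TYPE_UNKNOWN.
 */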
static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
					      int atom_mem_type)
{
	int vram_type;

	if (adev->flags & AMD_IS_APU) {
		switch (atom_mem_type) {
		case Ddr2MemType:
		case LpDdr2MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR2;
			break;
		case Ddr3MemType:
		case LpDdr3MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR3;
			break;
		case Ddr4MemType:
		case LpDdr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR4;
			break;
		case Ddr5MemType:
		case LpDdr5MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR5;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	} else {
		switch (atom_mem_type) {
		case ATOM_DGPU_VRAM_TYPE_GDDR5:
			vram_type = AMDGPU_VRAM_TYPE_GDDR5;
			break;
		case ATOM_DGPU_VRAM_TYPE_HBM2:
		case ATOM_DGPU_VRAM_TYPE_HBM2E:
			vram_type = AMDGPU_VRAM_TYPE_HBM;
			break;
		case ATOM_DGPU_VRAM_TYPE_GDDR6:
			vram_type = AMDGPU_VRAM_TYPE_GDDR6;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	}

	return vram_type;
}

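/**
 * amdgpu_atomfirmware_get_vram_info - query vram parameters from the vbios
 * @adev: amdgpu device pointer
 * @vram_width: total memory channel width in bits (may be NULL)
 * @vram_type: AMDGPU_VRAM_TYPE_* value (may be NULL)
 * @vram_vendor: memory vendor id, dGPU only (may be NULL)
 *
 * APUs are queried via the integratedsysteminfo table; dGPUs via the
 * vram_info table, where the active vram module is selected by a bios
 * scratch register.
 *
 * Returns 0 on success, -EINVAL on an unsupported table revision.
 */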
int
amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
				  int *vram_width, int *vram_type,
				  int *vram_vendor)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index, i = 0;
	u16 data_offset, size;
	union igp_info *igp_info;
	union vram_info *vram_info;
	union vram_module *vram_module;
	u8 frev, crev;
	u8 mem_type;
	u8 mem_vendor;
	u32 mem_channel_number;
	u32 mem_channel_width;
	u32 module_id;

	if (adev->flags & AMD_IS_APU)
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    integratedsysteminfo);
	else
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    vram_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size,
					  &frev, &crev, &data_offset)) {
		if (adev->flags & AMD_IS_APU) {
			igp_info = (union igp_info *)
				(mode_info->atom_context->bios + data_offset);
			switch (frev) {
			case 1:
				switch (crev) {
				case 11:
				case 12:
					mem_channel_number = igp_info->v11.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					/* channel width is 64 bits */
					if (vram_width)
						*vram_width = mem_channel_number * 64;
					mem_type = igp_info->v11.memorytype;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				default:
					return -EINVAL;
				}
				break;
			case 2:
				switch (crev) {
				case 1:
				case 2:
					mem_channel_number = igp_info->v21.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					/* channel width is 64 bits */
					if (vram_width)
						*vram_width = mem_channel_number * 64;
					mem_type = igp_info->v21.memorytype;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				default:
					return -EINVAL;
				}
				break;
			default:
				return -EINVAL;
			}
		} else {
			vram_info = (union vram_info *)
				(mode_info->atom_context->bios + data_offset);
			module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
			switch (crev) {
			case 3:
				if (module_id > vram_info->v23.vram_module_num)
					module_id = 0;
				vram_module = (union vram_module *)vram_info->v23.vram_module;
				while (i < module_id) {
					vram_module = (union vram_module *)
						((u8 *)vram_module + vram_module->v9.vram_module_size);
					i++;
				}
				mem_type = vram_module->v9.memory_type;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				mem_channel_number = vram_module->v9.channel_num;
				mem_channel_width = vram_module->v9.channel_width;
				if (vram_width)
					*vram_width = mem_channel_number * (1 << mem_channel_width);
				mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
				if (vram_vendor)
					*vram_vendor = mem_vendor;
				break;
			case 4:
				if (module_id > vram_info->v24.vram_module_num)
					module_id = 0;
				vram_module = (union vram_module *)vram_info->v24.vram_module;
				while (i < module_id) {
					vram_module = (union vram_module *)
						((u8 *)vram_module + vram_module->v10.vram_module_size);
					i++;
				}
				mem_type = vram_module->v10.memory_type;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				mem_channel_number = vram_module->v10.channel_num;
				mem_channel_width = vram_module->v10.channel_width;
				if (vram_width)
					*vram_width = mem_channel_number * (1 << mem_channel_width);
				mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
				if (vram_vendor)
					*vram_vendor = mem_vendor;
				break;
			case 5:
				if (module_id > vram_info->v25.vram_module_num)
					module_id = 0;
				vram_module = (union vram_module *)vram_info->v25.vram_module;
				while (i < module_id) {
					vram_module = (union vram_module *)
						((u8 *)vram_module + vram_module->v11.vram_module_size);
					i++;
				}
				mem_type = vram_module->v11.memory_type;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				mem_channel_number = vram_module->v11.channel_num;
				mem_channel_width = vram_module->v11.channel_width;
				if (vram_width)
					*vram_width = mem_channel_number * (1 << mem_channel_width);
				mem_vendor = (vram_module->v11.vender_rev_id) & 0xF;
				if (vram_vendor)
					*vram_vendor = mem_vendor;
				break;
			case 6:
				if (module_id > vram_info->v26.vram_module_num)
					module_id = 0;
				vram_module = (union vram_module *)vram_info->v26.vram_module;
				while (i < module_id) {
					vram_module = (union vram_module *)
						((u8 *)vram_module + vram_module->v9.vram_module_size);
					i++;
				}
				mem_type = vram_module->v9.memory_type;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				mem_channel_number = vram_module->v9.channel_num;
				mem_channel_width = vram_module->v9.channel_width;
				if (vram_width)
					*vram_width = mem_channel_number * (1 << mem_channel_width);
				mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
				if (vram_vendor)
					*vram_vendor = mem_vendor;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	return 0;
}

/*
 * Return true if the vbios enabled ECC by default (this requires the umc
 * info table to be available); return false if ECC is not enabled or the
 * umc info table is not available.
 */
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union umc_info *umc_info;
	u8 frev, crev;
	bool ecc_default_enabled = false;
	u32 umc_config;
	u32 umc_config1;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
			umc_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
				index, &size, &frev, &crev, &data_offset)) {
		if (frev == 3) {
			umc_info = (union umc_info *)
				(mode_info->atom_context->bios + data_offset);
			switch (crev) {
			case 1:
				umc_config = le32_to_cpu(umc_info->v31.umc_config);
				ecc_default_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			case 2:
				umc_config = le32_to_cpu(umc_info->v32.umc_config);
				ecc_default_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			case 3:
				umc_config = le32_to_cpu(umc_info->v33.umc_config);
				umc_config1 = le32_to_cpu(umc_info->v33.umc_config1);
				ecc_default_enabled =
					((umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ||
					 (umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE)) ? true : false;
				break;
			default:
				/* unsupported crev */
				return false;
			}
		}
	}

	return ecc_default_enabled;
}

union firmware_info {
	struct atom_firmware_info_v3_1 v31;
	struct atom_firmware_info_v3_2 v32;
	struct atom_firmware_info_v3_3 v33;
	struct atom_firmware_info_v3_4 v34;
};

/*
 * Return true if the vbios supports SRAM ECC, false if not.
 */
bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;
	bool sram_ecc_supported = false;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
			firmwareinfo);

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
				index, &size, &frev, &crev, &data_offset)) {
		/* support firmware_info 3.1+ */
		if ((frev == 3 && crev >= 1) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			sram_ecc_supported =
				(le32_to_cpu(firmware_info->v31.firmware_capability) &
				 ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
		}
	}

	return sram_ecc_supported;
}

union smu_info {
	struct atom_smu_info_v3_1 v31;
};

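/**
 * amdgpu_atomfirmware_get_clock_info - fetch default clocks from the vbios
 * @adev: amdgpu device pointer
 *
 * Reads the bootup engine and memory clocks from the firmwareinfo table,
 * the core reference clock from the smu_info table and the memory
 * reference clock from the umc_info table, and fills in fixed defaults
 * for the spll/mpll divider limits.
 *
 * Returns 0 if at least one of the tables was parsed, -EINVAL otherwise.
 */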
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct amdgpu_pll *spll = &adev->clock.spll;
	struct amdgpu_pll *mpll = &adev->clock.mpll;
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL, index;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);

		adev->clock.default_sclk =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		adev->clock.default_mclk =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);

		adev->pm.current_sclk = adev->clock.default_sclk;
		adev->pm.current_mclk = adev->clock.default_mclk;

		/* not technically a clock, but... */
		adev->mode_info.firmware_flags =
			le32_to_cpu(firmware_info->v31.firmware_capability);

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		union smu_info *smu_info =
			(union smu_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* system clock */
		spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);

		spll->reference_div = 0;
		spll->min_post_div = 1;
		spll->max_post_div = 1;
		spll->min_ref_div = 2;
		spll->max_ref_div = 0xff;
		spll->min_feedback_div = 4;
		spll->max_feedback_div = 0xff;
		spll->best_vco = 0;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		union umc_info *umc_info =
			(union umc_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* memory clock */
		mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);

		mpll->reference_div = 0;
		mpll->min_post_div = 1;
		mpll->max_post_div = 1;
		mpll->min_ref_div = 2;
		mpll->max_ref_div = 0xff;
		mpll->min_feedback_div = 4;
		mpll->max_feedback_div = 0xff;
		mpll->best_vco = 0;

		ret = 0;
	}

	return ret;
}

union gfx_info {
	struct atom_gfx_info_v2_4 v24;
	struct atom_gfx_info_v2_7 v27;
};

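/**
 * amdgpu_atomfirmware_get_gfx_info - fill the gfx configuration from the vbios
 * @adev: amdgpu device pointer
 *
 * Copies the shader engine, CU and LDS configuration from the gfx_info
 * data table (v2.4 or v2.7) into adev->gfx.
 *
 * Returns 0 on success, -EINVAL if the table cannot be parsed or its
 * revision is unsupported.
 */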
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	uint8_t frev, crev;
	uint16_t data_offset;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    gfx_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		union gfx_info *gfx_info = (union gfx_info *)
			(mode_info->atom_context->bios + data_offset);
		switch (crev) {
		case 4:
			adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
			adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
			adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
			adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
			adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
			adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
			adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
			adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
			adev->gfx.config.gs_prim_buffer_depth =
				le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
			adev->gfx.config.double_offchip_lds_buf =
				gfx_info->v24.gc_double_offchip_lds_buffer;
			adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
			adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
			adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
			adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
			return 0;
		case 7:
			adev->gfx.config.max_shader_engines = gfx_info->v27.max_shader_engines;
			adev->gfx.config.max_cu_per_sh = gfx_info->v27.max_cu_per_sh;
			adev->gfx.config.max_sh_per_se = gfx_info->v27.max_sh_per_se;
			adev->gfx.config.max_backends_per_se = gfx_info->v27.max_backends_per_se;
			adev->gfx.config.max_texture_channel_caches = gfx_info->v27.max_texture_channel_caches;
			adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v27.gc_num_gprs);
			adev->gfx.config.max_gs_threads = gfx_info->v27.gc_num_max_gs_thds;
			adev->gfx.config.gs_vgt_table_depth = gfx_info->v27.gc_gs_table_depth;
			adev->gfx.config.gs_prim_buffer_depth = le16_to_cpu(gfx_info->v27.gc_gsprim_buff_depth);
			adev->gfx.config.double_offchip_lds_buf = gfx_info->v27.gc_double_offchip_lds_buffer;
			adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v27.gc_wave_size);
			adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v27.gc_max_waves_per_simd);
			adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v27.gc_max_scratch_slots_per_cu;
			adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v27.gc_lds_size);
			return 0;
		default:
			return -EINVAL;
		}
	}
	return -EINVAL;
}

/*
 * Check if VBIOS supports GDDR6 training data save/restore
 */
static bool gddr6_mem_train_vbios_support(struct amdgpu_device *adev)
{
	uint16_t data_offset;
	int index;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		DRM_DEBUG("atom firmware capability:0x%08x.\n",
			  le32_to_cpu(firmware_info->firmware_capability));

		if (le32_to_cpu(firmware_info->firmware_capability) &
		    ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING)
			return true;
	}

	return false;
}

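/**
 * amdgpu_mem_train_support - check whether memory training can be used
 * @adev: amdgpu device pointer
 *
 * Memory training needs both vbios support (the 2-stage BIST training
 * capability) and a recognized MP0 (psp) hardware revision.
 *
 * Returns 1 if supported, 0 if the vbios lacks the capability, or -1 if
 * the vbios advertises it but the psp hw revision is not recognized.
 */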
int amdgpu_mem_train_support(struct amdgpu_device *adev)
{
	int ret;
	uint32_t major, minor, revision, hw_v;

	if (gddr6_mem_train_vbios_support(adev)) {
		amdgpu_discovery_get_ip_version(adev, MP0_HWID, &major, &minor, &revision);
		hw_v = HW_REV(major, minor, revision);
		/*
		 * Treat revision 0 as a special case: the MP0 and MMHUB
		 * registers are missing on some Navi10 A0 parts, which
		 * prevents the driver from discovering the hwip information.
		 * Since none of the functions will be initialized in that
		 * case, it should not cause any problems.
		 */
		switch (hw_v) {
		case HW_REV(11, 0, 0):
		case HW_REV(11, 0, 5):
		case HW_REV(11, 0, 7):
		case HW_REV(11, 0, 11):
		case HW_REV(11, 0, 12):
			ret = 1;
			break;
		default:
			DRM_ERROR("memory training is supported by the vbios but not by psp hw(%08x)!\n",
				  hw_v);
			ret = -1;
			break;
		}
	} else {
		ret = 0;
		hw_v = -1;
	}

	DRM_DEBUG("mp0 hw_v %08x, ret:%d.\n", hw_v, ret);
	return ret;
}

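/**
 * amdgpu_atomfirmware_get_fw_reserved_fb_size - query the fw reserved fb size
 * @adev: amdgpu device pointer
 *
 * Reads the firmware reserved framebuffer size from the firmwareinfo
 * table; only v3.4 and later carry the field.
 *
 * Returns the reserved size in bytes, 0 if the table cannot be parsed or
 * the minor revision predates the field, or -EINVAL on a major revision
 * mismatch.
 */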
int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	union firmware_info *firmware_info;
	int index;
	u16 data_offset, size;
	u8 frev, crev;
	int fw_reserved_fb_size;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
			firmwareinfo);

	if (!amdgpu_atom_parse_data_header(ctx, index, &size,
				&frev, &crev, &data_offset))
		/* failed to parse data_header */
		return 0;

	firmware_info = (union firmware_info *)(ctx->bios + data_offset);

	if (frev != 3)
		return -EINVAL;

	switch (crev) {
	case 4:
		fw_reserved_fb_size =
			(firmware_info->v34.fw_reserved_size_in_kb << 10);
		break;
	default:
		fw_reserved_fb_size = 0;
		break;
	}

	return fw_reserved_fb_size;
}