/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "atombios.h"
#include "soc15_hw_ip.h"

bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		if (le32_to_cpu(firmware_info->firmware_capability) &
		    ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION)
			return true;
	}
	return false;
}
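
/*
 * Illustrative usage sketch (assumed caller and helper, not from this file):
 * code that must gate an SR-IOV-only path can key off this capability bit:
 *
 *	if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
 *		setup_sriov_path(adev);	// hypothetical helper
 */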

void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		adev->bios_scratch_reg_offset =
			le32_to_cpu(firmware_info->bios_scratch_reg_startaddr);
	}
}
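
/*
 * The vram_usagebyfirmware table stores kilobyte-granular values and packs
 * operation flags into the high bits of start_address_in_kb, which is why
 * the function below masks the flags off and shifts by 10 (KB -> bytes).
 * Worked example with assumed values: a table reporting
 * used_by_firmware_in_kb = 0x1000 (4096 KB) with the SR-IOV
 * share-reservation flag set leads to
 *
 *	adev->mman.fw_vram_usage_size = 0x1000 << 10;	// 4 MiB
 *
 * while a plain driver reservation of used_by_driver_in_kb = 20 gives
 * usage_bytes = 20 << 10 = 20 KiB of scratch.
 */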
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						vram_usagebyfirmware);
	struct vram_usagebyfirmware_v2_1 *firmware_usage;
	uint32_t start_addr, size;
	uint16_t data_offset;
	int usage_bytes = 0;

	if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
		firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
		DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
			  le32_to_cpu(firmware_usage->start_address_in_kb),
			  le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
			  le16_to_cpu(firmware_usage->used_by_driver_in_kb));

		start_addr = le32_to_cpu(firmware_usage->start_address_in_kb);
		size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb);

		if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
			(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
			ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
			/* Firmware requests a VRAM reservation for SR-IOV */
			adev->mman.fw_vram_usage_start_offset = (start_addr &
				(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
			adev->mman.fw_vram_usage_size = size << 10;
			/* Use the default scratch size */
			usage_bytes = 0;
		} else {
			usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10;
		}
	}
	ctx->scratch_size_bytes = 0;
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
	/* allocate some scratch memory */
	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
	if (!ctx->scratch)
		return -ENOMEM;
	ctx->scratch_size_bytes = usage_bytes;
	return 0;
}

union igp_info {
	struct atom_integrated_system_info_v1_11 v11;
	struct atom_integrated_system_info_v1_12 v12;
};

union umc_info {
	struct atom_umc_info_v3_1 v31;
};

union vram_info {
	struct atom_vram_info_header_v2_3 v23;
	struct atom_vram_info_header_v2_4 v24;
	struct atom_vram_info_header_v2_5 v25;
};

union vram_module {
	struct atom_vram_module_v9 v9;
	struct atom_vram_module_v10 v10;
	struct atom_vram_module_v11 v11;
};

static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
					      int atom_mem_type)
{
	int vram_type;

	if (adev->flags & AMD_IS_APU) {
		switch (atom_mem_type) {
		case Ddr2MemType:
		case LpDdr2MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR2;
			break;
		case Ddr3MemType:
		case LpDdr3MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR3;
			break;
		case Ddr4MemType:
		case LpDdr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR4;
			break;
		case Ddr5MemType:
		case LpDdr5MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR5;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	} else {
		switch (atom_mem_type) {
		case ATOM_DGPU_VRAM_TYPE_GDDR5:
			vram_type = AMDGPU_VRAM_TYPE_GDDR5;
			break;
		case ATOM_DGPU_VRAM_TYPE_HBM2:
			vram_type = AMDGPU_VRAM_TYPE_HBM;
			break;
		case ATOM_DGPU_VRAM_TYPE_GDDR6:
			vram_type = AMDGPU_VRAM_TYPE_GDDR6;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	}

	return vram_type;
}
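
/*
 * Usage sketch with assumed values: on a dGPU, an ATOM memory type of
 * ATOM_DGPU_VRAM_TYPE_GDDR6 maps to AMDGPU_VRAM_TYPE_GDDR6, and any
 * unrecognized type falls back to AMDGPU_VRAM_TYPE_UNKNOWN:
 *
 *	int vram_type = convert_atom_mem_type_to_vram_type(adev,
 *						ATOM_DGPU_VRAM_TYPE_GDDR6);
 */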
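
/*
 * Note on the width math below: for dGPUs, channel_width is log2-encoded,
 * so the effective bus width is channel_num * (1 << channel_width); e.g. a
 * hypothetical module reporting channel_num = 16 and channel_width = 5
 * yields 16 * 32 = 512 bits. APU tables instead report a UMA channel count
 * with a fixed 64-bit channel width.
 */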
int
amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
				  int *vram_width, int *vram_type,
				  int *vram_vendor)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index, i = 0;
	u16 data_offset, size;
	union igp_info *igp_info;
	union vram_info *vram_info;
	union vram_module *vram_module;
	u8 frev, crev;
	u8 mem_type;
	u8 mem_vendor;
	u32 mem_channel_number;
	u32 mem_channel_width;
	u32 module_id;

	if (adev->flags & AMD_IS_APU)
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    integratedsysteminfo);
	else
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    vram_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size,
					  &frev, &crev, &data_offset)) {
		if (adev->flags & AMD_IS_APU) {
			igp_info = (union igp_info *)
				(mode_info->atom_context->bios + data_offset);
			switch (crev) {
			case 11:
				mem_channel_number = igp_info->v11.umachannelnumber;
				/* channel width is 64 */
				if (vram_width)
					*vram_width = mem_channel_number * 64;
				mem_type = igp_info->v11.memorytype;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				break;
			case 12:
				mem_channel_number = igp_info->v12.umachannelnumber;
				/* channel width is 64 */
				if (vram_width)
					*vram_width = mem_channel_number * 64;
				mem_type = igp_info->v12.memorytype;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				break;
			default:
				return -EINVAL;
			}
		} else {
			vram_info = (union vram_info *)
				(mode_info->atom_context->bios + data_offset);
			module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
			switch (crev) {
			case 3:
				if (module_id > vram_info->v23.vram_module_num)
					module_id = 0;
				vram_module = (union vram_module *)vram_info->v23.vram_module;
				while (i < module_id) {
					vram_module = (union vram_module *)
						((u8 *)vram_module + vram_module->v9.vram_module_size);
					i++;
				}
				mem_type = vram_module->v9.memory_type;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				mem_channel_number = vram_module->v9.channel_num;
				mem_channel_width = vram_module->v9.channel_width;
				if (vram_width)
					*vram_width = mem_channel_number * (1 << mem_channel_width);
				mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
				if (vram_vendor)
					*vram_vendor = mem_vendor;
				break;
			case 4:
				if (module_id > vram_info->v24.vram_module_num)
					module_id = 0;
				vram_module = (union vram_module *)vram_info->v24.vram_module;
				while (i < module_id) {
					vram_module = (union vram_module *)
						((u8 *)vram_module + vram_module->v10.vram_module_size);
					i++;
				}
				mem_type = vram_module->v10.memory_type;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				mem_channel_number = vram_module->v10.channel_num;
				mem_channel_width = vram_module->v10.channel_width;
				if (vram_width)
					*vram_width = mem_channel_number * (1 << mem_channel_width);
				mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
				if (vram_vendor)
					*vram_vendor = mem_vendor;
				break;
			case 5:
				if (module_id > vram_info->v25.vram_module_num)
					module_id = 0;
				vram_module = (union vram_module *)vram_info->v25.vram_module;
				while (i < module_id) {
					vram_module = (union vram_module *)
						((u8 *)vram_module + vram_module->v11.vram_module_size);
					i++;
				}
				mem_type = vram_module->v11.memory_type;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				mem_channel_number = vram_module->v11.channel_num;
				mem_channel_width = vram_module->v11.channel_width;
				if (vram_width)
					*vram_width = mem_channel_number * (1 << mem_channel_width);
				mem_vendor = (vram_module->v11.vender_rev_id) & 0xF;
				if (vram_vendor)
					*vram_vendor = mem_vendor;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	return 0;
}

/*
 * Return true if the umc info table is available and the vbios enables ECC
 * by default; return false if ECC is not enabled or the umc info table is
 * unavailable.
 */
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union umc_info *umc_info;
	u8 frev, crev;
	bool ecc_default_enabled = false;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size, &frev, &crev, &data_offset)) {
		/* support umc_info 3.1+ */
		if ((frev == 3 && crev >= 1) || (frev > 3)) {
			umc_info = (union umc_info *)
				(mode_info->atom_context->bios + data_offset);
			ecc_default_enabled =
				(le32_to_cpu(umc_info->v31.umc_config) &
				 UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
		}
	}

	return ecc_default_enabled;
}

union firmware_info {
	struct atom_firmware_info_v3_1 v31;
	struct atom_firmware_info_v3_2 v32;
	struct atom_firmware_info_v3_3 v33;
	struct atom_firmware_info_v3_4 v34;
};

/*
 * Return true if the vbios supports SRAM ECC, false if not.
 */
bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;
	bool sram_ecc_supported = false;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size, &frev, &crev, &data_offset)) {
		/* support firmware_info 3.1+ */
		if ((frev == 3 && crev >= 1) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			sram_ecc_supported =
				(le32_to_cpu(firmware_info->v31.firmware_capability) &
				 ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
		}
	}

	return sram_ecc_supported;
}

union smu_info {
	struct atom_smu_info_v3_1 v31;
};
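
/*
 * The bootup and reference clocks parsed below are stored in 10 kHz units;
 * e.g. a hypothetical bootup_sclk_in10khz value of 131000 corresponds to
 * 131000 * 10 kHz = 1310 MHz (divide the raw value by 100 for MHz).
 */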
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct amdgpu_pll *spll = &adev->clock.spll;
	struct amdgpu_pll *mpll = &adev->clock.mpll;
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL, index;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);

		adev->clock.default_sclk =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		adev->clock.default_mclk =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);

		adev->pm.current_sclk = adev->clock.default_sclk;
		adev->pm.current_mclk = adev->clock.default_mclk;

		/* not technically a clock, but... */
		adev->mode_info.firmware_flags =
			le32_to_cpu(firmware_info->v31.firmware_capability);

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union smu_info *smu_info =
			(union smu_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* system clock */
		spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);

		spll->reference_div = 0;
		spll->min_post_div = 1;
		spll->max_post_div = 1;
		spll->min_ref_div = 2;
		spll->max_ref_div = 0xff;
		spll->min_feedback_div = 4;
		spll->max_feedback_div = 0xff;
		spll->best_vco = 0;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union umc_info *umc_info =
			(union umc_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* memory clock */
		mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);

		mpll->reference_div = 0;
		mpll->min_post_div = 1;
		mpll->max_post_div = 1;
		mpll->min_ref_div = 2;
		mpll->max_ref_div = 0xff;
		mpll->min_feedback_div = 4;
		mpll->max_feedback_div = 0xff;
		mpll->best_vco = 0;

		ret = 0;
	}

	return ret;
}

union gfx_info {
	struct atom_gfx_info_v2_4 v24;
};

int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	uint8_t frev, crev;
	uint16_t data_offset;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    gfx_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union gfx_info *gfx_info = (union gfx_info *)
			(mode_info->atom_context->bios + data_offset);
		switch (crev) {
		case 4:
			adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
			adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
			adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
			adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
			adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
			adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
			adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
			adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
			adev->gfx.config.gs_prim_buffer_depth =
				le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
			adev->gfx.config.double_offchip_lds_buf =
				gfx_info->v24.gc_double_offchip_lds_buffer;
			adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
			adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
			adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
			adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
			return 0;
		default:
			return -EINVAL;
		}
	}
	return -EINVAL;
}

/*
 * Check if VBIOS supports GDDR6 training data save/restore
 */
static bool gddr6_mem_train_vbios_support(struct amdgpu_device *adev)
{
	uint16_t data_offset;
	int index;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		DRM_DEBUG("atom firmware capability:0x%08x.\n",
			  le32_to_cpu(firmware_info->firmware_capability));

		if (le32_to_cpu(firmware_info->firmware_capability) &
		    ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING)
			return true;
	}

	return false;
}
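
/*
 * Returns a tri-state: 1 when memory training is supported by both the
 * vbios and the PSP hardware, 0 when the vbios does not advertise it, and
 * -1 on a vbios/hardware mismatch.  Minimal caller sketch
 * (mem_train_supported is a hypothetical flag):
 *
 *	int ret = amdgpu_mem_train_support(adev);
 *	if (ret < 0)
 *		return -EINVAL;
 *	mem_train_supported = !!ret;
 */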
int amdgpu_mem_train_support(struct amdgpu_device *adev)
{
	int ret;
	uint32_t major, minor, revision, hw_v;

	if (gddr6_mem_train_vbios_support(adev)) {
		amdgpu_discovery_get_ip_version(adev, MP0_HWID, &major, &minor, &revision);
		hw_v = HW_REV(major, minor, revision);
		/*
		 * Treat revision 0 as a special case: the registers for MP0
		 * and MMHUB are missing on some Navi10 A0 parts, which keeps
		 * the driver from discovering the HW IP information. Since
		 * none of those functions will be initialized in that case,
		 * it should not cause any problems.
		 */
		switch (hw_v) {
		case HW_REV(11, 0, 0):
		case HW_REV(11, 0, 5):
		case HW_REV(11, 0, 7):
		case HW_REV(11, 0, 11):
		case HW_REV(11, 0, 12):
			ret = 1;
			break;
		default:
			DRM_ERROR("memory training is supported by the vbios but not by psp hw(%08x)!\n",
				  hw_v);
			ret = -1;
			break;
		}
	} else {
		ret = 0;
		hw_v = -1;
	}

	DRM_DEBUG("mp0 hw_v %08x, ret:%d.\n", hw_v, ret);
	return ret;
}

int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	union firmware_info *firmware_info;
	int index;
	u16 data_offset, size;
	u8 frev, crev;
	int fw_reserved_fb_size;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (!amdgpu_atom_parse_data_header(ctx, index, &size,
					   &frev, &crev, &data_offset))
		/* failed to parse data_header */
		return 0;

	firmware_info = (union firmware_info *)(ctx->bios + data_offset);

	if (frev != 3)
		return -EINVAL;

	switch (crev) {
	case 4:
		fw_reserved_fb_size =
			(firmware_info->v34.fw_reserved_size_in_kb << 10);
		break;
	default:
		fw_reserved_fb_size = 0;
		break;
	}
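
	/*
	 * fw_reserved_size_in_kb is kilobyte-granular, so the << 10 above
	 * converts it to bytes; e.g. a hypothetical table value of 256
	 * yields 256 << 10 = 262144 bytes reserved for firmware use.
	 */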

	return fw_reserved_fb_size;
}