1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "pp_debug.h"
24 #include <linux/module.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include "atom.h"
28 #include "ppatomctrl.h"
29 #include "atombios.h"
30 #include "cgs_common.h"
31 #include "ppevvmath.h"
32 
33 #define MEM_ID_MASK           0xff000000
34 #define MEM_ID_SHIFT          24
35 #define CLOCK_RANGE_MASK      0x00ffffff
36 #define CLOCK_RANGE_SHIFT     0
37 #define LOW_NIBBLE_MASK       0xf
38 #define DATA_EQU_PREV         0
39 #define DATA_FROM_TABLE       4
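/*
 * The low nibble of each register's uc_pre_reg_data selects where its value
 * comes from when an AC timing entry is built: DATA_FROM_TABLE takes the next
 * dword from the VBIOS data block, while DATA_EQU_PREV repeats the previously
 * copied value (see atomctrl_retrieve_ac_timing() below).
 */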
40 
41 union voltage_object_info {
42 	struct _ATOM_VOLTAGE_OBJECT_INFO v1;
43 	struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
44 	struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3;
45 };
46 
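/*
 * Walk the AC timing data blocks that follow the register index table.  Each
 * block starts with a dword whose bits 31:24 hold the memory module ID and
 * bits 23:0 the maximum memory clock of that range.  For every block matching
 * @index, copy the per-register values into @table until the
 * END_OF_REG_DATA_BLOCK terminator is reached.
 */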
47 static int atomctrl_retrieve_ac_timing(
48 		uint8_t index,
49 		ATOM_INIT_REG_BLOCK *reg_block,
50 		pp_atomctrl_mc_reg_table *table)
51 {
52 	uint32_t i, j;
53 	uint8_t tmem_id;
54 	ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
55 		((uint8_t *)reg_block + (2 * sizeof(uint16_t)) + le16_to_cpu(reg_block->usRegIndexTblSize));
56 
57 	uint8_t num_ranges = 0;
58 
59 	while (*(uint32_t *)reg_data != END_OF_REG_DATA_BLOCK &&
60 			num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES) {
61 		tmem_id = (uint8_t)((*(uint32_t *)reg_data & MEM_ID_MASK) >> MEM_ID_SHIFT);
62 
63 		if (index == tmem_id) {
64 			table->mc_reg_table_entry[num_ranges].mclk_max =
65 				(uint32_t)((*(uint32_t *)reg_data & CLOCK_RANGE_MASK) >>
66 						CLOCK_RANGE_SHIFT);
67 
68 			for (i = 0, j = 1; i < table->last; i++) {
69 				if ((table->mc_reg_address[i].uc_pre_reg_data &
70 							LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
71 					table->mc_reg_table_entry[num_ranges].mc_data[i] =
72 						(uint32_t)*((uint32_t *)reg_data + j);
73 					j++;
74 				} else if ((table->mc_reg_address[i].uc_pre_reg_data &
75 							LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
76 					table->mc_reg_table_entry[num_ranges].mc_data[i] =
77 						table->mc_reg_table_entry[num_ranges].mc_data[i-1];
78 				}
79 			}
80 			num_ranges++;
81 		}
82 
83 		reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
			((uint8_t *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize));
85 	}
86 
87 	PP_ASSERT_WITH_CODE((*(uint32_t *)reg_data == END_OF_REG_DATA_BLOCK),
88 			"Invalid VramInfo table.", return -1);
89 	table->num_entries = num_ranges;
90 
91 	return 0;
92 }
93 
94 /**
95  * Get memory clock AC timing registers index from VBIOS table
96  * VBIOS set end of memory clock AC timing registers by ucPreRegDataLength bit6 = 1
97  * @param    reg_block the address ATOM_INIT_REG_BLOCK
98  * @param    table the address of MCRegTable
99  * @return   0
100  */
101 static int atomctrl_set_mc_reg_address_table(
102 		ATOM_INIT_REG_BLOCK *reg_block,
103 		pp_atomctrl_mc_reg_table *table)
104 {
105 	uint8_t i = 0;
106 	uint8_t num_entries = (uint8_t)((le16_to_cpu(reg_block->usRegIndexTblSize))
107 			/ sizeof(ATOM_INIT_REG_INDEX_FORMAT));
108 	ATOM_INIT_REG_INDEX_FORMAT *format = &reg_block->asRegIndexBuf[0];
109 
110 	num_entries--;        /* subtract 1 data end mark entry */
111 
112 	PP_ASSERT_WITH_CODE((num_entries <= VBIOS_MC_REGISTER_ARRAY_SIZE),
113 			"Invalid VramInfo table.", return -1);
114 
115 	/* ucPreRegDataLength bit6 = 1 is the end of memory clock AC timing registers */
116 	while ((!(format->ucPreRegDataLength & ACCESS_PLACEHOLDER)) &&
117 			(i < num_entries)) {
118 		table->mc_reg_address[i].s1 =
119 			(uint16_t)(le16_to_cpu(format->usRegIndex));
120 		table->mc_reg_address[i].uc_pre_reg_data =
121 			format->ucPreRegDataLength;
122 
123 		i++;
124 		format = (ATOM_INIT_REG_INDEX_FORMAT *)
125 			((uint8_t *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
126 	}
127 
128 	table->last = i;
129 	return 0;
130 }
131 
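/*
 * Build the MC register table for the given memory module from the VBIOS
 * VRAM_Info table: locate the memory clock patch block through
 * usMemClkPatchTblOffset, fill in the register addresses, then retrieve the
 * AC timing data for each memory clock range.
 */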
132 int atomctrl_initialize_mc_reg_table(
133 		struct pp_hwmgr *hwmgr,
134 		uint8_t module_index,
135 		pp_atomctrl_mc_reg_table *table)
136 {
137 	ATOM_VRAM_INFO_HEADER_V2_1 *vram_info;
138 	ATOM_INIT_REG_BLOCK *reg_block;
139 	int result = 0;
140 	u8 frev, crev;
141 	u16 size;
142 
143 	vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *)
144 		smu_atom_get_data_table(hwmgr->adev,
145 				GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
146 
	if (!vram_info) {
		pr_err("Could not get VramInfo table.");
		result = -1;
	} else if (module_index >= vram_info->ucNumOfVRAMModule) {
148 		pr_err("Invalid VramInfo table.");
149 		result = -1;
150 	} else if (vram_info->sHeader.ucTableFormatRevision < 2) {
151 		pr_err("Invalid VramInfo table.");
152 		result = -1;
153 	}
154 
155 	if (0 == result) {
156 		reg_block = (ATOM_INIT_REG_BLOCK *)
157 			((uint8_t *)vram_info + le16_to_cpu(vram_info->usMemClkPatchTblOffset));
158 		result = atomctrl_set_mc_reg_address_table(reg_block, table);
159 	}
160 
161 	if (0 == result) {
162 		result = atomctrl_retrieve_ac_timing(module_index,
163 					reg_block, table);
164 	}
165 
166 	return result;
167 }
168 
169 int atomctrl_initialize_mc_reg_table_v2_2(
170 		struct pp_hwmgr *hwmgr,
171 		uint8_t module_index,
172 		pp_atomctrl_mc_reg_table *table)
173 {
174 	ATOM_VRAM_INFO_HEADER_V2_2 *vram_info;
175 	ATOM_INIT_REG_BLOCK *reg_block;
176 	int result = 0;
177 	u8 frev, crev;
178 	u16 size;
179 
180 	vram_info = (ATOM_VRAM_INFO_HEADER_V2_2 *)
181 		smu_atom_get_data_table(hwmgr->adev,
182 				GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
183 
	if (!vram_info) {
		pr_err("Could not get VramInfo table.");
		result = -1;
	} else if (module_index >= vram_info->ucNumOfVRAMModule) {
185 		pr_err("Invalid VramInfo table.");
186 		result = -1;
187 	} else if (vram_info->sHeader.ucTableFormatRevision < 2) {
188 		pr_err("Invalid VramInfo table.");
189 		result = -1;
190 	}
191 
192 	if (0 == result) {
193 		reg_block = (ATOM_INIT_REG_BLOCK *)
194 			((uint8_t *)vram_info + le16_to_cpu(vram_info->usMemClkPatchTblOffset));
195 		result = atomctrl_set_mc_reg_address_table(reg_block, table);
196 	}
197 
198 	if (0 == result) {
199 		result = atomctrl_retrieve_ac_timing(module_index,
200 					reg_block, table);
201 	}
202 
203 	return result;
204 }
205 
206 /**
207  * Set DRAM timings based on engine clock and memory clock.
208  */
209 int atomctrl_set_engine_dram_timings_rv770(
210 		struct pp_hwmgr *hwmgr,
211 		uint32_t engine_clock,
212 		uint32_t memory_clock)
213 {
214 	struct amdgpu_device *adev = hwmgr->adev;
215 
216 	SET_ENGINE_CLOCK_PS_ALLOCATION engine_clock_parameters;
217 
218 	/* They are both in 10KHz Units. */
219 	engine_clock_parameters.ulTargetEngineClock =
220 		cpu_to_le32((engine_clock & SET_CLOCK_FREQ_MASK) |
221 			    ((COMPUTE_ENGINE_PLL_PARAM << 24)));
222 
	/* in 10KHz units. */
224 	engine_clock_parameters.sReserved.ulClock =
225 		cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK);
226 
227 	return amdgpu_atom_execute_table(adev->mode_info.atom_context,
228 			GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
229 			(uint32_t *)&engine_clock_parameters);
230 }
231 
232 /**
233  * Private Function to get the PowerPlay Table Address.
234  * WARNING: The tabled returned by this function is in
235  * dynamically allocated memory.
236  * The caller has to release if by calling kfree.
237  */
238 static ATOM_VOLTAGE_OBJECT_INFO *get_voltage_info_table(void *device)
239 {
240 	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
241 	u8 frev, crev;
242 	u16 size;
243 	union voltage_object_info *voltage_info;
244 
245 	voltage_info = (union voltage_object_info *)
246 		smu_atom_get_data_table(device, index,
247 			&size, &frev, &crev);
248 
249 	if (voltage_info != NULL)
250 		return (ATOM_VOLTAGE_OBJECT_INFO *) &(voltage_info->v3);
251 	else
252 		return NULL;
253 }
254 
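/*
 * Scan the variable-length voltage object list of the V3_1 info table for an
 * object that matches both the requested voltage type and voltage mode.  Each
 * object carries its own size in its header, which is used to step to the
 * next entry.
 */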
255 static const ATOM_VOLTAGE_OBJECT_V3 *atomctrl_lookup_voltage_type_v3(
256 		const ATOM_VOLTAGE_OBJECT_INFO_V3_1 * voltage_object_info_table,
257 		uint8_t voltage_type, uint8_t voltage_mode)
258 {
259 	unsigned int size = le16_to_cpu(voltage_object_info_table->sHeader.usStructureSize);
260 	unsigned int offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
261 	uint8_t *start = (uint8_t *)voltage_object_info_table;
262 
263 	while (offset < size) {
264 		const ATOM_VOLTAGE_OBJECT_V3 *voltage_object =
265 			(const ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);
266 
267 		if (voltage_type == voltage_object->asGpioVoltageObj.sHeader.ucVoltageType &&
268 			voltage_mode == voltage_object->asGpioVoltageObj.sHeader.ucVoltageMode)
269 			return voltage_object;
270 
271 		offset += le16_to_cpu(voltage_object->asGpioVoltageObj.sHeader.usSize);
272 	}
273 
274 	return NULL;
275 }
276 
/** atomctrl_get_memory_pll_dividers_si().
 *
 * @param hwmgr        input parameter: pointer to HwMgr
 * @param clock_value  input parameter: memory clock
 * @param mpll_param   output parameter: memory PLL dividers
 * @param strobe_mode  input parameter: 1 for strobe mode, 0 for performance mode
 */
284 int atomctrl_get_memory_pll_dividers_si(
285 		struct pp_hwmgr *hwmgr,
286 		uint32_t clock_value,
287 		pp_atomctrl_memory_clock_param *mpll_param,
288 		bool strobe_mode)
289 {
290 	struct amdgpu_device *adev = hwmgr->adev;
291 	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters;
292 	int result;
293 
294 	mpll_parameters.ulClock = cpu_to_le32(clock_value);
295 	mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0);
296 
297 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
298 		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
299 		(uint32_t *)&mpll_parameters);
300 
301 	if (0 == result) {
302 		mpll_param->mpll_fb_divider.clk_frac =
303 			le16_to_cpu(mpll_parameters.ulFbDiv.usFbDivFrac);
304 		mpll_param->mpll_fb_divider.cl_kf =
305 			le16_to_cpu(mpll_parameters.ulFbDiv.usFbDiv);
306 		mpll_param->mpll_post_divider =
307 			(uint32_t)mpll_parameters.ucPostDiv;
308 		mpll_param->vco_mode =
309 			(uint32_t)(mpll_parameters.ucPllCntlFlag &
310 					MPLL_CNTL_FLAG_VCO_MODE_MASK);
311 		mpll_param->yclk_sel =
312 			(uint32_t)((mpll_parameters.ucPllCntlFlag &
313 						MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0);
314 		mpll_param->qdr =
315 			(uint32_t)((mpll_parameters.ucPllCntlFlag &
316 						MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0);
317 		mpll_param->half_rate =
318 			(uint32_t)((mpll_parameters.ucPllCntlFlag &
319 						MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0);
320 		mpll_param->dll_speed =
321 			(uint32_t)(mpll_parameters.ucDllSpeed);
322 		mpll_param->bw_ctrl =
323 			(uint32_t)(mpll_parameters.ucBWCntl);
324 	}
325 
326 	return result;
327 }
328 
/** atomctrl_get_memory_pll_dividers_vi().
 *
 * @param hwmgr        input parameter: pointer to HwMgr
 * @param clock_value  input parameter: memory clock
 * @param mpll_param   output parameter: memory PLL dividers
 */
335 int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
336 		uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param)
337 {
338 	struct amdgpu_device *adev = hwmgr->adev;
339 	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters;
340 	int result;
341 
342 	mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
343 
344 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
345 			GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
346 			(uint32_t *)&mpll_parameters);
347 
348 	if (!result)
349 		mpll_param->mpll_post_divider =
350 				(uint32_t)mpll_parameters.ulClock.ucPostDiv;
351 
352 	return result;
353 }
354 
355 int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr,
356 					uint32_t clock_value,
357 					pp_atomctrl_memory_clock_param_ai *mpll_param)
358 {
359 	struct amdgpu_device *adev = hwmgr->adev;
360 	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 mpll_parameters = {{0}, 0, 0};
361 	int result;
362 
363 	mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
364 
365 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
366 			GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
367 			(uint32_t *)&mpll_parameters);
368 
	/* VEGAM's mpll takes some time to finish computing */
370 	udelay(10);
371 
372 	if (!result) {
373 		mpll_param->ulMclk_fcw_int =
374 			le16_to_cpu(mpll_parameters.usMclk_fcw_int);
375 		mpll_param->ulMclk_fcw_frac =
376 			le16_to_cpu(mpll_parameters.usMclk_fcw_frac);
377 		mpll_param->ulClock =
378 			le32_to_cpu(mpll_parameters.ulClock.ulClock);
379 		mpll_param->ulPostDiv = mpll_parameters.ulClock.ucPostDiv;
380 	}
381 
382 	return result;
383 }
384 
385 int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
386 					  uint32_t clock_value,
387 					  pp_atomctrl_clock_dividers_kong *dividers)
388 {
389 	struct amdgpu_device *adev = hwmgr->adev;
390 	COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters;
391 	int result;
392 
393 	pll_parameters.ulClock = cpu_to_le32(clock_value);
394 
395 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
396 		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
397 		(uint32_t *)&pll_parameters);
398 
399 	if (0 == result) {
400 		dividers->pll_post_divider = pll_parameters.ucPostDiv;
401 		dividers->real_clock = le32_to_cpu(pll_parameters.ulClock);
402 	}
403 
404 	return result;
405 }
406 
407 int atomctrl_get_engine_pll_dividers_vi(
408 		struct pp_hwmgr *hwmgr,
409 		uint32_t clock_value,
410 		pp_atomctrl_clock_dividers_vi *dividers)
411 {
412 	struct amdgpu_device *adev = hwmgr->adev;
	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_parameters;
	int result;

	pll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
	pll_parameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
		(uint32_t *)&pll_parameters);

	if (0 == result) {
		dividers->pll_post_divider =
			pll_parameters.ulClock.ucPostDiv;
		dividers->real_clock =
			le32_to_cpu(pll_parameters.ulClock.ulClock);

		dividers->ul_fb_div.ul_fb_div_frac =
			le16_to_cpu(pll_parameters.ulFbDiv.usFbDivFrac);
		dividers->ul_fb_div.ul_fb_div =
			le16_to_cpu(pll_parameters.ulFbDiv.usFbDiv);

		dividers->uc_pll_ref_div =
			pll_parameters.ucPllRefDiv;
		dividers->uc_pll_post_div =
			pll_parameters.ucPllPostDiv;
		dividers->uc_pll_cntl_flag =
			pll_parameters.ucPllCntlFlag;
	}
440 	}
441 
442 	return result;
443 }
444 
445 int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
446 		uint32_t clock_value,
447 		pp_atomctrl_clock_dividers_ai *dividers)
448 {
449 	struct amdgpu_device *adev = hwmgr->adev;
	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_parameters;
	int result;

	pll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
	pll_parameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
		(uint32_t *)&pll_parameters);

	if (0 == result) {
		dividers->usSclk_fcw_frac     = le16_to_cpu(pll_parameters.usSclk_fcw_frac);
		dividers->usSclk_fcw_int      = le16_to_cpu(pll_parameters.usSclk_fcw_int);
		dividers->ucSclkPostDiv       = pll_parameters.ucSclkPostDiv;
		dividers->ucSclkVcoMode       = pll_parameters.ucSclkVcoMode;
		dividers->ucSclkPllRange      = pll_parameters.ucSclkPllRange;
		dividers->ucSscEnable         = pll_parameters.ucSscEnable;
		dividers->usSsc_fcw1_frac     = le16_to_cpu(pll_parameters.usSsc_fcw1_frac);
		dividers->usSsc_fcw1_int      = le16_to_cpu(pll_parameters.usSsc_fcw1_int);
		dividers->usPcc_fcw_int       = le16_to_cpu(pll_parameters.usPcc_fcw_int);
		dividers->usSsc_fcw_slew_frac = le16_to_cpu(pll_parameters.usSsc_fcw_slew_frac);
		dividers->usPcc_fcw_slew_frac = le16_to_cpu(pll_parameters.usPcc_fcw_slew_frac);
472 	}
473 	return result;
474 }
475 
476 int atomctrl_get_dfs_pll_dividers_vi(
477 		struct pp_hwmgr *hwmgr,
478 		uint32_t clock_value,
479 		pp_atomctrl_clock_dividers_vi *dividers)
480 {
481 	struct amdgpu_device *adev = hwmgr->adev;
	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_parameters;
	int result;

	pll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
	pll_parameters.ulClock.ucPostDiv =
		COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK;

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
		(uint32_t *)&pll_parameters);

	if (0 == result) {
		dividers->pll_post_divider =
			pll_parameters.ulClock.ucPostDiv;
		dividers->real_clock =
			le32_to_cpu(pll_parameters.ulClock.ulClock);

		dividers->ul_fb_div.ul_fb_div_frac =
			le16_to_cpu(pll_parameters.ulFbDiv.usFbDivFrac);
		dividers->ul_fb_div.ul_fb_div =
			le16_to_cpu(pll_parameters.ulFbDiv.usFbDiv);

		dividers->uc_pll_ref_div =
			pll_parameters.ucPllRefDiv;
		dividers->uc_pll_post_div =
			pll_parameters.ucPllPostDiv;
		dividers->uc_pll_cntl_flag =
			pll_parameters.ucPllCntlFlag;
510 	}
511 
512 	return result;
513 }
514 
515 /**
516  * Get the reference clock in 10KHz
517  */
518 uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr)
519 {
520 	ATOM_FIRMWARE_INFO *fw_info;
521 	u8 frev, crev;
522 	u16 size;
523 	uint32_t clock;
524 
525 	fw_info = (ATOM_FIRMWARE_INFO *)
526 		smu_atom_get_data_table(hwmgr->adev,
527 			GetIndexIntoMasterTable(DATA, FirmwareInfo),
528 			&size, &frev, &crev);
529 
530 	if (fw_info == NULL)
531 		clock = 2700;
532 	else
533 		clock = (uint32_t)(le16_to_cpu(fw_info->usReferenceClock));
534 
535 	return clock;
536 }
537 
538 /**
539  * Returns true if the given voltage type is controlled by GPIO pins.
540  * voltage_type is one of SET_VOLTAGE_TYPE_ASIC_VDDC,
541  * SET_VOLTAGE_TYPE_ASIC_MVDDC, SET_VOLTAGE_TYPE_ASIC_MVDDQ.
542  * voltage_mode is one of ATOM_SET_VOLTAGE, ATOM_SET_VOLTAGE_PHASE
543  */
544 bool atomctrl_is_voltage_controlled_by_gpio_v3(
545 		struct pp_hwmgr *hwmgr,
546 		uint8_t voltage_type,
547 		uint8_t voltage_mode)
548 {
549 	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
550 		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
551 	bool ret;
552 
553 	PP_ASSERT_WITH_CODE((NULL != voltage_info),
554 			"Could not find Voltage Table in BIOS.", return false;);
555 
556 	ret = (NULL != atomctrl_lookup_voltage_type_v3
557 			(voltage_info, voltage_type, voltage_mode)) ? true : false;
558 
559 	return ret;
560 }
561 
562 int atomctrl_get_voltage_table_v3(
563 		struct pp_hwmgr *hwmgr,
564 		uint8_t voltage_type,
565 		uint8_t voltage_mode,
566 		pp_atomctrl_voltage_table *voltage_table)
567 {
568 	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
569 		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
570 	const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;
571 	unsigned int i;
572 
573 	PP_ASSERT_WITH_CODE((NULL != voltage_info),
574 			"Could not find Voltage Table in BIOS.", return -1;);
575 
576 	voltage_object = atomctrl_lookup_voltage_type_v3
577 		(voltage_info, voltage_type, voltage_mode);
578 
579 	if (voltage_object == NULL)
580 		return -1;
581 
582 	PP_ASSERT_WITH_CODE(
583 			(voltage_object->asGpioVoltageObj.ucGpioEntryNum <=
584 			PP_ATOMCTRL_MAX_VOLTAGE_ENTRIES),
585 			"Too many voltage entries!",
586 			return -1;
587 			);
588 
589 	for (i = 0; i < voltage_object->asGpioVoltageObj.ucGpioEntryNum; i++) {
590 		voltage_table->entries[i].value =
591 			le16_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue);
592 		voltage_table->entries[i].smio_low =
593 			le32_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId);
594 	}
595 
596 	voltage_table->mask_low    =
597 		le32_to_cpu(voltage_object->asGpioVoltageObj.ulGpioMaskVal);
598 	voltage_table->count      =
599 		voltage_object->asGpioVoltageObj.ucGpioEntryNum;
600 	voltage_table->phase_delay =
601 		voltage_object->asGpioVoltageObj.ucPhaseDelay;
602 
603 	return 0;
604 }
605 
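/*
 * Scan the GPIO pin lookup table for the given pin id.  ucGPIO_ID is the last
 * byte of ATOM_GPIO_PIN_ASSIGNMENT, so advancing by
 * offsetof(ATOM_GPIO_PIN_ASSIGNMENT, ucGPIO_ID) + 1 steps exactly one entry.
 */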
606 static bool atomctrl_lookup_gpio_pin(
607 		ATOM_GPIO_PIN_LUT * gpio_lookup_table,
608 		const uint32_t pinId,
609 		pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment)
610 {
611 	unsigned int size = le16_to_cpu(gpio_lookup_table->sHeader.usStructureSize);
612 	unsigned int offset = offsetof(ATOM_GPIO_PIN_LUT, asGPIO_Pin[0]);
613 	uint8_t *start = (uint8_t *)gpio_lookup_table;
614 
615 	while (offset < size) {
616 		const ATOM_GPIO_PIN_ASSIGNMENT *pin_assignment =
617 			(const ATOM_GPIO_PIN_ASSIGNMENT *)(start + offset);
618 
619 		if (pinId == pin_assignment->ucGPIO_ID) {
620 			gpio_pin_assignment->uc_gpio_pin_bit_shift =
621 				pin_assignment->ucGpioPinBitShift;
622 			gpio_pin_assignment->us_gpio_pin_aindex =
623 				le16_to_cpu(pin_assignment->usGpioPin_AIndex);
624 			return true;
625 		}
626 
627 		offset += offsetof(ATOM_GPIO_PIN_ASSIGNMENT, ucGPIO_ID) + 1;
628 	}
629 
630 	return false;
631 }
632 
633 /**
634  * Private Function to get the PowerPlay Table Address.
635  * WARNING: The tabled returned by this function is in
636  * dynamically allocated memory.
637  * The caller has to release if by calling kfree.
638  */
639 static ATOM_GPIO_PIN_LUT *get_gpio_lookup_table(void *device)
640 {
641 	u8 frev, crev;
642 	u16 size;
643 	void *table_address;
644 
645 	table_address = (ATOM_GPIO_PIN_LUT *)
646 		smu_atom_get_data_table(device,
647 				GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT),
648 				&size, &frev, &crev);
649 
650 	PP_ASSERT_WITH_CODE((NULL != table_address),
651 			"Error retrieving BIOS Table Address!", return NULL;);
652 
653 	return (ATOM_GPIO_PIN_LUT *)table_address;
654 }
655 
656 /**
657  * Returns 1 if the given pin id find in lookup table.
658  */
659 bool atomctrl_get_pp_assign_pin(
660 		struct pp_hwmgr *hwmgr,
661 		const uint32_t pinId,
662 		pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment)
663 {
664 	bool bRet = false;
665 	ATOM_GPIO_PIN_LUT *gpio_lookup_table =
666 		get_gpio_lookup_table(hwmgr->adev);
667 
668 	PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table),
669 			"Could not find GPIO lookup Table in BIOS.", return false);
670 
671 	bRet = atomctrl_lookup_gpio_pin(gpio_lookup_table, pinId,
672 		gpio_pin_assignment);
673 
674 	return bRet;
675 }
676 
677 int atomctrl_calculate_voltage_evv_on_sclk(
678 		struct pp_hwmgr *hwmgr,
679 		uint8_t voltage_type,
680 		uint32_t sclk,
681 		uint16_t virtual_voltage_Id,
682 		uint16_t *voltage,
683 		uint16_t dpm_level,
684 		bool debug)
685 {
686 	ATOM_ASIC_PROFILING_INFO_V3_4 *getASICProfilingInfo;
687 	struct amdgpu_device *adev = hwmgr->adev;
688 	EFUSE_LINEAR_FUNC_PARAM sRO_fuse;
689 	EFUSE_LINEAR_FUNC_PARAM sCACm_fuse;
690 	EFUSE_LINEAR_FUNC_PARAM sCACb_fuse;
691 	EFUSE_LOGISTIC_FUNC_PARAM sKt_Beta_fuse;
692 	EFUSE_LOGISTIC_FUNC_PARAM sKv_m_fuse;
693 	EFUSE_LOGISTIC_FUNC_PARAM sKv_b_fuse;
694 	EFUSE_INPUT_PARAMETER sInput_FuseValues;
695 	READ_EFUSE_VALUE_PARAMETER sOutput_FuseValues;
696 
697 	uint32_t ul_RO_fused, ul_CACb_fused, ul_CACm_fused, ul_Kt_Beta_fused, ul_Kv_m_fused, ul_Kv_b_fused;
698 	fInt fSM_A0, fSM_A1, fSM_A2, fSM_A3, fSM_A4, fSM_A5, fSM_A6, fSM_A7;
699 	fInt fMargin_RO_a, fMargin_RO_b, fMargin_RO_c, fMargin_fixed, fMargin_FMAX_mean, fMargin_Plat_mean, fMargin_FMAX_sigma, fMargin_Plat_sigma, fMargin_DC_sigma;
700 	fInt fLkg_FT, repeat;
701 	fInt fMicro_FMAX, fMicro_CR, fSigma_FMAX, fSigma_CR, fSigma_DC, fDC_SCLK, fSquared_Sigma_DC, fSquared_Sigma_CR, fSquared_Sigma_FMAX;
702 	fInt fRLL_LoadLine, fPowerDPMx, fDerateTDP, fVDDC_base, fA_Term, fC_Term, fB_Term, fRO_DC_margin;
703 	fInt fRO_fused, fCACm_fused, fCACb_fused, fKv_m_fused, fKv_b_fused, fKt_Beta_fused, fFT_Lkg_V0NORM;
704 	fInt fSclk_margin, fSclk, fEVV_V;
705 	fInt fV_min, fV_max, fT_prod, fLKG_Factor, fT_FT, fV_FT, fV_x, fTDP_Power, fTDP_Power_right, fTDP_Power_left, fTDP_Current, fV_NL;
706 	uint32_t ul_FT_Lkg_V0NORM;
707 	fInt fLn_MaxDivMin, fMin, fAverage, fRange;
708 	fInt fRoots[2];
709 	fInt fStepSize = GetScaledFraction(625, 100000);
710 
711 	int result;
712 
713 	getASICProfilingInfo = (ATOM_ASIC_PROFILING_INFO_V3_4 *)
714 			smu_atom_get_data_table(hwmgr->adev,
715 					GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
716 					NULL, NULL, NULL);
717 
718 	if (!getASICProfilingInfo)
719 		return -1;
720 
721 	if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 ||
722 	    (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 &&
723 	     getASICProfilingInfo->asHeader.ucTableContentRevision < 4))
724 		return -1;
725 
726 	/*-----------------------------------------------------------
727 	 *GETTING MULTI-STEP PARAMETERS RELATED TO CURRENT DPM LEVEL
728 	 *-----------------------------------------------------------
729 	 */
730 	fRLL_LoadLine = Divide(getASICProfilingInfo->ulLoadLineSlop, 1000);
731 
732 	switch (dpm_level) {
733 	case 1:
734 		fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm1));
735 		fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM1), 1000);
736 		break;
737 	case 2:
738 		fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm2));
739 		fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM2), 1000);
740 		break;
741 	case 3:
742 		fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm3));
743 		fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM3), 1000);
744 		break;
745 	case 4:
746 		fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm4));
747 		fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM4), 1000);
748 		break;
749 	case 5:
750 		fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm5));
751 		fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM5), 1000);
752 		break;
753 	case 6:
754 		fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm6));
755 		fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM6), 1000);
756 		break;
757 	case 7:
758 		fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm7));
759 		fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000);
760 		break;
761 	default:
762 		pr_err("DPM Level not supported\n");
763 		fPowerDPMx = Convert_ULONG_ToFraction(1);
764 		fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000);
765 	}
766 
767 	/*-------------------------
768 	 * DECODING FUSE VALUES
769 	 * ------------------------
770 	 */
771 	/*Decode RO_Fused*/
772 	sRO_fuse = getASICProfilingInfo->sRoFuse;
773 
774 	sInput_FuseValues.usEfuseIndex = sRO_fuse.usEfuseIndex;
775 	sInput_FuseValues.ucBitShift = sRO_fuse.ucEfuseBitLSB;
776 	sInput_FuseValues.ucBitLength = sRO_fuse.ucEfuseLength;
777 
778 	sOutput_FuseValues.sEfuse = sInput_FuseValues;
779 
780 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
781 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
782 			(uint32_t *)&sOutput_FuseValues);
783 
784 	if (result)
785 		return result;
786 
787 	/* Finally, the actual fuse value */
788 	ul_RO_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
789 	fMin = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseMin), 1);
790 	fRange = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseEncodeRange), 1);
791 	fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength);
792 
793 	sCACm_fuse = getASICProfilingInfo->sCACm;
794 
795 	sInput_FuseValues.usEfuseIndex = sCACm_fuse.usEfuseIndex;
796 	sInput_FuseValues.ucBitShift = sCACm_fuse.ucEfuseBitLSB;
797 	sInput_FuseValues.ucBitLength = sCACm_fuse.ucEfuseLength;
798 
799 	sOutput_FuseValues.sEfuse = sInput_FuseValues;
800 
801 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
802 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
803 			(uint32_t *)&sOutput_FuseValues);
804 
805 	if (result)
806 		return result;
807 
808 	ul_CACm_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
809 	fMin = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseMin), 1000);
810 	fRange = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseEncodeRange), 1000);
811 
812 	fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength);
813 
814 	sCACb_fuse = getASICProfilingInfo->sCACb;
815 
816 	sInput_FuseValues.usEfuseIndex = sCACb_fuse.usEfuseIndex;
817 	sInput_FuseValues.ucBitShift = sCACb_fuse.ucEfuseBitLSB;
818 	sInput_FuseValues.ucBitLength = sCACb_fuse.ucEfuseLength;
819 	sOutput_FuseValues.sEfuse = sInput_FuseValues;
820 
821 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
822 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
823 			(uint32_t *)&sOutput_FuseValues);
824 
825 	if (result)
826 		return result;
827 
828 	ul_CACb_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
829 	fMin = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseMin), 1000);
830 	fRange = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseEncodeRange), 1000);
831 
832 	fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength);
833 
834 	sKt_Beta_fuse = getASICProfilingInfo->sKt_b;
835 
836 	sInput_FuseValues.usEfuseIndex = sKt_Beta_fuse.usEfuseIndex;
837 	sInput_FuseValues.ucBitShift = sKt_Beta_fuse.ucEfuseBitLSB;
838 	sInput_FuseValues.ucBitLength = sKt_Beta_fuse.ucEfuseLength;
839 
840 	sOutput_FuseValues.sEfuse = sInput_FuseValues;
841 
842 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
843 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
844 			(uint32_t *)&sOutput_FuseValues);
845 
846 	if (result)
847 		return result;
848 
849 	ul_Kt_Beta_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
850 	fAverage = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeAverage), 1000);
851 	fRange = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeRange), 1000);
852 
853 	fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused,
854 			fAverage, fRange, sKt_Beta_fuse.ucEfuseLength);
855 
856 	sKv_m_fuse = getASICProfilingInfo->sKv_m;
857 
858 	sInput_FuseValues.usEfuseIndex = sKv_m_fuse.usEfuseIndex;
859 	sInput_FuseValues.ucBitShift = sKv_m_fuse.ucEfuseBitLSB;
860 	sInput_FuseValues.ucBitLength = sKv_m_fuse.ucEfuseLength;
861 
862 	sOutput_FuseValues.sEfuse = sInput_FuseValues;
863 
864 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
865 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
866 			(uint32_t *)&sOutput_FuseValues);
867 	if (result)
868 		return result;
869 
870 	ul_Kv_m_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
871 	fAverage = GetScaledFraction(le32_to_cpu(sKv_m_fuse.ulEfuseEncodeAverage), 1000);
872 	fRange = GetScaledFraction((le32_to_cpu(sKv_m_fuse.ulEfuseEncodeRange) & 0x7fffffff), 1000);
873 	fRange = fMultiply(fRange, ConvertToFraction(-1));
874 
875 	fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused,
876 			fAverage, fRange, sKv_m_fuse.ucEfuseLength);
877 
878 	sKv_b_fuse = getASICProfilingInfo->sKv_b;
879 
880 	sInput_FuseValues.usEfuseIndex = sKv_b_fuse.usEfuseIndex;
881 	sInput_FuseValues.ucBitShift = sKv_b_fuse.ucEfuseBitLSB;
882 	sInput_FuseValues.ucBitLength = sKv_b_fuse.ucEfuseLength;
883 	sOutput_FuseValues.sEfuse = sInput_FuseValues;
884 
885 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
886 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
887 			(uint32_t *)&sOutput_FuseValues);
888 
889 	if (result)
890 		return result;
891 
892 	ul_Kv_b_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
893 	fAverage = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeAverage), 1000);
894 	fRange = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeRange), 1000);
895 
896 	fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused,
897 			fAverage, fRange, sKv_b_fuse.ucEfuseLength);
898 
899 	/* Decoding the Leakage - No special struct container */
900 	/*
901 	 * usLkgEuseIndex=56
902 	 * ucLkgEfuseBitLSB=6
903 	 * ucLkgEfuseLength=10
904 	 * ulLkgEncodeLn_MaxDivMin=69077
905 	 * ulLkgEncodeMax=1000000
906 	 * ulLkgEncodeMin=1000
907 	 * ulEfuseLogisticAlpha=13
908 	 */
909 
910 	sInput_FuseValues.usEfuseIndex = getASICProfilingInfo->usLkgEuseIndex;
911 	sInput_FuseValues.ucBitShift = getASICProfilingInfo->ucLkgEfuseBitLSB;
912 	sInput_FuseValues.ucBitLength = getASICProfilingInfo->ucLkgEfuseLength;
913 
914 	sOutput_FuseValues.sEfuse = sInput_FuseValues;
915 
916 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
917 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
918 			(uint32_t *)&sOutput_FuseValues);
919 
920 	if (result)
921 		return result;
922 
923 	ul_FT_Lkg_V0NORM = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
924 	fLn_MaxDivMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin), 10000);
925 	fMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeMin), 10000);
926 
927 	fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM,
928 			fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength);
929 	fLkg_FT = fFT_Lkg_V0NORM;
930 
931 	/*-------------------------------------------
932 	 * PART 2 - Grabbing all required values
933 	 *-------------------------------------------
934 	 */
935 	fSM_A0 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A0), 1000000),
936 			ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign)));
937 	fSM_A1 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A1), 1000000),
938 			ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign)));
939 	fSM_A2 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A2), 100000),
940 			ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign)));
941 	fSM_A3 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A3), 1000000),
942 			ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign)));
943 	fSM_A4 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A4), 1000000),
944 			ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign)));
945 	fSM_A5 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A5), 1000),
946 			ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign)));
947 	fSM_A6 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A6), 1000),
948 			ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign)));
949 	fSM_A7 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A7), 1000),
950 			ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign)));
951 
952 	fMargin_RO_a = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_a));
953 	fMargin_RO_b = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_b));
954 	fMargin_RO_c = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_c));
955 
956 	fMargin_fixed = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_fixed));
957 
958 	fMargin_FMAX_mean = GetScaledFraction(
959 		le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_mean), 10000);
960 	fMargin_Plat_mean = GetScaledFraction(
961 		le32_to_cpu(getASICProfilingInfo->ulMargin_plat_mean), 10000);
962 	fMargin_FMAX_sigma = GetScaledFraction(
963 		le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_sigma), 10000);
964 	fMargin_Plat_sigma = GetScaledFraction(
965 		le32_to_cpu(getASICProfilingInfo->ulMargin_plat_sigma), 10000);
966 
967 	fMargin_DC_sigma = GetScaledFraction(
968 		le32_to_cpu(getASICProfilingInfo->ulMargin_DC_sigma), 100);
969 	fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000));
970 
971 	fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100));
972 	fCACb_fused = fDivide(fCACb_fused, ConvertToFraction(100));
973 	fKt_Beta_fused = fDivide(fKt_Beta_fused, ConvertToFraction(100));
974 	fKv_m_fused =  fNegate(fDivide(fKv_m_fused, ConvertToFraction(100)));
975 	fKv_b_fused = fDivide(fKv_b_fused, ConvertToFraction(10));
976 
977 	fSclk = GetScaledFraction(sclk, 100);
978 
979 	fV_max = fDivide(GetScaledFraction(
980 				 le32_to_cpu(getASICProfilingInfo->ulMaxVddc), 1000), ConvertToFraction(4));
981 	fT_prod = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulBoardCoreTemp), 10);
982 	fLKG_Factor = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulEvvLkgFactor), 100);
983 	fT_FT = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLeakageTemp), 10);
984 	fV_FT = fDivide(GetScaledFraction(
985 				le32_to_cpu(getASICProfilingInfo->ulLeakageVoltage), 1000), ConvertToFraction(4));
986 	fV_min = fDivide(GetScaledFraction(
987 				 le32_to_cpu(getASICProfilingInfo->ulMinVddc), 1000), ConvertToFraction(4));
988 
989 	/*-----------------------
990 	 * PART 3
991 	 *-----------------------
992 	 */
993 
994 	fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5));
995 	fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b);
996 	fC_Term = fAdd(fMargin_RO_c,
997 			fAdd(fMultiply(fSM_A0, fLkg_FT),
998 			fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)),
999 			fAdd(fMultiply(fSM_A3, fSclk),
1000 			fSubtract(fSM_A7, fRO_fused)))));
1001 
1002 	fVDDC_base = fSubtract(fRO_fused,
1003 			fSubtract(fMargin_RO_c,
1004 					fSubtract(fSM_A3, fMultiply(fSM_A1, fSclk))));
1005 	fVDDC_base = fDivide(fVDDC_base, fAdd(fMultiply(fSM_A0, fSclk), fSM_A2));
1006 
1007 	repeat = fSubtract(fVDDC_base,
1008 			fDivide(fMargin_DC_sigma, ConvertToFraction(1000)));
1009 
1010 	fRO_DC_margin = fAdd(fMultiply(fMargin_RO_a,
1011 			fGetSquare(repeat)),
1012 			fAdd(fMultiply(fMargin_RO_b, repeat),
1013 			fMargin_RO_c));
1014 
1015 	fDC_SCLK = fSubtract(fRO_fused,
1016 			fSubtract(fRO_DC_margin,
1017 			fSubtract(fSM_A3,
1018 			fMultiply(fSM_A2, repeat))));
1019 	fDC_SCLK = fDivide(fDC_SCLK, fAdd(fMultiply(fSM_A0, repeat), fSM_A1));
1020 
1021 	fSigma_DC = fSubtract(fSclk, fDC_SCLK);
1022 
1023 	fMicro_FMAX = fMultiply(fSclk, fMargin_FMAX_mean);
1024 	fMicro_CR = fMultiply(fSclk, fMargin_Plat_mean);
1025 	fSigma_FMAX = fMultiply(fSclk, fMargin_FMAX_sigma);
1026 	fSigma_CR = fMultiply(fSclk, fMargin_Plat_sigma);
1027 
1028 	fSquared_Sigma_DC = fGetSquare(fSigma_DC);
1029 	fSquared_Sigma_CR = fGetSquare(fSigma_CR);
1030 	fSquared_Sigma_FMAX = fGetSquare(fSigma_FMAX);
1031 
1032 	fSclk_margin = fAdd(fMicro_FMAX,
1033 			fAdd(fMicro_CR,
1034 			fAdd(fMargin_fixed,
1035 			fSqrt(fAdd(fSquared_Sigma_FMAX,
1036 			fAdd(fSquared_Sigma_DC, fSquared_Sigma_CR))))));
1037 	/*
1038 	 fA_Term = fSM_A4 * (fSclk + fSclk_margin) + fSM_A5;
1039 	 fB_Term = fSM_A2 * (fSclk + fSclk_margin) + fSM_A6;
1040 	 fC_Term = fRO_DC_margin + fSM_A0 * fLkg_FT + fSM_A1 * fLkg_FT * (fSclk + fSclk_margin) + fSM_A3 * (fSclk + fSclk_margin) + fSM_A7 - fRO_fused;
1041 	 */
1042 
1043 	fA_Term = fAdd(fMultiply(fSM_A4, fAdd(fSclk, fSclk_margin)), fSM_A5);
1044 	fB_Term = fAdd(fMultiply(fSM_A2, fAdd(fSclk, fSclk_margin)), fSM_A6);
1045 	fC_Term = fAdd(fRO_DC_margin,
1046 			fAdd(fMultiply(fSM_A0, fLkg_FT),
1047 			fAdd(fMultiply(fMultiply(fSM_A1, fLkg_FT),
1048 			fAdd(fSclk, fSclk_margin)),
1049 			fAdd(fMultiply(fSM_A3,
1050 			fAdd(fSclk, fSclk_margin)),
1051 			fSubtract(fSM_A7, fRO_fused)))));
1052 
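	/*
	 * Solve fA_Term * V^2 + fB_Term * V + fC_Term = 0 and take the smaller
	 * root as the EVV voltage, clamped to [fV_min, fV_max - fStepSize].
	 */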
1053 	SolveQuadracticEqn(fA_Term, fB_Term, fC_Term, fRoots);
1054 
1055 	if (GreaterThan(fRoots[0], fRoots[1]))
1056 		fEVV_V = fRoots[1];
1057 	else
1058 		fEVV_V = fRoots[0];
1059 
1060 	if (GreaterThan(fV_min, fEVV_V))
1061 		fEVV_V = fV_min;
1062 	else if (GreaterThan(fEVV_V, fV_max))
1063 		fEVV_V = fSubtract(fV_max, fStepSize);
1064 
1065 	fEVV_V = fRoundUpByStepSize(fEVV_V, fStepSize, 0);
1066 
1067 	/*-----------------
1068 	 * PART 4
1069 	 *-----------------
1070 	 */
1071 
1072 	fV_x = fV_min;
1073 
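	/*
	 * Sweep candidate voltages from fV_min to fV_max in fStepSize steps.
	 * For each fV_x, compute the dynamic (CAC-based) and leakage TDP power
	 * terms, convert the sum to a current, and apply the load line to get
	 * the regulated voltage fV_NL.  The first fV_NL below fV_max that is
	 * at least fEVV_V is returned (in mV) through *voltage.
	 */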
1074 	while (GreaterThan(fAdd(fV_max, fStepSize), fV_x)) {
1075 		fTDP_Power_left = fMultiply(fMultiply(fMultiply(fAdd(
1076 				fMultiply(fCACm_fused, fV_x), fCACb_fused), fSclk),
1077 				fGetSquare(fV_x)), fDerateTDP);
1078 
1079 		fTDP_Power_right = fMultiply(fFT_Lkg_V0NORM, fMultiply(fLKG_Factor,
1080 				fMultiply(fExponential(fMultiply(fAdd(fMultiply(fKv_m_fused,
1081 				fT_prod), fKv_b_fused), fV_x)), fV_x)));
1082 		fTDP_Power_right = fMultiply(fTDP_Power_right, fExponential(fMultiply(
1083 				fKt_Beta_fused, fT_prod)));
1084 		fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply(
1085 				fAdd(fMultiply(fKv_m_fused, fT_prod), fKv_b_fused), fV_FT)));
1086 		fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply(
1087 				fKt_Beta_fused, fT_FT)));
1088 
1089 		fTDP_Power = fAdd(fTDP_Power_left, fTDP_Power_right);
1090 
1091 		fTDP_Current = fDivide(fTDP_Power, fV_x);
1092 
1093 		fV_NL = fAdd(fV_x, fDivide(fMultiply(fTDP_Current, fRLL_LoadLine),
1094 				ConvertToFraction(10)));
1095 
1096 		fV_NL = fRoundUpByStepSize(fV_NL, fStepSize, 0);
1097 
1098 		if (GreaterThan(fV_max, fV_NL) &&
1099 			(GreaterThan(fV_NL, fEVV_V) ||
1100 			Equal(fV_NL, fEVV_V))) {
1101 			fV_NL = fMultiply(fV_NL, ConvertToFraction(1000));
1102 
1103 			*voltage = (uint16_t)fV_NL.partial.real;
1104 			break;
		} else {
			fV_x = fAdd(fV_x, fStepSize);
		}
1107 	}
1108 
1109 	return result;
1110 }
1111 
/** atomctrl_get_voltage_evv_on_sclk gets voltage via a call to the ATOM COMMAND table.
 * @param hwmgr               input: pointer to hwManager
 * @param voltage_type        input: type of EVV voltage, VDDC or VDDGFX
 * @param sclk                input: DPM state SCLK frequency in 10KHz units, as defined
 *                            in the PPTable SCLK/VDDC dependency table associated with
 *                            this virtual_voltage_Id
 * @param virtual_voltage_Id  input: voltage id matching the per-voltage DPM state: 0xff01, 0xff02 .. 0xff08
 * @param voltage             output: real voltage level in units of mV
 */
1121 int atomctrl_get_voltage_evv_on_sclk(
1122 		struct pp_hwmgr *hwmgr,
1123 		uint8_t voltage_type,
1124 		uint32_t sclk, uint16_t virtual_voltage_Id,
1125 		uint16_t *voltage)
1126 {
1127 	struct amdgpu_device *adev = hwmgr->adev;
1128 	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
1129 	int result;
1130 
1131 	get_voltage_info_param_space.ucVoltageType   =
1132 		voltage_type;
1133 	get_voltage_info_param_space.ucVoltageMode   =
1134 		ATOM_GET_VOLTAGE_EVV_VOLTAGE;
1135 	get_voltage_info_param_space.usVoltageLevel  =
1136 		cpu_to_le16(virtual_voltage_Id);
1137 	get_voltage_info_param_space.ulSCLKFreq      =
1138 		cpu_to_le32(sclk);
1139 
1140 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
1141 			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
1142 			(uint32_t *)&get_voltage_info_param_space);
1143 
1144 	*voltage = result ? 0 :
1145 			le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
1146 				(&get_voltage_info_param_space))->usVoltageLevel);
1147 
1148 	return result;
1149 }
1150 
1151 /**
1152  * atomctrl_get_voltage_evv gets voltage via call to ATOM COMMAND table.
1153  * @param hwmgr	input: pointer to hwManager
1154  * @param virtual_voltage_id      input: voltage id which match per voltage DPM state: 0xff01, 0xff02.. 0xff08
1155  * @param voltage		       output: real voltage level in unit of mv
1156  */
1157 int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
1158 			     uint16_t virtual_voltage_id,
1159 			     uint16_t *voltage)
1160 {
1161 	struct amdgpu_device *adev = hwmgr->adev;
1162 	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
1163 	int result;
1164 	int entry_id;
1165 
	/* search for the leakage voltage ID 0xff01 ~ 0xff08 and its sclk */
1167 	for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) {
1168 		if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].v == virtual_voltage_id) {
1169 			/* found */
1170 			break;
1171 		}
1172 	}
1173 
1174 	if (entry_id >= hwmgr->dyn_state.vddc_dependency_on_sclk->count) {
		pr_debug("Can't find requested voltage id in vddc_dependency_on_sclk table!\n");
		return -EINVAL;
1177 	}
1178 
1179 	get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC;
1180 	get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
	get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
1182 	get_voltage_info_param_space.ulSCLKFreq =
1183 		cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk);
1184 
1185 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
1186 			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
1187 			(uint32_t *)&get_voltage_info_param_space);
1188 
1189 	if (0 != result)
1190 		return result;
1191 
1192 	*voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
1193 				(&get_voltage_info_param_space))->usVoltageLevel);
1194 
1195 	return result;
1196 }
1197 
1198 /**
1199  * Get the mpll reference clock in 10KHz
1200  */
1201 uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr)
1202 {
1203 	ATOM_COMMON_TABLE_HEADER *fw_info;
1204 	uint32_t clock;
1205 	u8 frev, crev;
1206 	u16 size;
1207 
1208 	fw_info = (ATOM_COMMON_TABLE_HEADER *)
1209 		smu_atom_get_data_table(hwmgr->adev,
1210 				GetIndexIntoMasterTable(DATA, FirmwareInfo),
1211 				&size, &frev, &crev);
1212 
1213 	if (fw_info == NULL)
1214 		clock = 2700;
1215 	else {
1216 		if ((fw_info->ucTableFormatRevision == 2) &&
1217 			(le16_to_cpu(fw_info->usStructureSize) >= sizeof(ATOM_FIRMWARE_INFO_V2_1))) {
1218 			ATOM_FIRMWARE_INFO_V2_1 *fwInfo_2_1 =
1219 				(ATOM_FIRMWARE_INFO_V2_1 *)fw_info;
1220 			clock = (uint32_t)(le16_to_cpu(fwInfo_2_1->usMemoryReferenceClock));
1221 		} else {
1222 			ATOM_FIRMWARE_INFO *fwInfo_0_0 =
1223 				(ATOM_FIRMWARE_INFO *)fw_info;
1224 			clock = (uint32_t)(le16_to_cpu(fwInfo_0_0->usReferenceClock));
1225 		}
1226 	}
1227 
1228 	return clock;
1229 }
1230 
1231 /**
1232  * Get the asic internal spread spectrum table
1233  */
1234 ATOM_ASIC_INTERNAL_SS_INFO *asic_internal_ss_get_ss_table(void *device)
1235 {
1236 	ATOM_ASIC_INTERNAL_SS_INFO *table = NULL;
1237 	u8 frev, crev;
1238 	u16 size;
1239 
1240 	table = (ATOM_ASIC_INTERNAL_SS_INFO *)
1241 		smu_atom_get_data_table(device,
1242 			GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info),
1243 			&size, &frev, &crev);
1244 
1245 	return table;
1246 }
1247 
1248 bool atomctrl_is_asic_internal_ss_supported(struct pp_hwmgr *hwmgr)
1249 {
1250 	ATOM_ASIC_INTERNAL_SS_INFO *table =
1251 		asic_internal_ss_get_ss_table(hwmgr->adev);
1252 
1253 	if (table)
1254 		return true;
1255 	else
1256 		return false;
1257 }
1258 
1259 /**
1260  * Get the asic internal spread spectrum assignment
1261  */
static int asic_internal_ss_get_ss_assignment(struct pp_hwmgr *hwmgr,
1263 		const uint8_t clockSource,
1264 		const uint32_t clockSpeed,
1265 		pp_atomctrl_internal_ss_info *ssEntry)
1266 {
1267 	ATOM_ASIC_INTERNAL_SS_INFO *table;
1268 	ATOM_ASIC_SS_ASSIGNMENT *ssInfo;
1269 	int entry_found = 0;
1270 
1271 	memset(ssEntry, 0x00, sizeof(pp_atomctrl_internal_ss_info));
1272 
1273 	table = asic_internal_ss_get_ss_table(hwmgr->adev);
1274 
1275 	if (NULL == table)
1276 		return -1;
1277 
1278 	ssInfo = &table->asSpreadSpectrum[0];
1279 
1280 	while (((uint8_t *)ssInfo - (uint8_t *)table) <
1281 		le16_to_cpu(table->sHeader.usStructureSize)) {
1282 		if ((clockSource == ssInfo->ucClockIndication) &&
1283 			((uint32_t)clockSpeed <= le32_to_cpu(ssInfo->ulTargetClockRange))) {
1284 			entry_found = 1;
1285 			break;
1286 		}
1287 
1288 		ssInfo = (ATOM_ASIC_SS_ASSIGNMENT *)((uint8_t *)ssInfo +
1289 				sizeof(ATOM_ASIC_SS_ASSIGNMENT));
1290 	}
1291 
1292 	if (entry_found) {
1293 		ssEntry->speed_spectrum_percentage =
1294 			le16_to_cpu(ssInfo->usSpreadSpectrumPercentage);
1295 		ssEntry->speed_spectrum_rate = le16_to_cpu(ssInfo->usSpreadRateInKhz);
1296 
1297 		if (((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) &&
1298 			(GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) ||
1299 			(GET_DATA_TABLE_MAJOR_REVISION(table) == 3)) {
1300 			ssEntry->speed_spectrum_rate /= 100;
1301 		}
1302 
1303 		switch (ssInfo->ucSpreadSpectrumMode) {
1304 		case 0:
1305 			ssEntry->speed_spectrum_mode =
1306 				pp_atomctrl_spread_spectrum_mode_down;
1307 			break;
1308 		case 1:
1309 			ssEntry->speed_spectrum_mode =
1310 				pp_atomctrl_spread_spectrum_mode_center;
1311 			break;
1312 		default:
1313 			ssEntry->speed_spectrum_mode =
1314 				pp_atomctrl_spread_spectrum_mode_down;
1315 			break;
1316 		}
1317 	}
1318 
1319 	return entry_found ? 0 : 1;
1320 }
1321 
1322 /**
1323  * Get the memory clock spread spectrum info
1324  */
1325 int atomctrl_get_memory_clock_spread_spectrum(
1326 		struct pp_hwmgr *hwmgr,
1327 		const uint32_t memory_clock,
1328 		pp_atomctrl_internal_ss_info *ssInfo)
1329 {
	return asic_internal_ss_get_ss_assignment(hwmgr,
1331 			ASIC_INTERNAL_MEMORY_SS, memory_clock, ssInfo);
1332 }
1333 /**
1334  * Get the engine clock spread spectrum info
1335  */
1336 int atomctrl_get_engine_clock_spread_spectrum(
1337 		struct pp_hwmgr *hwmgr,
1338 		const uint32_t engine_clock,
1339 		pp_atomctrl_internal_ss_info *ssInfo)
1340 {
	return asic_internal_ss_get_ss_assignment(hwmgr,
1342 			ASIC_INTERNAL_ENGINE_SS, engine_clock, ssInfo);
1343 }
1344 
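/*
 * Read an arbitrary bit field [start_index, end_index] from the efuse through
 * the ReadEfuseValue COMMAND table.  The efuse index is the byte offset of
 * the containing dword, the bit shift is the position within that dword, and
 * the returned value is masked down to the requested width.
 */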
1345 int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index,
1346 		uint16_t end_index, uint32_t *efuse)
1347 {
1348 	struct amdgpu_device *adev = hwmgr->adev;
1349 	uint32_t mask;
1350 	int result;
1351 	READ_EFUSE_VALUE_PARAMETER efuse_param;
1352 
1353 	if ((end_index - start_index)  == 31)
1354 		mask = 0xFFFFFFFF;
1355 	else
1356 		mask = (1 << ((end_index - start_index) + 1)) - 1;
1357 
1358 	efuse_param.sEfuse.usEfuseIndex = cpu_to_le16((start_index / 32) * 4);
1359 	efuse_param.sEfuse.ucBitShift = (uint8_t)
1360 			(start_index - ((start_index / 32) * 32));
1361 	efuse_param.sEfuse.ucBitLength  = (uint8_t)
1362 			((end_index - start_index) + 1);
1363 
1364 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
1365 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
1366 			(uint32_t *)&efuse_param);
1367 	*efuse = result ? 0 : le32_to_cpu(efuse_param.ulEfuseValue) & mask;
1368 
1369 	return result;
1370 }
1371 
1372 int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
1373 			      uint8_t level)
1374 {
1375 	struct amdgpu_device *adev = hwmgr->adev;
1376 	DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters;
1377 	int result;
1378 
1379 	memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq =
1380 		memory_clock & SET_CLOCK_FREQ_MASK;
1381 	memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag =
1382 		ADJUST_MC_SETTING_PARAM;
1383 	memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level;
1384 
1385 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
1386 		 GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
1387 		(uint32_t *)&memory_clock_parameters);
1388 
1389 	return result;
1390 }
1391 
1392 int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
1393 				uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage)
1394 {
1395 	struct amdgpu_device *adev = hwmgr->adev;
1396 	int result;
1397 	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3 get_voltage_info_param_space;
1398 
1399 	get_voltage_info_param_space.ucVoltageType = voltage_type;
1400 	get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
1401 	get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_Id);
1402 	get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(sclk);
1403 
1404 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
1405 			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
1406 			(uint32_t *)&get_voltage_info_param_space);
1407 
1408 	*voltage = result ? 0 :
1409 		le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel);
1410 
1411 	return result;
1412 }
1413 
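/*
 * Copy the SCLK FCW range entries (VCO setting, post divider and FCW
 * thresholds) from the VBIOS SMU_Info table into the caller's table.
 */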
1414 int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table)
1415 {
1416 
1417 	int i;
1418 	u8 frev, crev;
1419 	u16 size;
1420 
1421 	ATOM_SMU_INFO_V2_1 *psmu_info =
1422 		(ATOM_SMU_INFO_V2_1 *)smu_atom_get_data_table(hwmgr->adev,
1423 			GetIndexIntoMasterTable(DATA, SMU_Info),
			&size, &frev, &crev);

	if (!psmu_info)
		return -1;

	for (i = 0; i < psmu_info->ucSclkEntryNum; i++) {
1428 		table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
1429 		table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv;
1430 		table->entry[i].usFcw_pcc =
1431 			le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc);
1432 		table->entry[i].usFcw_trans_upper =
1433 			le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper);
1434 		table->entry[i].usRcw_trans_lower =
1435 			le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower);
1436 	}
1437 
1438 	return 0;
1439 }
1440 
1441 int atomctrl_get_vddc_shared_railinfo(struct pp_hwmgr *hwmgr, uint8_t *shared_rail)
1442 {
1443 	ATOM_SMU_INFO_V2_1 *psmu_info =
1444 		(ATOM_SMU_INFO_V2_1 *)smu_atom_get_data_table(hwmgr->adev,
1445 			GetIndexIntoMasterTable(DATA, SMU_Info),
1446 			NULL, NULL, NULL);
1447 	if (!psmu_info)
1448 		return -1;
1449 
1450 	*shared_rail = psmu_info->ucSharePowerSource;
1451 
1452 	return 0;
1453 }
1454 
1455 int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
1456 				  struct pp_atom_ctrl__avfs_parameters *param)
1457 {
1458 	ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL;
1459 
1460 	if (param == NULL)
1461 		return -EINVAL;
1462 
1463 	profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *)
1464 			smu_atom_get_data_table(hwmgr->adev,
1465 					GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
1466 					NULL, NULL, NULL);
1467 	if (!profile)
1468 		return -1;
1469 
1470 	param->ulAVFS_meanNsigma_Acontant0 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant0);
1471 	param->ulAVFS_meanNsigma_Acontant1 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant1);
1472 	param->ulAVFS_meanNsigma_Acontant2 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant2);
1473 	param->usAVFS_meanNsigma_DC_tol_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_DC_tol_sigma);
1474 	param->usAVFS_meanNsigma_Platform_mean = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_mean);
1475 	param->usAVFS_meanNsigma_Platform_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_sigma);
1476 	param->ulGB_VDROOP_TABLE_CKSOFF_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a0);
1477 	param->ulGB_VDROOP_TABLE_CKSOFF_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a1);
1478 	param->ulGB_VDROOP_TABLE_CKSOFF_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a2);
1479 	param->ulGB_VDROOP_TABLE_CKSON_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a0);
1480 	param->ulGB_VDROOP_TABLE_CKSON_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a1);
1481 	param->ulGB_VDROOP_TABLE_CKSON_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a2);
1482 	param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
1483 	param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2);
1484 	param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b);
1485 	param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_m1);
1486 	param->usAVFSGB_FUSE_TABLE_CKSON_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSON_m2);
1487 	param->ulAVFSGB_FUSE_TABLE_CKSON_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_b);
1488 	param->usMaxVoltage_0_25mv = le16_to_cpu(profile->usMaxVoltage_0_25mv);
1489 	param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF;
1490 	param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON;
1491 	param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF;
1492 	param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON;
1493 	param->usPSM_Age_ComFactor = le16_to_cpu(profile->usPSM_Age_ComFactor);
1494 	param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage;
1495 
1496 	return 0;
1497 }
1498 
1499 int  atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
1500 				uint8_t *svd_gpio_id, uint8_t *svc_gpio_id,
1501 				uint16_t *load_line)
1502 {
1503 	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
1504 		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
1505 
1506 	const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;
1507 
1508 	PP_ASSERT_WITH_CODE((NULL != voltage_info),
1509 			"Could not find Voltage Table in BIOS.", return -EINVAL);
1510 
1511 	voltage_object = atomctrl_lookup_voltage_type_v3
		(voltage_info, voltage_type, VOLTAGE_OBJ_SVID2);

	PP_ASSERT_WITH_CODE((NULL != voltage_object),
			"Could not find the SVID2 voltage object in BIOS.", return -EINVAL);

	*svd_gpio_id = voltage_object->asSVID2Obj.ucSVDGpioId;
1515 	*svc_gpio_id = voltage_object->asSVID2Obj.ucSVCGpioId;
1516 	*load_line = voltage_object->asSVID2Obj.usLoadLine_PSI;
1517 
1518 	return 0;
1519 }
1520 
1521 int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id)
1522 {
1523 	struct amdgpu_device *adev = hwmgr->adev;
1524 	SET_VOLTAGE_PS_ALLOCATION allocation;
1525 	SET_VOLTAGE_PARAMETERS_V1_3 *voltage_parameters =
1526 			(SET_VOLTAGE_PARAMETERS_V1_3 *)&allocation.sASICSetVoltage;
1527 	int result;
1528 
1529 	voltage_parameters->ucVoltageMode = ATOM_GET_LEAKAGE_ID;
1530 
1531 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
1532 			GetIndexIntoMasterTable(COMMAND, SetVoltage),
1533 			(uint32_t *)voltage_parameters);
1534 
1535 	*virtual_voltage_id = voltage_parameters->usVoltageLevel;
1536 
1537 	return result;
1538 }
1539 
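/*
 * Translate a virtual (leakage) voltage id into real VDDC/VDDCI values using
 * the ASIC_ProfilingInfo V2_1 ELB arrays: find the column whose id matches
 * virtual_voltage_id, then pick the row of the first leakage bin that is
 * greater than or equal to efuse_voltage_id.
 */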
1540 int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
1541 					uint16_t *vddc, uint16_t *vddci,
1542 					uint16_t virtual_voltage_id,
1543 					uint16_t efuse_voltage_id)
1544 {
1545 	int i, j;
1546 	int ix;
1547 	u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
1548 	ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
1549 
1550 	*vddc = 0;
1551 	*vddci = 0;
1552 
1553 	ix = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
1554 
1555 	profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
1556 			smu_atom_get_data_table(hwmgr->adev,
1557 					ix,
1558 					NULL, NULL, NULL);
1559 	if (!profile)
1560 		return -EINVAL;
1561 
1562 	if ((profile->asHeader.ucTableFormatRevision >= 2) &&
1563 		(profile->asHeader.ucTableContentRevision >= 1) &&
1564 		(profile->asHeader.usStructureSize >= sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))) {
1565 		leakage_bin = (u16 *)((char *)profile + profile->usLeakageBinArrayOffset);
1566 		vddc_id_buf = (u16 *)((char *)profile + profile->usElbVDDC_IdArrayOffset);
1567 		vddc_buf = (u16 *)((char *)profile + profile->usElbVDDC_LevelArrayOffset);
1568 		if (profile->ucElbVDDC_Num > 0) {
1569 			for (i = 0; i < profile->ucElbVDDC_Num; i++) {
1570 				if (vddc_id_buf[i] == virtual_voltage_id) {
1571 					for (j = 0; j < profile->ucLeakageBinNum; j++) {
1572 						if (efuse_voltage_id <= leakage_bin[j]) {
1573 							*vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
1574 							break;
1575 						}
1576 					}
1577 					break;
1578 				}
1579 			}
1580 		}
1581 
1582 		vddci_id_buf = (u16 *)((char *)profile + profile->usElbVDDCI_IdArrayOffset);
1583 		vddci_buf   = (u16 *)((char *)profile + profile->usElbVDDCI_LevelArrayOffset);
1584 		if (profile->ucElbVDDCI_Num > 0) {
1585 			for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
1586 				if (vddci_id_buf[i] == virtual_voltage_id) {
1587 					for (j = 0; j < profile->ucLeakageBinNum; j++) {
1588 						if (efuse_voltage_id <= leakage_bin[j]) {
1589 							*vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
1590 							break;
1591 						}
1592 					}
1593 					break;
1594 				}
1595 			}
1596 		}
1597 	}
1598 
1599 	return 0;
1600 }
1601 
1602 void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc,
1603 							uint32_t *min_vddc)
1604 {
1605 	void *profile;
1606 
1607 	profile = smu_atom_get_data_table(hwmgr->adev,
1608 					GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
1609 					NULL, NULL, NULL);
1610 
1611 	if (profile) {
1612 		switch (hwmgr->chip_id) {
1613 		case CHIP_TONGA:
1614 		case CHIP_FIJI:
1615 			*max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc) / 4;
1616 			*min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc) / 4;
1617 			return;
1618 		case CHIP_POLARIS11:
1619 		case CHIP_POLARIS10:
1620 		case CHIP_POLARIS12:
1621 			*max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc) / 100;
1622 			*min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc) / 100;
1623 			return;
1624 		default:
1625 			break;
1626 		}
1627 	}
1628 	*max_vddc = 0;
1629 	*min_vddc = 0;
1630 }
1631 
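/*
 * Fetch the EDC/DIDT leakage threshold and the offsets of the low/high
 * leakage DPM7 DIDT register tables from the VBIOS GFX_Info table.
 */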
1632 int atomctrl_get_edc_hilo_leakage_offset_table(struct pp_hwmgr *hwmgr,
1633 					       AtomCtrl_HiLoLeakageOffsetTable *table)
1634 {
1635 	ATOM_GFX_INFO_V2_3 *gfxinfo = smu_atom_get_data_table(hwmgr->adev,
1636 					GetIndexIntoMasterTable(DATA, GFX_Info),
1637 					NULL, NULL, NULL);
1638 	if (!gfxinfo)
1639 		return -ENOENT;
1640 
1641 	table->usHiLoLeakageThreshold = gfxinfo->usHiLoLeakageThreshold;
1642 	table->usEdcDidtLoDpm7TableOffset = gfxinfo->usEdcDidtLoDpm7TableOffset;
1643 	table->usEdcDidtHiDpm7TableOffset = gfxinfo->usEdcDidtHiDpm7TableOffset;
1644 
1645 	return 0;
1646 }
1647 
1648 static AtomCtrl_EDCLeakgeTable *get_edc_leakage_table(struct pp_hwmgr *hwmgr,
1649 						      uint16_t offset)
1650 {
1651 	void *table_address;
1652 	char *temp;
1653 
1654 	table_address = smu_atom_get_data_table(hwmgr->adev,
1655 			GetIndexIntoMasterTable(DATA, GFX_Info),
1656 			NULL, NULL, NULL);
1657 	if (!table_address)
1658 		return NULL;
1659 
	/* The EDC leakage table sits at the given offset inside the GFX_Info table. */
	temp = (char *)table_address + offset;

	return (AtomCtrl_EDCLeakgeTable *)temp;
1664 }
1665 
1666 int atomctrl_get_edc_leakage_table(struct pp_hwmgr *hwmgr,
1667 				   AtomCtrl_EDCLeakgeTable *table,
1668 				   uint16_t offset)
1669 {
1670 	uint32_t length, i;
1671 	AtomCtrl_EDCLeakgeTable *leakage_table =
1672 		get_edc_leakage_table(hwmgr, offset);
1673 
1674 	if (!leakage_table)
1675 		return -ENOENT;
1676 
1677 	length = sizeof(leakage_table->DIDT_REG) /
1678 		 sizeof(leakage_table->DIDT_REG[0]);
1679 	for (i = 0; i < length; i++)
1680 		table->DIDT_REG[i] = leakage_table->DIDT_REG[i];
1681 
1682 	return 0;
1683 }
1684