1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "pp_debug.h"
24 #include <linux/module.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include "atom.h"
28 #include "ppatomctrl.h"
29 #include "atombios.h"
30 #include "cgs_common.h"
31 #include "ppevvmath.h"
32 
33 #define MEM_ID_MASK           0xff000000
34 #define MEM_ID_SHIFT          24
35 #define CLOCK_RANGE_MASK      0x00ffffff
36 #define CLOCK_RANGE_SHIFT     0
37 #define LOW_NIBBLE_MASK       0xf
38 #define DATA_EQU_PREV         0
39 #define DATA_FROM_TABLE       4
40 
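/*
 * Each ATOM_MEMORY_SETTING_DATA_BLOCK in the VBIOS VramInfo table starts with
 * a 32-bit header word that packs the memory module ID in the top byte and
 * the maximum memory clock of the range in the low 24 bits.  Minimal decode
 * sketch using the masks above (illustrative only, mirrors
 * atomctrl_retrieve_ac_timing() below):
 *
 *   uint32_t header  = *(uint32_t *)reg_data;
 *   uint8_t  mem_id  = (header & MEM_ID_MASK) >> MEM_ID_SHIFT;
 *   uint32_t clk_max = (header & CLOCK_RANGE_MASK) >> CLOCK_RANGE_SHIFT;
 */
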
41 union voltage_object_info {
42 	struct _ATOM_VOLTAGE_OBJECT_INFO v1;
43 	struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
44 	struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3;
45 };
46 
47 static int atomctrl_retrieve_ac_timing(
48 		uint8_t index,
49 		ATOM_INIT_REG_BLOCK *reg_block,
50 		pp_atomctrl_mc_reg_table *table)
51 {
52 	uint32_t i, j;
53 	uint8_t tmem_id;
54 	ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
55 		((uint8_t *)reg_block + (2 * sizeof(uint16_t)) + le16_to_cpu(reg_block->usRegIndexTblSize));
56 
57 	uint8_t num_ranges = 0;
58 
59 	while (*(uint32_t *)reg_data != END_OF_REG_DATA_BLOCK &&
60 			num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES) {
61 		tmem_id = (uint8_t)((*(uint32_t *)reg_data & MEM_ID_MASK) >> MEM_ID_SHIFT);
62 
63 		if (index == tmem_id) {
64 			table->mc_reg_table_entry[num_ranges].mclk_max =
65 				(uint32_t)((*(uint32_t *)reg_data & CLOCK_RANGE_MASK) >>
66 						CLOCK_RANGE_SHIFT);
67 
68 			for (i = 0, j = 1; i < table->last; i++) {
69 				if ((table->mc_reg_address[i].uc_pre_reg_data &
70 							LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
71 					table->mc_reg_table_entry[num_ranges].mc_data[i] =
72 						(uint32_t)*((uint32_t *)reg_data + j);
73 					j++;
74 				} else if ((table->mc_reg_address[i].uc_pre_reg_data &
75 							LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
76 					if (i)
77 						table->mc_reg_table_entry[num_ranges].mc_data[i] =
78 							table->mc_reg_table_entry[num_ranges].mc_data[i-1];
79 				}
80 			}
81 			num_ranges++;
82 		}
83 
84 		reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
85 			((uint8_t *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize));
86 	}
87 
88 	PP_ASSERT_WITH_CODE((*(uint32_t *)reg_data == END_OF_REG_DATA_BLOCK),
89 			"Invalid VramInfo table.", return -1);
90 	table->num_entries = num_ranges;
91 
92 	return 0;
93 }
94 
95 /**
96  * atomctrl_set_mc_reg_address_table - Get the memory clock AC timing register indices from the VBIOS table
97  * The VBIOS marks the end of the memory clock AC timing registers with ucPreRegDataLength bit6 = 1.
98  * @reg_block: the address of the ATOM_INIT_REG_BLOCK
99  * @table: the address of the MCRegTable
100  * Return: 0 on success, -1 on an invalid VramInfo table
101  */
102 static int atomctrl_set_mc_reg_address_table(
103 		ATOM_INIT_REG_BLOCK *reg_block,
104 		pp_atomctrl_mc_reg_table *table)
105 {
106 	uint8_t i = 0;
107 	uint8_t num_entries = (uint8_t)((le16_to_cpu(reg_block->usRegIndexTblSize))
108 			/ sizeof(ATOM_INIT_REG_INDEX_FORMAT));
109 	ATOM_INIT_REG_INDEX_FORMAT *format = &reg_block->asRegIndexBuf[0];
110 
111 	num_entries--;        /* subtract 1 data end mark entry */
112 
113 	PP_ASSERT_WITH_CODE((num_entries <= VBIOS_MC_REGISTER_ARRAY_SIZE),
114 			"Invalid VramInfo table.", return -1);
115 
116 	/* ucPreRegDataLength bit6 = 1 is the end of memory clock AC timing registers */
117 	while ((!(format->ucPreRegDataLength & ACCESS_PLACEHOLDER)) &&
118 			(i < num_entries)) {
119 		table->mc_reg_address[i].s1 =
120 			(uint16_t)(le16_to_cpu(format->usRegIndex));
121 		table->mc_reg_address[i].uc_pre_reg_data =
122 			format->ucPreRegDataLength;
123 
124 		i++;
125 		format = (ATOM_INIT_REG_INDEX_FORMAT *)
126 			((uint8_t *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
127 	}
128 
129 	table->last = i;
130 	return 0;
131 }
132 
133 int atomctrl_initialize_mc_reg_table(
134 		struct pp_hwmgr *hwmgr,
135 		uint8_t module_index,
136 		pp_atomctrl_mc_reg_table *table)
137 {
138 	ATOM_VRAM_INFO_HEADER_V2_1 *vram_info;
139 	ATOM_INIT_REG_BLOCK *reg_block;
140 	int result = 0;
141 	u8 frev, crev;
142 	u16 size;
143 
144 	vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *)
145 		smu_atom_get_data_table(hwmgr->adev,
146 				GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
147 	if (!vram_info) {
148 		pr_err("Could not retrieve the VramInfo table!");
149 		return -EINVAL;
150 	}
151 
152 	if (module_index >= vram_info->ucNumOfVRAMModule) {
153 		pr_err("Invalid VramInfo table.");
154 		result = -1;
155 	} else if (vram_info->sHeader.ucTableFormatRevision < 2) {
156 		pr_err("Invalid VramInfo table.");
157 		result = -1;
158 	}
159 
160 	if (0 == result) {
161 		reg_block = (ATOM_INIT_REG_BLOCK *)
162 			((uint8_t *)vram_info + le16_to_cpu(vram_info->usMemClkPatchTblOffset));
163 		result = atomctrl_set_mc_reg_address_table(reg_block, table);
164 	}
165 
166 	if (0 == result) {
167 		result = atomctrl_retrieve_ac_timing(module_index,
168 					reg_block, table);
169 	}
170 
171 	return result;
172 }
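
/*
 * Illustrative caller sketch (hypothetical, not part of this file): a DPM
 * setup path would typically allocate a pp_atomctrl_mc_reg_table and fill it
 * for VRAM module 0 before deriving the per-mclk MC register programming.
 *
 *   pp_atomctrl_mc_reg_table *mc_table = kzalloc(sizeof(*mc_table), GFP_KERNEL);
 *
 *   if (mc_table && !atomctrl_initialize_mc_reg_table(hwmgr, 0, mc_table))
 *       ;  // mc_table->mc_reg_address[] and mc_reg_table_entry[] are now valid
 */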
173 
174 int atomctrl_initialize_mc_reg_table_v2_2(
175 		struct pp_hwmgr *hwmgr,
176 		uint8_t module_index,
177 		pp_atomctrl_mc_reg_table *table)
178 {
179 	ATOM_VRAM_INFO_HEADER_V2_2 *vram_info;
180 	ATOM_INIT_REG_BLOCK *reg_block;
181 	int result = 0;
182 	u8 frev, crev;
183 	u16 size;
184 
185 	vram_info = (ATOM_VRAM_INFO_HEADER_V2_2 *)
186 		smu_atom_get_data_table(hwmgr->adev,
187 				GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
188 	if (!vram_info) {
189 		pr_err("Could not retrieve the VramInfo table!");
190 		return -EINVAL;
191 	}
192 
193 	if (module_index >= vram_info->ucNumOfVRAMModule) {
194 		pr_err("Invalid VramInfo table.");
195 		result = -1;
196 	} else if (vram_info->sHeader.ucTableFormatRevision < 2) {
197 		pr_err("Invalid VramInfo table.");
198 		result = -1;
199 	}
200 
201 	if (0 == result) {
202 		reg_block = (ATOM_INIT_REG_BLOCK *)
203 			((uint8_t *)vram_info + le16_to_cpu(vram_info->usMemClkPatchTblOffset));
204 		result = atomctrl_set_mc_reg_address_table(reg_block, table);
205 	}
206 
207 	if (0 == result) {
208 		result = atomctrl_retrieve_ac_timing(module_index,
209 					reg_block, table);
210 	}
211 
212 	return result;
213 }
214 
215 /*
216  * Set DRAM timings based on engine clock and memory clock.
217  */
218 int atomctrl_set_engine_dram_timings_rv770(
219 		struct pp_hwmgr *hwmgr,
220 		uint32_t engine_clock,
221 		uint32_t memory_clock)
222 {
223 	struct amdgpu_device *adev = hwmgr->adev;
224 
225 	SET_ENGINE_CLOCK_PS_ALLOCATION engine_clock_parameters;
226 
227 	/* They are both in 10KHz Units. */
228 	engine_clock_parameters.ulTargetEngineClock =
229 		cpu_to_le32((engine_clock & SET_CLOCK_FREQ_MASK) |
230 			    ((COMPUTE_ENGINE_PLL_PARAM << 24)));
231 
232 	/* in 10 khz units.*/
233 	engine_clock_parameters.sReserved.ulClock =
234 		cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK);
235 
236 	return amdgpu_atom_execute_table(adev->mode_info.atom_context,
237 			GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
238 			(uint32_t *)&engine_clock_parameters);
239 }
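
/*
 * Worked example for the parameter packing above (illustrative only): for a
 * 600 MHz engine clock the caller passes 60000 (10 kHz units); the low bits
 * selected by SET_CLOCK_FREQ_MASK carry that value and COMPUTE_ENGINE_PLL_PARAM
 * goes into the top byte:
 *
 *   ulTargetEngineClock = (60000 & SET_CLOCK_FREQ_MASK) |
 *                         (COMPUTE_ENGINE_PLL_PARAM << 24);
 */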
240 
241 /*
242  * Private function to get the VoltageObjectInfo table address.
243  * WARNING: The table returned by this function is in
244  * dynamically allocated memory.
245  * The caller has to release it by calling kfree.
246  */
247 static ATOM_VOLTAGE_OBJECT_INFO *get_voltage_info_table(void *device)
248 {
249 	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
250 	u8 frev, crev;
251 	u16 size;
252 	union voltage_object_info *voltage_info;
253 
254 	voltage_info = (union voltage_object_info *)
255 		smu_atom_get_data_table(device, index,
256 			&size, &frev, &crev);
257 
258 	if (voltage_info != NULL)
259 		return (ATOM_VOLTAGE_OBJECT_INFO *) &(voltage_info->v3);
260 	else
261 		return NULL;
262 }
263 
264 static const ATOM_VOLTAGE_OBJECT_V3 *atomctrl_lookup_voltage_type_v3(
265 		const ATOM_VOLTAGE_OBJECT_INFO_V3_1 * voltage_object_info_table,
266 		uint8_t voltage_type, uint8_t voltage_mode)
267 {
268 	unsigned int size = le16_to_cpu(voltage_object_info_table->sHeader.usStructureSize);
269 	unsigned int offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
270 	uint8_t *start = (uint8_t *)voltage_object_info_table;
271 
272 	while (offset < size) {
273 		const ATOM_VOLTAGE_OBJECT_V3 *voltage_object =
274 			(const ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);
275 
276 		if (voltage_type == voltage_object->asGpioVoltageObj.sHeader.ucVoltageType &&
277 			voltage_mode == voltage_object->asGpioVoltageObj.sHeader.ucVoltageMode)
278 			return voltage_object;
279 
280 		offset += le16_to_cpu(voltage_object->asGpioVoltageObj.sHeader.usSize);
281 	}
282 
283 	return NULL;
284 }
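
/*
 * Note: V3 voltage objects are variable sized, so the table above cannot be
 * indexed as a plain array.  The lookup walks the table by each object's
 * sHeader.usSize and matches on the common ucVoltageType/ucVoltageMode header
 * fields, which sit at the same place in every object layout.
 */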
285 
286 /**
287  * atomctrl_get_memory_pll_dividers_si
288  *
289  * @hwmgr:           input parameter: pointer to HwMgr
290  * @clock_value:     input parameter: memory clock
291  * @mpll_param:      output parameter: memory clock parameters
292  * @strobe_mode:     input parameter: 1 for strobe mode,  0 for performance mode
293  */
294 int atomctrl_get_memory_pll_dividers_si(
295 		struct pp_hwmgr *hwmgr,
296 		uint32_t clock_value,
297 		pp_atomctrl_memory_clock_param *mpll_param,
298 		bool strobe_mode)
299 {
300 	struct amdgpu_device *adev = hwmgr->adev;
301 	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters;
302 	int result;
303 
304 	mpll_parameters.ulClock = cpu_to_le32(clock_value);
305 	mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0);
306 
307 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
308 		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
309 		(uint32_t *)&mpll_parameters);
310 
311 	if (0 == result) {
312 		mpll_param->mpll_fb_divider.clk_frac =
313 			le16_to_cpu(mpll_parameters.ulFbDiv.usFbDivFrac);
314 		mpll_param->mpll_fb_divider.cl_kf =
315 			le16_to_cpu(mpll_parameters.ulFbDiv.usFbDiv);
316 		mpll_param->mpll_post_divider =
317 			(uint32_t)mpll_parameters.ucPostDiv;
318 		mpll_param->vco_mode =
319 			(uint32_t)(mpll_parameters.ucPllCntlFlag &
320 					MPLL_CNTL_FLAG_VCO_MODE_MASK);
321 		mpll_param->yclk_sel =
322 			(uint32_t)((mpll_parameters.ucPllCntlFlag &
323 						MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0);
324 		mpll_param->qdr =
325 			(uint32_t)((mpll_parameters.ucPllCntlFlag &
326 						MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0);
327 		mpll_param->half_rate =
328 			(uint32_t)((mpll_parameters.ucPllCntlFlag &
329 						MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0);
330 		mpll_param->dll_speed =
331 			(uint32_t)(mpll_parameters.ucDllSpeed);
332 		mpll_param->bw_ctrl =
333 			(uint32_t)(mpll_parameters.ucBWCntl);
334 	}
335 
336 	return result;
337 }
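
/*
 * Illustrative caller sketch (hypothetical): request the MPLL settings for a
 * memory clock given in the driver's usual 10 kHz units (assumed here) and
 * consume the decoded dividers on success.  mclk_10khz and program_mpll() are
 * placeholder names for the caller side.
 *
 *   pp_atomctrl_memory_clock_param mpll_param;
 *
 *   if (!atomctrl_get_memory_pll_dividers_si(hwmgr, mclk_10khz, &mpll_param, true))
 *       program_mpll(mpll_param.mpll_fb_divider.cl_kf,
 *                    mpll_param.mpll_fb_divider.clk_frac,
 *                    mpll_param.mpll_post_divider);
 */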
338 
339 /**
340  * atomctrl_get_memory_pll_dividers_vi
341  *
342  * @hwmgr:                 input parameter: pointer to HwMgr
343  * @clock_value:           input parameter: memory clock
344  * @mpll_param:            output parameter: memory clock parameters
345  */
346 int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
347 		uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param)
348 {
349 	struct amdgpu_device *adev = hwmgr->adev;
350 	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters;
351 	int result;
352 
353 	mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
354 
355 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
356 			GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
357 			(uint32_t *)&mpll_parameters);
358 
359 	if (!result)
360 		mpll_param->mpll_post_divider =
361 				(uint32_t)mpll_parameters.ulClock.ucPostDiv;
362 
363 	return result;
364 }
365 
366 int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr,
367 					uint32_t clock_value,
368 					pp_atomctrl_memory_clock_param_ai *mpll_param)
369 {
370 	struct amdgpu_device *adev = hwmgr->adev;
371 	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 mpll_parameters = {{0}, 0, 0};
372 	int result;
373 
374 	mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
375 
376 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
377 			GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
378 			(uint32_t *)&mpll_parameters);
379 
380 	/* VEGAM's mpll takes some time to finish computing */
381 	udelay(10);
382 
383 	if (!result) {
384 		mpll_param->ulMclk_fcw_int =
385 			le16_to_cpu(mpll_parameters.usMclk_fcw_int);
386 		mpll_param->ulMclk_fcw_frac =
387 			le16_to_cpu(mpll_parameters.usMclk_fcw_frac);
388 		mpll_param->ulClock =
389 			le32_to_cpu(mpll_parameters.ulClock.ulClock);
390 		mpll_param->ulPostDiv = mpll_parameters.ulClock.ucPostDiv;
391 	}
392 
393 	return result;
394 }
395 
396 int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
397 					  uint32_t clock_value,
398 					  pp_atomctrl_clock_dividers_kong *dividers)
399 {
400 	struct amdgpu_device *adev = hwmgr->adev;
401 	COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters;
402 	int result;
403 
404 	pll_parameters.ulClock = cpu_to_le32(clock_value);
405 
406 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
407 		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
408 		(uint32_t *)&pll_parameters);
409 
410 	if (0 == result) {
411 		dividers->pll_post_divider = pll_parameters.ucPostDiv;
412 		dividers->real_clock = le32_to_cpu(pll_parameters.ulClock);
413 	}
414 
415 	return result;
416 }
417 
418 int atomctrl_get_engine_pll_dividers_vi(
419 		struct pp_hwmgr *hwmgr,
420 		uint32_t clock_value,
421 		pp_atomctrl_clock_dividers_vi *dividers)
422 {
423 	struct amdgpu_device *adev = hwmgr->adev;
424 	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
425 	int result;
426 
427 	pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
428 	pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
429 
430 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
431 		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
432 		(uint32_t *)&pll_patameters);
433 
434 	if (0 == result) {
435 		dividers->pll_post_divider =
436 			pll_patameters.ulClock.ucPostDiv;
437 		dividers->real_clock =
438 			le32_to_cpu(pll_patameters.ulClock.ulClock);
439 
440 		dividers->ul_fb_div.ul_fb_div_frac =
441 			le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac);
442 		dividers->ul_fb_div.ul_fb_div =
443 			le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv);
444 
445 		dividers->uc_pll_ref_div =
446 			pll_patameters.ucPllRefDiv;
447 		dividers->uc_pll_post_div =
448 			pll_patameters.ucPllPostDiv;
449 		dividers->uc_pll_cntl_flag =
450 			pll_patameters.ucPllCntlFlag;
451 	}
452 
453 	return result;
454 }
455 
456 int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
457 		uint32_t clock_value,
458 		pp_atomctrl_clock_dividers_ai *dividers)
459 {
460 	struct amdgpu_device *adev = hwmgr->adev;
461 	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_patameters;
462 	int result;
463 
464 	pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
465 	pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
466 
467 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
468 		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
469 		(uint32_t *)&pll_patameters);
470 
471 	if (0 == result) {
472 		dividers->usSclk_fcw_frac     = le16_to_cpu(pll_patameters.usSclk_fcw_frac);
473 		dividers->usSclk_fcw_int      = le16_to_cpu(pll_patameters.usSclk_fcw_int);
474 		dividers->ucSclkPostDiv       = pll_patameters.ucSclkPostDiv;
475 		dividers->ucSclkVcoMode       = pll_patameters.ucSclkVcoMode;
476 		dividers->ucSclkPllRange      = pll_patameters.ucSclkPllRange;
477 		dividers->ucSscEnable         = pll_patameters.ucSscEnable;
478 		dividers->usSsc_fcw1_frac     = le16_to_cpu(pll_patameters.usSsc_fcw1_frac);
479 		dividers->usSsc_fcw1_int      = le16_to_cpu(pll_patameters.usSsc_fcw1_int);
480 		dividers->usPcc_fcw_int       = le16_to_cpu(pll_patameters.usPcc_fcw_int);
481 		dividers->usSsc_fcw_slew_frac = le16_to_cpu(pll_patameters.usSsc_fcw_slew_frac);
482 		dividers->usPcc_fcw_slew_frac = le16_to_cpu(pll_patameters.usPcc_fcw_slew_frac);
483 	}
484 	return result;
485 }
486 
487 int atomctrl_get_dfs_pll_dividers_vi(
488 		struct pp_hwmgr *hwmgr,
489 		uint32_t clock_value,
490 		pp_atomctrl_clock_dividers_vi *dividers)
491 {
492 	struct amdgpu_device *adev = hwmgr->adev;
493 	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
494 	int result;
495 
496 	pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
497 	pll_patameters.ulClock.ucPostDiv =
498 		COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK;
499 
500 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
501 		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
502 		(uint32_t *)&pll_patameters);
503 
504 	if (0 == result) {
505 		dividers->pll_post_divider =
506 			pll_patameters.ulClock.ucPostDiv;
507 		dividers->real_clock =
508 			le32_to_cpu(pll_patameters.ulClock.ulClock);
509 
510 		dividers->ul_fb_div.ul_fb_div_frac =
511 			le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac);
512 		dividers->ul_fb_div.ul_fb_div =
513 			le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv);
514 
515 		dividers->uc_pll_ref_div =
516 			pll_patameters.ucPllRefDiv;
517 		dividers->uc_pll_post_div =
518 			pll_patameters.ucPllPostDiv;
519 		dividers->uc_pll_cntl_flag =
520 			pll_patameters.ucPllCntlFlag;
521 	}
522 
523 	return result;
524 }
525 
526 /*
527  * Get the reference clock in 10KHz
528  */
529 uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr)
530 {
531 	ATOM_FIRMWARE_INFO *fw_info;
532 	u8 frev, crev;
533 	u16 size;
534 	uint32_t clock;
535 
536 	fw_info = (ATOM_FIRMWARE_INFO *)
537 		smu_atom_get_data_table(hwmgr->adev,
538 			GetIndexIntoMasterTable(DATA, FirmwareInfo),
539 			&size, &frev, &crev);
540 
541 	if (fw_info == NULL)
542 		clock = 2700;
543 	else
544 		clock = (uint32_t)(le16_to_cpu(fw_info->usReferenceClock));
545 
546 	return clock;
547 }
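
/*
 * The returned value is in 10 kHz units; the 2700 fallback corresponds to the
 * common 27 MHz reference crystal.  Conversion sketch (illustrative only):
 *
 *   uint32_t xclk_10khz = atomctrl_get_reference_clock(hwmgr);
 *   uint32_t xclk_khz   = xclk_10khz * 10;   // e.g. 2700 -> 27000 kHz
 */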
548 
549 /*
550  * Returns true if the given voltage type is controlled by GPIO pins.
551  * voltage_type is one of SET_VOLTAGE_TYPE_ASIC_VDDC,
552  * SET_VOLTAGE_TYPE_ASIC_MVDDC, SET_VOLTAGE_TYPE_ASIC_MVDDQ.
553  * voltage_mode is one of ATOM_SET_VOLTAGE, ATOM_SET_VOLTAGE_PHASE
554  */
555 bool atomctrl_is_voltage_controlled_by_gpio_v3(
556 		struct pp_hwmgr *hwmgr,
557 		uint8_t voltage_type,
558 		uint8_t voltage_mode)
559 {
560 	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
561 		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
562 	bool ret;
563 
564 	PP_ASSERT_WITH_CODE((NULL != voltage_info),
565 			"Could not find Voltage Table in BIOS.", return false;);
566 
567 	ret = (NULL != atomctrl_lookup_voltage_type_v3
568 			(voltage_info, voltage_type, voltage_mode)) ? true : false;
569 
570 	return ret;
571 }
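
/*
 * Illustrative caller sketch (hypothetical): check whether VDDC is driven via
 * GPIO pins before selecting a GPIO based voltage control path, using the
 * type/mode constants documented above.
 *
 *   if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
 *                   SET_VOLTAGE_TYPE_ASIC_VDDC, ATOM_SET_VOLTAGE))
 *       ;  // use the GPIO voltage table path
 */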
572 
573 int atomctrl_get_voltage_table_v3(
574 		struct pp_hwmgr *hwmgr,
575 		uint8_t voltage_type,
576 		uint8_t voltage_mode,
577 		pp_atomctrl_voltage_table *voltage_table)
578 {
579 	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
580 		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
581 	const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;
582 	unsigned int i;
583 
584 	PP_ASSERT_WITH_CODE((NULL != voltage_info),
585 			"Could not find Voltage Table in BIOS.", return -1;);
586 
587 	voltage_object = atomctrl_lookup_voltage_type_v3
588 		(voltage_info, voltage_type, voltage_mode);
589 
590 	if (voltage_object == NULL)
591 		return -1;
592 
593 	PP_ASSERT_WITH_CODE(
594 			(voltage_object->asGpioVoltageObj.ucGpioEntryNum <=
595 			PP_ATOMCTRL_MAX_VOLTAGE_ENTRIES),
596 			"Too many voltage entries!",
597 			return -1;
598 			);
599 
600 	for (i = 0; i < voltage_object->asGpioVoltageObj.ucGpioEntryNum; i++) {
601 		voltage_table->entries[i].value =
602 			le16_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue);
603 		voltage_table->entries[i].smio_low =
604 			le32_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId);
605 	}
606 
607 	voltage_table->mask_low    =
608 		le32_to_cpu(voltage_object->asGpioVoltageObj.ulGpioMaskVal);
609 	voltage_table->count      =
610 		voltage_object->asGpioVoltageObj.ucGpioEntryNum;
611 	voltage_table->phase_delay =
612 		voltage_object->asGpioVoltageObj.ucPhaseDelay;
613 
614 	return 0;
615 }
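
/*
 * Illustrative caller sketch (hypothetical): fetch the GPIO based VDDC table
 * and walk its entries; entries[i].value holds the voltage step (typically mV)
 * and entries[i].smio_low the matching SMIO/GPIO pattern.  use_step() is a
 * placeholder for the caller side.
 *
 *   pp_atomctrl_voltage_table vddc_table;
 *   uint32_t i;
 *
 *   if (!atomctrl_get_voltage_table_v3(hwmgr, SET_VOLTAGE_TYPE_ASIC_VDDC,
 *                                      ATOM_SET_VOLTAGE, &vddc_table))
 *       for (i = 0; i < vddc_table.count; i++)
 *           use_step(vddc_table.entries[i].value,
 *                    vddc_table.entries[i].smio_low);
 */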
616 
617 static bool atomctrl_lookup_gpio_pin(
618 		ATOM_GPIO_PIN_LUT * gpio_lookup_table,
619 		const uint32_t pinId,
620 		pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment)
621 {
622 	unsigned int size = le16_to_cpu(gpio_lookup_table->sHeader.usStructureSize);
623 	unsigned int offset = offsetof(ATOM_GPIO_PIN_LUT, asGPIO_Pin[0]);
624 	uint8_t *start = (uint8_t *)gpio_lookup_table;
625 
626 	while (offset < size) {
627 		const ATOM_GPIO_PIN_ASSIGNMENT *pin_assignment =
628 			(const ATOM_GPIO_PIN_ASSIGNMENT *)(start + offset);
629 
630 		if (pinId == pin_assignment->ucGPIO_ID) {
631 			gpio_pin_assignment->uc_gpio_pin_bit_shift =
632 				pin_assignment->ucGpioPinBitShift;
633 			gpio_pin_assignment->us_gpio_pin_aindex =
634 				le16_to_cpu(pin_assignment->usGpioPin_AIndex);
635 			return true;
636 		}
637 
638 		offset += offsetof(ATOM_GPIO_PIN_ASSIGNMENT, ucGPIO_ID) + 1;
639 	}
640 
641 	return false;
642 }
643 
644 /*
645  * Private function to get the GPIO pin lookup table address.
646  * WARNING: The table returned by this function is in
647  * dynamically allocated memory.
648  * The caller has to release it by calling kfree.
649  */
650 static ATOM_GPIO_PIN_LUT *get_gpio_lookup_table(void *device)
651 {
652 	u8 frev, crev;
653 	u16 size;
654 	void *table_address;
655 
656 	table_address = (ATOM_GPIO_PIN_LUT *)
657 		smu_atom_get_data_table(device,
658 				GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT),
659 				&size, &frev, &crev);
660 
661 	PP_ASSERT_WITH_CODE((NULL != table_address),
662 			"Error retrieving BIOS Table Address!", return NULL;);
663 
664 	return (ATOM_GPIO_PIN_LUT *)table_address;
665 }
666 
667 /*
668  * Returns true if the given pin id is found in the lookup table.
669  */
670 bool atomctrl_get_pp_assign_pin(
671 		struct pp_hwmgr *hwmgr,
672 		const uint32_t pinId,
673 		pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment)
674 {
675 	bool bRet = false;
676 	ATOM_GPIO_PIN_LUT *gpio_lookup_table =
677 		get_gpio_lookup_table(hwmgr->adev);
678 
679 	PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table),
680 			"Could not find GPIO lookup Table in BIOS.", return false);
681 
682 	bRet = atomctrl_lookup_gpio_pin(gpio_lookup_table, pinId,
683 		gpio_pin_assignment);
684 
685 	return bRet;
686 }
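
/*
 * Illustrative caller sketch (hypothetical): resolve a VBIOS GPIO pin id and
 * use the returned register index and bit shift to program the pin.
 * some_pin_id and use_pin() are placeholder names for the caller side.
 *
 *   pp_atomctrl_gpio_pin_assignment gpio_pin;
 *
 *   if (atomctrl_get_pp_assign_pin(hwmgr, some_pin_id, &gpio_pin))
 *       use_pin(gpio_pin.us_gpio_pin_aindex,
 *               gpio_pin.uc_gpio_pin_bit_shift);
 */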
687 
688 int atomctrl_calculate_voltage_evv_on_sclk(
689 		struct pp_hwmgr *hwmgr,
690 		uint8_t voltage_type,
691 		uint32_t sclk,
692 		uint16_t virtual_voltage_Id,
693 		uint16_t *voltage,
694 		uint16_t dpm_level,
695 		bool debug)
696 {
697 	ATOM_ASIC_PROFILING_INFO_V3_4 *getASICProfilingInfo;
698 	struct amdgpu_device *adev = hwmgr->adev;
699 	EFUSE_LINEAR_FUNC_PARAM sRO_fuse;
700 	EFUSE_LINEAR_FUNC_PARAM sCACm_fuse;
701 	EFUSE_LINEAR_FUNC_PARAM sCACb_fuse;
702 	EFUSE_LOGISTIC_FUNC_PARAM sKt_Beta_fuse;
703 	EFUSE_LOGISTIC_FUNC_PARAM sKv_m_fuse;
704 	EFUSE_LOGISTIC_FUNC_PARAM sKv_b_fuse;
705 	EFUSE_INPUT_PARAMETER sInput_FuseValues;
706 	READ_EFUSE_VALUE_PARAMETER sOutput_FuseValues;
707 
708 	uint32_t ul_RO_fused, ul_CACb_fused, ul_CACm_fused, ul_Kt_Beta_fused, ul_Kv_m_fused, ul_Kv_b_fused;
709 	fInt fSM_A0, fSM_A1, fSM_A2, fSM_A3, fSM_A4, fSM_A5, fSM_A6, fSM_A7;
710 	fInt fMargin_RO_a, fMargin_RO_b, fMargin_RO_c, fMargin_fixed, fMargin_FMAX_mean, fMargin_Plat_mean, fMargin_FMAX_sigma, fMargin_Plat_sigma, fMargin_DC_sigma;
711 	fInt fLkg_FT, repeat;
712 	fInt fMicro_FMAX, fMicro_CR, fSigma_FMAX, fSigma_CR, fSigma_DC, fDC_SCLK, fSquared_Sigma_DC, fSquared_Sigma_CR, fSquared_Sigma_FMAX;
713 	fInt fRLL_LoadLine, fDerateTDP, fVDDC_base, fA_Term, fC_Term, fB_Term, fRO_DC_margin;
714 	fInt fRO_fused, fCACm_fused, fCACb_fused, fKv_m_fused, fKv_b_fused, fKt_Beta_fused, fFT_Lkg_V0NORM;
715 	fInt fSclk_margin, fSclk, fEVV_V;
716 	fInt fV_min, fV_max, fT_prod, fLKG_Factor, fT_FT, fV_FT, fV_x, fTDP_Power, fTDP_Power_right, fTDP_Power_left, fTDP_Current, fV_NL;
717 	uint32_t ul_FT_Lkg_V0NORM;
718 	fInt fLn_MaxDivMin, fMin, fAverage, fRange;
719 	fInt fRoots[2];
720 	fInt fStepSize = GetScaledFraction(625, 100000);
721 
722 	int result;
723 
724 	getASICProfilingInfo = (ATOM_ASIC_PROFILING_INFO_V3_4 *)
725 			smu_atom_get_data_table(hwmgr->adev,
726 					GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
727 					NULL, NULL, NULL);
728 
729 	if (!getASICProfilingInfo)
730 		return -1;
731 
732 	if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 ||
733 	    (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 &&
734 	     getASICProfilingInfo->asHeader.ucTableContentRevision < 4))
735 		return -1;
736 
737 	/*-----------------------------------------------------------
738 	 *GETTING MULTI-STEP PARAMETERS RELATED TO CURRENT DPM LEVEL
739 	 *-----------------------------------------------------------
740 	 */
741 	fRLL_LoadLine = Divide(getASICProfilingInfo->ulLoadLineSlop, 1000);
742 
743 	switch (dpm_level) {
744 	case 1:
745 		fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM1), 1000);
746 		break;
747 	case 2:
748 		fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM2), 1000);
749 		break;
750 	case 3:
751 		fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM3), 1000);
752 		break;
753 	case 4:
754 		fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM4), 1000);
755 		break;
756 	case 5:
757 		fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM5), 1000);
758 		break;
759 	case 6:
760 		fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM6), 1000);
761 		break;
762 	case 7:
763 		fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000);
764 		break;
765 	default:
766 		pr_err("DPM Level not supported\n");
767 		fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000);
768 	}
769 
770 	/*-------------------------
771 	 * DECODING FUSE VALUES
772 	 * ------------------------
773 	 */
774 	/*Decode RO_Fused*/
775 	sRO_fuse = getASICProfilingInfo->sRoFuse;
776 
777 	sInput_FuseValues.usEfuseIndex = sRO_fuse.usEfuseIndex;
778 	sInput_FuseValues.ucBitShift = sRO_fuse.ucEfuseBitLSB;
779 	sInput_FuseValues.ucBitLength = sRO_fuse.ucEfuseLength;
780 
781 	sOutput_FuseValues.sEfuse = sInput_FuseValues;
782 
783 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
784 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
785 			(uint32_t *)&sOutput_FuseValues);
786 
787 	if (result)
788 		return result;
789 
790 	/* Finally, the actual fuse value */
791 	ul_RO_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
792 	fMin = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseMin), 1);
793 	fRange = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseEncodeRange), 1);
794 	fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength);
795 
796 	sCACm_fuse = getASICProfilingInfo->sCACm;
797 
798 	sInput_FuseValues.usEfuseIndex = sCACm_fuse.usEfuseIndex;
799 	sInput_FuseValues.ucBitShift = sCACm_fuse.ucEfuseBitLSB;
800 	sInput_FuseValues.ucBitLength = sCACm_fuse.ucEfuseLength;
801 
802 	sOutput_FuseValues.sEfuse = sInput_FuseValues;
803 
804 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
805 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
806 			(uint32_t *)&sOutput_FuseValues);
807 
808 	if (result)
809 		return result;
810 
811 	ul_CACm_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
812 	fMin = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseMin), 1000);
813 	fRange = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseEncodeRange), 1000);
814 
815 	fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength);
816 
817 	sCACb_fuse = getASICProfilingInfo->sCACb;
818 
819 	sInput_FuseValues.usEfuseIndex = sCACb_fuse.usEfuseIndex;
820 	sInput_FuseValues.ucBitShift = sCACb_fuse.ucEfuseBitLSB;
821 	sInput_FuseValues.ucBitLength = sCACb_fuse.ucEfuseLength;
822 	sOutput_FuseValues.sEfuse = sInput_FuseValues;
823 
824 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
825 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
826 			(uint32_t *)&sOutput_FuseValues);
827 
828 	if (result)
829 		return result;
830 
831 	ul_CACb_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
832 	fMin = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseMin), 1000);
833 	fRange = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseEncodeRange), 1000);
834 
835 	fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength);
836 
837 	sKt_Beta_fuse = getASICProfilingInfo->sKt_b;
838 
839 	sInput_FuseValues.usEfuseIndex = sKt_Beta_fuse.usEfuseIndex;
840 	sInput_FuseValues.ucBitShift = sKt_Beta_fuse.ucEfuseBitLSB;
841 	sInput_FuseValues.ucBitLength = sKt_Beta_fuse.ucEfuseLength;
842 
843 	sOutput_FuseValues.sEfuse = sInput_FuseValues;
844 
845 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
846 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
847 			(uint32_t *)&sOutput_FuseValues);
848 
849 	if (result)
850 		return result;
851 
852 	ul_Kt_Beta_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
853 	fAverage = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeAverage), 1000);
854 	fRange = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeRange), 1000);
855 
856 	fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused,
857 			fAverage, fRange, sKt_Beta_fuse.ucEfuseLength);
858 
859 	sKv_m_fuse = getASICProfilingInfo->sKv_m;
860 
861 	sInput_FuseValues.usEfuseIndex = sKv_m_fuse.usEfuseIndex;
862 	sInput_FuseValues.ucBitShift = sKv_m_fuse.ucEfuseBitLSB;
863 	sInput_FuseValues.ucBitLength = sKv_m_fuse.ucEfuseLength;
864 
865 	sOutput_FuseValues.sEfuse = sInput_FuseValues;
866 
867 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
868 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
869 			(uint32_t *)&sOutput_FuseValues);
870 	if (result)
871 		return result;
872 
873 	ul_Kv_m_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
874 	fAverage = GetScaledFraction(le32_to_cpu(sKv_m_fuse.ulEfuseEncodeAverage), 1000);
875 	fRange = GetScaledFraction((le32_to_cpu(sKv_m_fuse.ulEfuseEncodeRange) & 0x7fffffff), 1000);
876 	fRange = fMultiply(fRange, ConvertToFraction(-1));
877 
878 	fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused,
879 			fAverage, fRange, sKv_m_fuse.ucEfuseLength);
880 
881 	sKv_b_fuse = getASICProfilingInfo->sKv_b;
882 
883 	sInput_FuseValues.usEfuseIndex = sKv_b_fuse.usEfuseIndex;
884 	sInput_FuseValues.ucBitShift = sKv_b_fuse.ucEfuseBitLSB;
885 	sInput_FuseValues.ucBitLength = sKv_b_fuse.ucEfuseLength;
886 	sOutput_FuseValues.sEfuse = sInput_FuseValues;
887 
888 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
889 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
890 			(uint32_t *)&sOutput_FuseValues);
891 
892 	if (result)
893 		return result;
894 
895 	ul_Kv_b_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
896 	fAverage = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeAverage), 1000);
897 	fRange = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeRange), 1000);
898 
899 	fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused,
900 			fAverage, fRange, sKv_b_fuse.ucEfuseLength);
901 
902 	/* Decoding the Leakage - No special struct container */
903 	/*
904 	 * usLkgEuseIndex=56
905 	 * ucLkgEfuseBitLSB=6
906 	 * ucLkgEfuseLength=10
907 	 * ulLkgEncodeLn_MaxDivMin=69077
908 	 * ulLkgEncodeMax=1000000
909 	 * ulLkgEncodeMin=1000
910 	 * ulEfuseLogisticAlpha=13
911 	 */
912 
913 	sInput_FuseValues.usEfuseIndex = getASICProfilingInfo->usLkgEuseIndex;
914 	sInput_FuseValues.ucBitShift = getASICProfilingInfo->ucLkgEfuseBitLSB;
915 	sInput_FuseValues.ucBitLength = getASICProfilingInfo->ucLkgEfuseLength;
916 
917 	sOutput_FuseValues.sEfuse = sInput_FuseValues;
918 
919 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
920 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
921 			(uint32_t *)&sOutput_FuseValues);
922 
923 	if (result)
924 		return result;
925 
926 	ul_FT_Lkg_V0NORM = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
927 	fLn_MaxDivMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin), 10000);
928 	fMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeMin), 10000);
929 
930 	fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM,
931 			fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength);
932 	fLkg_FT = fFT_Lkg_V0NORM;
933 
934 	/*-------------------------------------------
935 	 * PART 2 - Grabbing all required values
936 	 *-------------------------------------------
937 	 */
938 	fSM_A0 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A0), 1000000),
939 			ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign)));
940 	fSM_A1 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A1), 1000000),
941 			ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign)));
942 	fSM_A2 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A2), 100000),
943 			ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign)));
944 	fSM_A3 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A3), 1000000),
945 			ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign)));
946 	fSM_A4 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A4), 1000000),
947 			ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign)));
948 	fSM_A5 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A5), 1000),
949 			ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign)));
950 	fSM_A6 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A6), 1000),
951 			ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign)));
952 	fSM_A7 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A7), 1000),
953 			ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign)));
954 
955 	fMargin_RO_a = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_a));
956 	fMargin_RO_b = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_b));
957 	fMargin_RO_c = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_c));
958 
959 	fMargin_fixed = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_fixed));
960 
961 	fMargin_FMAX_mean = GetScaledFraction(
962 		le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_mean), 10000);
963 	fMargin_Plat_mean = GetScaledFraction(
964 		le32_to_cpu(getASICProfilingInfo->ulMargin_plat_mean), 10000);
965 	fMargin_FMAX_sigma = GetScaledFraction(
966 		le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_sigma), 10000);
967 	fMargin_Plat_sigma = GetScaledFraction(
968 		le32_to_cpu(getASICProfilingInfo->ulMargin_plat_sigma), 10000);
969 
970 	fMargin_DC_sigma = GetScaledFraction(
971 		le32_to_cpu(getASICProfilingInfo->ulMargin_DC_sigma), 100);
972 	fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000));
973 
974 	fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100));
975 	fCACb_fused = fDivide(fCACb_fused, ConvertToFraction(100));
976 	fKt_Beta_fused = fDivide(fKt_Beta_fused, ConvertToFraction(100));
977 	fKv_m_fused =  fNegate(fDivide(fKv_m_fused, ConvertToFraction(100)));
978 	fKv_b_fused = fDivide(fKv_b_fused, ConvertToFraction(10));
979 
980 	fSclk = GetScaledFraction(sclk, 100);
981 
982 	fV_max = fDivide(GetScaledFraction(
983 				 le32_to_cpu(getASICProfilingInfo->ulMaxVddc), 1000), ConvertToFraction(4));
984 	fT_prod = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulBoardCoreTemp), 10);
985 	fLKG_Factor = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulEvvLkgFactor), 100);
986 	fT_FT = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLeakageTemp), 10);
987 	fV_FT = fDivide(GetScaledFraction(
988 				le32_to_cpu(getASICProfilingInfo->ulLeakageVoltage), 1000), ConvertToFraction(4));
989 	fV_min = fDivide(GetScaledFraction(
990 				 le32_to_cpu(getASICProfilingInfo->ulMinVddc), 1000), ConvertToFraction(4));
991 
992 	/*-----------------------
993 	 * PART 3
994 	 *-----------------------
995 	 */
996 
997 	fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5));
998 	fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b);
999 	fC_Term = fAdd(fMargin_RO_c,
1000 			fAdd(fMultiply(fSM_A0, fLkg_FT),
1001 			fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)),
1002 			fAdd(fMultiply(fSM_A3, fSclk),
1003 			fSubtract(fSM_A7, fRO_fused)))));
1004 
1005 	fVDDC_base = fSubtract(fRO_fused,
1006 			fSubtract(fMargin_RO_c,
1007 					fSubtract(fSM_A3, fMultiply(fSM_A1, fSclk))));
1008 	fVDDC_base = fDivide(fVDDC_base, fAdd(fMultiply(fSM_A0, fSclk), fSM_A2));
1009 
1010 	repeat = fSubtract(fVDDC_base,
1011 			fDivide(fMargin_DC_sigma, ConvertToFraction(1000)));
1012 
1013 	fRO_DC_margin = fAdd(fMultiply(fMargin_RO_a,
1014 			fGetSquare(repeat)),
1015 			fAdd(fMultiply(fMargin_RO_b, repeat),
1016 			fMargin_RO_c));
1017 
1018 	fDC_SCLK = fSubtract(fRO_fused,
1019 			fSubtract(fRO_DC_margin,
1020 			fSubtract(fSM_A3,
1021 			fMultiply(fSM_A2, repeat))));
1022 	fDC_SCLK = fDivide(fDC_SCLK, fAdd(fMultiply(fSM_A0, repeat), fSM_A1));
1023 
1024 	fSigma_DC = fSubtract(fSclk, fDC_SCLK);
1025 
1026 	fMicro_FMAX = fMultiply(fSclk, fMargin_FMAX_mean);
1027 	fMicro_CR = fMultiply(fSclk, fMargin_Plat_mean);
1028 	fSigma_FMAX = fMultiply(fSclk, fMargin_FMAX_sigma);
1029 	fSigma_CR = fMultiply(fSclk, fMargin_Plat_sigma);
1030 
1031 	fSquared_Sigma_DC = fGetSquare(fSigma_DC);
1032 	fSquared_Sigma_CR = fGetSquare(fSigma_CR);
1033 	fSquared_Sigma_FMAX = fGetSquare(fSigma_FMAX);
1034 
1035 	fSclk_margin = fAdd(fMicro_FMAX,
1036 			fAdd(fMicro_CR,
1037 			fAdd(fMargin_fixed,
1038 			fSqrt(fAdd(fSquared_Sigma_FMAX,
1039 			fAdd(fSquared_Sigma_DC, fSquared_Sigma_CR))))));
1040 	/*
1041 	 fA_Term = fSM_A4 * (fSclk + fSclk_margin) + fSM_A5;
1042 	 fB_Term = fSM_A2 * (fSclk + fSclk_margin) + fSM_A6;
1043 	 fC_Term = fRO_DC_margin + fSM_A0 * fLkg_FT + fSM_A1 * fLkg_FT * (fSclk + fSclk_margin) + fSM_A3 * (fSclk + fSclk_margin) + fSM_A7 - fRO_fused;
1044 	 */
1045 
1046 	fA_Term = fAdd(fMultiply(fSM_A4, fAdd(fSclk, fSclk_margin)), fSM_A5);
1047 	fB_Term = fAdd(fMultiply(fSM_A2, fAdd(fSclk, fSclk_margin)), fSM_A6);
1048 	fC_Term = fAdd(fRO_DC_margin,
1049 			fAdd(fMultiply(fSM_A0, fLkg_FT),
1050 			fAdd(fMultiply(fMultiply(fSM_A1, fLkg_FT),
1051 			fAdd(fSclk, fSclk_margin)),
1052 			fAdd(fMultiply(fSM_A3,
1053 			fAdd(fSclk, fSclk_margin)),
1054 			fSubtract(fSM_A7, fRO_fused)))));
1055 
1056 	SolveQuadracticEqn(fA_Term, fB_Term, fC_Term, fRoots);
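	/*
	 * The EVV voltage is taken as the smaller root of
	 * fA_Term * V^2 + fB_Term * V + fC_Term = 0, then clamped to the
	 * [fV_min, fV_max - fStepSize] window and rounded up to the
	 * fStepSize (0.00625 V) granularity below.
	 */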
1057 
1058 	if (GreaterThan(fRoots[0], fRoots[1]))
1059 		fEVV_V = fRoots[1];
1060 	else
1061 		fEVV_V = fRoots[0];
1062 
1063 	if (GreaterThan(fV_min, fEVV_V))
1064 		fEVV_V = fV_min;
1065 	else if (GreaterThan(fEVV_V, fV_max))
1066 		fEVV_V = fSubtract(fV_max, fStepSize);
1067 
1068 	fEVV_V = fRoundUpByStepSize(fEVV_V, fStepSize, 0);
1069 
1070 	/*-----------------
1071 	 * PART 4
1072 	 *-----------------
1073 	 */
1074 
1075 	fV_x = fV_min;
1076 
1077 	while (GreaterThan(fAdd(fV_max, fStepSize), fV_x)) {
1078 		fTDP_Power_left = fMultiply(fMultiply(fMultiply(fAdd(
1079 				fMultiply(fCACm_fused, fV_x), fCACb_fused), fSclk),
1080 				fGetSquare(fV_x)), fDerateTDP);
1081 
1082 		fTDP_Power_right = fMultiply(fFT_Lkg_V0NORM, fMultiply(fLKG_Factor,
1083 				fMultiply(fExponential(fMultiply(fAdd(fMultiply(fKv_m_fused,
1084 				fT_prod), fKv_b_fused), fV_x)), fV_x)));
1085 		fTDP_Power_right = fMultiply(fTDP_Power_right, fExponential(fMultiply(
1086 				fKt_Beta_fused, fT_prod)));
1087 		fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply(
1088 				fAdd(fMultiply(fKv_m_fused, fT_prod), fKv_b_fused), fV_FT)));
1089 		fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply(
1090 				fKt_Beta_fused, fT_FT)));
1091 
1092 		fTDP_Power = fAdd(fTDP_Power_left, fTDP_Power_right);
1093 
1094 		fTDP_Current = fDivide(fTDP_Power, fV_x);
1095 
1096 		fV_NL = fAdd(fV_x, fDivide(fMultiply(fTDP_Current, fRLL_LoadLine),
1097 				ConvertToFraction(10)));
1098 
1099 		fV_NL = fRoundUpByStepSize(fV_NL, fStepSize, 0);
1100 
1101 		if (GreaterThan(fV_max, fV_NL) &&
1102 			(GreaterThan(fV_NL, fEVV_V) ||
1103 			Equal(fV_NL, fEVV_V))) {
1104 			fV_NL = fMultiply(fV_NL, ConvertToFraction(1000));
1105 
1106 			*voltage = (uint16_t)fV_NL.partial.real;
1107 			break;
1108 		} else
1109 			fV_x = fAdd(fV_x, fStepSize);
1110 	}
1111 
1112 	return result;
1113 }
1114 
1115 /**
1116  * atomctrl_get_voltage_evv_on_sclk - gets voltage via a call to the ATOM COMMAND table.
1117  * @hwmgr:              input: pointer to the hardware manager (pp_hwmgr)
1118  * @voltage_type:       input: type of EVV voltage, VDDC or VDDGFX
1119  * @sclk:               input: DPM state SCLK frequency in 10 kHz units,
1120  *                      as defined in the PPTable SCLK/VDDC dependency
1121  *                      table associated with this virtual_voltage_Id
1122  * @virtual_voltage_Id: input: voltage id matching the per-voltage DPM state: 0xff01, 0xff02 .. 0xff08
1123  * @voltage:            output: real voltage level in units of mV
1124  */
1125 int atomctrl_get_voltage_evv_on_sclk(
1126 		struct pp_hwmgr *hwmgr,
1127 		uint8_t voltage_type,
1128 		uint32_t sclk, uint16_t virtual_voltage_Id,
1129 		uint16_t *voltage)
1130 {
1131 	struct amdgpu_device *adev = hwmgr->adev;
1132 	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
1133 	int result;
1134 
1135 	get_voltage_info_param_space.ucVoltageType   =
1136 		voltage_type;
1137 	get_voltage_info_param_space.ucVoltageMode   =
1138 		ATOM_GET_VOLTAGE_EVV_VOLTAGE;
1139 	get_voltage_info_param_space.usVoltageLevel  =
1140 		cpu_to_le16(virtual_voltage_Id);
1141 	get_voltage_info_param_space.ulSCLKFreq      =
1142 		cpu_to_le32(sclk);
1143 
1144 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
1145 			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
1146 			(uint32_t *)&get_voltage_info_param_space);
1147 
1148 	*voltage = result ? 0 :
1149 			le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
1150 				(&get_voltage_info_param_space))->usVoltageLevel);
1151 
1152 	return result;
1153 }
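
/*
 * Illustrative caller sketch (hypothetical): translate a leakage/virtual
 * voltage id (0xff01..0xff08) at a given SCLK into a real voltage in mV.
 * sclk_10khz is a placeholder for the caller-supplied clock.
 *
 *   uint16_t vddc;
 *
 *   if (!atomctrl_get_voltage_evv_on_sclk(hwmgr, VOLTAGE_TYPE_VDDC,
 *                                         sclk_10khz, 0xff01, &vddc))
 *       ;  // vddc now holds the resolved voltage in mV
 */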
1154 
1155 /**
1156  * atomctrl_get_voltage_evv - gets voltage via a call to the ATOM COMMAND table.
1157  * @hwmgr:              input: pointer to the hardware manager (pp_hwmgr)
1158  * @virtual_voltage_id: input: voltage id matching the per-voltage DPM state: 0xff01, 0xff02 .. 0xff08
1159  * @voltage:            output: real voltage level in units of mV
1160  */
1161 int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
1162 			     uint16_t virtual_voltage_id,
1163 			     uint16_t *voltage)
1164 {
1165 	struct amdgpu_device *adev = hwmgr->adev;
1166 	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
1167 	int result;
1168 	int entry_id;
1169 
1170 	/* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
1171 	for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) {
1172 		if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].v == virtual_voltage_id) {
1173 			/* found */
1174 			break;
1175 		}
1176 	}
1177 
1178 	if (entry_id >= hwmgr->dyn_state.vddc_dependency_on_sclk->count) {
1179 		pr_debug("Can't find requested voltage id in vddc_dependency_on_sclk table!\n");
1180 		return -EINVAL;
1181 	}
1182 
1183 	get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC;
1184 	get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
1185 	get_voltage_info_param_space.usVoltageLevel = virtual_voltage_id;
1186 	get_voltage_info_param_space.ulSCLKFreq =
1187 		cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk);
1188 
1189 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
1190 			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
1191 			(uint32_t *)&get_voltage_info_param_space);
1192 
1193 	if (0 != result)
1194 		return result;
1195 
1196 	*voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
1197 				(&get_voltage_info_param_space))->usVoltageLevel);
1198 
1199 	return result;
1200 }
1201 
1202 /*
1203  * Get the mpll reference clock in 10KHz
1204  */
1205 uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr)
1206 {
1207 	ATOM_COMMON_TABLE_HEADER *fw_info;
1208 	uint32_t clock;
1209 	u8 frev, crev;
1210 	u16 size;
1211 
1212 	fw_info = (ATOM_COMMON_TABLE_HEADER *)
1213 		smu_atom_get_data_table(hwmgr->adev,
1214 				GetIndexIntoMasterTable(DATA, FirmwareInfo),
1215 				&size, &frev, &crev);
1216 
1217 	if (fw_info == NULL)
1218 		clock = 2700;
1219 	else {
1220 		if ((fw_info->ucTableFormatRevision == 2) &&
1221 			(le16_to_cpu(fw_info->usStructureSize) >= sizeof(ATOM_FIRMWARE_INFO_V2_1))) {
1222 			ATOM_FIRMWARE_INFO_V2_1 *fwInfo_2_1 =
1223 				(ATOM_FIRMWARE_INFO_V2_1 *)fw_info;
1224 			clock = (uint32_t)(le16_to_cpu(fwInfo_2_1->usMemoryReferenceClock));
1225 		} else {
1226 			ATOM_FIRMWARE_INFO *fwInfo_0_0 =
1227 				(ATOM_FIRMWARE_INFO *)fw_info;
1228 			clock = (uint32_t)(le16_to_cpu(fwInfo_0_0->usReferenceClock));
1229 		}
1230 	}
1231 
1232 	return clock;
1233 }
1234 
1235 /*
1236  * Get the asic internal spread spectrum table
1237  */
1238 static ATOM_ASIC_INTERNAL_SS_INFO *asic_internal_ss_get_ss_table(void *device)
1239 {
1240 	ATOM_ASIC_INTERNAL_SS_INFO *table = NULL;
1241 	u8 frev, crev;
1242 	u16 size;
1243 
1244 	table = (ATOM_ASIC_INTERNAL_SS_INFO *)
1245 		smu_atom_get_data_table(device,
1246 			GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info),
1247 			&size, &frev, &crev);
1248 
1249 	return table;
1250 }
1251 
1252 bool atomctrl_is_asic_internal_ss_supported(struct pp_hwmgr *hwmgr)
1253 {
1254 	ATOM_ASIC_INTERNAL_SS_INFO *table =
1255 		asic_internal_ss_get_ss_table(hwmgr->adev);
1256 
1257 	if (table)
1258 		return true;
1259 	else
1260 		return false;
1261 }
1262 
1263 /*
1264  * Get the asic internal spread spectrum assignment
1265  */
1266 static int asic_internal_ss_get_ss_asignment(struct pp_hwmgr *hwmgr,
1267 		const uint8_t clockSource,
1268 		const uint32_t clockSpeed,
1269 		pp_atomctrl_internal_ss_info *ssEntry)
1270 {
1271 	ATOM_ASIC_INTERNAL_SS_INFO *table;
1272 	ATOM_ASIC_SS_ASSIGNMENT *ssInfo;
1273 	int entry_found = 0;
1274 
1275 	memset(ssEntry, 0x00, sizeof(pp_atomctrl_internal_ss_info));
1276 
1277 	table = asic_internal_ss_get_ss_table(hwmgr->adev);
1278 
1279 	if (NULL == table)
1280 		return -1;
1281 
1282 	ssInfo = &table->asSpreadSpectrum[0];
1283 
1284 	while (((uint8_t *)ssInfo - (uint8_t *)table) <
1285 		le16_to_cpu(table->sHeader.usStructureSize)) {
1286 		if ((clockSource == ssInfo->ucClockIndication) &&
1287 			((uint32_t)clockSpeed <= le32_to_cpu(ssInfo->ulTargetClockRange))) {
1288 			entry_found = 1;
1289 			break;
1290 		}
1291 
1292 		ssInfo = (ATOM_ASIC_SS_ASSIGNMENT *)((uint8_t *)ssInfo +
1293 				sizeof(ATOM_ASIC_SS_ASSIGNMENT));
1294 	}
1295 
1296 	if (entry_found) {
1297 		ssEntry->speed_spectrum_percentage =
1298 			le16_to_cpu(ssInfo->usSpreadSpectrumPercentage);
1299 		ssEntry->speed_spectrum_rate = le16_to_cpu(ssInfo->usSpreadRateInKhz);
1300 
1301 		if (((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) &&
1302 			(GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) ||
1303 			(GET_DATA_TABLE_MAJOR_REVISION(table) == 3)) {
1304 			ssEntry->speed_spectrum_rate /= 100;
1305 		}
1306 
1307 		switch (ssInfo->ucSpreadSpectrumMode) {
1308 		case 0:
1309 			ssEntry->speed_spectrum_mode =
1310 				pp_atomctrl_spread_spectrum_mode_down;
1311 			break;
1312 		case 1:
1313 			ssEntry->speed_spectrum_mode =
1314 				pp_atomctrl_spread_spectrum_mode_center;
1315 			break;
1316 		default:
1317 			ssEntry->speed_spectrum_mode =
1318 				pp_atomctrl_spread_spectrum_mode_down;
1319 			break;
1320 		}
1321 	}
1322 
1323 	return entry_found ? 0 : 1;
1324 }
1325 
1326 /*
1327  * Get the memory clock spread spectrum info
1328  */
1329 int atomctrl_get_memory_clock_spread_spectrum(
1330 		struct pp_hwmgr *hwmgr,
1331 		const uint32_t memory_clock,
1332 		pp_atomctrl_internal_ss_info *ssInfo)
1333 {
1334 	return asic_internal_ss_get_ss_asignment(hwmgr,
1335 			ASIC_INTERNAL_MEMORY_SS, memory_clock, ssInfo);
1336 }
1337 
1338 /*
1339  * Get the engine clock spread spectrum info
1340  */
1341 int atomctrl_get_engine_clock_spread_spectrum(
1342 		struct pp_hwmgr *hwmgr,
1343 		const uint32_t engine_clock,
1344 		pp_atomctrl_internal_ss_info *ssInfo)
1345 {
1346 	return asic_internal_ss_get_ss_asignment(hwmgr,
1347 			ASIC_INTERNAL_ENGINE_SS, engine_clock, ssInfo);
1348 }
1349 
1350 int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index,
1351 		uint16_t end_index, uint32_t *efuse)
1352 {
1353 	struct amdgpu_device *adev = hwmgr->adev;
1354 	uint32_t mask;
1355 	int result;
1356 	READ_EFUSE_VALUE_PARAMETER efuse_param;
1357 
1358 	if ((end_index - start_index)  == 31)
1359 		mask = 0xFFFFFFFF;
1360 	else
1361 		mask = (1 << ((end_index - start_index) + 1)) - 1;
1362 
1363 	efuse_param.sEfuse.usEfuseIndex = cpu_to_le16((start_index / 32) * 4);
1364 	efuse_param.sEfuse.ucBitShift = (uint8_t)
1365 			(start_index - ((start_index / 32) * 32));
1366 	efuse_param.sEfuse.ucBitLength  = (uint8_t)
1367 			((end_index - start_index) + 1);
1368 
1369 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
1370 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
1371 			(uint32_t *)&efuse_param);
1372 	*efuse = result ? 0 : le32_to_cpu(efuse_param.ulEfuseValue) & mask;
1373 
1374 	return result;
1375 }
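
/*
 * The efuse addressing above works on 32-bit words: usEfuseIndex is the byte
 * offset of the word containing start_index, ucBitShift the bit position
 * within that word, and the result is masked to (end_index - start_index + 1)
 * bits.  Worked example (illustrative only): start_index = 24, end_index = 31
 * gives usEfuseIndex = (24 / 32) * 4 = 0, ucBitShift = 24, ucBitLength = 8 and
 * mask = (1 << 8) - 1 = 0xFF.
 */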
1376 
1377 int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
1378 			      uint8_t level)
1379 {
1380 	struct amdgpu_device *adev = hwmgr->adev;
1381 	DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters;
1382 	int result;
1383 
1384 	memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq =
1385 		memory_clock & SET_CLOCK_FREQ_MASK;
1386 	memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag =
1387 		ADJUST_MC_SETTING_PARAM;
1388 	memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level;
1389 
1390 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
1391 		 GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
1392 		(uint32_t *)&memory_clock_parameters);
1393 
1394 	return result;
1395 }
1396 
1397 int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
1398 				uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage)
1399 {
1400 	struct amdgpu_device *adev = hwmgr->adev;
1401 	int result;
1402 	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3 get_voltage_info_param_space;
1403 
1404 	get_voltage_info_param_space.ucVoltageType = voltage_type;
1405 	get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
1406 	get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_Id);
1407 	get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(sclk);
1408 
1409 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
1410 			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
1411 			(uint32_t *)&get_voltage_info_param_space);
1412 
1413 	*voltage = result ? 0 :
1414 		le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel);
1415 
1416 	return result;
1417 }
1418 
1419 int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table)
1420 {
1421 
1422 	int i;
1423 	u8 frev, crev;
1424 	u16 size;
1425 
1426 	ATOM_SMU_INFO_V2_1 *psmu_info =
1427 		(ATOM_SMU_INFO_V2_1 *)smu_atom_get_data_table(hwmgr->adev,
1428 			GetIndexIntoMasterTable(DATA, SMU_Info),
1429 			&size, &frev, &crev);
1430 
1431 	if (!psmu_info)
1432 		return -EINVAL;
1433 
1434 	for (i = 0; i < psmu_info->ucSclkEntryNum; i++) {
1435 		table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
1436 		table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv;
1437 		table->entry[i].usFcw_pcc =
1438 			le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc);
1439 		table->entry[i].usFcw_trans_upper =
1440 			le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper);
1441 		table->entry[i].usRcw_trans_lower =
1442 			le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower);
1443 	}
1444 
1445 	return 0;
1446 }
1447 
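/*
 * Report the shared-rail configuration for VDDC, taken from the
 * ucSharePowerSource field of the SMU_Info data table.
 */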
1448 int atomctrl_get_vddc_shared_railinfo(struct pp_hwmgr *hwmgr, uint8_t *shared_rail)
1449 {
1450 	ATOM_SMU_INFO_V2_1 *psmu_info =
1451 		(ATOM_SMU_INFO_V2_1 *)smu_atom_get_data_table(hwmgr->adev,
1452 			GetIndexIntoMasterTable(DATA, SMU_Info),
1453 			NULL, NULL, NULL);
1454 	if (!psmu_info)
1455 		return -1;
1456 
1457 	*shared_rail = psmu_info->ucSharePowerSource;
1458 
1459 	return 0;
1460 }
1461 
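/*
 * Pull the AVFS characterisation parameters (mean/sigma constants, VDROOP
 * and fuse table coefficients, enable flags) out of the V3.6 ASIC profiling
 * table, converting multi-byte fields from VBIOS little-endian order.
 */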
1462 int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
1463 				  struct pp_atom_ctrl__avfs_parameters *param)
1464 {
1465 	ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL;
1466 
1467 	if (param == NULL)
1468 		return -EINVAL;
1469 
1470 	profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *)
1471 			smu_atom_get_data_table(hwmgr->adev,
1472 					GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
1473 					NULL, NULL, NULL);
1474 	if (!profile)
1475 		return -1;
1476 
1477 	param->ulAVFS_meanNsigma_Acontant0 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant0);
1478 	param->ulAVFS_meanNsigma_Acontant1 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant1);
1479 	param->ulAVFS_meanNsigma_Acontant2 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant2);
1480 	param->usAVFS_meanNsigma_DC_tol_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_DC_tol_sigma);
1481 	param->usAVFS_meanNsigma_Platform_mean = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_mean);
1482 	param->usAVFS_meanNsigma_Platform_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_sigma);
1483 	param->ulGB_VDROOP_TABLE_CKSOFF_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a0);
1484 	param->ulGB_VDROOP_TABLE_CKSOFF_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a1);
1485 	param->ulGB_VDROOP_TABLE_CKSOFF_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a2);
1486 	param->ulGB_VDROOP_TABLE_CKSON_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a0);
1487 	param->ulGB_VDROOP_TABLE_CKSON_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a1);
1488 	param->ulGB_VDROOP_TABLE_CKSON_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a2);
1489 	param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
1490 	param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2);
1491 	param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b);
1492 	param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_m1);
1493 	param->usAVFSGB_FUSE_TABLE_CKSON_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSON_m2);
1494 	param->ulAVFSGB_FUSE_TABLE_CKSON_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_b);
1495 	param->usMaxVoltage_0_25mv = le16_to_cpu(profile->usMaxVoltage_0_25mv);
1496 	param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF;
1497 	param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON;
1498 	param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF;
1499 	param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON;
1500 	param->usPSM_Age_ComFactor = le16_to_cpu(profile->usPSM_Age_ComFactor);
1501 	param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage;
1502 
1503 	return 0;
1504 }
1505 
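/*
 * Look up the SVID2 regulator description for the given voltage type and
 * return its SVD/SVC GPIO assignments and load line setting.
 */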
1506 int  atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
1507 				uint8_t *svd_gpio_id, uint8_t *svc_gpio_id,
1508 				uint16_t *load_line)
1509 {
1510 	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
1511 		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
1512 
1513 	const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;
1514 
1515 	PP_ASSERT_WITH_CODE((NULL != voltage_info),
1516 			"Could not find Voltage Table in BIOS.", return -EINVAL);
1517 
1518 	voltage_object = atomctrl_lookup_voltage_type_v3
1519 		(voltage_info, voltage_type,  VOLTAGE_OBJ_SVID2);
	PP_ASSERT_WITH_CODE((voltage_object != NULL),
			"Could not find SVID2 voltage object in BIOS.", return -EINVAL);
1520 
1521 	*svd_gpio_id = voltage_object->asSVID2Obj.ucSVDGpioId;
1522 	*svc_gpio_id = voltage_object->asSVID2Obj.ucSVCGpioId;
1523 	*load_line = voltage_object->asSVID2Obj.usLoadLine_PSI;
1524 
1525 	return 0;
1526 }
1527 
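/*
 * Ask the SetVoltage command table for the leakage ID that the VBIOS derived
 * from the fuses; the value is reported back through usVoltageLevel.
 */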
1528 int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id)
1529 {
1530 	struct amdgpu_device *adev = hwmgr->adev;
1531 	SET_VOLTAGE_PS_ALLOCATION allocation;
1532 	SET_VOLTAGE_PARAMETERS_V1_3 *voltage_parameters =
1533 			(SET_VOLTAGE_PARAMETERS_V1_3 *)&allocation.sASICSetVoltage;
1534 	int result;
1535 
1536 	voltage_parameters->ucVoltageMode = ATOM_GET_LEAKAGE_ID;
1537 
1538 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
1539 			GetIndexIntoMasterTable(COMMAND, SetVoltage),
1540 			(uint32_t *)voltage_parameters);
1541 
1542 	*virtual_voltage_id = voltage_parameters->usVoltageLevel;
1543 
1544 	return result;
1545 }
1546 
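/*
 * Translate a virtual (leakage) voltage ID into real VDDC/VDDCI values using
 * the ELB arrays of the V2.1 ASIC profiling table and the fused leakage bin.
 */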
1547 int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
1548 					uint16_t *vddc, uint16_t *vddci,
1549 					uint16_t virtual_voltage_id,
1550 					uint16_t efuse_voltage_id)
1551 {
1552 	int i, j;
1553 	int ix;
1554 	u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
1555 	ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
1556 
1557 	*vddc = 0;
1558 	*vddci = 0;
1559 
1560 	ix = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
1561 
1562 	profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
1563 			smu_atom_get_data_table(hwmgr->adev,
1564 					ix,
1565 					NULL, NULL, NULL);
1566 	if (!profile)
1567 		return -EINVAL;
1568 
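	/*
	 * The ELB arrays are only present in table revision 2.1 or newer.
	 * The level arrays are laid out as [leakage bin][virtual ID]: pick
	 * the column matching the virtual ID, then the first bin whose
	 * bound covers the fused leakage value.
	 */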
1569 	if ((profile->asHeader.ucTableFormatRevision >= 2) &&
1570 		(profile->asHeader.ucTableContentRevision >= 1) &&
1571 		(profile->asHeader.usStructureSize >= sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))) {
1572 		leakage_bin = (u16 *)((char *)profile + profile->usLeakageBinArrayOffset);
1573 		vddc_id_buf = (u16 *)((char *)profile + profile->usElbVDDC_IdArrayOffset);
1574 		vddc_buf = (u16 *)((char *)profile + profile->usElbVDDC_LevelArrayOffset);
1575 		if (profile->ucElbVDDC_Num > 0) {
1576 			for (i = 0; i < profile->ucElbVDDC_Num; i++) {
1577 				if (vddc_id_buf[i] == virtual_voltage_id) {
1578 					for (j = 0; j < profile->ucLeakageBinNum; j++) {
1579 						if (efuse_voltage_id <= leakage_bin[j]) {
1580 							*vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
1581 							break;
1582 						}
1583 					}
1584 					break;
1585 				}
1586 			}
1587 		}
1588 
1589 		vddci_id_buf = (u16 *)((char *)profile + profile->usElbVDDCI_IdArrayOffset);
1590 		vddci_buf   = (u16 *)((char *)profile + profile->usElbVDDCI_LevelArrayOffset);
1591 		if (profile->ucElbVDDCI_Num > 0) {
1592 			for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
1593 				if (vddci_id_buf[i] == virtual_voltage_id) {
1594 					for (j = 0; j < profile->ucLeakageBinNum; j++) {
1595 						if (efuse_voltage_id <= leakage_bin[j]) {
1596 							*vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
1597 							break;
1598 						}
1599 					}
1600 					break;
1601 				}
1602 			}
1603 		}
1604 	}
1605 
1606 	return 0;
1607 }
1608 
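/*
 * Read the VDDC limits from the ASIC profiling table.  Chips without a
 * recognised table layout report 0 for both limits.
 */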
1609 void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc,
1610 							uint32_t *min_vddc)
1611 {
1612 	void *profile;
1613 
1614 	profile = smu_atom_get_data_table(hwmgr->adev,
1615 					GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
1616 					NULL, NULL, NULL);
1617 
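	/*
	 * The raw limits use a revision-specific scale; the divisors below
	 * normalise them to a common unit.
	 */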
1618 	if (profile) {
1619 		switch (hwmgr->chip_id) {
1620 		case CHIP_TONGA:
1621 		case CHIP_FIJI:
1622 			*max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc) / 4;
1623 			*min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc) / 4;
1624 			return;
1625 		case CHIP_POLARIS11:
1626 		case CHIP_POLARIS10:
1627 		case CHIP_POLARIS12:
1628 			*max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc) / 100;
1629 			*min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc) / 100;
1630 			return;
1631 		default:
1632 			break;
1633 		}
1634 	}
1635 	*max_vddc = 0;
1636 	*min_vddc = 0;
1637 }
1638 
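/*
 * Fetch the EDC/DIDT hi/lo leakage threshold and the offsets of the DPM7
 * leakage sub-tables from the GFX_Info data table.
 */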
1639 int atomctrl_get_edc_hilo_leakage_offset_table(struct pp_hwmgr *hwmgr,
1640 					       AtomCtrl_HiLoLeakageOffsetTable *table)
1641 {
1642 	ATOM_GFX_INFO_V2_3 *gfxinfo = smu_atom_get_data_table(hwmgr->adev,
1643 					GetIndexIntoMasterTable(DATA, GFX_Info),
1644 					NULL, NULL, NULL);
1645 	if (!gfxinfo)
1646 		return -ENOENT;
1647 
1648 	table->usHiLoLeakageThreshold = gfxinfo->usHiLoLeakageThreshold;
1649 	table->usEdcDidtLoDpm7TableOffset = gfxinfo->usEdcDidtLoDpm7TableOffset;
1650 	table->usEdcDidtHiDpm7TableOffset = gfxinfo->usEdcDidtHiDpm7TableOffset;
1651 
1652 	return 0;
1653 }
1654 
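/*
 * Return a pointer to the EDC leakage sub-table located at the given byte
 * offset inside the GFX_Info data table, or NULL if the table is missing.
 */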
1655 static AtomCtrl_EDCLeakgeTable *get_edc_leakage_table(struct pp_hwmgr *hwmgr,
1656 						      uint16_t offset)
1657 {
1658 	void *table_address;
1659 	char *temp;
1660 
1661 	table_address = smu_atom_get_data_table(hwmgr->adev,
1662 			GetIndexIntoMasterTable(DATA, GFX_Info),
1663 			NULL, NULL, NULL);
1664 	if (!table_address)
1665 		return NULL;
1666 
1667 	temp = (char *)table_address;
1668 	temp += offset;
1669 
1670 	return (AtomCtrl_EDCLeakgeTable *)temp;
1671 }
1672 
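/*
 * Copy the DIDT register values of the EDC leakage sub-table at @offset into
 * the caller-provided table.
 */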
1673 int atomctrl_get_edc_leakage_table(struct pp_hwmgr *hwmgr,
1674 				   AtomCtrl_EDCLeakgeTable *table,
1675 				   uint16_t offset)
1676 {
1677 	uint32_t length, i;
1678 	AtomCtrl_EDCLeakgeTable *leakage_table =
1679 		get_edc_leakage_table(hwmgr, offset);
1680 
1681 	if (!leakage_table)
1682 		return -ENOENT;
1683 
1684 	length = sizeof(leakage_table->DIDT_REG) /
1685 		 sizeof(leakage_table->DIDT_REG[0]);
1686 	for (i = 0; i < length; i++)
1687 		table->DIDT_REG[i] = leakage_table->DIDT_REG[i];
1688 
1689 	return 0;
1690 }
1691