/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "pp_debug.h"
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include "atom.h"
#include "ppatomctrl.h"
#include "atombios.h"
#include "cgs_common.h"
#include "ppevvmath.h"
32
33 #define MEM_ID_MASK 0xff000000
34 #define MEM_ID_SHIFT 24
35 #define CLOCK_RANGE_MASK 0x00ffffff
36 #define CLOCK_RANGE_SHIFT 0
37 #define LOW_NIBBLE_MASK 0xf
38 #define DATA_EQU_PREV 0
39 #define DATA_FROM_TABLE 4
40
41 union voltage_object_info {
42 struct _ATOM_VOLTAGE_OBJECT_INFO v1;
43 struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
44 struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3;
45 };
46
/*
 * atomctrl_retrieve_ac_timing - extract AC timing data for one memory module
 * @index: memory module ID to match
 * @reg_block: VBIOS ATOM_INIT_REG_BLOCK containing the register data blocks
 * @table: MC register table to fill (mclk_max and mc_data per clock range)
 *
 * Walks the register data blocks that follow the register index table.
 * Each block begins with a 32-bit word whose top byte is the memory module
 * ID and whose low 24 bits are the maximum memory clock for that range.
 *
 * Return: 0 on success, -1 if the data is not terminated by
 * END_OF_REG_DATA_BLOCK within VBIOS_MAX_AC_TIMING_ENTRIES ranges.
 */
static int atomctrl_retrieve_ac_timing(
		uint8_t index,
		ATOM_INIT_REG_BLOCK *reg_block,
		pp_atomctrl_mc_reg_table *table)
{
	uint32_t i, j;
	uint8_t tmem_id;
	/* Data blocks start right after the two size words and the index table. */
	ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
		((uint8_t *)reg_block + (2 * sizeof(uint16_t)) + le16_to_cpu(reg_block->usRegIndexTblSize));

	uint8_t num_ranges = 0;

	while (*(uint32_t *)reg_data != END_OF_REG_DATA_BLOCK &&
			num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES) {
		tmem_id = (uint8_t)((*(uint32_t *)reg_data & MEM_ID_MASK) >> MEM_ID_SHIFT);

		if (index == tmem_id) {
			table->mc_reg_table_entry[num_ranges].mclk_max =
				(uint32_t)((*(uint32_t *)reg_data & CLOCK_RANGE_MASK) >>
						CLOCK_RANGE_SHIFT);

			/* j indexes the data words following the header word. */
			for (i = 0, j = 1; i < table->last; i++) {
				if ((table->mc_reg_address[i].uc_pre_reg_data &
							LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
					/* value supplied by the data block */
					table->mc_reg_table_entry[num_ranges].mc_data[i] =
						(uint32_t)*((uint32_t *)reg_data + j);
					j++;
				} else if ((table->mc_reg_address[i].uc_pre_reg_data &
							LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
					/* repeat the previous register's value */
					if (i)
						table->mc_reg_table_entry[num_ranges].mc_data[i] =
							table->mc_reg_table_entry[num_ranges].mc_data[i-1];
				}
			}
			num_ranges++;
		}

		reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
			((uint8_t *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize));
	}

	PP_ASSERT_WITH_CODE((*(uint32_t *)reg_data == END_OF_REG_DATA_BLOCK),
			"Invalid VramInfo table.", return -1);
	table->num_entries = num_ranges;

	return 0;
}
94
95 /**
96 * atomctrl_set_mc_reg_address_table - Get memory clock AC timing registers index from VBIOS table
97 * VBIOS set end of memory clock AC timing registers by ucPreRegDataLength bit6 = 1
98 * @reg_block: the address ATOM_INIT_REG_BLOCK
99 * @table: the address of MCRegTable
100 * Return: 0
101 */
atomctrl_set_mc_reg_address_table(ATOM_INIT_REG_BLOCK * reg_block,pp_atomctrl_mc_reg_table * table)102 static int atomctrl_set_mc_reg_address_table(
103 ATOM_INIT_REG_BLOCK *reg_block,
104 pp_atomctrl_mc_reg_table *table)
105 {
106 uint8_t i = 0;
107 uint8_t num_entries = (uint8_t)((le16_to_cpu(reg_block->usRegIndexTblSize))
108 / sizeof(ATOM_INIT_REG_INDEX_FORMAT));
109 ATOM_INIT_REG_INDEX_FORMAT *format = ®_block->asRegIndexBuf[0];
110
111 num_entries--; /* subtract 1 data end mark entry */
112
113 PP_ASSERT_WITH_CODE((num_entries <= VBIOS_MC_REGISTER_ARRAY_SIZE),
114 "Invalid VramInfo table.", return -1);
115
116 /* ucPreRegDataLength bit6 = 1 is the end of memory clock AC timing registers */
117 while ((!(format->ucPreRegDataLength & ACCESS_PLACEHOLDER)) &&
118 (i < num_entries)) {
119 table->mc_reg_address[i].s1 =
120 (uint16_t)(le16_to_cpu(format->usRegIndex));
121 table->mc_reg_address[i].uc_pre_reg_data =
122 format->ucPreRegDataLength;
123
124 i++;
125 format = (ATOM_INIT_REG_INDEX_FORMAT *)
126 ((uint8_t *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
127 }
128
129 table->last = i;
130 return 0;
131 }
132
/*
 * atomctrl_initialize_mc_reg_table - build the MC register table for a module
 * @hwmgr: hardware manager, provides the amdgpu device for VBIOS access
 * @module_index: VRAM module whose AC timing data should be extracted
 * @table: output pp_atomctrl_mc_reg_table
 *
 * Return: 0 on success, -1 on a missing/invalid VramInfo table, or the
 * error from the helper routines.
 */
int atomctrl_initialize_mc_reg_table(
		struct pp_hwmgr *hwmgr,
		uint8_t module_index,
		pp_atomctrl_mc_reg_table *table)
{
	ATOM_VRAM_INFO_HEADER_V2_1 *vram_info;
	ATOM_INIT_REG_BLOCK *reg_block;
	int result = 0;
	u8 frev, crev;
	u16 size;

	vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *)
		smu_atom_get_data_table(hwmgr->adev,
			GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);

	/* smu_atom_get_data_table() can fail; don't dereference NULL */
	if (!vram_info) {
		pr_err("Could not retrieve the VramInfo table!");
		result = -1;
	} else if (module_index >= vram_info->ucNumOfVRAMModule) {
		pr_err("Invalid VramInfo table.");
		result = -1;
	} else if (vram_info->sHeader.ucTableFormatRevision < 2) {
		pr_err("Invalid VramInfo table.");
		result = -1;
	}

	if (0 == result) {
		reg_block = (ATOM_INIT_REG_BLOCK *)
			((uint8_t *)vram_info + le16_to_cpu(vram_info->usMemClkPatchTblOffset));
		result = atomctrl_set_mc_reg_address_table(reg_block, table);
	}

	if (0 == result) {
		result = atomctrl_retrieve_ac_timing(module_index,
					reg_block, table);
	}

	return result;
}
169
/*
 * atomctrl_initialize_mc_reg_table_v2_2 - v2.2 variant of
 * atomctrl_initialize_mc_reg_table(), using the V2_2 VramInfo header.
 *
 * Return: 0 on success, -1 on a missing/invalid VramInfo table, or the
 * error from the helper routines.
 */
int atomctrl_initialize_mc_reg_table_v2_2(
		struct pp_hwmgr *hwmgr,
		uint8_t module_index,
		pp_atomctrl_mc_reg_table *table)
{
	ATOM_VRAM_INFO_HEADER_V2_2 *vram_info;
	ATOM_INIT_REG_BLOCK *reg_block;
	int result = 0;
	u8 frev, crev;
	u16 size;

	vram_info = (ATOM_VRAM_INFO_HEADER_V2_2 *)
		smu_atom_get_data_table(hwmgr->adev,
			GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);

	/* smu_atom_get_data_table() can fail; don't dereference NULL */
	if (!vram_info) {
		pr_err("Could not retrieve the VramInfo table!");
		result = -1;
	} else if (module_index >= vram_info->ucNumOfVRAMModule) {
		pr_err("Invalid VramInfo table.");
		result = -1;
	} else if (vram_info->sHeader.ucTableFormatRevision < 2) {
		pr_err("Invalid VramInfo table.");
		result = -1;
	}

	if (0 == result) {
		reg_block = (ATOM_INIT_REG_BLOCK *)
			((uint8_t *)vram_info + le16_to_cpu(vram_info->usMemClkPatchTblOffset));
		result = atomctrl_set_mc_reg_address_table(reg_block, table);
	}

	if (0 == result) {
		result = atomctrl_retrieve_ac_timing(module_index,
					reg_block, table);
	}

	return result;
}
206
207 /*
208 * Set DRAM timings based on engine clock and memory clock.
209 */
atomctrl_set_engine_dram_timings_rv770(struct pp_hwmgr * hwmgr,uint32_t engine_clock,uint32_t memory_clock)210 int atomctrl_set_engine_dram_timings_rv770(
211 struct pp_hwmgr *hwmgr,
212 uint32_t engine_clock,
213 uint32_t memory_clock)
214 {
215 struct amdgpu_device *adev = hwmgr->adev;
216
217 SET_ENGINE_CLOCK_PS_ALLOCATION engine_clock_parameters;
218
219 /* They are both in 10KHz Units. */
220 engine_clock_parameters.ulTargetEngineClock =
221 cpu_to_le32((engine_clock & SET_CLOCK_FREQ_MASK) |
222 ((COMPUTE_ENGINE_PLL_PARAM << 24)));
223
224 /* in 10 khz units.*/
225 engine_clock_parameters.sReserved.ulClock =
226 cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK);
227
228 return amdgpu_atom_execute_table(adev->mode_info.atom_context,
229 GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
230 (uint32_t *)&engine_clock_parameters);
231 }
232
233 /*
234 * Private Function to get the PowerPlay Table Address.
235 * WARNING: The tabled returned by this function is in
236 * dynamically allocated memory.
237 * The caller has to release if by calling kfree.
238 */
get_voltage_info_table(void * device)239 static ATOM_VOLTAGE_OBJECT_INFO *get_voltage_info_table(void *device)
240 {
241 int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
242 u8 frev, crev;
243 u16 size;
244 union voltage_object_info *voltage_info;
245
246 voltage_info = (union voltage_object_info *)
247 smu_atom_get_data_table(device, index,
248 &size, &frev, &crev);
249
250 if (voltage_info != NULL)
251 return (ATOM_VOLTAGE_OBJECT_INFO *) &(voltage_info->v3);
252 else
253 return NULL;
254 }
255
atomctrl_lookup_voltage_type_v3(const ATOM_VOLTAGE_OBJECT_INFO_V3_1 * voltage_object_info_table,uint8_t voltage_type,uint8_t voltage_mode)256 static const ATOM_VOLTAGE_OBJECT_V3 *atomctrl_lookup_voltage_type_v3(
257 const ATOM_VOLTAGE_OBJECT_INFO_V3_1 * voltage_object_info_table,
258 uint8_t voltage_type, uint8_t voltage_mode)
259 {
260 unsigned int size = le16_to_cpu(voltage_object_info_table->sHeader.usStructureSize);
261 unsigned int offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
262 uint8_t *start = (uint8_t *)voltage_object_info_table;
263
264 while (offset < size) {
265 const ATOM_VOLTAGE_OBJECT_V3 *voltage_object =
266 (const ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);
267
268 if (voltage_type == voltage_object->asGpioVoltageObj.sHeader.ucVoltageType &&
269 voltage_mode == voltage_object->asGpioVoltageObj.sHeader.ucVoltageMode)
270 return voltage_object;
271
272 offset += le16_to_cpu(voltage_object->asGpioVoltageObj.sHeader.usSize);
273 }
274
275 return NULL;
276 }
277
278 /**
279 * atomctrl_get_memory_pll_dividers_si
280 *
281 * @hwmgr: input parameter: pointer to HwMgr
282 * @clock_value: input parameter: memory clock
283 * @mpll_param: output parameter: memory clock parameters
284 * @strobe_mode: input parameter: 1 for strobe mode, 0 for performance mode
285 */
atomctrl_get_memory_pll_dividers_si(struct pp_hwmgr * hwmgr,uint32_t clock_value,pp_atomctrl_memory_clock_param * mpll_param,bool strobe_mode)286 int atomctrl_get_memory_pll_dividers_si(
287 struct pp_hwmgr *hwmgr,
288 uint32_t clock_value,
289 pp_atomctrl_memory_clock_param *mpll_param,
290 bool strobe_mode)
291 {
292 struct amdgpu_device *adev = hwmgr->adev;
293 COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters;
294 int result;
295
296 mpll_parameters.ulClock = cpu_to_le32(clock_value);
297 mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0);
298
299 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
300 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
301 (uint32_t *)&mpll_parameters);
302
303 if (0 == result) {
304 mpll_param->mpll_fb_divider.clk_frac =
305 le16_to_cpu(mpll_parameters.ulFbDiv.usFbDivFrac);
306 mpll_param->mpll_fb_divider.cl_kf =
307 le16_to_cpu(mpll_parameters.ulFbDiv.usFbDiv);
308 mpll_param->mpll_post_divider =
309 (uint32_t)mpll_parameters.ucPostDiv;
310 mpll_param->vco_mode =
311 (uint32_t)(mpll_parameters.ucPllCntlFlag &
312 MPLL_CNTL_FLAG_VCO_MODE_MASK);
313 mpll_param->yclk_sel =
314 (uint32_t)((mpll_parameters.ucPllCntlFlag &
315 MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0);
316 mpll_param->qdr =
317 (uint32_t)((mpll_parameters.ucPllCntlFlag &
318 MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0);
319 mpll_param->half_rate =
320 (uint32_t)((mpll_parameters.ucPllCntlFlag &
321 MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0);
322 mpll_param->dll_speed =
323 (uint32_t)(mpll_parameters.ucDllSpeed);
324 mpll_param->bw_ctrl =
325 (uint32_t)(mpll_parameters.ucBWCntl);
326 }
327
328 return result;
329 }
330
331 /**
332 * atomctrl_get_memory_pll_dividers_vi
333 *
334 * @hwmgr: input parameter: pointer to HwMgr
335 * @clock_value: input parameter: memory clock
336 * @mpll_param: output parameter: memory clock parameters
337 */
atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr * hwmgr,uint32_t clock_value,pp_atomctrl_memory_clock_param * mpll_param)338 int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
339 uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param)
340 {
341 struct amdgpu_device *adev = hwmgr->adev;
342 COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters;
343 int result;
344
345 mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
346
347 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
348 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
349 (uint32_t *)&mpll_parameters);
350
351 if (!result)
352 mpll_param->mpll_post_divider =
353 (uint32_t)mpll_parameters.ulClock.ucPostDiv;
354
355 return result;
356 }
357
/*
 * atomctrl_get_memory_pll_dividers_ai - compute MPLL fractional dividers
 * @hwmgr: pointer to HwMgr
 * @clock_value: target memory clock
 * @mpll_param: output fractional clock word (fcw) parameters
 *
 * Return: 0 on success, otherwise the atom table execution result.
 */
int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr,
					uint32_t clock_value,
					pp_atomctrl_memory_clock_param_ai *mpll_param)
{
	struct amdgpu_device *adev = hwmgr->adev;
	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 mpll_parameters = {{0}, 0, 0};
	int result;

	mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
			(uint32_t *)&mpll_parameters);

	/* VEGAM's mpll takes sometime to finish computing */
	udelay(10);

	if (!result) {
		mpll_param->ulMclk_fcw_int =
			le16_to_cpu(mpll_parameters.usMclk_fcw_int);
		mpll_param->ulMclk_fcw_frac =
			le16_to_cpu(mpll_parameters.usMclk_fcw_frac);
		mpll_param->ulClock =
			le32_to_cpu(mpll_parameters.ulClock.ulClock);
		mpll_param->ulPostDiv = mpll_parameters.ulClock.ucPostDiv;
	}

	return result;
}
387
/*
 * atomctrl_get_engine_pll_dividers_kong - compute engine PLL dividers
 * @hwmgr: pointer to HwMgr
 * @clock_value: target engine clock
 * @dividers: output post divider and achieved ("real") clock
 *
 * Return: 0 on success, otherwise the atom table execution result.
 */
int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
					  uint32_t clock_value,
					  pp_atomctrl_clock_dividers_kong *dividers)
{
	struct amdgpu_device *adev = hwmgr->adev;
	COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters;
	int result;

	pll_parameters.ulClock = cpu_to_le32(clock_value);

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
			(uint32_t *)&pll_parameters);

	if (0 == result) {
		dividers->pll_post_divider = pll_parameters.ucPostDiv;
		dividers->real_clock = le32_to_cpu(pll_parameters.ulClock);
	}

	return result;
}
409
/*
 * atomctrl_get_engine_pll_dividers_vi - compute engine PLL dividers (VI)
 * @hwmgr: pointer to HwMgr
 * @clock_value: target engine clock
 * @dividers: output dividers, feedback divider and control flags
 *
 * Return: 0 on success, otherwise the atom table execution result.
 */
int atomctrl_get_engine_pll_dividers_vi(
		struct pp_hwmgr *hwmgr,
		uint32_t clock_value,
		pp_atomctrl_clock_dividers_vi *dividers)
{
	struct amdgpu_device *adev = hwmgr->adev;
	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_parameters;
	int result;

	pll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
	pll_parameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
			(uint32_t *)&pll_parameters);

	if (0 == result) {
		dividers->pll_post_divider =
			pll_parameters.ulClock.ucPostDiv;
		dividers->real_clock =
			le32_to_cpu(pll_parameters.ulClock.ulClock);

		dividers->ul_fb_div.ul_fb_div_frac =
			le16_to_cpu(pll_parameters.ulFbDiv.usFbDivFrac);
		dividers->ul_fb_div.ul_fb_div =
			le16_to_cpu(pll_parameters.ulFbDiv.usFbDiv);

		dividers->uc_pll_ref_div =
			pll_parameters.ucPllRefDiv;
		dividers->uc_pll_post_div =
			pll_parameters.ucPllPostDiv;
		dividers->uc_pll_cntl_flag =
			pll_parameters.ucPllCntlFlag;
	}

	return result;
}
447
/*
 * atomctrl_get_engine_pll_dividers_ai - compute engine PLL dividers (AI)
 * @hwmgr: pointer to HwMgr
 * @clock_value: target engine clock
 * @dividers: output fractional clock words and spread-spectrum settings
 *
 * Return: 0 on success, otherwise the atom table execution result.
 */
int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
					uint32_t clock_value,
					pp_atomctrl_clock_dividers_ai *dividers)
{
	struct amdgpu_device *adev = hwmgr->adev;
	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_parameters;
	int result;

	pll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
	pll_parameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
			(uint32_t *)&pll_parameters);

	if (0 == result) {
		dividers->usSclk_fcw_frac = le16_to_cpu(pll_parameters.usSclk_fcw_frac);
		dividers->usSclk_fcw_int = le16_to_cpu(pll_parameters.usSclk_fcw_int);
		dividers->ucSclkPostDiv = pll_parameters.ucSclkPostDiv;
		dividers->ucSclkVcoMode = pll_parameters.ucSclkVcoMode;
		dividers->ucSclkPllRange = pll_parameters.ucSclkPllRange;
		dividers->ucSscEnable = pll_parameters.ucSscEnable;
		dividers->usSsc_fcw1_frac = le16_to_cpu(pll_parameters.usSsc_fcw1_frac);
		dividers->usSsc_fcw1_int = le16_to_cpu(pll_parameters.usSsc_fcw1_int);
		dividers->usPcc_fcw_int = le16_to_cpu(pll_parameters.usPcc_fcw_int);
		dividers->usSsc_fcw_slew_frac = le16_to_cpu(pll_parameters.usSsc_fcw_slew_frac);
		dividers->usPcc_fcw_slew_frac = le16_to_cpu(pll_parameters.usPcc_fcw_slew_frac);
	}

	return result;
}
478
/*
 * atomctrl_get_dfs_pll_dividers_vi - compute DFS PLL dividers (VI)
 *
 * Same table call as atomctrl_get_engine_pll_dividers_vi(), but with the
 * DEFAULT_GPUCLK input flag instead of SCLK.
 *
 * Return: 0 on success, otherwise the atom table execution result.
 */
int atomctrl_get_dfs_pll_dividers_vi(
		struct pp_hwmgr *hwmgr,
		uint32_t clock_value,
		pp_atomctrl_clock_dividers_vi *dividers)
{
	struct amdgpu_device *adev = hwmgr->adev;
	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_parameters;
	int result;

	pll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
	pll_parameters.ulClock.ucPostDiv =
		COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK;

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
			(uint32_t *)&pll_parameters);

	if (0 == result) {
		dividers->pll_post_divider =
			pll_parameters.ulClock.ucPostDiv;
		dividers->real_clock =
			le32_to_cpu(pll_parameters.ulClock.ulClock);

		dividers->ul_fb_div.ul_fb_div_frac =
			le16_to_cpu(pll_parameters.ulFbDiv.usFbDivFrac);
		dividers->ul_fb_div.ul_fb_div =
			le16_to_cpu(pll_parameters.ulFbDiv.usFbDiv);

		dividers->uc_pll_ref_div =
			pll_parameters.ucPllRefDiv;
		dividers->uc_pll_post_div =
			pll_parameters.ucPllPostDiv;
		dividers->uc_pll_cntl_flag =
			pll_parameters.ucPllCntlFlag;
	}

	return result;
}
517
518 /*
519 * Get the reference clock in 10KHz
520 */
atomctrl_get_reference_clock(struct pp_hwmgr * hwmgr)521 uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr)
522 {
523 ATOM_FIRMWARE_INFO *fw_info;
524 u8 frev, crev;
525 u16 size;
526 uint32_t clock;
527
528 fw_info = (ATOM_FIRMWARE_INFO *)
529 smu_atom_get_data_table(hwmgr->adev,
530 GetIndexIntoMasterTable(DATA, FirmwareInfo),
531 &size, &frev, &crev);
532
533 if (fw_info == NULL)
534 clock = 2700;
535 else
536 clock = (uint32_t)(le16_to_cpu(fw_info->usReferenceClock));
537
538 return clock;
539 }
540
541 /*
542 * Returns true if the given voltage type is controlled by GPIO pins.
543 * voltage_type is one of SET_VOLTAGE_TYPE_ASIC_VDDC,
544 * SET_VOLTAGE_TYPE_ASIC_MVDDC, SET_VOLTAGE_TYPE_ASIC_MVDDQ.
545 * voltage_mode is one of ATOM_SET_VOLTAGE, ATOM_SET_VOLTAGE_PHASE
546 */
atomctrl_is_voltage_controlled_by_gpio_v3(struct pp_hwmgr * hwmgr,uint8_t voltage_type,uint8_t voltage_mode)547 bool atomctrl_is_voltage_controlled_by_gpio_v3(
548 struct pp_hwmgr *hwmgr,
549 uint8_t voltage_type,
550 uint8_t voltage_mode)
551 {
552 ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
553 (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
554 bool ret;
555
556 PP_ASSERT_WITH_CODE((NULL != voltage_info),
557 "Could not find Voltage Table in BIOS.", return false;);
558
559 ret = (NULL != atomctrl_lookup_voltage_type_v3
560 (voltage_info, voltage_type, voltage_mode)) ? true : false;
561
562 return ret;
563 }
564
/*
 * atomctrl_get_voltage_table_v3 - fill a voltage table from the VBIOS
 * @hwmgr: pointer to HwMgr
 * @voltage_type: SET_VOLTAGE_TYPE_ASIC_* selector
 * @voltage_mode: ATOM_SET_VOLTAGE or ATOM_SET_VOLTAGE_PHASE
 * @voltage_table: output table of GPIO voltage entries
 *
 * Return: 0 on success, -1 if the voltage object cannot be found or the
 * VBIOS entry count exceeds PP_ATOMCTRL_MAX_VOLTAGE_ENTRIES.
 */
int atomctrl_get_voltage_table_v3(
		struct pp_hwmgr *hwmgr,
		uint8_t voltage_type,
		uint8_t voltage_mode,
		pp_atomctrl_voltage_table *voltage_table)
{
	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
	const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;
	unsigned int i;

	PP_ASSERT_WITH_CODE((NULL != voltage_info),
			"Could not find Voltage Table in BIOS.", return -1;);

	voltage_object = atomctrl_lookup_voltage_type_v3
		(voltage_info, voltage_type, voltage_mode);

	if (voltage_object == NULL)
		return -1;

	/* bound the copy below by the destination array size */
	PP_ASSERT_WITH_CODE(
			(voltage_object->asGpioVoltageObj.ucGpioEntryNum <=
			PP_ATOMCTRL_MAX_VOLTAGE_ENTRIES),
			"Too many voltage entries!",
			return -1;
			);

	for (i = 0; i < voltage_object->asGpioVoltageObj.ucGpioEntryNum; i++) {
		voltage_table->entries[i].value =
			le16_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue);
		voltage_table->entries[i].smio_low =
			le32_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId);
	}

	voltage_table->mask_low =
		le32_to_cpu(voltage_object->asGpioVoltageObj.ulGpioMaskVal);
	voltage_table->count =
		voltage_object->asGpioVoltageObj.ucGpioEntryNum;
	voltage_table->phase_delay =
		voltage_object->asGpioVoltageObj.ucPhaseDelay;

	return 0;
}
608
/*
 * atomctrl_lookup_gpio_pin - find a pin assignment by GPIO ID
 * @gpio_lookup_table: VBIOS GPIO pin lookup table
 * @pinId: ucGPIO_ID to search for
 * @gpio_pin_assignment: output bit shift and register index of the pin
 *
 * Return: true and fill @gpio_pin_assignment if the pin is found,
 * false otherwise.
 */
static bool atomctrl_lookup_gpio_pin(
		ATOM_GPIO_PIN_LUT * gpio_lookup_table,
		const uint32_t pinId,
		pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment)
{
	unsigned int size = le16_to_cpu(gpio_lookup_table->sHeader.usStructureSize);
	unsigned int offset = offsetof(ATOM_GPIO_PIN_LUT, asGPIO_Pin[0]);
	uint8_t *start = (uint8_t *)gpio_lookup_table;

	while (offset < size) {
		const ATOM_GPIO_PIN_ASSIGNMENT *pin_assignment =
			(const ATOM_GPIO_PIN_ASSIGNMENT *)(start + offset);

		if (pinId == pin_assignment->ucGPIO_ID) {
			gpio_pin_assignment->uc_gpio_pin_bit_shift =
				pin_assignment->ucGpioPinBitShift;
			gpio_pin_assignment->us_gpio_pin_aindex =
				le16_to_cpu(pin_assignment->usGpioPin_AIndex);
			return true;
		}

		/* step by one entry; assumes ucGPIO_ID is the final byte of
		 * ATOM_GPIO_PIN_ASSIGNMENT -- confirm against atombios.h
		 */
		offset += offsetof(ATOM_GPIO_PIN_ASSIGNMENT, ucGPIO_ID) + 1;
	}

	return false;
}
635
636 /*
637 * Private Function to get the PowerPlay Table Address.
638 * WARNING: The tabled returned by this function is in
639 * dynamically allocated memory.
640 * The caller has to release if by calling kfree.
641 */
get_gpio_lookup_table(void * device)642 static ATOM_GPIO_PIN_LUT *get_gpio_lookup_table(void *device)
643 {
644 u8 frev, crev;
645 u16 size;
646 void *table_address;
647
648 table_address = (ATOM_GPIO_PIN_LUT *)
649 smu_atom_get_data_table(device,
650 GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT),
651 &size, &frev, &crev);
652
653 PP_ASSERT_WITH_CODE((NULL != table_address),
654 "Error retrieving BIOS Table Address!", return NULL;);
655
656 return (ATOM_GPIO_PIN_LUT *)table_address;
657 }
658
659 /*
660 * Returns 1 if the given pin id find in lookup table.
661 */
atomctrl_get_pp_assign_pin(struct pp_hwmgr * hwmgr,const uint32_t pinId,pp_atomctrl_gpio_pin_assignment * gpio_pin_assignment)662 bool atomctrl_get_pp_assign_pin(
663 struct pp_hwmgr *hwmgr,
664 const uint32_t pinId,
665 pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment)
666 {
667 bool bRet = false;
668 ATOM_GPIO_PIN_LUT *gpio_lookup_table =
669 get_gpio_lookup_table(hwmgr->adev);
670
671 PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table),
672 "Could not find GPIO lookup Table in BIOS.", return false);
673
674 bRet = atomctrl_lookup_gpio_pin(gpio_lookup_table, pinId,
675 gpio_pin_assignment);
676
677 return bRet;
678 }
679
atomctrl_calculate_voltage_evv_on_sclk(struct pp_hwmgr * hwmgr,uint8_t voltage_type,uint32_t sclk,uint16_t virtual_voltage_Id,uint16_t * voltage,uint16_t dpm_level,bool debug)680 int atomctrl_calculate_voltage_evv_on_sclk(
681 struct pp_hwmgr *hwmgr,
682 uint8_t voltage_type,
683 uint32_t sclk,
684 uint16_t virtual_voltage_Id,
685 uint16_t *voltage,
686 uint16_t dpm_level,
687 bool debug)
688 {
689 ATOM_ASIC_PROFILING_INFO_V3_4 *getASICProfilingInfo;
690 struct amdgpu_device *adev = hwmgr->adev;
691 EFUSE_LINEAR_FUNC_PARAM sRO_fuse;
692 EFUSE_LINEAR_FUNC_PARAM sCACm_fuse;
693 EFUSE_LINEAR_FUNC_PARAM sCACb_fuse;
694 EFUSE_LOGISTIC_FUNC_PARAM sKt_Beta_fuse;
695 EFUSE_LOGISTIC_FUNC_PARAM sKv_m_fuse;
696 EFUSE_LOGISTIC_FUNC_PARAM sKv_b_fuse;
697 EFUSE_INPUT_PARAMETER sInput_FuseValues;
698 READ_EFUSE_VALUE_PARAMETER sOutput_FuseValues;
699
700 uint32_t ul_RO_fused, ul_CACb_fused, ul_CACm_fused, ul_Kt_Beta_fused, ul_Kv_m_fused, ul_Kv_b_fused;
701 fInt fSM_A0, fSM_A1, fSM_A2, fSM_A3, fSM_A4, fSM_A5, fSM_A6, fSM_A7;
702 fInt fMargin_RO_a, fMargin_RO_b, fMargin_RO_c, fMargin_fixed, fMargin_FMAX_mean, fMargin_Plat_mean, fMargin_FMAX_sigma, fMargin_Plat_sigma, fMargin_DC_sigma;
703 fInt fLkg_FT, repeat;
704 fInt fMicro_FMAX, fMicro_CR, fSigma_FMAX, fSigma_CR, fSigma_DC, fDC_SCLK, fSquared_Sigma_DC, fSquared_Sigma_CR, fSquared_Sigma_FMAX;
705 fInt fRLL_LoadLine, fDerateTDP, fVDDC_base, fA_Term, fC_Term, fB_Term, fRO_DC_margin;
706 fInt fRO_fused, fCACm_fused, fCACb_fused, fKv_m_fused, fKv_b_fused, fKt_Beta_fused, fFT_Lkg_V0NORM;
707 fInt fSclk_margin, fSclk, fEVV_V;
708 fInt fV_min, fV_max, fT_prod, fLKG_Factor, fT_FT, fV_FT, fV_x, fTDP_Power, fTDP_Power_right, fTDP_Power_left, fTDP_Current, fV_NL;
709 uint32_t ul_FT_Lkg_V0NORM;
710 fInt fLn_MaxDivMin, fMin, fAverage, fRange;
711 fInt fRoots[2];
712 fInt fStepSize = GetScaledFraction(625, 100000);
713
714 int result;
715
716 getASICProfilingInfo = (ATOM_ASIC_PROFILING_INFO_V3_4 *)
717 smu_atom_get_data_table(hwmgr->adev,
718 GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
719 NULL, NULL, NULL);
720
721 if (!getASICProfilingInfo)
722 return -1;
723
724 if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 ||
725 (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 &&
726 getASICProfilingInfo->asHeader.ucTableContentRevision < 4))
727 return -1;
728
729 /*-----------------------------------------------------------
730 *GETTING MULTI-STEP PARAMETERS RELATED TO CURRENT DPM LEVEL
731 *-----------------------------------------------------------
732 */
733 fRLL_LoadLine = Divide(getASICProfilingInfo->ulLoadLineSlop, 1000);
734
735 switch (dpm_level) {
736 case 1:
737 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM1), 1000);
738 break;
739 case 2:
740 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM2), 1000);
741 break;
742 case 3:
743 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM3), 1000);
744 break;
745 case 4:
746 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM4), 1000);
747 break;
748 case 5:
749 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM5), 1000);
750 break;
751 case 6:
752 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM6), 1000);
753 break;
754 case 7:
755 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000);
756 break;
757 default:
758 pr_err("DPM Level not supported\n");
759 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000);
760 }
761
762 /*-------------------------
763 * DECODING FUSE VALUES
764 * ------------------------
765 */
766 /*Decode RO_Fused*/
767 sRO_fuse = getASICProfilingInfo->sRoFuse;
768
769 sInput_FuseValues.usEfuseIndex = sRO_fuse.usEfuseIndex;
770 sInput_FuseValues.ucBitShift = sRO_fuse.ucEfuseBitLSB;
771 sInput_FuseValues.ucBitLength = sRO_fuse.ucEfuseLength;
772
773 sOutput_FuseValues.sEfuse = sInput_FuseValues;
774
775 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
776 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
777 (uint32_t *)&sOutput_FuseValues);
778
779 if (result)
780 return result;
781
782 /* Finally, the actual fuse value */
783 ul_RO_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
784 fMin = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseMin), 1);
785 fRange = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseEncodeRange), 1);
786 fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength);
787
788 sCACm_fuse = getASICProfilingInfo->sCACm;
789
790 sInput_FuseValues.usEfuseIndex = sCACm_fuse.usEfuseIndex;
791 sInput_FuseValues.ucBitShift = sCACm_fuse.ucEfuseBitLSB;
792 sInput_FuseValues.ucBitLength = sCACm_fuse.ucEfuseLength;
793
794 sOutput_FuseValues.sEfuse = sInput_FuseValues;
795
796 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
797 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
798 (uint32_t *)&sOutput_FuseValues);
799
800 if (result)
801 return result;
802
803 ul_CACm_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
804 fMin = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseMin), 1000);
805 fRange = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseEncodeRange), 1000);
806
807 fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength);
808
809 sCACb_fuse = getASICProfilingInfo->sCACb;
810
811 sInput_FuseValues.usEfuseIndex = sCACb_fuse.usEfuseIndex;
812 sInput_FuseValues.ucBitShift = sCACb_fuse.ucEfuseBitLSB;
813 sInput_FuseValues.ucBitLength = sCACb_fuse.ucEfuseLength;
814 sOutput_FuseValues.sEfuse = sInput_FuseValues;
815
816 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
817 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
818 (uint32_t *)&sOutput_FuseValues);
819
820 if (result)
821 return result;
822
823 ul_CACb_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
824 fMin = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseMin), 1000);
825 fRange = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseEncodeRange), 1000);
826
827 fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength);
828
829 sKt_Beta_fuse = getASICProfilingInfo->sKt_b;
830
831 sInput_FuseValues.usEfuseIndex = sKt_Beta_fuse.usEfuseIndex;
832 sInput_FuseValues.ucBitShift = sKt_Beta_fuse.ucEfuseBitLSB;
833 sInput_FuseValues.ucBitLength = sKt_Beta_fuse.ucEfuseLength;
834
835 sOutput_FuseValues.sEfuse = sInput_FuseValues;
836
837 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
838 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
839 (uint32_t *)&sOutput_FuseValues);
840
841 if (result)
842 return result;
843
844 ul_Kt_Beta_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
845 fAverage = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeAverage), 1000);
846 fRange = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeRange), 1000);
847
848 fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused,
849 fAverage, fRange, sKt_Beta_fuse.ucEfuseLength);
850
851 sKv_m_fuse = getASICProfilingInfo->sKv_m;
852
853 sInput_FuseValues.usEfuseIndex = sKv_m_fuse.usEfuseIndex;
854 sInput_FuseValues.ucBitShift = sKv_m_fuse.ucEfuseBitLSB;
855 sInput_FuseValues.ucBitLength = sKv_m_fuse.ucEfuseLength;
856
857 sOutput_FuseValues.sEfuse = sInput_FuseValues;
858
859 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
860 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
861 (uint32_t *)&sOutput_FuseValues);
862 if (result)
863 return result;
864
865 ul_Kv_m_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
866 fAverage = GetScaledFraction(le32_to_cpu(sKv_m_fuse.ulEfuseEncodeAverage), 1000);
867 fRange = GetScaledFraction((le32_to_cpu(sKv_m_fuse.ulEfuseEncodeRange) & 0x7fffffff), 1000);
868 fRange = fMultiply(fRange, ConvertToFraction(-1));
869
870 fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused,
871 fAverage, fRange, sKv_m_fuse.ucEfuseLength);
872
873 sKv_b_fuse = getASICProfilingInfo->sKv_b;
874
875 sInput_FuseValues.usEfuseIndex = sKv_b_fuse.usEfuseIndex;
876 sInput_FuseValues.ucBitShift = sKv_b_fuse.ucEfuseBitLSB;
877 sInput_FuseValues.ucBitLength = sKv_b_fuse.ucEfuseLength;
878 sOutput_FuseValues.sEfuse = sInput_FuseValues;
879
880 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
881 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
882 (uint32_t *)&sOutput_FuseValues);
883
884 if (result)
885 return result;
886
887 ul_Kv_b_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
888 fAverage = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeAverage), 1000);
889 fRange = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeRange), 1000);
890
891 fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused,
892 fAverage, fRange, sKv_b_fuse.ucEfuseLength);
893
894 /* Decoding the Leakage - No special struct container */
895 /*
896 * usLkgEuseIndex=56
897 * ucLkgEfuseBitLSB=6
898 * ucLkgEfuseLength=10
899 * ulLkgEncodeLn_MaxDivMin=69077
900 * ulLkgEncodeMax=1000000
901 * ulLkgEncodeMin=1000
902 * ulEfuseLogisticAlpha=13
903 */
904
905 sInput_FuseValues.usEfuseIndex = getASICProfilingInfo->usLkgEuseIndex;
906 sInput_FuseValues.ucBitShift = getASICProfilingInfo->ucLkgEfuseBitLSB;
907 sInput_FuseValues.ucBitLength = getASICProfilingInfo->ucLkgEfuseLength;
908
909 sOutput_FuseValues.sEfuse = sInput_FuseValues;
910
911 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
912 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
913 (uint32_t *)&sOutput_FuseValues);
914
915 if (result)
916 return result;
917
918 ul_FT_Lkg_V0NORM = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
919 fLn_MaxDivMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin), 10000);
920 fMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeMin), 10000);
921
922 fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM,
923 fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength);
924 fLkg_FT = fFT_Lkg_V0NORM;
925
926 /*-------------------------------------------
927 * PART 2 - Grabbing all required values
928 *-------------------------------------------
929 */
930 fSM_A0 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A0), 1000000),
931 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign)));
932 fSM_A1 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A1), 1000000),
933 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign)));
934 fSM_A2 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A2), 100000),
935 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign)));
936 fSM_A3 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A3), 1000000),
937 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign)));
938 fSM_A4 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A4), 1000000),
939 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign)));
940 fSM_A5 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A5), 1000),
941 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign)));
942 fSM_A6 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A6), 1000),
943 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign)));
944 fSM_A7 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A7), 1000),
945 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign)));
946
947 fMargin_RO_a = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_a));
948 fMargin_RO_b = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_b));
949 fMargin_RO_c = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_c));
950
951 fMargin_fixed = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_fixed));
952
953 fMargin_FMAX_mean = GetScaledFraction(
954 le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_mean), 10000);
955 fMargin_Plat_mean = GetScaledFraction(
956 le32_to_cpu(getASICProfilingInfo->ulMargin_plat_mean), 10000);
957 fMargin_FMAX_sigma = GetScaledFraction(
958 le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_sigma), 10000);
959 fMargin_Plat_sigma = GetScaledFraction(
960 le32_to_cpu(getASICProfilingInfo->ulMargin_plat_sigma), 10000);
961
962 fMargin_DC_sigma = GetScaledFraction(
963 le32_to_cpu(getASICProfilingInfo->ulMargin_DC_sigma), 100);
964 fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000));
965
966 fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100));
967 fCACb_fused = fDivide(fCACb_fused, ConvertToFraction(100));
968 fKt_Beta_fused = fDivide(fKt_Beta_fused, ConvertToFraction(100));
969 fKv_m_fused = fNegate(fDivide(fKv_m_fused, ConvertToFraction(100)));
970 fKv_b_fused = fDivide(fKv_b_fused, ConvertToFraction(10));
971
972 fSclk = GetScaledFraction(sclk, 100);
973
974 fV_max = fDivide(GetScaledFraction(
975 le32_to_cpu(getASICProfilingInfo->ulMaxVddc), 1000), ConvertToFraction(4));
976 fT_prod = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulBoardCoreTemp), 10);
977 fLKG_Factor = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulEvvLkgFactor), 100);
978 fT_FT = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLeakageTemp), 10);
979 fV_FT = fDivide(GetScaledFraction(
980 le32_to_cpu(getASICProfilingInfo->ulLeakageVoltage), 1000), ConvertToFraction(4));
981 fV_min = fDivide(GetScaledFraction(
982 le32_to_cpu(getASICProfilingInfo->ulMinVddc), 1000), ConvertToFraction(4));
983
984 /*-----------------------
985 * PART 3
986 *-----------------------
987 */
988
989 fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5));
990 fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b);
991 fC_Term = fAdd(fMargin_RO_c,
992 fAdd(fMultiply(fSM_A0, fLkg_FT),
993 fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)),
994 fAdd(fMultiply(fSM_A3, fSclk),
995 fSubtract(fSM_A7, fRO_fused)))));
996
997 fVDDC_base = fSubtract(fRO_fused,
998 fSubtract(fMargin_RO_c,
999 fSubtract(fSM_A3, fMultiply(fSM_A1, fSclk))));
1000 fVDDC_base = fDivide(fVDDC_base, fAdd(fMultiply(fSM_A0, fSclk), fSM_A2));
1001
1002 repeat = fSubtract(fVDDC_base,
1003 fDivide(fMargin_DC_sigma, ConvertToFraction(1000)));
1004
1005 fRO_DC_margin = fAdd(fMultiply(fMargin_RO_a,
1006 fGetSquare(repeat)),
1007 fAdd(fMultiply(fMargin_RO_b, repeat),
1008 fMargin_RO_c));
1009
1010 fDC_SCLK = fSubtract(fRO_fused,
1011 fSubtract(fRO_DC_margin,
1012 fSubtract(fSM_A3,
1013 fMultiply(fSM_A2, repeat))));
1014 fDC_SCLK = fDivide(fDC_SCLK, fAdd(fMultiply(fSM_A0, repeat), fSM_A1));
1015
1016 fSigma_DC = fSubtract(fSclk, fDC_SCLK);
1017
1018 fMicro_FMAX = fMultiply(fSclk, fMargin_FMAX_mean);
1019 fMicro_CR = fMultiply(fSclk, fMargin_Plat_mean);
1020 fSigma_FMAX = fMultiply(fSclk, fMargin_FMAX_sigma);
1021 fSigma_CR = fMultiply(fSclk, fMargin_Plat_sigma);
1022
1023 fSquared_Sigma_DC = fGetSquare(fSigma_DC);
1024 fSquared_Sigma_CR = fGetSquare(fSigma_CR);
1025 fSquared_Sigma_FMAX = fGetSquare(fSigma_FMAX);
1026
1027 fSclk_margin = fAdd(fMicro_FMAX,
1028 fAdd(fMicro_CR,
1029 fAdd(fMargin_fixed,
1030 fSqrt(fAdd(fSquared_Sigma_FMAX,
1031 fAdd(fSquared_Sigma_DC, fSquared_Sigma_CR))))));
1032 /*
1033 fA_Term = fSM_A4 * (fSclk + fSclk_margin) + fSM_A5;
1034 fB_Term = fSM_A2 * (fSclk + fSclk_margin) + fSM_A6;
1035 fC_Term = fRO_DC_margin + fSM_A0 * fLkg_FT + fSM_A1 * fLkg_FT * (fSclk + fSclk_margin) + fSM_A3 * (fSclk + fSclk_margin) + fSM_A7 - fRO_fused;
1036 */
1037
1038 fA_Term = fAdd(fMultiply(fSM_A4, fAdd(fSclk, fSclk_margin)), fSM_A5);
1039 fB_Term = fAdd(fMultiply(fSM_A2, fAdd(fSclk, fSclk_margin)), fSM_A6);
1040 fC_Term = fAdd(fRO_DC_margin,
1041 fAdd(fMultiply(fSM_A0, fLkg_FT),
1042 fAdd(fMultiply(fMultiply(fSM_A1, fLkg_FT),
1043 fAdd(fSclk, fSclk_margin)),
1044 fAdd(fMultiply(fSM_A3,
1045 fAdd(fSclk, fSclk_margin)),
1046 fSubtract(fSM_A7, fRO_fused)))));
1047
1048 SolveQuadracticEqn(fA_Term, fB_Term, fC_Term, fRoots);
1049
1050 if (GreaterThan(fRoots[0], fRoots[1]))
1051 fEVV_V = fRoots[1];
1052 else
1053 fEVV_V = fRoots[0];
1054
1055 if (GreaterThan(fV_min, fEVV_V))
1056 fEVV_V = fV_min;
1057 else if (GreaterThan(fEVV_V, fV_max))
1058 fEVV_V = fSubtract(fV_max, fStepSize);
1059
1060 fEVV_V = fRoundUpByStepSize(fEVV_V, fStepSize, 0);
1061
1062 /*-----------------
1063 * PART 4
1064 *-----------------
1065 */
1066
1067 fV_x = fV_min;
1068
1069 while (GreaterThan(fAdd(fV_max, fStepSize), fV_x)) {
1070 fTDP_Power_left = fMultiply(fMultiply(fMultiply(fAdd(
1071 fMultiply(fCACm_fused, fV_x), fCACb_fused), fSclk),
1072 fGetSquare(fV_x)), fDerateTDP);
1073
1074 fTDP_Power_right = fMultiply(fFT_Lkg_V0NORM, fMultiply(fLKG_Factor,
1075 fMultiply(fExponential(fMultiply(fAdd(fMultiply(fKv_m_fused,
1076 fT_prod), fKv_b_fused), fV_x)), fV_x)));
1077 fTDP_Power_right = fMultiply(fTDP_Power_right, fExponential(fMultiply(
1078 fKt_Beta_fused, fT_prod)));
1079 fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply(
1080 fAdd(fMultiply(fKv_m_fused, fT_prod), fKv_b_fused), fV_FT)));
1081 fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply(
1082 fKt_Beta_fused, fT_FT)));
1083
1084 fTDP_Power = fAdd(fTDP_Power_left, fTDP_Power_right);
1085
1086 fTDP_Current = fDivide(fTDP_Power, fV_x);
1087
1088 fV_NL = fAdd(fV_x, fDivide(fMultiply(fTDP_Current, fRLL_LoadLine),
1089 ConvertToFraction(10)));
1090
1091 fV_NL = fRoundUpByStepSize(fV_NL, fStepSize, 0);
1092
1093 if (GreaterThan(fV_max, fV_NL) &&
1094 (GreaterThan(fV_NL, fEVV_V) ||
1095 Equal(fV_NL, fEVV_V))) {
1096 fV_NL = fMultiply(fV_NL, ConvertToFraction(1000));
1097
1098 *voltage = (uint16_t)fV_NL.partial.real;
1099 break;
1100 } else
1101 fV_x = fAdd(fV_x, fStepSize);
1102 }
1103
1104 return result;
1105 }
1106
1107 /**
1108 * atomctrl_get_voltage_evv_on_sclk: gets voltage via call to ATOM COMMAND table.
1109 * @hwmgr: input: pointer to hwManager
1110 * @voltage_type: input: type of EVV voltage VDDC or VDDGFX
 * @sclk: input: in 10kHz unit. DPM state SCLK frequency
 *		which is defined in the PPTable SCLK/VDDC dependency
 *		table associated with this virtual_voltage_Id
1114 * @virtual_voltage_Id: input: voltage id which match per voltage DPM state: 0xff01, 0xff02.. 0xff08
1115 * @voltage: output: real voltage level in unit of mv
1116 */
int atomctrl_get_voltage_evv_on_sclk(
		struct pp_hwmgr *hwmgr,
		uint8_t voltage_type,
		uint32_t sclk, uint16_t virtual_voltage_Id,
		uint16_t *voltage)
{
	struct amdgpu_device *adev = hwmgr->adev;
	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 evv_request;
	int result;

	/* Build the request block for the BIOS GetVoltageInfo command table;
	 * multi-byte fields must be little-endian for the interpreter. */
	evv_request.ucVoltageType = voltage_type;
	evv_request.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
	evv_request.usVoltageLevel = cpu_to_le16(virtual_voltage_Id);
	evv_request.ulSCLKFreq = cpu_to_le32(sclk);

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
			(uint32_t *)&evv_request);

	/* On success the interpreter overwrites the parameter block with the
	 * output layout; on failure report 0 mV. */
	if (result)
		*voltage = 0;
	else
		*voltage = le16_to_cpu(
			((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
			 &evv_request)->usVoltageLevel);

	return result;
}
1146
1147 /**
1148 * atomctrl_get_voltage_evv: gets voltage via call to ATOM COMMAND table.
1149 * @hwmgr: input: pointer to hwManager
1150 * @virtual_voltage_id: input: voltage id which match per voltage DPM state: 0xff01, 0xff02.. 0xff08
1151 * @voltage: output: real voltage level in unit of mv
1152 */
int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
			     uint16_t virtual_voltage_id,
			     uint16_t *voltage)
{
	struct amdgpu_device *adev = hwmgr->adev;
	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
	int result;
	int entry_id;

	/* search for leakage voltage ID 0xff01 ~ 0xff08 and its sclk */
	for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) {
		if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].v == virtual_voltage_id) {
			/* found */
			break;
		}
	}

	if (entry_id >= hwmgr->dyn_state.vddc_dependency_on_sclk->count) {
		pr_debug("Can't find requested voltage id in vddc_dependency_on_sclk table!\n");
		return -EINVAL;
	}

	get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC;
	get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
	/* The BIOS interpreter expects little-endian input, exactly as
	 * atomctrl_get_voltage_evv_on_sclk() does for the same field;
	 * the original stored the CPU-order value here. */
	get_voltage_info_param_space.usVoltageLevel =
		cpu_to_le16(virtual_voltage_id);
	get_voltage_info_param_space.ulSCLKFreq =
		cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk);

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
			(uint32_t *)&get_voltage_info_param_space);

	if (0 != result)
		return result;

	/* Interpreter wrote the output layout over the parameter block. */
	*voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
				(&get_voltage_info_param_space))->usVoltageLevel);

	return result;
}
1193
1194 /*
1195 * Get the mpll reference clock in 10KHz
1196 */
atomctrl_get_mpll_reference_clock(struct pp_hwmgr * hwmgr)1197 uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr)
1198 {
1199 ATOM_COMMON_TABLE_HEADER *fw_info;
1200 uint32_t clock;
1201 u8 frev, crev;
1202 u16 size;
1203
1204 fw_info = (ATOM_COMMON_TABLE_HEADER *)
1205 smu_atom_get_data_table(hwmgr->adev,
1206 GetIndexIntoMasterTable(DATA, FirmwareInfo),
1207 &size, &frev, &crev);
1208
1209 if (fw_info == NULL)
1210 clock = 2700;
1211 else {
1212 if ((fw_info->ucTableFormatRevision == 2) &&
1213 (le16_to_cpu(fw_info->usStructureSize) >= sizeof(ATOM_FIRMWARE_INFO_V2_1))) {
1214 ATOM_FIRMWARE_INFO_V2_1 *fwInfo_2_1 =
1215 (ATOM_FIRMWARE_INFO_V2_1 *)fw_info;
1216 clock = (uint32_t)(le16_to_cpu(fwInfo_2_1->usMemoryReferenceClock));
1217 } else {
1218 ATOM_FIRMWARE_INFO *fwInfo_0_0 =
1219 (ATOM_FIRMWARE_INFO *)fw_info;
1220 clock = (uint32_t)(le16_to_cpu(fwInfo_0_0->usReferenceClock));
1221 }
1222 }
1223
1224 return clock;
1225 }
1226
1227 /*
1228 * Get the asic internal spread spectrum table
1229 */
asic_internal_ss_get_ss_table(void * device)1230 static ATOM_ASIC_INTERNAL_SS_INFO *asic_internal_ss_get_ss_table(void *device)
1231 {
1232 ATOM_ASIC_INTERNAL_SS_INFO *table = NULL;
1233 u8 frev, crev;
1234 u16 size;
1235
1236 table = (ATOM_ASIC_INTERNAL_SS_INFO *)
1237 smu_atom_get_data_table(device,
1238 GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info),
1239 &size, &frev, &crev);
1240
1241 return table;
1242 }
1243
atomctrl_is_asic_internal_ss_supported(struct pp_hwmgr * hwmgr)1244 bool atomctrl_is_asic_internal_ss_supported(struct pp_hwmgr *hwmgr)
1245 {
1246 ATOM_ASIC_INTERNAL_SS_INFO *table =
1247 asic_internal_ss_get_ss_table(hwmgr->adev);
1248
1249 if (table)
1250 return true;
1251 else
1252 return false;
1253 }
1254
1255 /*
1256 * Get the asic internal spread spectrum assignment
1257 */
/*
 * Get the asic internal spread spectrum assignment for one clock source.
 * @clockSource: ASIC_INTERNAL_*_SS clock indication to match.
 * @clockSpeed:  target clock; the first entry whose ulTargetClockRange
 *               covers it wins.
 * @ssEntry:     output, zeroed first; filled only when a match is found.
 * Returns 0 when an entry was found, 1 when not, -1 when the SS table
 * is absent from the VBIOS.
 */
static int asic_internal_ss_get_ss_asignment(struct pp_hwmgr *hwmgr,
		const uint8_t clockSource,
		const uint32_t clockSpeed,
		pp_atomctrl_internal_ss_info *ssEntry)
{
	ATOM_ASIC_INTERNAL_SS_INFO *table;
	ATOM_ASIC_SS_ASSIGNMENT *ssInfo;
	int entry_found = 0;

	memset(ssEntry, 0x00, sizeof(pp_atomctrl_internal_ss_info));

	table = asic_internal_ss_get_ss_table(hwmgr->adev);

	if (NULL == table)
		return -1;

	ssInfo = &table->asSpreadSpectrum[0];

	/* Walk the fixed-size assignment entries packed after the header,
	 * bounded by the structure size recorded in the table header. */
	while (((uint8_t *)ssInfo - (uint8_t *)table) <
		le16_to_cpu(table->sHeader.usStructureSize)) {
		/* First matching clock source whose range covers the speed;
		 * presumably entries are ordered by ascending range — the
		 * scan relies on VBIOS layout (TODO confirm). */
		if ((clockSource == ssInfo->ucClockIndication) &&
			((uint32_t)clockSpeed <= le32_to_cpu(ssInfo->ulTargetClockRange))) {
			entry_found = 1;
			break;
		}

		ssInfo = (ATOM_ASIC_SS_ASSIGNMENT *)((uint8_t *)ssInfo +
				sizeof(ATOM_ASIC_SS_ASSIGNMENT));
	}

	if (entry_found) {
		ssEntry->speed_spectrum_percentage =
			le16_to_cpu(ssInfo->usSpreadSpectrumPercentage);
		ssEntry->speed_spectrum_rate = le16_to_cpu(ssInfo->usSpreadRateInKhz);

		/* Table revisions 2.2+ and 3.x store the rate scaled by 100. */
		if (((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) &&
			(GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) ||
			(GET_DATA_TABLE_MAJOR_REVISION(table) == 3)) {
			ssEntry->speed_spectrum_rate /= 100;
		}

		/* Unknown modes fall back to down-spread. */
		switch (ssInfo->ucSpreadSpectrumMode) {
		case 0:
			ssEntry->speed_spectrum_mode =
				pp_atomctrl_spread_spectrum_mode_down;
			break;
		case 1:
			ssEntry->speed_spectrum_mode =
				pp_atomctrl_spread_spectrum_mode_center;
			break;
		default:
			ssEntry->speed_spectrum_mode =
				pp_atomctrl_spread_spectrum_mode_down;
			break;
		}
	}

	return entry_found ? 0 : 1;
}
1317
1318 /*
1319 * Get the memory clock spread spectrum info
1320 */
/*
 * Get the memory clock spread spectrum info
 */
int atomctrl_get_memory_clock_spread_spectrum(
		struct pp_hwmgr *hwmgr,
		const uint32_t memory_clock,
		pp_atomctrl_internal_ss_info *ssInfo)
{
	/* Memory-clock SS entries live under the internal-memory clock source. */
	return asic_internal_ss_get_ss_asignment(hwmgr, ASIC_INTERNAL_MEMORY_SS,
						 memory_clock, ssInfo);
}
1329
1330 /*
1331 * Get the engine clock spread spectrum info
1332 */
/*
 * Get the engine clock spread spectrum info
 */
int atomctrl_get_engine_clock_spread_spectrum(
		struct pp_hwmgr *hwmgr,
		const uint32_t engine_clock,
		pp_atomctrl_internal_ss_info *ssInfo)
{
	/* Engine-clock SS entries live under the internal-engine clock source. */
	return asic_internal_ss_get_ss_asignment(hwmgr, ASIC_INTERNAL_ENGINE_SS,
						 engine_clock, ssInfo);
}
1341
/* Read the efuse bits [start_index, end_index] via the ReadEfuseValue
 * command table; *efuse receives the masked value (0 on failure). */
int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index,
		uint16_t end_index, uint32_t *efuse)
{
	struct amdgpu_device *adev = hwmgr->adev;
	READ_EFUSE_VALUE_PARAMETER efuse_param;
	uint8_t nbits = (uint8_t)((end_index - start_index) + 1);
	uint32_t mask;
	int result;

	/* A full 32-bit span needs a special case: shifting by 32 is UB. */
	if (nbits == 32)
		mask = 0xFFFFFFFF;
	else
		mask = (1U << nbits) - 1;

	/* Index is the byte address of the containing 32-bit word. */
	efuse_param.sEfuse.usEfuseIndex = cpu_to_le16((start_index / 32) * 4);
	efuse_param.sEfuse.ucBitShift = (uint8_t)(start_index % 32);
	efuse_param.sEfuse.ucBitLength = nbits;

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
			(uint32_t *)&efuse_param);
	*efuse = result ? 0 : le32_to_cpu(efuse_param.ulEfuseValue) & mask;

	return result;
}
1368
/* Program AC timing for @memory_clock at DPM @level through the
 * DynamicMemorySettings command table. */
int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
			      uint8_t level)
{
	struct amdgpu_device *adev = hwmgr->adev;
	DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 dyn_mem_param;

	/* Encode the target frequency together with the adjust-MC-setting
	 * request flag expected by the interpreter. */
	dyn_mem_param.asDPMMCReg.ulClock.ulClockFreq =
		memory_clock & SET_CLOCK_FREQ_MASK;
	dyn_mem_param.asDPMMCReg.ulClock.ulComputeClockFlag =
		ADJUST_MC_SETTING_PARAM;
	dyn_mem_param.asDPMMCReg.ucMclkDPMState = level;

	return amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
			(uint32_t *)&dyn_mem_param);
}
1388
/* V1_3 flavour of the EVV query: same flow as
 * atomctrl_get_voltage_evv_on_sclk() but with a 32-bit voltage output. */
int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
		uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage)
{
	struct amdgpu_device *adev = hwmgr->adev;
	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3 evv_request;
	int result;

	/* Request block for GetVoltageInfo; fields are little-endian. */
	evv_request.ucVoltageType = voltage_type;
	evv_request.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
	evv_request.usVoltageLevel = cpu_to_le16(virtual_voltage_Id);
	evv_request.ulSCLKFreq = cpu_to_le32(sclk);

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
			(uint32_t *)&evv_request);

	/* The interpreter overwrites the block with the V1_3 output layout. */
	if (result)
		*voltage = 0;
	else
		*voltage = le32_to_cpu(
			((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)
			 &evv_request)->ulVoltageLevel);

	return result;
}
1410
atomctrl_get_smc_sclk_range_table(struct pp_hwmgr * hwmgr,struct pp_atom_ctrl_sclk_range_table * table)1411 int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table)
1412 {
1413
1414 int i;
1415 u8 frev, crev;
1416 u16 size;
1417
1418 ATOM_SMU_INFO_V2_1 *psmu_info =
1419 (ATOM_SMU_INFO_V2_1 *)smu_atom_get_data_table(hwmgr->adev,
1420 GetIndexIntoMasterTable(DATA, SMU_Info),
1421 &size, &frev, &crev);
1422
1423
1424 for (i = 0; i < psmu_info->ucSclkEntryNum; i++) {
1425 table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
1426 table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv;
1427 table->entry[i].usFcw_pcc =
1428 le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc);
1429 table->entry[i].usFcw_trans_upper =
1430 le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper);
1431 table->entry[i].usRcw_trans_lower =
1432 le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower);
1433 }
1434
1435 return 0;
1436 }
1437
/* Fetch the shared-power-source flag from SMU_Info.
 * Returns 0 on success, -1 when the table is absent. */
int atomctrl_get_vddc_shared_railinfo(struct pp_hwmgr *hwmgr, uint8_t *shared_rail)
{
	ATOM_SMU_INFO_V2_1 *smu_info;

	smu_info = (ATOM_SMU_INFO_V2_1 *)smu_atom_get_data_table(hwmgr->adev,
			GetIndexIntoMasterTable(DATA, SMU_Info),
			NULL, NULL, NULL);
	if (smu_info == NULL)
		return -1;

	*shared_rail = smu_info->ucSharePowerSource;

	return 0;
}
1451
/*
 * atomctrl_get_avfs_information - copy the AVFS characterization parameters
 * out of the ASIC_ProfilingInfo V3.6 data table into @param.
 * Returns 0 on success, -EINVAL for a NULL @param, -1 when the table is
 * absent from the VBIOS.
 */
int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
		struct pp_atom_ctrl__avfs_parameters *param)
{
	ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL;

	if (param == NULL)
		return -EINVAL;

	profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *)
			smu_atom_get_data_table(hwmgr->adev,
					GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
					NULL, NULL, NULL);
	if (!profile)
		return -1;

	/* Straight field-by-field copy; multi-byte VBIOS fields are converted
	 * from little-endian to CPU byte order, single bytes copied as-is. */
	param->ulAVFS_meanNsigma_Acontant0 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant0);
	param->ulAVFS_meanNsigma_Acontant1 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant1);
	param->ulAVFS_meanNsigma_Acontant2 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant2);
	param->usAVFS_meanNsigma_DC_tol_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_DC_tol_sigma);
	param->usAVFS_meanNsigma_Platform_mean = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_mean);
	param->usAVFS_meanNsigma_Platform_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_sigma);
	param->ulGB_VDROOP_TABLE_CKSOFF_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a0);
	param->ulGB_VDROOP_TABLE_CKSOFF_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a1);
	param->ulGB_VDROOP_TABLE_CKSOFF_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a2);
	param->ulGB_VDROOP_TABLE_CKSON_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a0);
	param->ulGB_VDROOP_TABLE_CKSON_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a1);
	param->ulGB_VDROOP_TABLE_CKSON_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a2);
	param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
	param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2);
	param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b);
	param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_m1);
	param->usAVFSGB_FUSE_TABLE_CKSON_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSON_m2);
	param->ulAVFSGB_FUSE_TABLE_CKSON_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_b);
	param->usMaxVoltage_0_25mv = le16_to_cpu(profile->usMaxVoltage_0_25mv);
	param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF;
	param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON;
	param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF;
	param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON;
	param->usPSM_Age_ComFactor = le16_to_cpu(profile->usPSM_Age_ComFactor);
	param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage;

	return 0;
}
1495
/* Fetch the SVID2 GPIO ids and load-line value for @voltage_type from the
 * voltage object info table. Returns 0 on success, -EINVAL when the table
 * or the SVID2 voltage object is missing. */
int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
				uint8_t *svd_gpio_id, uint8_t *svc_gpio_id,
				uint16_t *load_line)
{
	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);

	const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;

	PP_ASSERT_WITH_CODE((NULL != voltage_info),
			"Could not find Voltage Table in BIOS.", return -EINVAL);

	voltage_object = atomctrl_lookup_voltage_type_v3
		(voltage_info, voltage_type, VOLTAGE_OBJ_SVID2);

	/* The lookup can fail when no SVID2 object exists for this voltage
	 * type; the original dereferenced the result unconditionally. */
	if (voltage_object == NULL)
		return -EINVAL;

	*svd_gpio_id = voltage_object->asSVID2Obj.ucSVDGpioId;
	*svc_gpio_id = voltage_object->asSVID2Obj.ucSVCGpioId;
	/* NOTE(review): usLoadLine_PSI is passed through without le16_to_cpu(),
	 * unlike other u16 VBIOS fields in this file — confirm whether callers
	 * expect the raw little-endian encoding before changing it. */
	*load_line = voltage_object->asSVID2Obj.usLoadLine_PSI;

	return 0;
}
1517
/* Query the fused leakage voltage id through the SetVoltage command table
 * (ATOM_GET_LEAKAGE_ID mode). */
int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id)
{
	struct amdgpu_device *adev = hwmgr->adev;
	SET_VOLTAGE_PS_ALLOCATION allocation;
	SET_VOLTAGE_PARAMETERS_V1_3 *set_voltage =
		(SET_VOLTAGE_PARAMETERS_V1_3 *)&allocation.sASICSetVoltage;
	int result;

	set_voltage->ucVoltageMode = ATOM_GET_LEAKAGE_ID;

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, SetVoltage),
			(uint32_t *)set_voltage);

	/* The interpreter writes the leakage id back into the block. */
	*virtual_voltage_id = set_voltage->usVoltageLevel;

	return result;
}
1536
/*
 * atomctrl_get_leakage_vddc_base_on_leakage - translate a virtual leakage
 * voltage id into real VDDC/VDDCI levels via the ASIC profiling ELB arrays.
 * @vddc/@vddci: outputs, left 0 when no matching entry is found.
 * @virtual_voltage_id: 0xff01..0xff08 style leakage id to look up.
 * @efuse_voltage_id: fused leakage id, matched against the leakage bins.
 * Returns 0 (also when nothing matched), -EINVAL when the profiling table
 * is absent.
 */
int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
					uint16_t *vddc, uint16_t *vddci,
					uint16_t virtual_voltage_id,
					uint16_t efuse_voltage_id)
{
	int i, j;
	int ix;
	u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
	ATOM_ASIC_PROFILING_INFO_V2_1 *profile;

	*vddc = 0;
	*vddci = 0;

	ix = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);

	profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
			smu_atom_get_data_table(hwmgr->adev,
					ix,
					NULL, NULL, NULL);
	if (!profile)
		return -EINVAL;

	/* Only table revision >= 2.1 with a V2_1-sized body carries the
	 * ELB arrays consumed below. */
	if ((profile->asHeader.ucTableFormatRevision >= 2) &&
		(profile->asHeader.ucTableContentRevision >= 1) &&
		(profile->asHeader.usStructureSize >= sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))) {
		/* The arrays are located via byte offsets from the table start. */
		leakage_bin = (u16 *)((char *)profile + profile->usLeakageBinArrayOffset);
		vddc_id_buf = (u16 *)((char *)profile + profile->usElbVDDC_IdArrayOffset);
		vddc_buf = (u16 *)((char *)profile + profile->usElbVDDC_LevelArrayOffset);
		if (profile->ucElbVDDC_Num > 0) {
			for (i = 0; i < profile->ucElbVDDC_Num; i++) {
				if (vddc_id_buf[i] == virtual_voltage_id) {
					/* First bin whose threshold covers the fuse wins;
					 * presumably bins are sorted ascending — TODO confirm. */
					for (j = 0; j < profile->ucLeakageBinNum; j++) {
						if (efuse_voltage_id <= leakage_bin[j]) {
							/* Levels are stored bin-major: row j, column i. */
							*vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
							break;
						}
					}
					break;
				}
			}
		}

		/* Same lookup for VDDCI with its own id/level arrays. */
		vddci_id_buf = (u16 *)((char *)profile + profile->usElbVDDCI_IdArrayOffset);
		vddci_buf = (u16 *)((char *)profile + profile->usElbVDDCI_LevelArrayOffset);
		if (profile->ucElbVDDCI_Num > 0) {
			for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
				if (vddci_id_buf[i] == virtual_voltage_id) {
					for (j = 0; j < profile->ucLeakageBinNum; j++) {
						if (efuse_voltage_id <= leakage_bin[j]) {
							*vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
							break;
						}
					}
					break;
				}
			}
		}
	}

	return 0;
}
1598
/* Report the chip's VDDC range in mV from the ASIC profiling table;
 * both outputs are 0 when the table is missing or the chip is unknown. */
void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc,
		uint32_t *min_vddc)
{
	void *profile = smu_atom_get_data_table(hwmgr->adev,
			GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
			NULL, NULL, NULL);

	/* Default to "unknown" and override per chip family below. */
	*max_vddc = 0;
	*min_vddc = 0;

	if (!profile)
		return;

	switch (hwmgr->chip_id) {
	case CHIP_TONGA:
	case CHIP_FIJI: {
		ATOM_ASIC_PROFILING_INFO_V3_3 *v3_3 = profile;

		/* V3_3 stores the limits scaled by 4. */
		*max_vddc = le32_to_cpu(v3_3->ulMaxVddc) / 4;
		*min_vddc = le32_to_cpu(v3_3->ulMinVddc) / 4;
		break;
	}
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12: {
		ATOM_ASIC_PROFILING_INFO_V3_6 *v3_6 = profile;

		/* V3_6 stores the limits scaled by 100. */
		*max_vddc = le32_to_cpu(v3_6->ulMaxVddc) / 100;
		*min_vddc = le32_to_cpu(v3_6->ulMinVddc) / 100;
		break;
	}
	default:
		break;
	}
}
1628
/* Copy the EDC DIDT hi/lo leakage threshold and table offsets out of the
 * GFX_Info data table. Returns 0 on success, -ENOENT when GFX_Info is absent. */
int atomctrl_get_edc_hilo_leakage_offset_table(struct pp_hwmgr *hwmgr,
					       AtomCtrl_HiLoLeakageOffsetTable *table)
{
	ATOM_GFX_INFO_V2_3 *gfxinfo;

	gfxinfo = smu_atom_get_data_table(hwmgr->adev,
			GetIndexIntoMasterTable(DATA, GFX_Info),
			NULL, NULL, NULL);
	if (gfxinfo == NULL)
		return -ENOENT;

	/* Values are copied verbatim, matching how callers consume them. */
	table->usHiLoLeakageThreshold = gfxinfo->usHiLoLeakageThreshold;
	table->usEdcDidtLoDpm7TableOffset = gfxinfo->usEdcDidtLoDpm7TableOffset;
	table->usEdcDidtHiDpm7TableOffset = gfxinfo->usEdcDidtHiDpm7TableOffset;

	return 0;
}
1644
get_edc_leakage_table(struct pp_hwmgr * hwmgr,uint16_t offset)1645 static AtomCtrl_EDCLeakgeTable *get_edc_leakage_table(struct pp_hwmgr *hwmgr,
1646 uint16_t offset)
1647 {
1648 void *table_address;
1649 char *temp;
1650
1651 table_address = smu_atom_get_data_table(hwmgr->adev,
1652 GetIndexIntoMasterTable(DATA, GFX_Info),
1653 NULL, NULL, NULL);
1654 if (!table_address)
1655 return NULL;
1656
1657 temp = (char *)table_address;
1658 table_address += offset;
1659
1660 return (AtomCtrl_EDCLeakgeTable *)temp;
1661 }
1662
/* Copy every DIDT register word of the EDC leakage table at @offset into
 * @table. Returns 0 on success, -ENOENT when the table cannot be located. */
int atomctrl_get_edc_leakage_table(struct pp_hwmgr *hwmgr,
				   AtomCtrl_EDCLeakgeTable *table,
				   uint16_t offset)
{
	AtomCtrl_EDCLeakgeTable *leakage_table =
		get_edc_leakage_table(hwmgr, offset);
	uint32_t count, i;

	if (leakage_table == NULL)
		return -ENOENT;

	/* sizeof-based element count of the fixed DIDT_REG array. */
	count = sizeof(leakage_table->DIDT_REG) /
		sizeof(leakage_table->DIDT_REG[0]);
	for (i = 0; i < count; i++)
		table->DIDT_REG[i] = leakage_table->DIDT_REG[i];

	return 0;
}
1681