1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "pp_debug.h"
24 #include <linux/delay.h>
25 #include <linux/fb.h>
26 #include <linux/module.h>
27 #include <linux/pci.h>
28 #include <linux/slab.h>
29 #include <asm/div64.h>
30 #include <drm/amdgpu_drm.h>
31 #include "ppatomctrl.h"
32 #include "atombios.h"
33 #include "pptable_v1_0.h"
34 #include "pppcielanes.h"
35 #include "amd_pcie_helpers.h"
36 #include "hardwaremanager.h"
37 #include "process_pptables_v1_0.h"
38 #include "cgs_common.h"
39 
40 #include "smu7_common.h"
41 
42 #include "hwmgr.h"
43 #include "smu7_hwmgr.h"
44 #include "smu_ucode_xfer_vi.h"
45 #include "smu7_powertune.h"
46 #include "smu7_dyn_defaults.h"
47 #include "smu7_thermal.h"
48 #include "smu7_clockpowergating.h"
49 #include "processpptables.h"
50 #include "pp_thermal.h"
51 #include "smu7_baco.h"
52 #include "smu7_smumgr.h"
53 #include "polaris10_smumgr.h"
54 
55 #include "ivsrcid/ivsrcid_vislands30.h"
56 
57 #define MC_CG_ARB_FREQ_F0           0x0a
58 #define MC_CG_ARB_FREQ_F1           0x0b
59 #define MC_CG_ARB_FREQ_F2           0x0c
60 #define MC_CG_ARB_FREQ_F3           0x0d
61 
62 #define MC_CG_SEQ_DRAMCONF_S0       0x05
63 #define MC_CG_SEQ_DRAMCONF_S1       0x06
64 #define MC_CG_SEQ_YCLK_SUSPEND      0x04
65 #define MC_CG_SEQ_YCLK_RESUME       0x0a
66 
67 #define SMC_CG_IND_START            0xc0030000
68 #define SMC_CG_IND_END              0xc0040000
69 
70 #define MEM_FREQ_LOW_LATENCY        25000
71 #define MEM_FREQ_HIGH_LATENCY       80000
72 
73 #define MEM_LATENCY_HIGH            45
74 #define MEM_LATENCY_LOW             35
75 #define MEM_LATENCY_ERR             0xFFFF
76 
77 #define MC_SEQ_MISC0_GDDR5_SHIFT 28
78 #define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
79 #define MC_SEQ_MISC0_GDDR5_VALUE 5
80 
81 #define PCIE_BUS_CLK                10000
82 #define TCLK                        (PCIE_BUS_CLK / 10)
83 
84 static struct profile_mode_setting smu7_profiling[7] =
85 					{{0, 0, 0, 0, 0, 0, 0, 0},
86 					 {1, 0, 100, 30, 1, 0, 100, 10},
87 					 {1, 10, 0, 30, 0, 0, 0, 0},
88 					 {0, 0, 0, 0, 1, 10, 16, 31},
89 					 {1, 0, 11, 50, 1, 0, 100, 10},
90 					 {1, 0, 5, 30, 0, 0, 0, 0},
91 					 {0, 0, 0, 0, 0, 0, 0, 0},
92 					};
93 
94 #define PPSMC_MSG_SetVBITimeout_VEGAM    ((uint16_t) 0x310)
95 
96 #define ixPWR_SVI2_PLANE1_LOAD                     0xC0200280
97 #define PWR_SVI2_PLANE1_LOAD__PSI1_MASK                    0x00000020L
98 #define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK                 0x00000040L
99 #define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT                  0x00000005
100 #define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT               0x00000006
101 
102 #define STRAP_EVV_REVISION_MSB		2211
103 #define STRAP_EVV_REVISION_LSB		2208
104 
105 /** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
106 enum DPM_EVENT_SRC {
107 	DPM_EVENT_SRC_ANALOG = 0,
108 	DPM_EVENT_SRC_EXTERNAL = 1,
109 	DPM_EVENT_SRC_DIGITAL = 2,
110 	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
111 	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
112 };
113 
114 #define ixDIDT_SQ_EDC_CTRL                         0x0013
115 #define ixDIDT_SQ_EDC_THRESHOLD                    0x0014
116 #define ixDIDT_SQ_EDC_STALL_PATTERN_1_2            0x0015
117 #define ixDIDT_SQ_EDC_STALL_PATTERN_3_4            0x0016
118 #define ixDIDT_SQ_EDC_STALL_PATTERN_5_6            0x0017
119 #define ixDIDT_SQ_EDC_STALL_PATTERN_7              0x0018
120 
121 #define ixDIDT_TD_EDC_CTRL                         0x0053
122 #define ixDIDT_TD_EDC_THRESHOLD                    0x0054
123 #define ixDIDT_TD_EDC_STALL_PATTERN_1_2            0x0055
124 #define ixDIDT_TD_EDC_STALL_PATTERN_3_4            0x0056
125 #define ixDIDT_TD_EDC_STALL_PATTERN_5_6            0x0057
126 #define ixDIDT_TD_EDC_STALL_PATTERN_7              0x0058
127 
128 #define ixDIDT_TCP_EDC_CTRL                        0x0073
129 #define ixDIDT_TCP_EDC_THRESHOLD                   0x0074
130 #define ixDIDT_TCP_EDC_STALL_PATTERN_1_2           0x0075
131 #define ixDIDT_TCP_EDC_STALL_PATTERN_3_4           0x0076
132 #define ixDIDT_TCP_EDC_STALL_PATTERN_5_6           0x0077
133 #define ixDIDT_TCP_EDC_STALL_PATTERN_7             0x0078
134 
135 #define ixDIDT_DB_EDC_CTRL                         0x0033
136 #define ixDIDT_DB_EDC_THRESHOLD                    0x0034
137 #define ixDIDT_DB_EDC_STALL_PATTERN_1_2            0x0035
138 #define ixDIDT_DB_EDC_STALL_PATTERN_3_4            0x0036
139 #define ixDIDT_DB_EDC_STALL_PATTERN_5_6            0x0037
140 #define ixDIDT_DB_EDC_STALL_PATTERN_7              0x0038
141 
142 uint32_t DIDTEDCConfig_P12[] = {
143     ixDIDT_SQ_EDC_STALL_PATTERN_1_2,
144     ixDIDT_SQ_EDC_STALL_PATTERN_3_4,
145     ixDIDT_SQ_EDC_STALL_PATTERN_5_6,
146     ixDIDT_SQ_EDC_STALL_PATTERN_7,
147     ixDIDT_SQ_EDC_THRESHOLD,
148     ixDIDT_SQ_EDC_CTRL,
149     ixDIDT_TD_EDC_STALL_PATTERN_1_2,
150     ixDIDT_TD_EDC_STALL_PATTERN_3_4,
151     ixDIDT_TD_EDC_STALL_PATTERN_5_6,
152     ixDIDT_TD_EDC_STALL_PATTERN_7,
153     ixDIDT_TD_EDC_THRESHOLD,
154     ixDIDT_TD_EDC_CTRL,
155     ixDIDT_TCP_EDC_STALL_PATTERN_1_2,
156     ixDIDT_TCP_EDC_STALL_PATTERN_3_4,
157     ixDIDT_TCP_EDC_STALL_PATTERN_5_6,
158     ixDIDT_TCP_EDC_STALL_PATTERN_7,
159     ixDIDT_TCP_EDC_THRESHOLD,
160     ixDIDT_TCP_EDC_CTRL,
161     ixDIDT_DB_EDC_STALL_PATTERN_1_2,
162     ixDIDT_DB_EDC_STALL_PATTERN_3_4,
163     ixDIDT_DB_EDC_STALL_PATTERN_5_6,
164     ixDIDT_DB_EDC_STALL_PATTERN_7,
165     ixDIDT_DB_EDC_THRESHOLD,
166     ixDIDT_DB_EDC_CTRL,
167     0xFFFFFFFF // End of list
168 };
169 
170 static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
171 static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
172 		enum pp_clock_type type, uint32_t mask);
173 static int smu7_notify_has_display(struct pp_hwmgr *hwmgr);
174 
175 static struct smu7_power_state *cast_phw_smu7_power_state(
176 				  struct pp_hw_power_state *hw_ps)
177 {
178 	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
179 				"Invalid Powerstate Type!",
180 				 return NULL);
181 
182 	return (struct smu7_power_state *)hw_ps;
183 }
184 
185 static const struct smu7_power_state *cast_const_phw_smu7_power_state(
186 				 const struct pp_hw_power_state *hw_ps)
187 {
188 	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
189 				"Invalid Powerstate Type!",
190 				 return NULL);
191 
192 	return (const struct smu7_power_state *)hw_ps;
193 }
194 
195 /**
196  * smu7_get_mc_microcode_version - Find the MC microcode version and store it in the HwMgr struct
197  *
198  * @hwmgr:  the address of the powerplay hardware manager.
199  * Return:   always 0
200  */
201 static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
202 {
203 	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
204 
205 	hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
206 
207 	return 0;
208 }
209 
210 static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
211 {
212 	uint32_t speedCntl = 0;
213 
214 	/* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
215 	speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
216 			ixPCIE_LC_SPEED_CNTL);
217 	return((uint16_t)PHM_GET_FIELD(speedCntl,
218 			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
219 }
220 
221 static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
222 {
223 	uint32_t link_width;
224 
225 	/* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
226 	link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
227 			PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
228 
229 	PP_ASSERT_WITH_CODE((7 >= link_width),
230 			"Invalid PCIe lane width!", return 0);
231 
232 	return decode_pcie_lane_width(link_width);
233 }
234 
235 /**
236  * smu7_enable_smc_voltage_controller - Enable voltage control
237  *
238  * @hwmgr  the address of the powerplay hardware manager.
239  * Return:   always PP_Result_OK
240  */
241 static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
242 {
243 	if (hwmgr->chip_id >= CHIP_POLARIS10 &&
244 	    hwmgr->chip_id <= CHIP_VEGAM) {
245 		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
246 				CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0);
247 		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
248 				CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0);
249 	}
250 
251 	if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
252 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable, NULL);
253 
254 	return 0;
255 }
256 
257 /**
258  * smu7_voltage_control - Checks if we want to support voltage control
259  *
260  * @hwmgr:  the address of the powerplay hardware manager.
261  */
262 static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
263 {
264 	const struct smu7_hwmgr *data =
265 			(const struct smu7_hwmgr *)(hwmgr->backend);
266 
267 	return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
268 }
269 
270 /**
271  * smu7_enable_voltage_control - Enable voltage control
272  *
273  * @hwmgr:  the address of the powerplay hardware manager.
274  * Return:   always 0
275  */
276 static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
277 {
278 	/* enable voltage control */
279 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
280 			GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
281 
282 	return 0;
283 }
284 
285 static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
286 		struct phm_clock_voltage_dependency_table *voltage_dependency_table
287 		)
288 {
289 	uint32_t i;
290 
291 	PP_ASSERT_WITH_CODE((NULL != voltage_table),
292 			"Voltage Dependency Table empty.", return -EINVAL;);
293 
294 	voltage_table->mask_low = 0;
295 	voltage_table->phase_delay = 0;
296 	voltage_table->count = voltage_dependency_table->count;
297 
298 	for (i = 0; i < voltage_dependency_table->count; i++) {
299 		voltage_table->entries[i].value =
300 			voltage_dependency_table->entries[i].v;
301 		voltage_table->entries[i].smio_low = 0;
302 	}
303 
304 	return 0;
305 }
306 
307 
308 /**
309  * smu7_construct_voltage_tables - Create Voltage Tables.
310  *
311  * @hwmgr:  the address of the powerplay hardware manager.
312  * Return:   always 0
313  */
314 static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
315 {
316 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
317 	struct phm_ppt_v1_information *table_info =
318 			(struct phm_ppt_v1_information *)hwmgr->pptable;
319 	int result = 0;
320 	uint32_t tmp;
321 
322 	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
323 		result = atomctrl_get_voltage_table_v3(hwmgr,
324 				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
325 				&(data->mvdd_voltage_table));
326 		PP_ASSERT_WITH_CODE((0 == result),
327 				"Failed to retrieve MVDD table.",
328 				return result);
329 	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
330 		if (hwmgr->pp_table_version == PP_TABLE_V1)
331 			result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
332 					table_info->vdd_dep_on_mclk);
333 		else if (hwmgr->pp_table_version == PP_TABLE_V0)
334 			result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
335 					hwmgr->dyn_state.mvdd_dependency_on_mclk);
336 
337 		PP_ASSERT_WITH_CODE((0 == result),
338 				"Failed to retrieve SVI2 MVDD table from dependency table.",
339 				return result;);
340 	}
341 
342 	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
343 		result = atomctrl_get_voltage_table_v3(hwmgr,
344 				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
345 				&(data->vddci_voltage_table));
346 		PP_ASSERT_WITH_CODE((0 == result),
347 				"Failed to retrieve VDDCI table.",
348 				return result);
349 	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
350 		if (hwmgr->pp_table_version == PP_TABLE_V1)
351 			result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
352 					table_info->vdd_dep_on_mclk);
353 		else if (hwmgr->pp_table_version == PP_TABLE_V0)
354 			result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
355 					hwmgr->dyn_state.vddci_dependency_on_mclk);
356 		PP_ASSERT_WITH_CODE((0 == result),
357 				"Failed to retrieve SVI2 VDDCI table from dependency table.",
358 				return result);
359 	}
360 
361 	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
362 		/* VDDGFX has only SVI2 voltage control */
363 		result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
364 					table_info->vddgfx_lookup_table);
365 		PP_ASSERT_WITH_CODE((0 == result),
366 			"Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
367 	}
368 
369 
370 	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
371 		result = atomctrl_get_voltage_table_v3(hwmgr,
372 					VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
373 					&data->vddc_voltage_table);
374 		PP_ASSERT_WITH_CODE((0 == result),
375 			"Failed to retrieve VDDC table.", return result;);
376 	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
377 
378 		if (hwmgr->pp_table_version == PP_TABLE_V0)
379 			result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
380 					hwmgr->dyn_state.vddc_dependency_on_mclk);
381 		else if (hwmgr->pp_table_version == PP_TABLE_V1)
382 			result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
383 				table_info->vddc_lookup_table);
384 
385 		PP_ASSERT_WITH_CODE((0 == result),
386 			"Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
387 	}
388 
389 	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
390 	PP_ASSERT_WITH_CODE(
391 			(data->vddc_voltage_table.count <= tmp),
392 		"Too many voltage values for VDDC. Trimming to fit state table.",
393 			phm_trim_voltage_table_to_fit_state_table(tmp,
394 						&(data->vddc_voltage_table)));
395 
396 	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
397 	PP_ASSERT_WITH_CODE(
398 			(data->vddgfx_voltage_table.count <= tmp),
399 		"Too many voltage values for VDDC. Trimming to fit state table.",
400 			phm_trim_voltage_table_to_fit_state_table(tmp,
401 						&(data->vddgfx_voltage_table)));
402 
403 	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
404 	PP_ASSERT_WITH_CODE(
405 			(data->vddci_voltage_table.count <= tmp),
406 		"Too many voltage values for VDDCI. Trimming to fit state table.",
407 			phm_trim_voltage_table_to_fit_state_table(tmp,
408 					&(data->vddci_voltage_table)));
409 
410 	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
411 	PP_ASSERT_WITH_CODE(
412 			(data->mvdd_voltage_table.count <= tmp),
413 		"Too many voltage values for MVDD. Trimming to fit state table.",
414 			phm_trim_voltage_table_to_fit_state_table(tmp,
415 						&(data->mvdd_voltage_table)));
416 
417 	return 0;
418 }
419 
420 /**
421  * smu7_program_static_screen_threshold_parameters - Programs static screed detection parameters
422  *
423  * @hwmgr:  the address of the powerplay hardware manager.
424  * Return:   always 0
425  */
426 static int smu7_program_static_screen_threshold_parameters(
427 							struct pp_hwmgr *hwmgr)
428 {
429 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
430 
431 	/* Set static screen threshold unit */
432 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
433 			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
434 			data->static_screen_threshold_unit);
435 	/* Set static screen threshold */
436 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
437 			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
438 			data->static_screen_threshold);
439 
440 	return 0;
441 }
442 
443 /**
444  * smu7_enable_display_gap - Setup display gap for glitch free memory clock switching.
445  *
446  * @hwmgr:  the address of the powerplay hardware manager.
447  * Return:   always  0
448  */
449 static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
450 {
451 	uint32_t display_gap =
452 			cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
453 					ixCG_DISPLAY_GAP_CNTL);
454 
455 	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
456 			DISP_GAP, DISPLAY_GAP_IGNORE);
457 
458 	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
459 			DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
460 
461 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
462 			ixCG_DISPLAY_GAP_CNTL, display_gap);
463 
464 	return 0;
465 }
466 
467 /**
468  * smu7_program_voting_clients - Programs activity state transition voting clients
469  *
470  * @hwmgr:  the address of the powerplay hardware manager.
471  * Return:   always  0
472  */
473 static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
474 {
475 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
476 	int i;
477 
478 	/* Clear reset for voting clients before enabling DPM */
479 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
480 			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
481 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
482 			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
483 
484 	for (i = 0; i < 8; i++)
485 		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
486 					ixCG_FREQ_TRAN_VOTING_0 + i * 4,
487 					data->voting_rights_clients[i]);
488 	return 0;
489 }
490 
491 static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
492 {
493 	int i;
494 
495 	/* Reset voting clients before disabling DPM */
496 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
497 			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
498 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
499 			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
500 
501 	for (i = 0; i < 8; i++)
502 		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
503 				ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);
504 
505 	return 0;
506 }
507 
508 /* Copy one arb setting to another and then switch the active set.
509  * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants.
510  */
511 static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
512 		uint32_t arb_src, uint32_t arb_dest)
513 {
514 	uint32_t mc_arb_dram_timing;
515 	uint32_t mc_arb_dram_timing2;
516 	uint32_t burst_time;
517 	uint32_t mc_cg_config;
518 
519 	switch (arb_src) {
520 	case MC_CG_ARB_FREQ_F0:
521 		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
522 		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
523 		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
524 		break;
525 	case MC_CG_ARB_FREQ_F1:
526 		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
527 		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
528 		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
529 		break;
530 	default:
531 		return -EINVAL;
532 	}
533 
534 	switch (arb_dest) {
535 	case MC_CG_ARB_FREQ_F0:
536 		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
537 		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
538 		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
539 		break;
540 	case MC_CG_ARB_FREQ_F1:
541 		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
542 		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
543 		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
544 		break;
545 	default:
546 		return -EINVAL;
547 	}
548 
549 	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
550 	mc_cg_config |= 0x0000000F;
551 	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
552 	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
553 
554 	return 0;
555 }
556 
557 static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
558 {
559 	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults, NULL);
560 }
561 
562 /**
563  * smu7_initial_switch_from_arbf0_to_f1 - Initial switch from ARB F0->F1
564  *
565  * @hwmgr:  the address of the powerplay hardware manager.
566  * Return:   always 0
567  * This function is to be called from the SetPowerState table.
568  */
569 static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
570 {
571 	return smu7_copy_and_switch_arb_sets(hwmgr,
572 			MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
573 }
574 
575 static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
576 {
577 	uint32_t tmp;
578 
579 	tmp = (cgs_read_ind_register(hwmgr->device,
580 			CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
581 			0x0000ff00) >> 8;
582 
583 	if (tmp == MC_CG_ARB_FREQ_F0)
584 		return 0;
585 
586 	return smu7_copy_and_switch_arb_sets(hwmgr,
587 			tmp, MC_CG_ARB_FREQ_F0);
588 }
589 
590 static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
591 {
592 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
593 
594 	struct phm_ppt_v1_information *table_info =
595 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
596 	struct phm_ppt_v1_pcie_table *pcie_table = NULL;
597 
598 	uint32_t i, max_entry;
599 	uint32_t tmp;
600 
601 	PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
602 			data->use_pcie_power_saving_levels), "No pcie performance levels!",
603 			return -EINVAL);
604 
605 	if (table_info != NULL)
606 		pcie_table = table_info->pcie_table;
607 
608 	if (data->use_pcie_performance_levels &&
609 			!data->use_pcie_power_saving_levels) {
610 		data->pcie_gen_power_saving = data->pcie_gen_performance;
611 		data->pcie_lane_power_saving = data->pcie_lane_performance;
612 	} else if (!data->use_pcie_performance_levels &&
613 			data->use_pcie_power_saving_levels) {
614 		data->pcie_gen_performance = data->pcie_gen_power_saving;
615 		data->pcie_lane_performance = data->pcie_lane_power_saving;
616 	}
617 	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
618 	phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
619 					tmp,
620 					MAX_REGULAR_DPM_NUMBER);
621 
622 	if (pcie_table != NULL) {
623 		/* max_entry is used to make sure we reserve one PCIE level
624 		 * for boot level (fix for A+A PSPP issue).
625 		 * If PCIE table from PPTable have ULV entry + 8 entries,
626 		 * then ignore the last entry.*/
627 		max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
628 		for (i = 1; i < max_entry; i++) {
629 			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
630 					get_pcie_gen_support(data->pcie_gen_cap,
631 							pcie_table->entries[i].gen_speed),
632 					get_pcie_lane_support(data->pcie_lane_cap,
633 							pcie_table->entries[i].lane_width));
634 		}
635 		data->dpm_table.pcie_speed_table.count = max_entry - 1;
636 		smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
637 	} else {
638 		/* Hardcode Pcie Table */
639 		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
640 				get_pcie_gen_support(data->pcie_gen_cap,
641 						PP_Min_PCIEGen),
642 				get_pcie_lane_support(data->pcie_lane_cap,
643 						PP_Max_PCIELane));
644 		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
645 				get_pcie_gen_support(data->pcie_gen_cap,
646 						PP_Min_PCIEGen),
647 				get_pcie_lane_support(data->pcie_lane_cap,
648 						PP_Max_PCIELane));
649 		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
650 				get_pcie_gen_support(data->pcie_gen_cap,
651 						PP_Max_PCIEGen),
652 				get_pcie_lane_support(data->pcie_lane_cap,
653 						PP_Max_PCIELane));
654 		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
655 				get_pcie_gen_support(data->pcie_gen_cap,
656 						PP_Max_PCIEGen),
657 				get_pcie_lane_support(data->pcie_lane_cap,
658 						PP_Max_PCIELane));
659 		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
660 				get_pcie_gen_support(data->pcie_gen_cap,
661 						PP_Max_PCIEGen),
662 				get_pcie_lane_support(data->pcie_lane_cap,
663 						PP_Max_PCIELane));
664 		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
665 				get_pcie_gen_support(data->pcie_gen_cap,
666 						PP_Max_PCIEGen),
667 				get_pcie_lane_support(data->pcie_lane_cap,
668 						PP_Max_PCIELane));
669 
670 		data->dpm_table.pcie_speed_table.count = 6;
671 	}
672 	/* Populate last level for boot PCIE level, but do not increment count. */
673 	if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
674 		for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
675 			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
676 				get_pcie_gen_support(data->pcie_gen_cap,
677 						PP_Max_PCIEGen),
678 				data->vbios_boot_state.pcie_lane_bootup_value);
679 	} else {
680 		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
681 			data->dpm_table.pcie_speed_table.count,
682 			get_pcie_gen_support(data->pcie_gen_cap,
683 					PP_Min_PCIEGen),
684 			get_pcie_lane_support(data->pcie_lane_cap,
685 					PP_Max_PCIELane));
686 	}
687 	return 0;
688 }
689 
690 static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
691 {
692 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
693 
694 	memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
695 
696 	phm_reset_single_dpm_table(
697 			&data->dpm_table.sclk_table,
698 				smum_get_mac_definition(hwmgr,
699 					SMU_MAX_LEVELS_GRAPHICS),
700 					MAX_REGULAR_DPM_NUMBER);
701 	phm_reset_single_dpm_table(
702 			&data->dpm_table.mclk_table,
703 			smum_get_mac_definition(hwmgr,
704 				SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);
705 
706 	phm_reset_single_dpm_table(
707 			&data->dpm_table.vddc_table,
708 				smum_get_mac_definition(hwmgr,
709 					SMU_MAX_LEVELS_VDDC),
710 					MAX_REGULAR_DPM_NUMBER);
711 	phm_reset_single_dpm_table(
712 			&data->dpm_table.vddci_table,
713 			smum_get_mac_definition(hwmgr,
714 				SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);
715 
716 	phm_reset_single_dpm_table(
717 			&data->dpm_table.mvdd_table,
718 				smum_get_mac_definition(hwmgr,
719 					SMU_MAX_LEVELS_MVDD),
720 					MAX_REGULAR_DPM_NUMBER);
721 	return 0;
722 }
723 /*
724  * This function is to initialize all DPM state tables
725  * for SMU7 based on the dependency table.
726  * Dynamic state patching function will then trim these
727  * state tables to the allowed range based
728  * on the power policy or external client requests,
729  * such as UVD request, etc.
730  */
731 
732 static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
733 {
734 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
735 	struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
736 		hwmgr->dyn_state.vddc_dependency_on_sclk;
737 	struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
738 		hwmgr->dyn_state.vddc_dependency_on_mclk;
739 	struct phm_cac_leakage_table *std_voltage_table =
740 		hwmgr->dyn_state.cac_leakage_table;
741 	uint32_t i;
742 
743 	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
744 		"SCLK dependency table is missing. This table is mandatory", return -EINVAL);
745 	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
746 		"SCLK dependency table has to have is missing. This table is mandatory", return -EINVAL);
747 
748 	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
749 		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
750 	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
751 		"VMCLK dependency table has to have is missing. This table is mandatory", return -EINVAL);
752 
753 
754 	/* Initialize Sclk DPM table based on allow Sclk values*/
755 	data->dpm_table.sclk_table.count = 0;
756 
757 	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
758 		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
759 				allowed_vdd_sclk_table->entries[i].clk) {
760 			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
761 				allowed_vdd_sclk_table->entries[i].clk;
762 			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
763 			data->dpm_table.sclk_table.count++;
764 		}
765 	}
766 
767 	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
768 		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
769 	/* Initialize Mclk DPM table based on allow Mclk values */
770 	data->dpm_table.mclk_table.count = 0;
771 	for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
772 		if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
773 			allowed_vdd_mclk_table->entries[i].clk) {
774 			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
775 				allowed_vdd_mclk_table->entries[i].clk;
776 			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
777 			data->dpm_table.mclk_table.count++;
778 		}
779 	}
780 
781 	/* Initialize Vddc DPM table based on allow Vddc values.  And populate corresponding std values. */
782 	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
783 		data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
784 		data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
785 		/* param1 is for corresponding std voltage */
786 		data->dpm_table.vddc_table.dpm_levels[i].enabled = true;
787 	}
788 
789 	data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
790 	allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
791 
792 	if (NULL != allowed_vdd_mclk_table) {
793 		/* Initialize Vddci DPM table based on allow Mclk values */
794 		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
795 			data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
796 			data->dpm_table.vddci_table.dpm_levels[i].enabled = true;
797 		}
798 		data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
799 	}
800 
801 	allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;
802 
803 	if (NULL != allowed_vdd_mclk_table) {
804 		/*
805 		 * Initialize MVDD DPM table based on allow Mclk
806 		 * values
807 		 */
808 		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
809 			data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
810 			data->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
811 		}
812 		data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
813 	}
814 
815 	return 0;
816 }
817 
818 static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
819 {
820 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
821 	struct phm_ppt_v1_information *table_info =
822 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
823 	uint32_t i;
824 
825 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
826 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
827 
828 	if (table_info == NULL)
829 		return -EINVAL;
830 
831 	dep_sclk_table = table_info->vdd_dep_on_sclk;
832 	dep_mclk_table = table_info->vdd_dep_on_mclk;
833 
834 	PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
835 			"SCLK dependency table is missing.",
836 			return -EINVAL);
837 	PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
838 			"SCLK dependency table count is 0.",
839 			return -EINVAL);
840 
841 	PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
842 			"MCLK dependency table is missing.",
843 			return -EINVAL);
844 	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
845 			"MCLK dependency table count is 0",
846 			return -EINVAL);
847 
848 	/* Initialize Sclk DPM table based on allow Sclk values */
849 	data->dpm_table.sclk_table.count = 0;
850 	for (i = 0; i < dep_sclk_table->count; i++) {
851 		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
852 						dep_sclk_table->entries[i].clk) {
853 
854 			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
855 					dep_sclk_table->entries[i].clk;
856 
857 			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
858 					(i == 0) ? true : false;
859 			data->dpm_table.sclk_table.count++;
860 		}
861 	}
862 	if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
863 		hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk;
864 	/* Initialize Mclk DPM table based on allow Mclk values */
865 	data->dpm_table.mclk_table.count = 0;
866 	for (i = 0; i < dep_mclk_table->count; i++) {
867 		if (i == 0 || data->dpm_table.mclk_table.dpm_levels
868 				[data->dpm_table.mclk_table.count - 1].value !=
869 						dep_mclk_table->entries[i].clk) {
870 			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
871 							dep_mclk_table->entries[i].clk;
872 			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
873 							(i == 0) ? true : false;
874 			data->dpm_table.mclk_table.count++;
875 		}
876 	}
877 
878 	if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
879 		hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk;
880 	return 0;
881 }
882 
883 static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
884 {
885 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
886 	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
887 	struct phm_ppt_v1_information *table_info =
888 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
889 	uint32_t i;
890 
891 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
892 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
893 	struct phm_odn_performance_level *entries;
894 
895 	if (table_info == NULL)
896 		return -EINVAL;
897 
898 	dep_sclk_table = table_info->vdd_dep_on_sclk;
899 	dep_mclk_table = table_info->vdd_dep_on_mclk;
900 
901 	odn_table->odn_core_clock_dpm_levels.num_of_pl =
902 						data->golden_dpm_table.sclk_table.count;
903 	entries = odn_table->odn_core_clock_dpm_levels.entries;
904 	for (i=0; i<data->golden_dpm_table.sclk_table.count; i++) {
905 		entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
906 		entries[i].enabled = true;
907 		entries[i].vddc = dep_sclk_table->entries[i].vddc;
908 	}
909 
910 	smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table,
911 		(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));
912 
913 	odn_table->odn_memory_clock_dpm_levels.num_of_pl =
914 						data->golden_dpm_table.mclk_table.count;
915 	entries = odn_table->odn_memory_clock_dpm_levels.entries;
916 	for (i=0; i<data->golden_dpm_table.mclk_table.count; i++) {
917 		entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
918 		entries[i].enabled = true;
919 		entries[i].vddc = dep_mclk_table->entries[i].vddc;
920 	}
921 
922 	smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table,
923 		(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));
924 
925 	return 0;
926 }
927 
928 static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
929 {
930 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
931 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
932 	struct phm_ppt_v1_information *table_info =
933 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
934 	uint32_t min_vddc = 0;
935 	uint32_t max_vddc = 0;
936 
937 	if (!table_info)
938 		return;
939 
940 	dep_sclk_table = table_info->vdd_dep_on_sclk;
941 
942 	atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);
943 
944 	if (min_vddc == 0 || min_vddc > 2000
945 		|| min_vddc > dep_sclk_table->entries[0].vddc)
946 		min_vddc = dep_sclk_table->entries[0].vddc;
947 
948 	if (max_vddc == 0 || max_vddc > 2000
949 		|| max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc)
950 		max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc;
951 
952 	data->odn_dpm_table.min_vddc = min_vddc;
953 	data->odn_dpm_table.max_vddc = max_vddc;
954 }
955 
956 static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
957 {
958 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
959 	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
960 	struct phm_ppt_v1_information *table_info =
961 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
962 	uint32_t i;
963 
964 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
965 	struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
966 
967 	if (table_info == NULL)
968 		return;
969 
970 	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
971 		if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
972 					data->dpm_table.sclk_table.dpm_levels[i].value) {
973 			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
974 			break;
975 		}
976 	}
977 
978 	for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
979 		if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
980 					data->dpm_table.mclk_table.dpm_levels[i].value) {
981 			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
982 			break;
983 		}
984 	}
985 
986 	dep_table = table_info->vdd_dep_on_mclk;
987 	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);
988 
989 	for (i = 0; i < dep_table->count; i++) {
990 		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
991 			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
992 			return;
993 		}
994 	}
995 
996 	dep_table = table_info->vdd_dep_on_sclk;
997 	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
998 	for (i = 0; i < dep_table->count; i++) {
999 		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
1000 			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
1001 			return;
1002 		}
1003 	}
1004 	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
1005 		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
1006 		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
1007 	}
1008 }
1009 
1010 static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1011 {
1012 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1013 
1014 	smu7_reset_dpm_tables(hwmgr);
1015 
1016 	if (hwmgr->pp_table_version == PP_TABLE_V1)
1017 		smu7_setup_dpm_tables_v1(hwmgr);
1018 	else if (hwmgr->pp_table_version == PP_TABLE_V0)
1019 		smu7_setup_dpm_tables_v0(hwmgr);
1020 
1021 	smu7_setup_default_pcie_table(hwmgr);
1022 
1023 	/* save a copy of the default DPM table */
1024 	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
1025 			sizeof(struct smu7_dpm_table));
1026 
1027 	/* initialize ODN table */
1028 	if (hwmgr->od_enabled) {
1029 		if (data->odn_dpm_table.max_vddc) {
1030 			smu7_check_dpm_table_updated(hwmgr);
1031 		} else {
1032 			smu7_setup_voltage_range_from_vbios(hwmgr);
1033 			smu7_odn_initial_default_setting(hwmgr);
1034 		}
1035 	}
1036 	return 0;
1037 }
1038 
1039 static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
1040 {
1041 
1042 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1043 			PHM_PlatformCaps_RegulatorHot))
1044 		return smum_send_msg_to_smc(hwmgr,
1045 				PPSMC_MSG_EnableVRHotGPIOInterrupt,
1046 				NULL);
1047 
1048 	return 0;
1049 }
1050 
1051 static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
1052 {
1053 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
1054 			SCLK_PWRMGT_OFF, 0);
1055 	return 0;
1056 }
1057 
1058 static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
1059 {
1060 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1061 
1062 	if (data->ulv_supported)
1063 		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV, NULL);
1064 
1065 	return 0;
1066 }
1067 
1068 static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
1069 {
1070 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1071 
1072 	if (data->ulv_supported)
1073 		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV, NULL);
1074 
1075 	return 0;
1076 }
1077 
1078 static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
1079 {
1080 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1081 			PHM_PlatformCaps_SclkDeepSleep)) {
1082 		if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON, NULL))
1083 			PP_ASSERT_WITH_CODE(false,
1084 					"Attempt to enable Master Deep Sleep switch failed!",
1085 					return -EINVAL);
1086 	} else {
1087 		if (smum_send_msg_to_smc(hwmgr,
1088 				PPSMC_MSG_MASTER_DeepSleep_OFF,
1089 				NULL)) {
1090 			PP_ASSERT_WITH_CODE(false,
1091 					"Attempt to disable Master Deep Sleep switch failed!",
1092 					return -EINVAL);
1093 		}
1094 	}
1095 
1096 	return 0;
1097 }
1098 
1099 static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
1100 {
1101 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1102 			PHM_PlatformCaps_SclkDeepSleep)) {
1103 		if (smum_send_msg_to_smc(hwmgr,
1104 				PPSMC_MSG_MASTER_DeepSleep_OFF,
1105 				NULL)) {
1106 			PP_ASSERT_WITH_CODE(false,
1107 					"Attempt to disable Master Deep Sleep switch failed!",
1108 					return -EINVAL);
1109 		}
1110 	}
1111 
1112 	return 0;
1113 }
1114 
1115 static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr)
1116 {
1117 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1118 	uint32_t soft_register_value = 0;
1119 	uint32_t handshake_disables_offset = data->soft_regs_start
1120 				+ smum_get_offsetof(hwmgr,
1121 					SMU_SoftRegisters, HandshakeDisables);
1122 
1123 	soft_register_value = cgs_read_ind_register(hwmgr->device,
1124 				CGS_IND_REG__SMC, handshake_disables_offset);
1125 	soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE;
1126 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1127 			handshake_disables_offset, soft_register_value);
1128 	return 0;
1129 }
1130 
1131 static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
1132 {
1133 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1134 	uint32_t soft_register_value = 0;
1135 	uint32_t handshake_disables_offset = data->soft_regs_start
1136 				+ smum_get_offsetof(hwmgr,
1137 					SMU_SoftRegisters, HandshakeDisables);
1138 
1139 	soft_register_value = cgs_read_ind_register(hwmgr->device,
1140 				CGS_IND_REG__SMC, handshake_disables_offset);
1141 	soft_register_value |= smum_get_mac_definition(hwmgr,
1142 					SMU_UVD_MCLK_HANDSHAKE_DISABLE);
1143 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1144 			handshake_disables_offset, soft_register_value);
1145 	return 0;
1146 }
1147 
1148 static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1149 {
1150 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1151 
1152 	/* enable SCLK dpm */
1153 	if (!data->sclk_dpm_key_disabled) {
1154 		if (hwmgr->chip_id >= CHIP_POLARIS10 &&
1155 		    hwmgr->chip_id <= CHIP_VEGAM)
1156 			smu7_disable_sclk_vce_handshake(hwmgr);
1157 
1158 		PP_ASSERT_WITH_CODE(
1159 		(0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable, NULL)),
1160 		"Failed to enable SCLK DPM during DPM Start Function!",
1161 		return -EINVAL);
1162 	}
1163 
1164 	/* enable MCLK dpm */
1165 	if (0 == data->mclk_dpm_key_disabled) {
1166 		if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
1167 			smu7_disable_handshake_uvd(hwmgr);
1168 
1169 		PP_ASSERT_WITH_CODE(
1170 				(0 == smum_send_msg_to_smc(hwmgr,
1171 						PPSMC_MSG_MCLKDPM_Enable,
1172 						NULL)),
1173 				"Failed to enable MCLK DPM during DPM Start Function!",
1174 				return -EINVAL);
1175 
1176 		if ((hwmgr->chip_family == AMDGPU_FAMILY_CI) ||
1177 		    (hwmgr->chip_id == CHIP_POLARIS10) ||
1178 		    (hwmgr->chip_id == CHIP_POLARIS11) ||
1179 		    (hwmgr->chip_id == CHIP_POLARIS12) ||
1180 		    (hwmgr->chip_id == CHIP_TONGA))
1181 			PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
1182 
1183 
1184 		if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
1185 			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5);
1186 			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5);
1187 			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005);
1188 			udelay(10);
1189 			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005);
1190 			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005);
1191 			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005);
1192 		} else {
1193 			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
1194 			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
1195 			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
1196 			udelay(10);
1197 			if (hwmgr->chip_id == CHIP_VEGAM) {
1198 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009);
1199 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009);
1200 			} else {
1201 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
1202 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
1203 			}
1204 			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
1205 		}
1206 	}
1207 
1208 	return 0;
1209 }
1210 
1211 static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
1212 {
1213 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1214 
1215 	/*enable general power management */
1216 
1217 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1218 			GLOBAL_PWRMGT_EN, 1);
1219 
1220 	/* enable sclk deep sleep */
1221 
1222 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
1223 			DYNAMIC_PM_EN, 1);
1224 
1225 	/* prepare for PCIE DPM */
1226 
1227 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1228 			data->soft_regs_start +
1229 			smum_get_offsetof(hwmgr, SMU_SoftRegisters,
1230 						VoltageChangeTimeout), 0x1000);
1231 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
1232 			SWRST_COMMAND_1, RESETLC, 0x0);
1233 
1234 	if (hwmgr->chip_family == AMDGPU_FAMILY_CI)
1235 		cgs_write_register(hwmgr->device, 0x1488,
1236 			(cgs_read_register(hwmgr->device, 0x1488) & ~0x1));
1237 
1238 	if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
1239 		pr_err("Failed to enable Sclk DPM and Mclk DPM!");
1240 		return -EINVAL;
1241 	}
1242 
1243 	/* enable PCIE dpm */
1244 	if (0 == data->pcie_dpm_key_disabled) {
1245 		PP_ASSERT_WITH_CODE(
1246 				(0 == smum_send_msg_to_smc(hwmgr,
1247 						PPSMC_MSG_PCIeDPM_Enable,
1248 						NULL)),
1249 				"Failed to enable pcie DPM during DPM Start Function!",
1250 				return -EINVAL);
1251 	}
1252 
1253 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1254 				PHM_PlatformCaps_Falcon_QuickTransition)) {
1255 		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
1256 				PPSMC_MSG_EnableACDCGPIOInterrupt,
1257 				NULL)),
1258 				"Failed to enable AC DC GPIO Interrupt!",
1259 				);
1260 	}
1261 
1262 	return 0;
1263 }
1264 
1265 static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1266 {
1267 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1268 
1269 	/* disable SCLK dpm */
1270 	if (!data->sclk_dpm_key_disabled) {
1271 		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1272 				"Trying to disable SCLK DPM when DPM is disabled",
1273 				return 0);
1274 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable, NULL);
1275 	}
1276 
1277 	/* disable MCLK dpm */
1278 	if (!data->mclk_dpm_key_disabled) {
1279 		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1280 				"Trying to disable MCLK DPM when DPM is disabled",
1281 				return 0);
1282 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable, NULL);
1283 	}
1284 
1285 	return 0;
1286 }
1287 
1288 static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
1289 {
1290 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1291 
1292 	/* disable general power management */
1293 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1294 			GLOBAL_PWRMGT_EN, 0);
1295 	/* disable sclk deep sleep */
1296 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
1297 			DYNAMIC_PM_EN, 0);
1298 
1299 	/* disable PCIE dpm */
1300 	if (!data->pcie_dpm_key_disabled) {
1301 		PP_ASSERT_WITH_CODE(
1302 				(smum_send_msg_to_smc(hwmgr,
1303 						PPSMC_MSG_PCIeDPM_Disable,
1304 						NULL) == 0),
1305 				"Failed to disable pcie DPM during DPM Stop Function!",
1306 				return -EINVAL);
1307 	}
1308 
1309 	smu7_disable_sclk_mclk_dpm(hwmgr);
1310 
1311 	PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1312 			"Trying to disable voltage DPM when DPM is disabled",
1313 			return 0);
1314 
1315 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable, NULL);
1316 
1317 	return 0;
1318 }
1319 
1320 static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
1321 {
1322 	bool protection;
1323 	enum DPM_EVENT_SRC src;
1324 
1325 	switch (sources) {
1326 	default:
1327 		pr_err("Unknown throttling event sources.");
1328 		fallthrough;
1329 	case 0:
1330 		protection = false;
1331 		/* src is unused */
1332 		break;
1333 	case (1 << PHM_AutoThrottleSource_Thermal):
1334 		protection = true;
1335 		src = DPM_EVENT_SRC_DIGITAL;
1336 		break;
1337 	case (1 << PHM_AutoThrottleSource_External):
1338 		protection = true;
1339 		src = DPM_EVENT_SRC_EXTERNAL;
1340 		break;
1341 	case (1 << PHM_AutoThrottleSource_External) |
1342 			(1 << PHM_AutoThrottleSource_Thermal):
1343 		protection = true;
1344 		src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
1345 		break;
1346 	}
1347 	/* Order matters - don't enable thermal protection for the wrong source. */
1348 	if (protection) {
1349 		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
1350 				DPM_EVENT_SRC, src);
1351 		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1352 				THERMAL_PROTECTION_DIS,
1353 				!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1354 						PHM_PlatformCaps_ThermalController));
1355 	} else
1356 		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1357 				THERMAL_PROTECTION_DIS, 1);
1358 }
1359 
1360 static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1361 		PHM_AutoThrottleSource source)
1362 {
1363 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1364 
1365 	if (!(data->active_auto_throttle_sources & (1 << source))) {
1366 		data->active_auto_throttle_sources |= 1 << source;
1367 		smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1368 	}
1369 	return 0;
1370 }
1371 
1372 static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
1373 {
1374 	return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
1375 }
1376 
1377 static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1378 		PHM_AutoThrottleSource source)
1379 {
1380 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1381 
1382 	if (data->active_auto_throttle_sources & (1 << source)) {
1383 		data->active_auto_throttle_sources &= ~(1 << source);
1384 		smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1385 	}
1386 	return 0;
1387 }
1388 
1389 static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
1390 {
1391 	return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
1392 }
1393 
1394 static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
1395 {
1396 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1397 	data->pcie_performance_request = true;
1398 
1399 	return 0;
1400 }
1401 
1402 static int smu7_program_edc_didt_registers(struct pp_hwmgr *hwmgr,
1403 					   uint32_t *cac_config_regs,
1404 					   AtomCtrl_EDCLeakgeTable *edc_leakage_table)
1405 {
1406 	uint32_t data, i = 0;
1407 
1408 	while (cac_config_regs[i] != 0xFFFFFFFF) {
1409 		data = edc_leakage_table->DIDT_REG[i];
1410 		cgs_write_ind_register(hwmgr->device,
1411 				       CGS_IND_REG__DIDT,
1412 				       cac_config_regs[i],
1413 				       data);
1414 		i++;
1415 	}
1416 
1417 	return 0;
1418 }
1419 
1420 static int smu7_populate_edc_leakage_registers(struct pp_hwmgr *hwmgr)
1421 {
1422 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1423 	int ret = 0;
1424 
1425 	if (!data->disable_edc_leakage_controller &&
1426 	    data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset &&
1427 	    data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset) {
1428 		ret = smu7_program_edc_didt_registers(hwmgr,
1429 						      DIDTEDCConfig_P12,
1430 						      &data->edc_leakage_table);
1431 		if (ret)
1432 			return ret;
1433 
1434 		ret = smum_send_msg_to_smc(hwmgr,
1435 					   (PPSMC_Msg)PPSMC_MSG_EnableEDCController,
1436 					   NULL);
1437 	} else {
1438 		ret = smum_send_msg_to_smc(hwmgr,
1439 					   (PPSMC_Msg)PPSMC_MSG_DisableEDCController,
1440 					   NULL);
1441 	}
1442 
1443 	return ret;
1444 }
1445 
1446 static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1447 {
1448 	int tmp_result = 0;
1449 	int result = 0;
1450 
1451 	if (smu7_voltage_control(hwmgr)) {
1452 		tmp_result = smu7_enable_voltage_control(hwmgr);
1453 		PP_ASSERT_WITH_CODE(tmp_result == 0,
1454 				"Failed to enable voltage control!",
1455 				result = tmp_result);
1456 
1457 		tmp_result = smu7_construct_voltage_tables(hwmgr);
1458 		PP_ASSERT_WITH_CODE((0 == tmp_result),
1459 				"Failed to construct voltage tables!",
1460 				result = tmp_result);
1461 	}
1462 	smum_initialize_mc_reg_table(hwmgr);
1463 
1464 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1465 			PHM_PlatformCaps_EngineSpreadSpectrumSupport))
1466 		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1467 				GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
1468 
1469 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1470 			PHM_PlatformCaps_ThermalController))
1471 		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1472 				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
1473 
1474 	tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
1475 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1476 			"Failed to program static screen threshold parameters!",
1477 			result = tmp_result);
1478 
1479 	tmp_result = smu7_enable_display_gap(hwmgr);
1480 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1481 			"Failed to enable display gap!", result = tmp_result);
1482 
1483 	tmp_result = smu7_program_voting_clients(hwmgr);
1484 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1485 			"Failed to program voting clients!", result = tmp_result);
1486 
1487 	tmp_result = smum_process_firmware_header(hwmgr);
1488 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1489 			"Failed to process firmware header!", result = tmp_result);
1490 
1491 	if (hwmgr->chip_id != CHIP_VEGAM) {
1492 		tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
1493 		PP_ASSERT_WITH_CODE((0 == tmp_result),
1494 				"Failed to initialize switch from ArbF0 to F1!",
1495 				result = tmp_result);
1496 	}
1497 
1498 	result = smu7_setup_default_dpm_tables(hwmgr);
1499 	PP_ASSERT_WITH_CODE(0 == result,
1500 			"Failed to setup default DPM tables!", return result);
1501 
1502 	tmp_result = smum_init_smc_table(hwmgr);
1503 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1504 			"Failed to initialize SMC table!", result = tmp_result);
1505 
1506 	tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
1507 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1508 			"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
1509 
1510 	if (hwmgr->chip_id >= CHIP_POLARIS10 &&
1511 	    hwmgr->chip_id <= CHIP_VEGAM) {
1512 		tmp_result = smu7_notify_has_display(hwmgr);
1513 		PP_ASSERT_WITH_CODE((0 == tmp_result),
1514 				"Failed to enable display setting!", result = tmp_result);
1515 	} else {
1516 		smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL);
1517 	}
1518 
1519 	if (hwmgr->chip_id >= CHIP_POLARIS10 &&
1520 	    hwmgr->chip_id <= CHIP_VEGAM) {
1521 		tmp_result = smu7_populate_edc_leakage_registers(hwmgr);
1522 		PP_ASSERT_WITH_CODE((0 == tmp_result),
1523 				"Failed to populate edc leakage registers!", result = tmp_result);
1524 	}
1525 
1526 	tmp_result = smu7_enable_sclk_control(hwmgr);
1527 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1528 			"Failed to enable SCLK control!", result = tmp_result);
1529 
1530 	tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
1531 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1532 			"Failed to enable voltage control!", result = tmp_result);
1533 
1534 	tmp_result = smu7_enable_ulv(hwmgr);
1535 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1536 			"Failed to enable ULV!", result = tmp_result);
1537 
1538 	tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
1539 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1540 			"Failed to enable deep sleep master switch!", result = tmp_result);
1541 
1542 	tmp_result = smu7_enable_didt_config(hwmgr);
1543 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1544 			"Failed to enable deep sleep master switch!", result = tmp_result);
1545 
1546 	tmp_result = smu7_start_dpm(hwmgr);
1547 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1548 			"Failed to start DPM!", result = tmp_result);
1549 
1550 	tmp_result = smu7_enable_smc_cac(hwmgr);
1551 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1552 			"Failed to enable SMC CAC!", result = tmp_result);
1553 
1554 	tmp_result = smu7_enable_power_containment(hwmgr);
1555 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1556 			"Failed to enable power containment!", result = tmp_result);
1557 
1558 	tmp_result = smu7_power_control_set_level(hwmgr);
1559 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1560 			"Failed to power control set level!", result = tmp_result);
1561 
1562 	tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
1563 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1564 			"Failed to enable thermal auto throttle!", result = tmp_result);
1565 
1566 	tmp_result = smu7_pcie_performance_request(hwmgr);
1567 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1568 			"pcie performance request failed!", result = tmp_result);
1569 
1570 	return 0;
1571 }
1572 
1573 static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
1574 {
1575 	if (!hwmgr->avfs_supported)
1576 		return 0;
1577 
1578 	if (enable) {
1579 		if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
1580 				CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1581 			PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
1582 					hwmgr, PPSMC_MSG_EnableAvfs, NULL),
1583 					"Failed to enable AVFS!",
1584 					return -EINVAL);
1585 		}
1586 	} else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
1587 			CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1588 		PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
1589 				hwmgr, PPSMC_MSG_DisableAvfs, NULL),
1590 				"Failed to disable AVFS!",
1591 				return -EINVAL);
1592 	}
1593 
1594 	return 0;
1595 }
1596 
1597 static int smu7_update_avfs(struct pp_hwmgr *hwmgr)
1598 {
1599 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1600 
1601 	if (!hwmgr->avfs_supported)
1602 		return 0;
1603 
1604 	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
1605 		smu7_avfs_control(hwmgr, false);
1606 	} else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
1607 		smu7_avfs_control(hwmgr, false);
1608 		smu7_avfs_control(hwmgr, true);
1609 	} else {
1610 		smu7_avfs_control(hwmgr, true);
1611 	}
1612 
1613 	return 0;
1614 }
1615 
1616 static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
1617 {
1618 	int tmp_result, result = 0;
1619 
1620 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1621 			PHM_PlatformCaps_ThermalController))
1622 		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1623 				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
1624 
1625 	tmp_result = smu7_disable_power_containment(hwmgr);
1626 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1627 			"Failed to disable power containment!", result = tmp_result);
1628 
1629 	tmp_result = smu7_disable_smc_cac(hwmgr);
1630 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1631 			"Failed to disable SMC CAC!", result = tmp_result);
1632 
1633 	tmp_result = smu7_disable_didt_config(hwmgr);
1634 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1635 			"Failed to disable DIDT!", result = tmp_result);
1636 
1637 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1638 			CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
1639 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1640 			GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
1641 
1642 	tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
1643 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1644 			"Failed to disable thermal auto throttle!", result = tmp_result);
1645 
1646 	tmp_result = smu7_avfs_control(hwmgr, false);
1647 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1648 			"Failed to disable AVFS!", result = tmp_result);
1649 
1650 	tmp_result = smu7_stop_dpm(hwmgr);
1651 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1652 			"Failed to stop DPM!", result = tmp_result);
1653 
1654 	tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
1655 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1656 			"Failed to disable deep sleep master switch!", result = tmp_result);
1657 
1658 	tmp_result = smu7_disable_ulv(hwmgr);
1659 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1660 			"Failed to disable ULV!", result = tmp_result);
1661 
1662 	tmp_result = smu7_clear_voting_clients(hwmgr);
1663 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1664 			"Failed to clear voting clients!", result = tmp_result);
1665 
1666 	tmp_result = smu7_reset_to_default(hwmgr);
1667 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1668 			"Failed to reset to default!", result = tmp_result);
1669 
1670 	tmp_result = smum_stop_smc(hwmgr);
1671 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1672 			"Failed to stop smc!", result = tmp_result);
1673 
1674 	tmp_result = smu7_force_switch_to_arbf0(hwmgr);
1675 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1676 			"Failed to force to switch arbf0!", result = tmp_result);
1677 
1678 	return result;
1679 }
1680 
1681 static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
1682 {
1683 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1684 	struct phm_ppt_v1_information *table_info =
1685 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
1686 	struct amdgpu_device *adev = hwmgr->adev;
1687 	uint8_t tmp1, tmp2;
1688 	uint16_t tmp3 = 0;
1689 
1690 	data->dll_default_on = false;
1691 	data->mclk_dpm0_activity_target = 0xa;
1692 	data->vddc_vddgfx_delta = 300;
1693 	data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
1694 	data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
1695 	data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
1696 	data->voting_rights_clients[1]= SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
1697 	data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
1698 	data->voting_rights_clients[3]= SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
1699 	data->voting_rights_clients[4]= SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
1700 	data->voting_rights_clients[5]= SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
1701 	data->voting_rights_clients[6]= SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
1702 	data->voting_rights_clients[7]= SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
1703 
1704 	data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
1705 	data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
1706 	data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
1707 	/* need to set voltage control types before EVV patching */
1708 	data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
1709 	data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
1710 	data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
1711 	data->enable_tdc_limit_feature = true;
1712 	data->enable_pkg_pwr_tracking_feature = true;
1713 	data->force_pcie_gen = PP_PCIEGenInvalid;
1714 	data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
1715 	data->current_profile_setting.bupdate_sclk = 1;
1716 	data->current_profile_setting.sclk_up_hyst = 0;
1717 	data->current_profile_setting.sclk_down_hyst = 100;
1718 	data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
1719 	data->current_profile_setting.bupdate_mclk = 1;
1720 	if (hwmgr->chip_id >= CHIP_POLARIS10) {
1721 		if (adev->gmc.vram_width == 256) {
1722 			data->current_profile_setting.mclk_up_hyst = 10;
1723 			data->current_profile_setting.mclk_down_hyst = 60;
1724 			data->current_profile_setting.mclk_activity = 25;
1725 		} else if (adev->gmc.vram_width == 128) {
1726 			data->current_profile_setting.mclk_up_hyst = 5;
1727 			data->current_profile_setting.mclk_down_hyst = 16;
1728 			data->current_profile_setting.mclk_activity = 20;
1729 		} else if (adev->gmc.vram_width == 64) {
1730 			data->current_profile_setting.mclk_up_hyst = 3;
1731 			data->current_profile_setting.mclk_down_hyst = 16;
1732 			data->current_profile_setting.mclk_activity = 20;
1733 		}
1734 	} else {
1735 		data->current_profile_setting.mclk_up_hyst = 0;
1736 		data->current_profile_setting.mclk_down_hyst = 100;
1737 		data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
1738 	}
1739 	hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
1740 	hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1741 	hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1742 
1743 	if (hwmgr->chip_id  == CHIP_HAWAII) {
1744 		data->thermal_temp_setting.temperature_low = 94500;
1745 		data->thermal_temp_setting.temperature_high = 95000;
1746 		data->thermal_temp_setting.temperature_shutdown = 104000;
1747 	} else {
1748 		data->thermal_temp_setting.temperature_low = 99500;
1749 		data->thermal_temp_setting.temperature_high = 100000;
1750 		data->thermal_temp_setting.temperature_shutdown = 104000;
1751 	}
1752 
1753 	data->fast_watermark_threshold = 100;
1754 	if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1755 			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
1756 		data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1757 	else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1758 			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
1759 		data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1760 
1761 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1762 			PHM_PlatformCaps_ControlVDDGFX)) {
1763 		if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1764 			VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
1765 			data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1766 		}
1767 	}
1768 
1769 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1770 			PHM_PlatformCaps_EnableMVDDControl)) {
1771 		if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1772 				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
1773 			data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1774 		else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1775 				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
1776 			data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1777 	}
1778 
1779 	if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control)
1780 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1781 			PHM_PlatformCaps_ControlVDDGFX);
1782 
1783 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1784 			PHM_PlatformCaps_ControlVDDCI)) {
1785 		if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1786 				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
1787 			data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1788 		else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1789 				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
1790 			data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1791 	}
1792 
1793 	if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
1794 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1795 				PHM_PlatformCaps_EnableMVDDControl);
1796 
1797 	if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
1798 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1799 				PHM_PlatformCaps_ControlVDDCI);
1800 
1801 	data->vddc_phase_shed_control = 1;
1802 	if ((hwmgr->chip_id == CHIP_POLARIS12) ||
1803 	    ASICID_IS_P20(adev->pdev->device, adev->pdev->revision) ||
1804 	    ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
1805 	    ASICID_IS_P30(adev->pdev->device, adev->pdev->revision) ||
1806 	    ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
1807 		if (data->voltage_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1808 			atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2,
1809 							&tmp3);
1810 			tmp3 = (tmp3 >> 5) & 0x3;
1811 			data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3;
1812 		}
1813 	} else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
1814 		data->vddc_phase_shed_control = 1;
1815 	}
1816 
1817 	if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK)
1818 		&& (table_info->cac_dtp_table->usClockStretchAmount != 0))
1819 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1820 					PHM_PlatformCaps_ClockStretcher);
1821 
1822 	data->pcie_gen_performance.max = PP_PCIEGen1;
1823 	data->pcie_gen_performance.min = PP_PCIEGen3;
1824 	data->pcie_gen_power_saving.max = PP_PCIEGen1;
1825 	data->pcie_gen_power_saving.min = PP_PCIEGen3;
1826 	data->pcie_lane_performance.max = 0;
1827 	data->pcie_lane_performance.min = 16;
1828 	data->pcie_lane_power_saving.max = 0;
1829 	data->pcie_lane_power_saving.min = 16;
1830 
1831 
1832 	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
1833 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1834 			      PHM_PlatformCaps_UVDPowerGating);
1835 	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
1836 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1837 			      PHM_PlatformCaps_VCEPowerGating);
1838 
1839 	data->disable_edc_leakage_controller = true;
1840 	if (((adev->asic_type == CHIP_POLARIS10) && hwmgr->is_kicker) ||
1841 	    ((adev->asic_type == CHIP_POLARIS11) && hwmgr->is_kicker) ||
1842 	    (adev->asic_type == CHIP_POLARIS12) ||
1843 	    (adev->asic_type == CHIP_VEGAM))
1844 		data->disable_edc_leakage_controller = false;
1845 
1846 	if (!atomctrl_is_asic_internal_ss_supported(hwmgr)) {
1847 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1848 			PHM_PlatformCaps_MemorySpreadSpectrumSupport);
1849 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1850 			PHM_PlatformCaps_EngineSpreadSpectrumSupport);
1851 	}
1852 
1853 	if ((adev->pdev->device == 0x699F) &&
1854 	    (adev->pdev->revision == 0xCF)) {
1855 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1856 				PHM_PlatformCaps_PowerContainment);
1857 		data->enable_tdc_limit_feature = false;
1858 		data->enable_pkg_pwr_tracking_feature = false;
1859 		data->disable_edc_leakage_controller = true;
1860 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1861 					PHM_PlatformCaps_ClockStretcher);
1862 	}
1863 }
1864 
1865 static int smu7_calculate_ro_range(struct pp_hwmgr *hwmgr)
1866 {
1867 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1868 	struct amdgpu_device *adev = hwmgr->adev;
1869 	uint32_t asicrev1, evv_revision, max = 0, min = 0;
1870 
1871 	atomctrl_read_efuse(hwmgr, STRAP_EVV_REVISION_LSB, STRAP_EVV_REVISION_MSB,
1872 			&evv_revision);
1873 
1874 	atomctrl_read_efuse(hwmgr, 568, 579, &asicrev1);
1875 
1876 	if (ASICID_IS_P20(adev->pdev->device, adev->pdev->revision) ||
1877 	    ASICID_IS_P30(adev->pdev->device, adev->pdev->revision)) {
1878 		min = 1200;
1879 		max = 2500;
1880 	} else if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
1881 		   ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
1882 		min = 900;
1883 		max= 2100;
1884 	} else if (hwmgr->chip_id == CHIP_POLARIS10) {
1885 		if (adev->pdev->subsystem_vendor == 0x106B) {
1886 			min = 1000;
1887 			max = 2300;
1888 		} else {
1889 			if (evv_revision == 0) {
1890 				min = 1000;
1891 				max = 2300;
1892 			} else if (evv_revision == 1) {
1893 				if (asicrev1 == 326) {
1894 					min = 1200;
1895 					max = 2500;
1896 					/* TODO: PATCH RO in VBIOS */
1897 				} else {
1898 					min = 1200;
1899 					max = 2000;
1900 				}
1901 			} else if (evv_revision == 2) {
1902 				min = 1200;
1903 				max = 2500;
1904 			}
1905 		}
1906 	} else {
1907 		min = 1100;
1908 		max = 2100;
1909 	}
1910 
1911 	data->ro_range_minimum = min;
1912 	data->ro_range_maximum = max;
1913 
1914 	/* TODO: PATCH RO in VBIOS here */
1915 
1916 	return 0;
1917 }
1918 
1919 /**
1920  * smu7_get_evv_voltages - Get Leakage VDDC based on leakage ID.
1921  *
1922  * @hwmgr:  the address of the powerplay hardware manager.
1923  * Return:   always 0
1924  */
1925 static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
1926 {
1927 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1928 	uint16_t vv_id;
1929 	uint16_t vddc = 0;
1930 	uint16_t vddgfx = 0;
1931 	uint16_t i, j;
1932 	uint32_t sclk = 0;
1933 	struct phm_ppt_v1_information *table_info =
1934 			(struct phm_ppt_v1_information *)hwmgr->pptable;
1935 	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
1936 
1937 	if (hwmgr->chip_id == CHIP_POLARIS10 ||
1938 	    hwmgr->chip_id == CHIP_POLARIS11 ||
1939 	    hwmgr->chip_id == CHIP_POLARIS12)
1940 		smu7_calculate_ro_range(hwmgr);
1941 
1942 	for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
1943 		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1944 
1945 		if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1946 			if ((hwmgr->pp_table_version == PP_TABLE_V1)
1947 			    && !phm_get_sclk_for_voltage_evv(hwmgr,
1948 						table_info->vddgfx_lookup_table, vv_id, &sclk)) {
1949 				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1950 							PHM_PlatformCaps_ClockStretcher)) {
1951 					sclk_table = table_info->vdd_dep_on_sclk;
1952 
1953 					for (j = 1; j < sclk_table->count; j++) {
1954 						if (sclk_table->entries[j].clk == sclk &&
1955 								sclk_table->entries[j].cks_enable == 0) {
1956 							sclk += 5000;
1957 							break;
1958 						}
1959 					}
1960 				}
1961 				if (0 == atomctrl_get_voltage_evv_on_sclk
1962 				    (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
1963 				     vv_id, &vddgfx)) {
1964 					/* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */
1965 					PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);
1966 
1967 					/* the voltage should not be zero nor equal to leakage ID */
1968 					if (vddgfx != 0 && vddgfx != vv_id) {
1969 						data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
1970 						data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
1971 						data->vddcgfx_leakage.count++;
1972 					}
1973 				} else {
1974 					pr_info("Error retrieving EVV voltage value!\n");
1975 				}
1976 			}
1977 		} else {
1978 			if ((hwmgr->pp_table_version == PP_TABLE_V0)
1979 				|| !phm_get_sclk_for_voltage_evv(hwmgr,
1980 					table_info->vddc_lookup_table, vv_id, &sclk)) {
1981 				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1982 						PHM_PlatformCaps_ClockStretcher)) {
1983 					if (table_info == NULL)
1984 						return -EINVAL;
1985 					sclk_table = table_info->vdd_dep_on_sclk;
1986 
1987 					for (j = 1; j < sclk_table->count; j++) {
1988 						if (sclk_table->entries[j].clk == sclk &&
1989 								sclk_table->entries[j].cks_enable == 0) {
1990 							sclk += 5000;
1991 							break;
1992 						}
1993 					}
1994 				}
1995 
1996 				if (phm_get_voltage_evv_on_sclk(hwmgr,
1997 							VOLTAGE_TYPE_VDDC,
1998 							sclk, vv_id, &vddc) == 0) {
1999 					if (vddc >= 2000 || vddc == 0)
2000 						return -EINVAL;
2001 				} else {
2002 					pr_debug("failed to retrieving EVV voltage!\n");
2003 					continue;
2004 				}
2005 
2006 				/* the voltage should not be zero nor equal to leakage ID */
2007 				if (vddc != 0 && vddc != vv_id) {
2008 					data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
2009 					data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
2010 					data->vddc_leakage.count++;
2011 				}
2012 			}
2013 		}
2014 	}
2015 
2016 	return 0;
2017 }
2018 
2019 /**
2020  * smu7_patch_ppt_v1_with_vdd_leakage - Change virtual leakage voltage to actual value.
2021  *
2022  * @hwmgr:  the address of the powerplay hardware manager.
2023  * @voltage: pointer to changing voltage
2024  * @leakage_table: pointer to leakage table
2025  */
2026 static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2027 		uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
2028 {
2029 	uint32_t index;
2030 
2031 	/* search for leakage voltage ID 0xff01 ~ 0xff08 */
2032 	for (index = 0; index < leakage_table->count; index++) {
2033 		/* if this voltage matches a leakage voltage ID */
2034 		/* patch with actual leakage voltage */
2035 		if (leakage_table->leakage_id[index] == *voltage) {
2036 			*voltage = leakage_table->actual_voltage[index];
2037 			break;
2038 		}
2039 	}
2040 
2041 	if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2042 		pr_err("Voltage value looks like a Leakage ID but it's not patched \n");
2043 }
2044 
2045 /**
2046  * smu7_patch_lookup_table_with_leakage - Patch voltage lookup table by EVV leakages.
2047  *
2048  * @hwmgr:  the address of the powerplay hardware manager.
2049  * @lookup_table: pointer to voltage lookup table
2050  * @leakage_table: pointer to leakage table
2051  * Return:     always 0
2052  */
2053 static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
2054 		phm_ppt_v1_voltage_lookup_table *lookup_table,
2055 		struct smu7_leakage_voltage *leakage_table)
2056 {
2057 	uint32_t i;
2058 
2059 	for (i = 0; i < lookup_table->count; i++)
2060 		smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
2061 				&lookup_table->entries[i].us_vdd, leakage_table);
2062 
2063 	return 0;
2064 }
2065 
2066 static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
2067 		struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
2068 		uint16_t *vddc)
2069 {
2070 	struct phm_ppt_v1_information *table_info =
2071 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
2072 	smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
2073 	hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
2074 			table_info->max_clock_voltage_on_dc.vddc;
2075 	return 0;
2076 }
2077 
2078 static int smu7_patch_voltage_dependency_tables_with_lookup_table(
2079 		struct pp_hwmgr *hwmgr)
2080 {
2081 	uint8_t entry_id;
2082 	uint8_t voltage_id;
2083 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2084 	struct phm_ppt_v1_information *table_info =
2085 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
2086 
2087 	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
2088 			table_info->vdd_dep_on_sclk;
2089 	struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
2090 			table_info->vdd_dep_on_mclk;
2091 	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
2092 			table_info->mm_dep_table;
2093 
2094 	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2095 		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
2096 			voltage_id = sclk_table->entries[entry_id].vddInd;
2097 			sclk_table->entries[entry_id].vddgfx =
2098 				table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
2099 		}
2100 	} else {
2101 		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
2102 			voltage_id = sclk_table->entries[entry_id].vddInd;
2103 			sclk_table->entries[entry_id].vddc =
2104 				table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
2105 		}
2106 	}
2107 
2108 	for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
2109 		voltage_id = mclk_table->entries[entry_id].vddInd;
2110 		mclk_table->entries[entry_id].vddc =
2111 			table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
2112 	}
2113 
2114 	for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
2115 		voltage_id = mm_table->entries[entry_id].vddcInd;
2116 		mm_table->entries[entry_id].vddc =
2117 			table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
2118 	}
2119 
2120 	return 0;
2121 
2122 }
2123 
2124 static int phm_add_voltage(struct pp_hwmgr *hwmgr,
2125 			phm_ppt_v1_voltage_lookup_table *look_up_table,
2126 			phm_ppt_v1_voltage_lookup_record *record)
2127 {
2128 	uint32_t i;
2129 
2130 	PP_ASSERT_WITH_CODE((NULL != look_up_table),
2131 		"Lookup Table empty.", return -EINVAL);
2132 	PP_ASSERT_WITH_CODE((0 != look_up_table->count),
2133 		"Lookup Table empty.", return -EINVAL);
2134 
2135 	i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
2136 	PP_ASSERT_WITH_CODE((i >= look_up_table->count),
2137 		"Lookup Table is full.", return -EINVAL);
2138 
2139 	/* This is to avoid entering duplicate calculated records. */
2140 	for (i = 0; i < look_up_table->count; i++) {
2141 		if (look_up_table->entries[i].us_vdd == record->us_vdd) {
2142 			if (look_up_table->entries[i].us_calculated == 1)
2143 				return 0;
2144 			break;
2145 		}
2146 	}
2147 
2148 	look_up_table->entries[i].us_calculated = 1;
2149 	look_up_table->entries[i].us_vdd = record->us_vdd;
2150 	look_up_table->entries[i].us_cac_low = record->us_cac_low;
2151 	look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
2152 	look_up_table->entries[i].us_cac_high = record->us_cac_high;
2153 	/* Only increment the count when we're appending, not replacing duplicate entry. */
2154 	if (i == look_up_table->count)
2155 		look_up_table->count++;
2156 
2157 	return 0;
2158 }
2159 
2160 
2161 static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
2162 {
2163 	uint8_t entry_id;
2164 	struct phm_ppt_v1_voltage_lookup_record v_record;
2165 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2166 	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2167 
2168 	phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
2169 	phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
2170 
2171 	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2172 		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
2173 			if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
2174 				v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
2175 					sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
2176 			else
2177 				v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
2178 					sclk_table->entries[entry_id].vdd_offset;
2179 
2180 			sclk_table->entries[entry_id].vddc =
2181 				v_record.us_cac_low = v_record.us_cac_mid =
2182 				v_record.us_cac_high = v_record.us_vdd;
2183 
2184 			phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
2185 		}
2186 
2187 		for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
2188 			if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
2189 				v_record.us_vdd = mclk_table->entries[entry_id].vddc +
2190 					mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
2191 			else
2192 				v_record.us_vdd = mclk_table->entries[entry_id].vddc +
2193 					mclk_table->entries[entry_id].vdd_offset;
2194 
2195 			mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
2196 				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
2197 			phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
2198 		}
2199 	}
2200 	return 0;
2201 }
2202 
2203 static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
2204 {
2205 	uint8_t entry_id;
2206 	struct phm_ppt_v1_voltage_lookup_record v_record;
2207 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2208 	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2209 	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
2210 
2211 	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2212 		for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
2213 			if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
2214 				v_record.us_vdd = mm_table->entries[entry_id].vddc +
2215 					mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
2216 			else
2217 				v_record.us_vdd = mm_table->entries[entry_id].vddc +
2218 					mm_table->entries[entry_id].vddgfx_offset;
2219 
2220 			/* Add the calculated VDDGFX to the VDDGFX lookup table */
2221 			mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
2222 				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
2223 			phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
2224 		}
2225 	}
2226 	return 0;
2227 }
2228 
2229 static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
2230 		struct phm_ppt_v1_voltage_lookup_table *lookup_table)
2231 {
2232 	uint32_t table_size, i, j;
2233 	table_size = lookup_table->count;
2234 
2235 	PP_ASSERT_WITH_CODE(0 != lookup_table->count,
2236 		"Lookup table is empty", return -EINVAL);
2237 
2238 	/* Sorting voltages */
2239 	for (i = 0; i < table_size - 1; i++) {
2240 		for (j = i + 1; j > 0; j--) {
2241 			if (lookup_table->entries[j].us_vdd <
2242 					lookup_table->entries[j - 1].us_vdd) {
2243 				swap(lookup_table->entries[j - 1],
2244 				     lookup_table->entries[j]);
2245 			}
2246 		}
2247 	}
2248 
2249 	return 0;
2250 }
2251 
2252 static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
2253 {
2254 	int result = 0;
2255 	int tmp_result;
2256 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2257 	struct phm_ppt_v1_information *table_info =
2258 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
2259 
2260 	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2261 		tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
2262 			table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
2263 		if (tmp_result != 0)
2264 			result = tmp_result;
2265 
2266 		smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
2267 			&table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
2268 	} else {
2269 
2270 		tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
2271 				table_info->vddc_lookup_table, &(data->vddc_leakage));
2272 		if (tmp_result)
2273 			result = tmp_result;
2274 
2275 		tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
2276 				&(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
2277 		if (tmp_result)
2278 			result = tmp_result;
2279 	}
2280 
2281 	tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
2282 	if (tmp_result)
2283 		result = tmp_result;
2284 
2285 	tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
2286 	if (tmp_result)
2287 		result = tmp_result;
2288 
2289 	tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
2290 	if (tmp_result)
2291 		result = tmp_result;
2292 
2293 	tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
2294 	if (tmp_result)
2295 		result = tmp_result;
2296 
2297 	tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
2298 	if (tmp_result)
2299 		result = tmp_result;
2300 
2301 	return result;
2302 }
2303 
2304 static int smu7_find_highest_vddc(struct pp_hwmgr *hwmgr)
2305 {
2306 	struct phm_ppt_v1_information *table_info =
2307 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
2308 	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
2309 						table_info->vdd_dep_on_sclk;
2310 	struct phm_ppt_v1_voltage_lookup_table *lookup_table =
2311 						table_info->vddc_lookup_table;
2312 	uint16_t highest_voltage;
2313 	uint32_t i;
2314 
2315 	highest_voltage = allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
2316 
2317 	for (i = 0; i < lookup_table->count; i++) {
2318 		if (lookup_table->entries[i].us_vdd < ATOM_VIRTUAL_VOLTAGE_ID0 &&
2319 		    lookup_table->entries[i].us_vdd > highest_voltage)
2320 			highest_voltage = lookup_table->entries[i].us_vdd;
2321 	}
2322 
2323 	return highest_voltage;
2324 }
2325 
2326 static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
2327 {
2328 	struct phm_ppt_v1_information *table_info =
2329 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
2330 
2331 	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
2332 						table_info->vdd_dep_on_sclk;
2333 	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
2334 						table_info->vdd_dep_on_mclk;
2335 
2336 	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
2337 		"VDD dependency on SCLK table is missing.",
2338 		return -EINVAL);
2339 	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
2340 		"VDD dependency on SCLK table has to have is missing.",
2341 		return -EINVAL);
2342 
2343 	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
2344 		"VDD dependency on MCLK table is missing",
2345 		return -EINVAL);
2346 	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
2347 		"VDD dependency on MCLK table has to have is missing.",
2348 		return -EINVAL);
2349 
2350 	table_info->max_clock_voltage_on_ac.sclk =
2351 		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
2352 	table_info->max_clock_voltage_on_ac.mclk =
2353 		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
2354 	if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM)
2355 		table_info->max_clock_voltage_on_ac.vddc =
2356 			smu7_find_highest_vddc(hwmgr);
2357 	else
2358 		table_info->max_clock_voltage_on_ac.vddc =
2359 			allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
2360 	table_info->max_clock_voltage_on_ac.vddci =
2361 		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
2362 
2363 	hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
2364 	hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
2365 	hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
2366 	hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
2367 
2368 	return 0;
2369 }
2370 
2371 static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
2372 {
2373 	struct phm_ppt_v1_information *table_info =
2374 		       (struct phm_ppt_v1_information *)(hwmgr->pptable);
2375 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
2376 	struct phm_ppt_v1_voltage_lookup_table *lookup_table;
2377 	uint32_t i;
2378 	uint32_t hw_revision, sub_vendor_id, sub_sys_id;
2379 	struct amdgpu_device *adev = hwmgr->adev;
2380 
2381 	if (table_info != NULL) {
2382 		dep_mclk_table = table_info->vdd_dep_on_mclk;
2383 		lookup_table = table_info->vddc_lookup_table;
2384 	} else
2385 		return 0;
2386 
2387 	hw_revision = adev->pdev->revision;
2388 	sub_sys_id = adev->pdev->subsystem_device;
2389 	sub_vendor_id = adev->pdev->subsystem_vendor;
2390 
2391 	if (adev->pdev->device == 0x67DF && hw_revision == 0xC7 &&
2392 	    ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
2393 	     (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
2394 	     (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
2395 
2396 		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
2397 					      CGS_IND_REG__SMC,
2398 					      PWR_CKS_CNTL,
2399 					      CKS_STRETCH_AMOUNT,
2400 					      0x3);
2401 
2402 		if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
2403 			return 0;
2404 
2405 		for (i = 0; i < lookup_table->count; i++) {
2406 			if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
2407 				dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
2408 				return 0;
2409 			}
2410 		}
2411 	}
2412 	return 0;
2413 }
2414 
2415 static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
2416 {
2417 	struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
2418 	uint32_t temp_reg;
2419 	struct phm_ppt_v1_information *table_info =
2420 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
2421 
2422 
2423 	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
2424 		temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
2425 		switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
2426 		case 0:
2427 			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
2428 			break;
2429 		case 1:
2430 			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
2431 			break;
2432 		case 2:
2433 			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
2434 			break;
2435 		case 3:
2436 			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
2437 			break;
2438 		case 4:
2439 			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
2440 			break;
2441 		default:
2442 			break;
2443 		}
2444 		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
2445 	}
2446 
2447 	if (table_info == NULL)
2448 		return 0;
2449 
2450 	if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
2451 		hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
2452 		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
2453 			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
2454 
2455 		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
2456 			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
2457 
2458 		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
2459 
2460 		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
2461 
2462 		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
2463 			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
2464 
2465 		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
2466 
2467 		table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
2468 								(table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;
2469 
2470 		table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
2471 		table_info->cac_dtp_table->usOperatingTempStep = 1;
2472 		table_info->cac_dtp_table->usOperatingTempHyst = 1;
2473 
2474 		hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
2475 			       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
2476 
2477 		hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
2478 			       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
2479 
2480 		hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
2481 			       table_info->cac_dtp_table->usOperatingTempMinLimit;
2482 
2483 		hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
2484 			       table_info->cac_dtp_table->usOperatingTempMaxLimit;
2485 
2486 		hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
2487 			       table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
2488 
2489 		hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
2490 			       table_info->cac_dtp_table->usOperatingTempStep;
2491 
2492 		hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
2493 			       table_info->cac_dtp_table->usTargetOperatingTemp;
2494 		if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK)
2495 			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2496 					PHM_PlatformCaps_ODFuzzyFanControlSupport);
2497 	}
2498 
2499 	return 0;
2500 }
2501 
2502 /**
2503  * smu7_patch_ppt_v0_with_vdd_leakage - Change virtual leakage voltage to actual value.
2504  *
2505  * @hwmgr:  the address of the powerplay hardware manager.
2506  * @voltage: pointer to changing voltage
2507  * @leakage_table: pointer to leakage table
2508  */
2509 static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2510 		uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
2511 {
2512 	uint32_t index;
2513 
2514 	/* search for leakage voltage ID 0xff01 ~ 0xff08 */
2515 	for (index = 0; index < leakage_table->count; index++) {
2516 		/* if this voltage matches a leakage voltage ID */
2517 		/* patch with actual leakage voltage */
2518 		if (leakage_table->leakage_id[index] == *voltage) {
2519 			*voltage = leakage_table->actual_voltage[index];
2520 			break;
2521 		}
2522 	}
2523 
2524 	if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2525 		pr_err("Voltage value looks like a Leakage ID but it's not patched \n");
2526 }
2527 
2528 
2529 static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
2530 			      struct phm_clock_voltage_dependency_table *tab)
2531 {
2532 	uint16_t i;
2533 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2534 
2535 	if (tab)
2536 		for (i = 0; i < tab->count; i++)
2537 			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2538 						&data->vddc_leakage);
2539 
2540 	return 0;
2541 }
2542 
2543 static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
2544 			       struct phm_clock_voltage_dependency_table *tab)
2545 {
2546 	uint16_t i;
2547 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2548 
2549 	if (tab)
2550 		for (i = 0; i < tab->count; i++)
2551 			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2552 							&data->vddci_leakage);
2553 
2554 	return 0;
2555 }
2556 
2557 static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
2558 				  struct phm_vce_clock_voltage_dependency_table *tab)
2559 {
2560 	uint16_t i;
2561 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2562 
2563 	if (tab)
2564 		for (i = 0; i < tab->count; i++)
2565 			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2566 							&data->vddc_leakage);
2567 
2568 	return 0;
2569 }
2570 
2571 
2572 static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
2573 				  struct phm_uvd_clock_voltage_dependency_table *tab)
2574 {
2575 	uint16_t i;
2576 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2577 
2578 	if (tab)
2579 		for (i = 0; i < tab->count; i++)
2580 			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2581 							&data->vddc_leakage);
2582 
2583 	return 0;
2584 }
2585 
2586 static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
2587 					 struct phm_phase_shedding_limits_table *tab)
2588 {
2589 	uint16_t i;
2590 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2591 
2592 	if (tab)
2593 		for (i = 0; i < tab->count; i++)
2594 			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
2595 							&data->vddc_leakage);
2596 
2597 	return 0;
2598 }
2599 
2600 static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
2601 				   struct phm_samu_clock_voltage_dependency_table *tab)
2602 {
2603 	uint16_t i;
2604 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2605 
2606 	if (tab)
2607 		for (i = 0; i < tab->count; i++)
2608 			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2609 							&data->vddc_leakage);
2610 
2611 	return 0;
2612 }
2613 
2614 static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
2615 				  struct phm_acp_clock_voltage_dependency_table *tab)
2616 {
2617 	uint16_t i;
2618 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2619 
2620 	if (tab)
2621 		for (i = 0; i < tab->count; i++)
2622 			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2623 					&data->vddc_leakage);
2624 
2625 	return 0;
2626 }
2627 
2628 static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
2629 				  struct phm_clock_and_voltage_limits *tab)
2630 {
2631 	uint32_t vddc, vddci;
2632 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2633 
2634 	if (tab) {
2635 		vddc = tab->vddc;
2636 		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
2637 						   &data->vddc_leakage);
2638 		tab->vddc = vddc;
2639 		vddci = tab->vddci;
2640 		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
2641 						   &data->vddci_leakage);
2642 		tab->vddci = vddci;
2643 	}
2644 
2645 	return 0;
2646 }
2647 
2648 static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
2649 {
2650 	uint32_t i;
2651 	uint32_t vddc;
2652 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2653 
2654 	if (tab) {
2655 		for (i = 0; i < tab->count; i++) {
2656 			vddc = (uint32_t)(tab->entries[i].Vddc);
2657 			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
2658 			tab->entries[i].Vddc = (uint16_t)vddc;
2659 		}
2660 	}
2661 
2662 	return 0;
2663 }
2664 
2665 static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
2666 {
2667 	int tmp;
2668 
2669 	tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
2670 	if (tmp)
2671 		return -EINVAL;
2672 
2673 	tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
2674 	if (tmp)
2675 		return -EINVAL;
2676 
2677 	tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2678 	if (tmp)
2679 		return -EINVAL;
2680 
2681 	tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
2682 	if (tmp)
2683 		return -EINVAL;
2684 
2685 	tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
2686 	if (tmp)
2687 		return -EINVAL;
2688 
2689 	tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
2690 	if (tmp)
2691 		return -EINVAL;
2692 
2693 	tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
2694 	if (tmp)
2695 		return -EINVAL;
2696 
2697 	tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
2698 	if (tmp)
2699 		return -EINVAL;
2700 
2701 	tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
2702 	if (tmp)
2703 		return -EINVAL;
2704 
2705 	tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
2706 	if (tmp)
2707 		return -EINVAL;
2708 
2709 	tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
2710 	if (tmp)
2711 		return -EINVAL;
2712 
2713 	tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
2714 	if (tmp)
2715 		return -EINVAL;
2716 
2717 	return 0;
2718 }
2719 
2720 
2721 static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
2722 {
2723 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2724 
2725 	struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
2726 	struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
2727 	struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
2728 
2729 	PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
2730 		"VDDC dependency on SCLK table is missing. This table is mandatory",
2731 		return -EINVAL);
2732 	PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
2733 		"VDDC dependency on SCLK table has to have is missing. This table is mandatory",
2734 		return -EINVAL);
2735 
2736 	PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
2737 		"VDDC dependency on MCLK table is missing. This table is mandatory",
2738 		return -EINVAL);
2739 	PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
2740 		"VDD dependency on MCLK table has to have is missing. This table is mandatory",
2741 		return -EINVAL);
2742 
2743 	data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
2744 	data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2745 
2746 	hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
2747 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
2748 	hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
2749 		allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
2750 	hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
2751 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2752 
2753 	if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
2754 		data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
2755 		data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
2756 	}
2757 
2758 	if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1)
2759 		hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
2760 
2761 	return 0;
2762 }
2763 
2764 static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
2765 {
2766 	kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2767 	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
2768 	kfree(hwmgr->backend);
2769 	hwmgr->backend = NULL;
2770 
2771 	return 0;
2772 }
2773 
2774 static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr)
2775 {
2776 	uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id;
2777 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2778 	int i;
2779 
2780 	if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) {
2781 		for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
2782 			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
2783 			if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci,
2784 								virtual_voltage_id,
2785 								efuse_voltage_id) == 0) {
2786 				if (vddc != 0 && vddc != virtual_voltage_id) {
2787 					data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
2788 					data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
2789 					data->vddc_leakage.count++;
2790 				}
2791 				if (vddci != 0 && vddci != virtual_voltage_id) {
2792 					data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci;
2793 					data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id;
2794 					data->vddci_leakage.count++;
2795 				}
2796 			}
2797 		}
2798 	}
2799 	return 0;
2800 }
2801 
2802 #define LEAKAGE_ID_MSB			463
2803 #define LEAKAGE_ID_LSB			454
2804 
2805 static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr)
2806 {
2807 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2808 	uint32_t efuse;
2809 	uint16_t offset;
2810 	int ret = 0;
2811 
2812 	if (data->disable_edc_leakage_controller)
2813 		return 0;
2814 
2815 	ret = atomctrl_get_edc_hilo_leakage_offset_table(hwmgr,
2816 							 &data->edc_hilo_leakage_offset_from_vbios);
2817 	if (ret)
2818 		return ret;
2819 
2820 	if (data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset &&
2821 	    data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset) {
2822 		atomctrl_read_efuse(hwmgr, LEAKAGE_ID_LSB, LEAKAGE_ID_MSB, &efuse);
2823 		if (efuse < data->edc_hilo_leakage_offset_from_vbios.usHiLoLeakageThreshold)
2824 			offset = data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset;
2825 		else
2826 			offset = data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset;
2827 
2828 		ret = atomctrl_get_edc_leakage_table(hwmgr,
2829 						     &data->edc_leakage_table,
2830 						     offset);
2831 		if (ret)
2832 			return ret;
2833 	}
2834 
2835 	return ret;
2836 }
2837 
2838 static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2839 {
2840 	struct smu7_hwmgr *data;
2841 	int result = 0;
2842 
2843 	data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
2844 	if (data == NULL)
2845 		return -ENOMEM;
2846 
2847 	hwmgr->backend = data;
2848 	smu7_patch_voltage_workaround(hwmgr);
2849 	smu7_init_dpm_defaults(hwmgr);
2850 
2851 	/* Get leakage voltage based on leakage ID. */
2852 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2853 			PHM_PlatformCaps_EVV)) {
2854 		result = smu7_get_evv_voltages(hwmgr);
2855 		if (result) {
2856 			pr_info("Get EVV Voltage Failed.  Abort Driver loading!\n");
2857 			return -EINVAL;
2858 		}
2859 	} else {
2860 		smu7_get_elb_voltages(hwmgr);
2861 	}
2862 
2863 	if (hwmgr->pp_table_version == PP_TABLE_V1) {
2864 		smu7_complete_dependency_tables(hwmgr);
2865 		smu7_set_private_data_based_on_pptable_v1(hwmgr);
2866 	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
2867 		smu7_patch_dependency_tables_with_leakage(hwmgr);
2868 		smu7_set_private_data_based_on_pptable_v0(hwmgr);
2869 	}
2870 
2871 	/* Initalize Dynamic State Adjustment Rule Settings */
2872 	result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
2873 
2874 	if (0 == result) {
2875 		struct amdgpu_device *adev = hwmgr->adev;
2876 
2877 		data->is_tlu_enabled = false;
2878 
2879 		hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
2880 							SMU7_MAX_HARDWARE_POWERLEVELS;
2881 		hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
2882 		hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
2883 
2884 		data->pcie_gen_cap = adev->pm.pcie_gen_mask;
2885 		if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2886 			data->pcie_spc_cap = 20;
2887 		else
2888 			data->pcie_spc_cap = 16;
2889 		data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
2890 
2891 		hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
2892 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
2893 		hwmgr->platform_descriptor.clockStep.engineClock = 500;
2894 		hwmgr->platform_descriptor.clockStep.memoryClock = 500;
2895 		smu7_thermal_parameter_init(hwmgr);
2896 	} else {
2897 		/* Ignore return value in here, we are cleaning up a mess. */
2898 		smu7_hwmgr_backend_fini(hwmgr);
2899 	}
2900 
2901 	result = smu7_update_edc_leakage_table(hwmgr);
2902 	if (result)
2903 		return result;
2904 
2905 	return 0;
2906 }
2907 
2908 static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
2909 {
2910 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2911 	uint32_t level, tmp;
2912 
2913 	if (!data->pcie_dpm_key_disabled) {
2914 		if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2915 			level = 0;
2916 			tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
2917 			while (tmp >>= 1)
2918 				level++;
2919 
2920 			if (level)
2921 				smum_send_msg_to_smc_with_parameter(hwmgr,
2922 						PPSMC_MSG_PCIeDPM_ForceLevel, level,
2923 						NULL);
2924 		}
2925 	}
2926 
2927 	if (!data->sclk_dpm_key_disabled) {
2928 		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2929 			level = 0;
2930 			tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
2931 			while (tmp >>= 1)
2932 				level++;
2933 
2934 			if (level)
2935 				smum_send_msg_to_smc_with_parameter(hwmgr,
2936 						PPSMC_MSG_SCLKDPM_SetEnabledMask,
2937 						(1 << level),
2938 						NULL);
2939 		}
2940 	}
2941 
2942 	if (!data->mclk_dpm_key_disabled) {
2943 		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2944 			level = 0;
2945 			tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
2946 			while (tmp >>= 1)
2947 				level++;
2948 
2949 			if (level)
2950 				smum_send_msg_to_smc_with_parameter(hwmgr,
2951 						PPSMC_MSG_MCLKDPM_SetEnabledMask,
2952 						(1 << level),
2953 						NULL);
2954 		}
2955 	}
2956 
2957 	return 0;
2958 }
2959 
2960 static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
2961 {
2962 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2963 
2964 	if (hwmgr->pp_table_version == PP_TABLE_V1)
2965 		phm_apply_dal_min_voltage_request(hwmgr);
2966 /* TO DO  for v0 iceland and Ci*/
2967 
2968 	if (!data->sclk_dpm_key_disabled) {
2969 		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
2970 			smum_send_msg_to_smc_with_parameter(hwmgr,
2971 					PPSMC_MSG_SCLKDPM_SetEnabledMask,
2972 					data->dpm_level_enable_mask.sclk_dpm_enable_mask,
2973 					NULL);
2974 	}
2975 
2976 	if (!data->mclk_dpm_key_disabled) {
2977 		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
2978 			smum_send_msg_to_smc_with_parameter(hwmgr,
2979 					PPSMC_MSG_MCLKDPM_SetEnabledMask,
2980 					data->dpm_level_enable_mask.mclk_dpm_enable_mask,
2981 					NULL);
2982 	}
2983 
2984 	return 0;
2985 }
2986 
2987 static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2988 {
2989 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2990 
2991 	if (!smum_is_dpm_running(hwmgr))
2992 		return -EINVAL;
2993 
2994 	if (!data->pcie_dpm_key_disabled) {
2995 		smum_send_msg_to_smc(hwmgr,
2996 				PPSMC_MSG_PCIeDPM_UnForceLevel,
2997 				NULL);
2998 	}
2999 
3000 	return smu7_upload_dpm_level_enable_mask(hwmgr);
3001 }
3002 
3003 static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
3004 {
3005 	struct smu7_hwmgr *data =
3006 			(struct smu7_hwmgr *)(hwmgr->backend);
3007 	uint32_t level;
3008 
3009 	if (!data->sclk_dpm_key_disabled)
3010 		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3011 			level = phm_get_lowest_enabled_level(hwmgr,
3012 							      data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3013 			smum_send_msg_to_smc_with_parameter(hwmgr,
3014 							    PPSMC_MSG_SCLKDPM_SetEnabledMask,
3015 							    (1 << level),
3016 							    NULL);
3017 
3018 	}
3019 
3020 	if (!data->mclk_dpm_key_disabled) {
3021 		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3022 			level = phm_get_lowest_enabled_level(hwmgr,
3023 							      data->dpm_level_enable_mask.mclk_dpm_enable_mask);
3024 			smum_send_msg_to_smc_with_parameter(hwmgr,
3025 							    PPSMC_MSG_MCLKDPM_SetEnabledMask,
3026 							    (1 << level),
3027 							    NULL);
3028 		}
3029 	}
3030 
3031 	if (!data->pcie_dpm_key_disabled) {
3032 		if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3033 			level = phm_get_lowest_enabled_level(hwmgr,
3034 							      data->dpm_level_enable_mask.pcie_dpm_enable_mask);
3035 			smum_send_msg_to_smc_with_parameter(hwmgr,
3036 							    PPSMC_MSG_PCIeDPM_ForceLevel,
3037 							    (level),
3038 							    NULL);
3039 		}
3040 	}
3041 
3042 	return 0;
3043 }
3044 
3045 static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
3046 				uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask)
3047 {
3048 	uint32_t percentage;
3049 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3050 	struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
3051 	int32_t tmp_mclk;
3052 	int32_t tmp_sclk;
3053 	int32_t count;
3054 
3055 	if (golden_dpm_table->mclk_table.count < 1)
3056 		return -EINVAL;
3057 
3058 	percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
3059 			golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
3060 
3061 	if (golden_dpm_table->mclk_table.count == 1) {
3062 		percentage = 70;
3063 		tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
3064 		*mclk_mask = golden_dpm_table->mclk_table.count - 1;
3065 	} else {
3066 		tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
3067 		*mclk_mask = golden_dpm_table->mclk_table.count - 2;
3068 	}
3069 
3070 	tmp_sclk = tmp_mclk * percentage / 100;
3071 
3072 	if (hwmgr->pp_table_version == PP_TABLE_V0) {
3073 		for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
3074 			count >= 0; count--) {
3075 			if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
3076 				tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
3077 				*sclk_mask = count;
3078 				break;
3079 			}
3080 		}
3081 		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
3082 			*sclk_mask = 0;
3083 			tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
3084 		}
3085 
3086 		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3087 			*sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
3088 	} else if (hwmgr->pp_table_version == PP_TABLE_V1) {
3089 		struct phm_ppt_v1_information *table_info =
3090 				(struct phm_ppt_v1_information *)(hwmgr->pptable);
3091 
3092 		for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
3093 			if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
3094 				tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
3095 				*sclk_mask = count;
3096 				break;
3097 			}
3098 		}
3099 		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
3100 			*sclk_mask = 0;
3101 			tmp_sclk =  table_info->vdd_dep_on_sclk->entries[0].clk;
3102 		}
3103 
3104 		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3105 			*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
3106 	}
3107 
3108 	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
3109 		*mclk_mask = 0;
3110 	else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3111 		*mclk_mask = golden_dpm_table->mclk_table.count - 1;
3112 
3113 	*pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
3114 	hwmgr->pstate_sclk = tmp_sclk;
3115 	hwmgr->pstate_mclk = tmp_mclk;
3116 
3117 	return 0;
3118 }
3119 
3120 static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
3121 				enum amd_dpm_forced_level level)
3122 {
3123 	int ret = 0;
3124 	uint32_t sclk_mask = 0;
3125 	uint32_t mclk_mask = 0;
3126 	uint32_t pcie_mask = 0;
3127 
3128 	if (hwmgr->pstate_sclk == 0)
3129 		smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
3130 
3131 	switch (level) {
3132 	case AMD_DPM_FORCED_LEVEL_HIGH:
3133 		ret = smu7_force_dpm_highest(hwmgr);
3134 		break;
3135 	case AMD_DPM_FORCED_LEVEL_LOW:
3136 		ret = smu7_force_dpm_lowest(hwmgr);
3137 		break;
3138 	case AMD_DPM_FORCED_LEVEL_AUTO:
3139 		ret = smu7_unforce_dpm_levels(hwmgr);
3140 		break;
3141 	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
3142 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
3143 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
3144 	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
3145 		ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
3146 		if (ret)
3147 			return ret;
3148 		smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
3149 		smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
3150 		smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
3151 		break;
3152 	case AMD_DPM_FORCED_LEVEL_MANUAL:
3153 	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
3154 	default:
3155 		break;
3156 	}
3157 
3158 	if (!ret) {
3159 		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3160 			smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
3161 		else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3162 			smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
3163 	}
3164 	return ret;
3165 }
3166 
3167 static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
3168 {
3169 	return sizeof(struct smu7_power_state);
3170 }
3171 
3172 static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
3173 				 uint32_t vblank_time_us)
3174 {
3175 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3176 	uint32_t switch_limit_us;
3177 
3178 	switch (hwmgr->chip_id) {
3179 	case CHIP_POLARIS10:
3180 	case CHIP_POLARIS11:
3181 	case CHIP_POLARIS12:
3182 		if (hwmgr->is_kicker || (hwmgr->chip_id == CHIP_POLARIS12))
3183 			switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
3184 		else
3185 			switch_limit_us = data->is_memory_gddr5 ? 200 : 150;
3186 		break;
3187 	case CHIP_VEGAM:
3188 		switch_limit_us = 30;
3189 		break;
3190 	default:
3191 		switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
3192 		break;
3193 	}
3194 
3195 	if (vblank_time_us < switch_limit_us)
3196 		return true;
3197 	else
3198 		return false;
3199 }
3200 
3201 static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3202 				struct pp_power_state *request_ps,
3203 			const struct pp_power_state *current_ps)
3204 {
3205 	struct amdgpu_device *adev = hwmgr->adev;
3206 	struct smu7_power_state *smu7_ps =
3207 				cast_phw_smu7_power_state(&request_ps->hardware);
3208 	uint32_t sclk;
3209 	uint32_t mclk;
3210 	struct PP_Clocks minimum_clocks = {0};
3211 	bool disable_mclk_switching;
3212 	bool disable_mclk_switching_for_frame_lock;
3213 	bool disable_mclk_switching_for_display;
3214 	const struct phm_clock_and_voltage_limits *max_limits;
3215 	uint32_t i;
3216 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3217 	struct phm_ppt_v1_information *table_info =
3218 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
3219 	int32_t count;
3220 	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
3221 	uint32_t latency;
3222 	bool latency_allowed = false;
3223 
3224 	data->battery_state = (PP_StateUILabel_Battery ==
3225 			request_ps->classification.ui_label);
3226 	data->mclk_ignore_signal = false;
3227 
3228 	PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
3229 				 "VI should always have 2 performance levels",
3230 				);
3231 
3232 	max_limits = adev->pm.ac_power ?
3233 			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
3234 			&(hwmgr->dyn_state.max_clock_voltage_on_dc);
3235 
3236 	/* Cap clock DPM tables at DC MAX if it is in DC. */
3237 	if (!adev->pm.ac_power) {
3238 		for (i = 0; i < smu7_ps->performance_level_count; i++) {
3239 			if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
3240 				smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
3241 			if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
3242 				smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
3243 		}
3244 	}
3245 
3246 	minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
3247 	minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
3248 
3249 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3250 			PHM_PlatformCaps_StablePState)) {
3251 		max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
3252 		stable_pstate_sclk = (max_limits->sclk * 75) / 100;
3253 
3254 		for (count = table_info->vdd_dep_on_sclk->count - 1;
3255 				count >= 0; count--) {
3256 			if (stable_pstate_sclk >=
3257 					table_info->vdd_dep_on_sclk->entries[count].clk) {
3258 				stable_pstate_sclk =
3259 						table_info->vdd_dep_on_sclk->entries[count].clk;
3260 				break;
3261 			}
3262 		}
3263 
3264 		if (count < 0)
3265 			stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
3266 
3267 		stable_pstate_mclk = max_limits->mclk;
3268 
3269 		minimum_clocks.engineClock = stable_pstate_sclk;
3270 		minimum_clocks.memoryClock = stable_pstate_mclk;
3271 	}
3272 
3273 	disable_mclk_switching_for_frame_lock = phm_cap_enabled(
3274 				    hwmgr->platform_descriptor.platformCaps,
3275 				    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3276 
3277 	disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) &&
3278 						!hwmgr->display_config->multi_monitor_in_sync) ||
3279 						smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);
3280 
3281 	disable_mclk_switching = disable_mclk_switching_for_frame_lock ||
3282 					 disable_mclk_switching_for_display;
3283 
3284 	if (hwmgr->display_config->num_display == 0) {
3285 		if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM)
3286 			data->mclk_ignore_signal = true;
3287 		else
3288 			disable_mclk_switching = false;
3289 	}
3290 
3291 	sclk = smu7_ps->performance_levels[0].engine_clock;
3292 	mclk = smu7_ps->performance_levels[0].memory_clock;
3293 
3294 	if (disable_mclk_switching &&
3295 	    (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
3296 	    hwmgr->chip_id <= CHIP_VEGAM)))
3297 		mclk = smu7_ps->performance_levels
3298 		[smu7_ps->performance_level_count - 1].memory_clock;
3299 
3300 	if (sclk < minimum_clocks.engineClock)
3301 		sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
3302 				max_limits->sclk : minimum_clocks.engineClock;
3303 
3304 	if (mclk < minimum_clocks.memoryClock)
3305 		mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
3306 				max_limits->mclk : minimum_clocks.memoryClock;
3307 
3308 	smu7_ps->performance_levels[0].engine_clock = sclk;
3309 	smu7_ps->performance_levels[0].memory_clock = mclk;
3310 
3311 	smu7_ps->performance_levels[1].engine_clock =
3312 		(smu7_ps->performance_levels[1].engine_clock >=
3313 				smu7_ps->performance_levels[0].engine_clock) ?
3314 						smu7_ps->performance_levels[1].engine_clock :
3315 						smu7_ps->performance_levels[0].engine_clock;
3316 
3317 	if (disable_mclk_switching) {
3318 		if (mclk < smu7_ps->performance_levels[1].memory_clock)
3319 			mclk = smu7_ps->performance_levels[1].memory_clock;
3320 
3321 		if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM) {
3322 			if (disable_mclk_switching_for_display) {
3323 				/* Find the lowest MCLK frequency that is within
3324 				 * the tolerable latency defined in DAL
3325 				 */
3326 				latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
3327 				for (i = 0; i < data->mclk_latency_table.count; i++) {
3328 					if (data->mclk_latency_table.entries[i].latency <= latency) {
3329 						latency_allowed = true;
3330 
3331 						if ((data->mclk_latency_table.entries[i].frequency >=
3332 								smu7_ps->performance_levels[0].memory_clock) &&
3333 						    (data->mclk_latency_table.entries[i].frequency <=
3334 								smu7_ps->performance_levels[1].memory_clock)) {
3335 							mclk = data->mclk_latency_table.entries[i].frequency;
3336 							break;
3337 						}
3338 					}
3339 				}
3340 				if ((i >= data->mclk_latency_table.count - 1) && !latency_allowed) {
3341 					data->mclk_ignore_signal = true;
3342 				} else {
3343 					data->mclk_ignore_signal = false;
3344 				}
3345 			}
3346 
3347 			if (disable_mclk_switching_for_frame_lock)
3348 				mclk = smu7_ps->performance_levels[1].memory_clock;
3349 		}
3350 
3351 		smu7_ps->performance_levels[0].memory_clock = mclk;
3352 
3353 		if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
3354 		      hwmgr->chip_id <= CHIP_VEGAM))
3355 			smu7_ps->performance_levels[1].memory_clock = mclk;
3356 	} else {
3357 		if (smu7_ps->performance_levels[1].memory_clock <
3358 				smu7_ps->performance_levels[0].memory_clock)
3359 			smu7_ps->performance_levels[1].memory_clock =
3360 					smu7_ps->performance_levels[0].memory_clock;
3361 	}
3362 
3363 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3364 			PHM_PlatformCaps_StablePState)) {
3365 		for (i = 0; i < smu7_ps->performance_level_count; i++) {
3366 			smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
3367 			smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
3368 			smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
3369 			smu7_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
3370 		}
3371 	}
3372 	return 0;
3373 }
3374 
3375 
3376 static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3377 {
3378 	struct pp_power_state  *ps;
3379 	struct smu7_power_state  *smu7_ps;
3380 
3381 	if (hwmgr == NULL)
3382 		return -EINVAL;
3383 
3384 	ps = hwmgr->request_ps;
3385 
3386 	if (ps == NULL)
3387 		return -EINVAL;
3388 
3389 	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3390 
3391 	if (low)
3392 		return smu7_ps->performance_levels[0].memory_clock;
3393 	else
3394 		return smu7_ps->performance_levels
3395 				[smu7_ps->performance_level_count-1].memory_clock;
3396 }
3397 
3398 static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3399 {
3400 	struct pp_power_state  *ps;
3401 	struct smu7_power_state  *smu7_ps;
3402 
3403 	if (hwmgr == NULL)
3404 		return -EINVAL;
3405 
3406 	ps = hwmgr->request_ps;
3407 
3408 	if (ps == NULL)
3409 		return -EINVAL;
3410 
3411 	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3412 
3413 	if (low)
3414 		return smu7_ps->performance_levels[0].engine_clock;
3415 	else
3416 		return smu7_ps->performance_levels
3417 				[smu7_ps->performance_level_count-1].engine_clock;
3418 }
3419 
3420 static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
3421 					struct pp_hw_power_state *hw_ps)
3422 {
3423 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3424 	struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
3425 	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
3426 	uint16_t size;
3427 	uint8_t frev, crev;
3428 	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
3429 
3430 	/* First retrieve the Boot clocks and VDDC from the firmware info table.
3431 	 * We assume here that fw_info is unchanged if this call fails.
3432 	 */
3433 	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index,
3434 			&size, &frev, &crev);
3435 	if (!fw_info)
3436 		/* During a test, there is no firmware info table. */
3437 		return 0;
3438 
3439 	/* Patch the state. */
3440 	data->vbios_boot_state.sclk_bootup_value =
3441 			le32_to_cpu(fw_info->ulDefaultEngineClock);
3442 	data->vbios_boot_state.mclk_bootup_value =
3443 			le32_to_cpu(fw_info->ulDefaultMemoryClock);
3444 	data->vbios_boot_state.mvdd_bootup_value =
3445 			le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
3446 	data->vbios_boot_state.vddc_bootup_value =
3447 			le16_to_cpu(fw_info->usBootUpVDDCVoltage);
3448 	data->vbios_boot_state.vddci_bootup_value =
3449 			le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
3450 	data->vbios_boot_state.pcie_gen_bootup_value =
3451 			smu7_get_current_pcie_speed(hwmgr);
3452 
3453 	data->vbios_boot_state.pcie_lane_bootup_value =
3454 			(uint16_t)smu7_get_current_pcie_lane_number(hwmgr);
3455 
3456 	/* set boot power state */
3457 	ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
3458 	ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
3459 	ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
3460 	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
3461 
3462 	return 0;
3463 }
3464 
3465 static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
3466 {
3467 	int result;
3468 	unsigned long ret = 0;
3469 
3470 	if (hwmgr->pp_table_version == PP_TABLE_V0) {
3471 		result = pp_tables_get_num_of_entries(hwmgr, &ret);
3472 		return result ? 0 : ret;
3473 	} else if (hwmgr->pp_table_version == PP_TABLE_V1) {
3474 		result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
3475 		return result;
3476 	}
3477 	return 0;
3478 }
3479 
3480 static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
3481 		void *state, struct pp_power_state *power_state,
3482 		void *pp_table, uint32_t classification_flag)
3483 {
3484 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3485 	struct smu7_power_state  *smu7_power_state =
3486 			(struct smu7_power_state *)(&(power_state->hardware));
3487 	struct smu7_performance_level *performance_level;
3488 	ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
3489 	ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
3490 			(ATOM_Tonga_POWERPLAYTABLE *)pp_table;
3491 	PPTable_Generic_SubTable_Header *sclk_dep_table =
3492 			(PPTable_Generic_SubTable_Header *)
3493 			(((unsigned long)powerplay_table) +
3494 				le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
3495 
3496 	ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
3497 			(ATOM_Tonga_MCLK_Dependency_Table *)
3498 			(((unsigned long)powerplay_table) +
3499 				le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
3500 
3501 	/* The following fields are not initialized here: id orderedList allStatesList */
3502 	power_state->classification.ui_label =
3503 			(le16_to_cpu(state_entry->usClassification) &
3504 			ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
3505 			ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
3506 	power_state->classification.flags = classification_flag;
3507 	/* NOTE: There is a classification2 flag in BIOS that is not being used right now */
3508 
3509 	power_state->classification.temporary_state = false;
3510 	power_state->classification.to_be_deleted = false;
3511 
3512 	power_state->validation.disallowOnDC =
3513 			(0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3514 					ATOM_Tonga_DISALLOW_ON_DC));
3515 
3516 	power_state->pcie.lanes = 0;
3517 
3518 	power_state->display.disableFrameModulation = false;
3519 	power_state->display.limitRefreshrate = false;
3520 	power_state->display.enableVariBright =
3521 			(0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3522 					ATOM_Tonga_ENABLE_VARIBRIGHT));
3523 
3524 	power_state->validation.supportedPowerLevels = 0;
3525 	power_state->uvd_clocks.VCLK = 0;
3526 	power_state->uvd_clocks.DCLK = 0;
3527 	power_state->temperatures.min = 0;
3528 	power_state->temperatures.max = 0;
3529 
3530 	performance_level = &(smu7_power_state->performance_levels
3531 			[smu7_power_state->performance_level_count++]);
3532 
3533 	PP_ASSERT_WITH_CODE(
3534 			(smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
3535 			"Performance levels exceeds SMC limit!",
3536 			return -EINVAL);
3537 
3538 	PP_ASSERT_WITH_CODE(
3539 			(smu7_power_state->performance_level_count <=
3540 					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3541 			"Performance levels exceeds Driver limit!",
3542 			return -EINVAL);
3543 
3544 	/* Performance levels are arranged from low to high. */
3545 	performance_level->memory_clock = mclk_dep_table->entries
3546 			[state_entry->ucMemoryClockIndexLow].ulMclk;
3547 	if (sclk_dep_table->ucRevId == 0)
3548 		performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3549 			[state_entry->ucEngineClockIndexLow].ulSclk;
3550 	else if (sclk_dep_table->ucRevId == 1)
3551 		performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3552 			[state_entry->ucEngineClockIndexLow].ulSclk;
3553 	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3554 			state_entry->ucPCIEGenLow);
3555 	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3556 			state_entry->ucPCIELaneLow);
3557 
3558 	performance_level = &(smu7_power_state->performance_levels
3559 			[smu7_power_state->performance_level_count++]);
3560 	performance_level->memory_clock = mclk_dep_table->entries
3561 			[state_entry->ucMemoryClockIndexHigh].ulMclk;
3562 
3563 	if (sclk_dep_table->ucRevId == 0)
3564 		performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3565 			[state_entry->ucEngineClockIndexHigh].ulSclk;
3566 	else if (sclk_dep_table->ucRevId == 1)
3567 		performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3568 			[state_entry->ucEngineClockIndexHigh].ulSclk;
3569 
3570 	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3571 			state_entry->ucPCIEGenHigh);
3572 	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3573 			state_entry->ucPCIELaneHigh);
3574 
3575 	return 0;
3576 }
3577 
3578 static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
3579 		unsigned long entry_index, struct pp_power_state *state)
3580 {
3581 	int result;
3582 	struct smu7_power_state *ps;
3583 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3584 	struct phm_ppt_v1_information *table_info =
3585 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
3586 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
3587 			table_info->vdd_dep_on_mclk;
3588 
3589 	state->hardware.magic = PHM_VIslands_Magic;
3590 
3591 	ps = (struct smu7_power_state *)(&state->hardware);
3592 
3593 	result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
3594 			smu7_get_pp_table_entry_callback_func_v1);
3595 
3596 	/* This is the earliest time we have all the dependency table and the VBIOS boot state
3597 	 * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state
3598 	 * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state
3599 	 */
3600 	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3601 		if (dep_mclk_table->entries[0].clk !=
3602 				data->vbios_boot_state.mclk_bootup_value)
3603 			pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
3604 					"does not match VBIOS boot MCLK level");
3605 		if (dep_mclk_table->entries[0].vddci !=
3606 				data->vbios_boot_state.vddci_bootup_value)
3607 			pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
3608 					"does not match VBIOS boot VDDCI level");
3609 	}
3610 
3611 	/* set DC compatible flag if this state supports DC */
3612 	if (!state->validation.disallowOnDC)
3613 		ps->dc_compatible = true;
3614 
3615 	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3616 		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3617 
3618 	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3619 	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3620 
3621 	if (!result) {
3622 		uint32_t i;
3623 
3624 		switch (state->classification.ui_label) {
3625 		case PP_StateUILabel_Performance:
3626 			data->use_pcie_performance_levels = true;
3627 			for (i = 0; i < ps->performance_level_count; i++) {
3628 				if (data->pcie_gen_performance.max <
3629 						ps->performance_levels[i].pcie_gen)
3630 					data->pcie_gen_performance.max =
3631 							ps->performance_levels[i].pcie_gen;
3632 
3633 				if (data->pcie_gen_performance.min >
3634 						ps->performance_levels[i].pcie_gen)
3635 					data->pcie_gen_performance.min =
3636 							ps->performance_levels[i].pcie_gen;
3637 
3638 				if (data->pcie_lane_performance.max <
3639 						ps->performance_levels[i].pcie_lane)
3640 					data->pcie_lane_performance.max =
3641 							ps->performance_levels[i].pcie_lane;
3642 				if (data->pcie_lane_performance.min >
3643 						ps->performance_levels[i].pcie_lane)
3644 					data->pcie_lane_performance.min =
3645 							ps->performance_levels[i].pcie_lane;
3646 			}
3647 			break;
3648 		case PP_StateUILabel_Battery:
3649 			data->use_pcie_power_saving_levels = true;
3650 
3651 			for (i = 0; i < ps->performance_level_count; i++) {
3652 				if (data->pcie_gen_power_saving.max <
3653 						ps->performance_levels[i].pcie_gen)
3654 					data->pcie_gen_power_saving.max =
3655 							ps->performance_levels[i].pcie_gen;
3656 
3657 				if (data->pcie_gen_power_saving.min >
3658 						ps->performance_levels[i].pcie_gen)
3659 					data->pcie_gen_power_saving.min =
3660 							ps->performance_levels[i].pcie_gen;
3661 
3662 				if (data->pcie_lane_power_saving.max <
3663 						ps->performance_levels[i].pcie_lane)
3664 					data->pcie_lane_power_saving.max =
3665 							ps->performance_levels[i].pcie_lane;
3666 
3667 				if (data->pcie_lane_power_saving.min >
3668 						ps->performance_levels[i].pcie_lane)
3669 					data->pcie_lane_power_saving.min =
3670 							ps->performance_levels[i].pcie_lane;
3671 			}
3672 			break;
3673 		default:
3674 			break;
3675 		}
3676 	}
3677 	return 0;
3678 }
3679 
3680 static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
3681 					struct pp_hw_power_state *power_state,
3682 					unsigned int index, const void *clock_info)
3683 {
3684 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3685 	struct smu7_power_state  *ps = cast_phw_smu7_power_state(power_state);
3686 	const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
3687 	struct smu7_performance_level *performance_level;
3688 	uint32_t engine_clock, memory_clock;
3689 	uint16_t pcie_gen_from_bios;
3690 
3691 	engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
3692 	memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;
3693 
3694 	if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
3695 		data->highest_mclk = memory_clock;
3696 
3697 	PP_ASSERT_WITH_CODE(
3698 			(ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
3699 			"Performance levels exceeds SMC limit!",
3700 			return -EINVAL);
3701 
3702 	PP_ASSERT_WITH_CODE(
3703 			(ps->performance_level_count <
3704 					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3705 			"Performance levels exceeds Driver limit, Skip!",
3706 			return 0);
3707 
3708 	performance_level = &(ps->performance_levels
3709 			[ps->performance_level_count++]);
3710 
3711 	/* Performance levels are arranged from low to high. */
3712 	performance_level->memory_clock = memory_clock;
3713 	performance_level->engine_clock = engine_clock;
3714 
3715 	pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
3716 
3717 	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
3718 	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);
3719 
3720 	return 0;
3721 }
3722 
3723 static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
3724 		unsigned long entry_index, struct pp_power_state *state)
3725 {
3726 	int result;
3727 	struct smu7_power_state *ps;
3728 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3729 	struct phm_clock_voltage_dependency_table *dep_mclk_table =
3730 			hwmgr->dyn_state.vddci_dependency_on_mclk;
3731 
3732 	memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));
3733 
3734 	state->hardware.magic = PHM_VIslands_Magic;
3735 
3736 	ps = (struct smu7_power_state *)(&state->hardware);
3737 
3738 	result = pp_tables_get_entry(hwmgr, entry_index, state,
3739 			smu7_get_pp_table_entry_callback_func_v0);
3740 
3741 	/*
3742 	 * This is the earliest time we have all the dependency table
3743 	 * and the VBIOS boot state as
3744 	 * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot
3745 	 * state if there is only one VDDCI/MCLK level, check if it's
3746 	 * the same as VBIOS boot state
3747 	 */
3748 	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3749 		if (dep_mclk_table->entries[0].clk !=
3750 				data->vbios_boot_state.mclk_bootup_value)
3751 			pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
3752 					"does not match VBIOS boot MCLK level");
3753 		if (dep_mclk_table->entries[0].v !=
3754 				data->vbios_boot_state.vddci_bootup_value)
3755 			pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
3756 					"does not match VBIOS boot VDDCI level");
3757 	}
3758 
3759 	/* set DC compatible flag if this state supports DC */
3760 	if (!state->validation.disallowOnDC)
3761 		ps->dc_compatible = true;
3762 
3763 	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3764 		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3765 
3766 	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3767 	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3768 
3769 	if (!result) {
3770 		uint32_t i;
3771 
3772 		switch (state->classification.ui_label) {
3773 		case PP_StateUILabel_Performance:
3774 			data->use_pcie_performance_levels = true;
3775 
3776 			for (i = 0; i < ps->performance_level_count; i++) {
3777 				if (data->pcie_gen_performance.max <
3778 						ps->performance_levels[i].pcie_gen)
3779 					data->pcie_gen_performance.max =
3780 							ps->performance_levels[i].pcie_gen;
3781 
3782 				if (data->pcie_gen_performance.min >
3783 						ps->performance_levels[i].pcie_gen)
3784 					data->pcie_gen_performance.min =
3785 							ps->performance_levels[i].pcie_gen;
3786 
3787 				if (data->pcie_lane_performance.max <
3788 						ps->performance_levels[i].pcie_lane)
3789 					data->pcie_lane_performance.max =
3790 							ps->performance_levels[i].pcie_lane;
3791 
3792 				if (data->pcie_lane_performance.min >
3793 						ps->performance_levels[i].pcie_lane)
3794 					data->pcie_lane_performance.min =
3795 							ps->performance_levels[i].pcie_lane;
3796 			}
3797 			break;
3798 		case PP_StateUILabel_Battery:
3799 			data->use_pcie_power_saving_levels = true;
3800 
3801 			for (i = 0; i < ps->performance_level_count; i++) {
3802 				if (data->pcie_gen_power_saving.max <
3803 						ps->performance_levels[i].pcie_gen)
3804 					data->pcie_gen_power_saving.max =
3805 							ps->performance_levels[i].pcie_gen;
3806 
3807 				if (data->pcie_gen_power_saving.min >
3808 						ps->performance_levels[i].pcie_gen)
3809 					data->pcie_gen_power_saving.min =
3810 							ps->performance_levels[i].pcie_gen;
3811 
3812 				if (data->pcie_lane_power_saving.max <
3813 						ps->performance_levels[i].pcie_lane)
3814 					data->pcie_lane_power_saving.max =
3815 							ps->performance_levels[i].pcie_lane;
3816 
3817 				if (data->pcie_lane_power_saving.min >
3818 						ps->performance_levels[i].pcie_lane)
3819 					data->pcie_lane_power_saving.min =
3820 							ps->performance_levels[i].pcie_lane;
3821 			}
3822 			break;
3823 		default:
3824 			break;
3825 		}
3826 	}
3827 	return 0;
3828 }
3829 
3830 static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3831 		unsigned long entry_index, struct pp_power_state *state)
3832 {
3833 	if (hwmgr->pp_table_version == PP_TABLE_V0)
3834 		return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
3835 	else if (hwmgr->pp_table_version == PP_TABLE_V1)
3836 		return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);
3837 
3838 	return 0;
3839 }
3840 
3841 static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
3842 {
3843 	struct amdgpu_device *adev = hwmgr->adev;
3844 	int i;
3845 	u32 tmp = 0;
3846 
3847 	if (!query)
3848 		return -EINVAL;
3849 
3850 	/*
3851 	 * PPSMC_MSG_GetCurrPkgPwr is not supported on:
3852 	 *  - Hawaii
3853 	 *  - Bonaire
3854 	 *  - Fiji
3855 	 *  - Tonga
3856 	 */
3857 	if ((adev->asic_type != CHIP_HAWAII) &&
3858 	    (adev->asic_type != CHIP_BONAIRE) &&
3859 	    (adev->asic_type != CHIP_FIJI) &&
3860 	    (adev->asic_type != CHIP_TONGA)) {
3861 		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0, &tmp);
3862 		*query = tmp;
3863 
3864 		if (tmp != 0)
3865 			return 0;
3866 	}
3867 
3868 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart, NULL);
3869 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
3870 							ixSMU_PM_STATUS_95, 0);
3871 
3872 	for (i = 0; i < 10; i++) {
3873 		msleep(500);
3874 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample, NULL);
3875 		tmp = cgs_read_ind_register(hwmgr->device,
3876 						CGS_IND_REG__SMC,
3877 						ixSMU_PM_STATUS_95);
3878 		if (tmp != 0)
3879 			break;
3880 	}
3881 	*query = tmp;
3882 
3883 	return 0;
3884 }
3885 
3886 static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3887 			    void *value, int *size)
3888 {
3889 	uint32_t sclk, mclk, activity_percent;
3890 	uint32_t offset, val_vid;
3891 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3892 
3893 	/* size must be at least 4 bytes for all sensors */
3894 	if (*size < 4)
3895 		return -EINVAL;
3896 
3897 	switch (idx) {
3898 	case AMDGPU_PP_SENSOR_GFX_SCLK:
3899 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk);
3900 		*((uint32_t *)value) = sclk;
3901 		*size = 4;
3902 		return 0;
3903 	case AMDGPU_PP_SENSOR_GFX_MCLK:
3904 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk);
3905 		*((uint32_t *)value) = mclk;
3906 		*size = 4;
3907 		return 0;
3908 	case AMDGPU_PP_SENSOR_GPU_LOAD:
3909 	case AMDGPU_PP_SENSOR_MEM_LOAD:
3910 		offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
3911 								SMU_SoftRegisters,
3912 								(idx == AMDGPU_PP_SENSOR_GPU_LOAD) ?
3913 								AverageGraphicsActivity:
3914 								AverageMemoryActivity);
3915 
3916 		activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
3917 		activity_percent += 0x80;
3918 		activity_percent >>= 8;
3919 		*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
3920 		*size = 4;
3921 		return 0;
3922 	case AMDGPU_PP_SENSOR_GPU_TEMP:
3923 		*((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr);
3924 		*size = 4;
3925 		return 0;
3926 	case AMDGPU_PP_SENSOR_UVD_POWER:
3927 		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
3928 		*size = 4;
3929 		return 0;
3930 	case AMDGPU_PP_SENSOR_VCE_POWER:
3931 		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
3932 		*size = 4;
3933 		return 0;
3934 	case AMDGPU_PP_SENSOR_GPU_POWER:
3935 		return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
3936 	case AMDGPU_PP_SENSOR_VDDGFX:
3937 		if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
3938 		    (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT))
3939 			val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
3940 					CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID);
3941 		else
3942 			val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
3943 					CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID);
3944 
3945 		*((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid);
3946 		return 0;
3947 	default:
3948 		return -EINVAL;
3949 	}
3950 }
3951 
3952 static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
3953 {
3954 	const struct phm_set_power_state_input *states =
3955 			(const struct phm_set_power_state_input *)input;
3956 	const struct smu7_power_state *smu7_ps =
3957 			cast_const_phw_smu7_power_state(states->pnew_state);
3958 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3959 	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
3960 	uint32_t sclk = smu7_ps->performance_levels
3961 			[smu7_ps->performance_level_count - 1].engine_clock;
3962 	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
3963 	uint32_t mclk = smu7_ps->performance_levels
3964 			[smu7_ps->performance_level_count - 1].memory_clock;
3965 	struct PP_Clocks min_clocks = {0};
3966 	uint32_t i;
3967 
3968 	for (i = 0; i < sclk_table->count; i++) {
3969 		if (sclk == sclk_table->dpm_levels[i].value)
3970 			break;
3971 	}
3972 
3973 	if (i >= sclk_table->count) {
3974 		if (sclk > sclk_table->dpm_levels[i-1].value) {
3975 			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3976 			sclk_table->dpm_levels[i-1].value = sclk;
3977 		}
3978 	} else {
3979 	/* TODO: Check SCLK in DAL's minimum clocks
3980 	 * in case DeepSleep divider update is required.
3981 	 */
3982 		if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
3983 			(min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
3984 				data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
3985 			data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3986 	}
3987 
3988 	for (i = 0; i < mclk_table->count; i++) {
3989 		if (mclk == mclk_table->dpm_levels[i].value)
3990 			break;
3991 	}
3992 
3993 	if (i >= mclk_table->count) {
3994 		if (mclk > mclk_table->dpm_levels[i-1].value) {
3995 			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3996 			mclk_table->dpm_levels[i-1].value = mclk;
3997 		}
3998 	}
3999 
4000 	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
4001 		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
4002 
4003 	return 0;
4004 }
4005 
4006 static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
4007 		const struct smu7_power_state *smu7_ps)
4008 {
4009 	uint32_t i;
4010 	uint32_t sclk, max_sclk = 0;
4011 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4012 	struct smu7_dpm_table *dpm_table = &data->dpm_table;
4013 
4014 	for (i = 0; i < smu7_ps->performance_level_count; i++) {
4015 		sclk = smu7_ps->performance_levels[i].engine_clock;
4016 		if (max_sclk < sclk)
4017 			max_sclk = sclk;
4018 	}
4019 
4020 	for (i = 0; i < dpm_table->sclk_table.count; i++) {
4021 		if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
4022 			return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
4023 					dpm_table->pcie_speed_table.dpm_levels
4024 					[dpm_table->pcie_speed_table.count - 1].value :
4025 					dpm_table->pcie_speed_table.dpm_levels[i].value);
4026 	}
4027 
4028 	return 0;
4029 }
4030 
4031 static int smu7_request_link_speed_change_before_state_change(
4032 		struct pp_hwmgr *hwmgr, const void *input)
4033 {
4034 	const struct phm_set_power_state_input *states =
4035 			(const struct phm_set_power_state_input *)input;
4036 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4037 	const struct smu7_power_state *smu7_nps =
4038 			cast_const_phw_smu7_power_state(states->pnew_state);
4039 	const struct smu7_power_state *polaris10_cps =
4040 			cast_const_phw_smu7_power_state(states->pcurrent_state);
4041 
4042 	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
4043 	uint16_t current_link_speed;
4044 
4045 	if (data->force_pcie_gen == PP_PCIEGenInvalid)
4046 		current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps);
4047 	else
4048 		current_link_speed = data->force_pcie_gen;
4049 
4050 	data->force_pcie_gen = PP_PCIEGenInvalid;
4051 	data->pspp_notify_required = false;
4052 
4053 	if (target_link_speed > current_link_speed) {
4054 		switch (target_link_speed) {
4055 #ifdef CONFIG_ACPI
4056 		case PP_PCIEGen3:
4057 			if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN3, false))
4058 				break;
4059 			data->force_pcie_gen = PP_PCIEGen2;
4060 			if (current_link_speed == PP_PCIEGen2)
4061 				break;
4062 			fallthrough;
4063 		case PP_PCIEGen2:
4064 			if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
4065 				break;
4066 			fallthrough;
4067 #endif
4068 		default:
4069 			data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
4070 			break;
4071 		}
4072 	} else {
4073 		if (target_link_speed < current_link_speed)
4074 			data->pspp_notify_required = true;
4075 	}
4076 
4077 	return 0;
4078 }
4079 
4080 static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4081 {
4082 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4083 
4084 	if (0 == data->need_update_smu7_dpm_table)
4085 		return 0;
4086 
4087 	if ((0 == data->sclk_dpm_key_disabled) &&
4088 		(data->need_update_smu7_dpm_table &
4089 			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
4090 		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
4091 				"Trying to freeze SCLK DPM when DPM is disabled",
4092 				);
4093 		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
4094 				PPSMC_MSG_SCLKDPM_FreezeLevel,
4095 				NULL),
4096 				"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
4097 				return -EINVAL);
4098 	}
4099 
4100 	if ((0 == data->mclk_dpm_key_disabled) &&
4101 		!data->mclk_ignore_signal &&
4102 		(data->need_update_smu7_dpm_table &
4103 		 DPMTABLE_OD_UPDATE_MCLK)) {
4104 		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
4105 				"Trying to freeze MCLK DPM when DPM is disabled",
4106 				);
4107 		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
4108 				PPSMC_MSG_MCLKDPM_FreezeLevel,
4109 				NULL),
4110 				"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
4111 				return -EINVAL);
4112 	}
4113 
4114 	return 0;
4115 }
4116 
4117 static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
4118 		struct pp_hwmgr *hwmgr, const void *input)
4119 {
4120 	int result = 0;
4121 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4122 	struct smu7_dpm_table *dpm_table = &data->dpm_table;
4123 	uint32_t count;
4124 	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
4125 	struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
4126 	struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
4127 
4128 	if (0 == data->need_update_smu7_dpm_table)
4129 		return 0;
4130 
4131 	if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
4132 		for (count = 0; count < dpm_table->sclk_table.count; count++) {
4133 			dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled;
4134 			dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock;
4135 		}
4136 	}
4137 
4138 	if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
4139 		for (count = 0; count < dpm_table->mclk_table.count; count++) {
4140 			dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled;
4141 			dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock;
4142 		}
4143 	}
4144 
4145 	if (data->need_update_smu7_dpm_table &
4146 			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
4147 		result = smum_populate_all_graphic_levels(hwmgr);
4148 		PP_ASSERT_WITH_CODE((0 == result),
4149 				"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
4150 				return result);
4151 	}
4152 
4153 	if (data->need_update_smu7_dpm_table &
4154 			(DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
4155 		/*populate MCLK dpm table to SMU7 */
4156 		result = smum_populate_all_memory_levels(hwmgr);
4157 		PP_ASSERT_WITH_CODE((0 == result),
4158 				"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
4159 				return result);
4160 	}
4161 
4162 	return result;
4163 }
4164 
4165 static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
4166 			  struct smu7_single_dpm_table *dpm_table,
4167 			uint32_t low_limit, uint32_t high_limit)
4168 {
4169 	uint32_t i;
4170 
4171 	/* force the trim if mclk_switching is disabled to prevent flicker */
4172 	bool force_trim = (low_limit == high_limit);
4173 	for (i = 0; i < dpm_table->count; i++) {
4174 	/*skip the trim if od is enabled*/
4175 		if ((!hwmgr->od_enabled || force_trim)
4176 			&& (dpm_table->dpm_levels[i].value < low_limit
4177 			|| dpm_table->dpm_levels[i].value > high_limit))
4178 			dpm_table->dpm_levels[i].enabled = false;
4179 		else
4180 			dpm_table->dpm_levels[i].enabled = true;
4181 	}
4182 
4183 	return 0;
4184 }
4185 
4186 static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
4187 		const struct smu7_power_state *smu7_ps)
4188 {
4189 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4190 	uint32_t high_limit_count;
4191 
4192 	PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
4193 			"power state did not have any performance level",
4194 			return -EINVAL);
4195 
4196 	high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;
4197 
4198 	smu7_trim_single_dpm_states(hwmgr,
4199 			&(data->dpm_table.sclk_table),
4200 			smu7_ps->performance_levels[0].engine_clock,
4201 			smu7_ps->performance_levels[high_limit_count].engine_clock);
4202 
4203 	smu7_trim_single_dpm_states(hwmgr,
4204 			&(data->dpm_table.mclk_table),
4205 			smu7_ps->performance_levels[0].memory_clock,
4206 			smu7_ps->performance_levels[high_limit_count].memory_clock);
4207 
4208 	return 0;
4209 }
4210 
4211 static int smu7_generate_dpm_level_enable_mask(
4212 		struct pp_hwmgr *hwmgr, const void *input)
4213 {
4214 	int result = 0;
4215 	const struct phm_set_power_state_input *states =
4216 			(const struct phm_set_power_state_input *)input;
4217 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4218 	const struct smu7_power_state *smu7_ps =
4219 			cast_const_phw_smu7_power_state(states->pnew_state);
4220 
4221 
4222 	result = smu7_trim_dpm_states(hwmgr, smu7_ps);
4223 	if (result)
4224 		return result;
4225 
4226 	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
4227 			phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
4228 	data->dpm_level_enable_mask.mclk_dpm_enable_mask =
4229 			phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
4230 	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
4231 			phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
4232 
4233 	return 0;
4234 }
4235 
4236 static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4237 {
4238 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4239 
4240 	if (0 == data->need_update_smu7_dpm_table)
4241 		return 0;
4242 
4243 	if ((0 == data->sclk_dpm_key_disabled) &&
4244 		(data->need_update_smu7_dpm_table &
4245 		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
4246 
4247 		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
4248 				"Trying to Unfreeze SCLK DPM when DPM is disabled",
4249 				);
4250 		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
4251 				PPSMC_MSG_SCLKDPM_UnfreezeLevel,
4252 				NULL),
4253 			"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
4254 			return -EINVAL);
4255 	}
4256 
4257 	if ((0 == data->mclk_dpm_key_disabled) &&
4258 		!data->mclk_ignore_signal &&
4259 		(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
4260 
4261 		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
4262 				"Trying to Unfreeze MCLK DPM when DPM is disabled",
4263 				);
4264 		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
4265 				PPSMC_MSG_MCLKDPM_UnfreezeLevel,
4266 				NULL),
4267 		    "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
4268 		    return -EINVAL);
4269 	}
4270 
4271 	data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
4272 
4273 	return 0;
4274 }
4275 
4276 static int smu7_notify_link_speed_change_after_state_change(
4277 		struct pp_hwmgr *hwmgr, const void *input)
4278 {
4279 	const struct phm_set_power_state_input *states =
4280 			(const struct phm_set_power_state_input *)input;
4281 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4282 	const struct smu7_power_state *smu7_ps =
4283 			cast_const_phw_smu7_power_state(states->pnew_state);
4284 	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
4285 	uint8_t  request;
4286 
4287 	if (data->pspp_notify_required) {
4288 		if (target_link_speed == PP_PCIEGen3)
4289 			request = PCIE_PERF_REQ_GEN3;
4290 		else if (target_link_speed == PP_PCIEGen2)
4291 			request = PCIE_PERF_REQ_GEN2;
4292 		else
4293 			request = PCIE_PERF_REQ_GEN1;
4294 
4295 		if (request == PCIE_PERF_REQ_GEN1 &&
4296 				smu7_get_current_pcie_speed(hwmgr) > 0)
4297 			return 0;
4298 
4299 #ifdef CONFIG_ACPI
4300 		if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) {
4301 			if (PP_PCIEGen2 == target_link_speed)
4302 				pr_info("PSPP request to switch to Gen2 from Gen3 Failed!");
4303 			else
4304 				pr_info("PSPP request to switch to Gen1 from Gen2 Failed!");
4305 		}
4306 #endif
4307 	}
4308 
4309 	return 0;
4310 }
4311 
4312 static int smu7_notify_no_display(struct pp_hwmgr *hwmgr)
4313 {
4314 	return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL) == 0) ?  0 : -EINVAL;
4315 }
4316 
4317 static int smu7_notify_has_display(struct pp_hwmgr *hwmgr)
4318 {
4319 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4320 
4321 	if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
4322 		if (hwmgr->chip_id == CHIP_VEGAM)
4323 			smum_send_msg_to_smc_with_parameter(hwmgr,
4324 					(PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2,
4325 					NULL);
4326 		else
4327 			smum_send_msg_to_smc_with_parameter(hwmgr,
4328 					(PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2,
4329 					NULL);
4330 		data->last_sent_vbi_timeout = data->frame_time_x2;
4331 	}
4332 
4333 	return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay, NULL) == 0) ?  0 : -EINVAL;
4334 }
4335 
4336 static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
4337 {
4338 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4339 	int result = 0;
4340 
4341 	if (data->mclk_ignore_signal)
4342 		result = smu7_notify_no_display(hwmgr);
4343 	else
4344 		result = smu7_notify_has_display(hwmgr);
4345 
4346 	return result;
4347 }
4348 
4349 static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
4350 {
4351 	int tmp_result, result = 0;
4352 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4353 
4354 	tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
4355 	PP_ASSERT_WITH_CODE((0 == tmp_result),
4356 			"Failed to find DPM states clocks in DPM table!",
4357 			result = tmp_result);
4358 
4359 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4360 			PHM_PlatformCaps_PCIEPerformanceRequest)) {
4361 		tmp_result =
4362 			smu7_request_link_speed_change_before_state_change(hwmgr, input);
4363 		PP_ASSERT_WITH_CODE((0 == tmp_result),
4364 				"Failed to request link speed change before state change!",
4365 				result = tmp_result);
4366 	}
4367 
4368 	tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
4369 	PP_ASSERT_WITH_CODE((0 == tmp_result),
4370 			"Failed to freeze SCLK MCLK DPM!", result = tmp_result);
4371 
4372 	tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
4373 	PP_ASSERT_WITH_CODE((0 == tmp_result),
4374 			"Failed to populate and upload SCLK MCLK DPM levels!",
4375 			result = tmp_result);
4376 
4377 	/*
4378 	 * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
4379 	 * That effectively disables AVFS feature.
4380 	 */
4381 	if (hwmgr->hardcode_pp_table != NULL)
4382 		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
4383 
4384 	tmp_result = smu7_update_avfs(hwmgr);
4385 	PP_ASSERT_WITH_CODE((0 == tmp_result),
4386 			"Failed to update avfs voltages!",
4387 			result = tmp_result);
4388 
4389 	tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
4390 	PP_ASSERT_WITH_CODE((0 == tmp_result),
4391 			"Failed to generate DPM level enabled mask!",
4392 			result = tmp_result);
4393 
4394 	tmp_result = smum_update_sclk_threshold(hwmgr);
4395 	PP_ASSERT_WITH_CODE((0 == tmp_result),
4396 			"Failed to update SCLK threshold!",
4397 			result = tmp_result);
4398 
4399 	tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
4400 	PP_ASSERT_WITH_CODE((0 == tmp_result),
4401 			"Failed to unfreeze SCLK MCLK DPM!",
4402 			result = tmp_result);
4403 
4404 	tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
4405 	PP_ASSERT_WITH_CODE((0 == tmp_result),
4406 			"Failed to upload DPM level enabled mask!",
4407 			result = tmp_result);
4408 
4409 	tmp_result = smu7_notify_smc_display(hwmgr);
4410 	PP_ASSERT_WITH_CODE((0 == tmp_result),
4411 			"Failed to notify smc display settings!",
4412 			result = tmp_result);
4413 
4414 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4415 			PHM_PlatformCaps_PCIEPerformanceRequest)) {
4416 		tmp_result =
4417 			smu7_notify_link_speed_change_after_state_change(hwmgr, input);
4418 		PP_ASSERT_WITH_CODE((0 == tmp_result),
4419 				"Failed to notify link speed change after state change!",
4420 				result = tmp_result);
4421 	}
4422 	data->apply_optimized_settings = false;
4423 	return result;
4424 }
4425 
4426 static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
4427 {
4428 	hwmgr->thermal_controller.
4429 	advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
4430 
4431 	return smum_send_msg_to_smc_with_parameter(hwmgr,
4432 			PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm,
4433 			NULL);
4434 }
4435 
4436 static int
4437 smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
4438 {
4439 	return 0;
4440 }
4441 
4442 /**
4443  * smu7_program_display_gap - Programs the display gap
4444  *
4445  * @hwmgr:  the address of the powerplay hardware manager.
4446  * Return:   always OK
4447  */
4448 static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
4449 {
4450 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4451 	uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
4452 	uint32_t display_gap2;
4453 	uint32_t pre_vbi_time_in_us;
4454 	uint32_t frame_time_in_us;
4455 	uint32_t ref_clock, refresh_rate;
4456 
4457 	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
4458 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
4459 
4460 	ref_clock =  amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
4461 	refresh_rate = hwmgr->display_config->vrefresh;
4462 
4463 	if (0 == refresh_rate)
4464 		refresh_rate = 60;
4465 
4466 	frame_time_in_us = 1000000 / refresh_rate;
4467 
4468 	pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time;
4469 
4470 	data->frame_time_x2 = frame_time_in_us * 2 / 100;
4471 
4472 	if (data->frame_time_x2 < 280) {
4473 		pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2);
4474 		data->frame_time_x2 = 280;
4475 	}
4476 
4477 	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
4478 
4479 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
4480 
4481 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4482 			data->soft_regs_start + smum_get_offsetof(hwmgr,
4483 							SMU_SoftRegisters,
4484 							PreVBlankGap), 0x64);
4485 
4486 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4487 			data->soft_regs_start + smum_get_offsetof(hwmgr,
4488 							SMU_SoftRegisters,
4489 							VBlankTimeout),
4490 					(frame_time_in_us - pre_vbi_time_in_us));
4491 
4492 	return 0;
4493 }
4494 
4495 static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4496 {
4497 	return smu7_program_display_gap(hwmgr);
4498 }
4499 
4500 /**
4501  * smu7_set_max_fan_rpm_output - Set maximum target operating fan output RPM
4502  *
4503  * @hwmgr:  the address of the powerplay hardware manager.
4504  * @usMaxFanRpm:  max operating fan RPM value.
4505  * Return:   The response that came from the SMC.
4506  */
4507 static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
4508 {
4509 	hwmgr->thermal_controller.
4510 	advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
4511 
4512 	return smum_send_msg_to_smc_with_parameter(hwmgr,
4513 			PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm,
4514 			NULL);
4515 }
4516 
4517 static const struct amdgpu_irq_src_funcs smu7_irq_funcs = {
4518 	.process = phm_irq_process,
4519 };
4520 
4521 static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
4522 {
4523 	struct amdgpu_irq_src *source =
4524 		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
4525 
4526 	if (!source)
4527 		return -ENOMEM;
4528 
4529 	source->funcs = &smu7_irq_funcs;
4530 
4531 	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4532 			AMDGPU_IRQ_CLIENTID_LEGACY,
4533 			VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH,
4534 			source);
4535 	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4536 			AMDGPU_IRQ_CLIENTID_LEGACY,
4537 			VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW,
4538 			source);
4539 
4540 	/* Register CTF(GPIO_19) interrupt */
4541 	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4542 			AMDGPU_IRQ_CLIENTID_LEGACY,
4543 			VISLANDS30_IV_SRCID_GPIO_19,
4544 			source);
4545 
4546 	return 0;
4547 }
4548 
4549 static bool
4550 smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4551 {
4552 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4553 	bool is_update_required = false;
4554 
4555 	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
4556 		is_update_required = true;
4557 
4558 	if (data->display_timing.vrefresh != hwmgr->display_config->vrefresh)
4559 		is_update_required = true;
4560 
4561 	if (hwmgr->chip_id >= CHIP_POLARIS10 &&
4562 	    hwmgr->chip_id <= CHIP_VEGAM &&
4563 	    data->last_sent_vbi_timeout != data->frame_time_x2)
4564 		is_update_required = true;
4565 
4566 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
4567 		if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr &&
4568 			(data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
4569 			hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
4570 			is_update_required = true;
4571 	}
4572 	return is_update_required;
4573 }
4574 
4575 static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
4576 							   const struct smu7_performance_level *pl2)
4577 {
4578 	return ((pl1->memory_clock == pl2->memory_clock) &&
4579 		  (pl1->engine_clock == pl2->engine_clock) &&
4580 		  (pl1->pcie_gen == pl2->pcie_gen) &&
4581 		  (pl1->pcie_lane == pl2->pcie_lane));
4582 }
4583 
4584 static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
4585 		const struct pp_hw_power_state *pstate1,
4586 		const struct pp_hw_power_state *pstate2, bool *equal)
4587 {
4588 	const struct smu7_power_state *psa;
4589 	const struct smu7_power_state *psb;
4590 	int i;
4591 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4592 
4593 	if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4594 		return -EINVAL;
4595 
4596 	psa = cast_const_phw_smu7_power_state(pstate1);
4597 	psb = cast_const_phw_smu7_power_state(pstate2);
4598 	/* If the two states don't even have the same number of performance levels they cannot be the same state. */
4599 	if (psa->performance_level_count != psb->performance_level_count) {
4600 		*equal = false;
4601 		return 0;
4602 	}
4603 
4604 	for (i = 0; i < psa->performance_level_count; i++) {
4605 		if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4606 			/* If we have found even one performance level pair that is different the states are different. */
4607 			*equal = false;
4608 			return 0;
4609 		}
4610 	}
4611 
4612 	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
4613 	*equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4614 	*equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4615 	*equal &= (psa->sclk_threshold == psb->sclk_threshold);
4616 	/* For OD call, set value based on flag */
4617 	*equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK |
4618 							DPMTABLE_OD_UPDATE_MCLK |
4619 							DPMTABLE_OD_UPDATE_VDDC));
4620 
4621 	return 0;
4622 }
4623 
4624 static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
4625 {
4626 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4627 
4628 	uint32_t tmp;
4629 
4630 	/* Read MC indirect register offset 0x9F bits [3:0] to see
4631 	 * if VBIOS has already loaded a full version of MC ucode
4632 	 * or not.
4633 	 */
4634 
4635 	smu7_get_mc_microcode_version(hwmgr);
4636 
4637 	data->need_long_memory_training = false;
4638 
4639 	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
4640 							ixMC_IO_DEBUG_UP_13);
4641 	tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
4642 
4643 	if (tmp & (1 << 23)) {
4644 		data->mem_latency_high = MEM_LATENCY_HIGH;
4645 		data->mem_latency_low = MEM_LATENCY_LOW;
4646 		if ((hwmgr->chip_id == CHIP_POLARIS10) ||
4647 		    (hwmgr->chip_id == CHIP_POLARIS11) ||
4648 		    (hwmgr->chip_id == CHIP_POLARIS12))
4649 			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC, NULL);
4650 	} else {
4651 		data->mem_latency_high = 330;
4652 		data->mem_latency_low = 330;
4653 		if ((hwmgr->chip_id == CHIP_POLARIS10) ||
4654 		    (hwmgr->chip_id == CHIP_POLARIS11) ||
4655 		    (hwmgr->chip_id == CHIP_POLARIS12))
4656 			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC, NULL);
4657 	}
4658 
4659 	return 0;
4660 }
4661 
4662 static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
4663 {
4664 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4665 
4666 	data->clock_registers.vCG_SPLL_FUNC_CNTL         =
4667 		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
4668 	data->clock_registers.vCG_SPLL_FUNC_CNTL_2       =
4669 		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
4670 	data->clock_registers.vCG_SPLL_FUNC_CNTL_3       =
4671 		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
4672 	data->clock_registers.vCG_SPLL_FUNC_CNTL_4       =
4673 		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
4674 	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM   =
4675 		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
4676 	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
4677 		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
4678 	data->clock_registers.vDLL_CNTL                  =
4679 		cgs_read_register(hwmgr->device, mmDLL_CNTL);
4680 	data->clock_registers.vMCLK_PWRMGT_CNTL          =
4681 		cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
4682 	data->clock_registers.vMPLL_AD_FUNC_CNTL         =
4683 		cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
4684 	data->clock_registers.vMPLL_DQ_FUNC_CNTL         =
4685 		cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
4686 	data->clock_registers.vMPLL_FUNC_CNTL            =
4687 		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
4688 	data->clock_registers.vMPLL_FUNC_CNTL_1          =
4689 		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
4690 	data->clock_registers.vMPLL_FUNC_CNTL_2          =
4691 		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
4692 	data->clock_registers.vMPLL_SS1                  =
4693 		cgs_read_register(hwmgr->device, mmMPLL_SS1);
4694 	data->clock_registers.vMPLL_SS2                  =
4695 		cgs_read_register(hwmgr->device, mmMPLL_SS2);
4696 	return 0;
4697 
4698 }
4699 
4700 /**
4701  * smu7_get_memory_type - Find out if memory is GDDR5.
4702  *
4703  * @hwmgr:  the address of the powerplay hardware manager.
4704  * Return:   always 0
4705  */
4706 static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
4707 {
4708 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4709 	struct amdgpu_device *adev = hwmgr->adev;
4710 
4711 	data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5);
4712 
4713 	return 0;
4714 }
4715 
4716 /**
4717  * smu7_enable_acpi_power_management - Enables Dynamic Power Management by SMC
4718  *
4719  * @hwmgr:  the address of the powerplay hardware manager.
4720  * Return:   always 0
4721  */
4722 static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
4723 {
4724 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
4725 			GENERAL_PWRMGT, STATIC_PM_EN, 1);
4726 
4727 	return 0;
4728 }
4729 
4730 /**
4731  * smu7_init_power_gate_state - Initialize PowerGating States for different engines
4732  *
4733  * @hwmgr:  the address of the powerplay hardware manager.
4734  * Return:   always 0
4735  */
4736 static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
4737 {
4738 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4739 
4740 	data->uvd_power_gated = false;
4741 	data->vce_power_gated = false;
4742 
4743 	return 0;
4744 }
4745 
4746 static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
4747 {
4748 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4749 
4750 	data->low_sclk_interrupt_threshold = 0;
4751 	return 0;
4752 }
4753 
4754 static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
4755 {
4756 	int tmp_result, result = 0;
4757 
4758 	smu7_check_mc_firmware(hwmgr);
4759 
4760 	tmp_result = smu7_read_clock_registers(hwmgr);
4761 	PP_ASSERT_WITH_CODE((0 == tmp_result),
4762 			"Failed to read clock registers!", result = tmp_result);
4763 
4764 	tmp_result = smu7_get_memory_type(hwmgr);
4765 	PP_ASSERT_WITH_CODE((0 == tmp_result),
4766 			"Failed to get memory type!", result = tmp_result);
4767 
4768 	tmp_result = smu7_enable_acpi_power_management(hwmgr);
4769 	PP_ASSERT_WITH_CODE((0 == tmp_result),
4770 			"Failed to enable ACPI power management!", result = tmp_result);
4771 
4772 	tmp_result = smu7_init_power_gate_state(hwmgr);
4773 	PP_ASSERT_WITH_CODE((0 == tmp_result),
4774 			"Failed to init power gate state!", result = tmp_result);
4775 
4776 	tmp_result = smu7_get_mc_microcode_version(hwmgr);
4777 	PP_ASSERT_WITH_CODE((0 == tmp_result),
4778 			"Failed to get MC microcode version!", result = tmp_result);
4779 
4780 	tmp_result = smu7_init_sclk_threshold(hwmgr);
4781 	PP_ASSERT_WITH_CODE((0 == tmp_result),
4782 			"Failed to init sclk threshold!", result = tmp_result);
4783 
4784 	return result;
4785 }
4786 
4787 static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
4788 		enum pp_clock_type type, uint32_t mask)
4789 {
4790 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4791 
4792 	if (mask == 0)
4793 		return -EINVAL;
4794 
4795 	switch (type) {
4796 	case PP_SCLK:
4797 		if (!data->sclk_dpm_key_disabled)
4798 			smum_send_msg_to_smc_with_parameter(hwmgr,
4799 					PPSMC_MSG_SCLKDPM_SetEnabledMask,
4800 					data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask,
4801 					NULL);
4802 		break;
4803 	case PP_MCLK:
4804 		if (!data->mclk_dpm_key_disabled)
4805 			smum_send_msg_to_smc_with_parameter(hwmgr,
4806 					PPSMC_MSG_MCLKDPM_SetEnabledMask,
4807 					data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask,
4808 					NULL);
4809 		break;
4810 	case PP_PCIE:
4811 	{
4812 		uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
4813 
4814 		if (!data->pcie_dpm_key_disabled) {
4815 			if (fls(tmp) != ffs(tmp))
4816 				smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel,
4817 						NULL);
4818 			else
4819 				smum_send_msg_to_smc_with_parameter(hwmgr,
4820 					PPSMC_MSG_PCIeDPM_ForceLevel,
4821 					fls(tmp) - 1,
4822 					NULL);
4823 		}
4824 		break;
4825 	}
4826 	default:
4827 		break;
4828 	}
4829 
4830 	return 0;
4831 }
4832 
4833 static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
4834 		enum pp_clock_type type, char *buf)
4835 {
4836 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4837 	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4838 	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4839 	struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
4840 	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
4841 	struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
4842 	struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
4843 	int i, now, size = 0;
4844 	uint32_t clock, pcie_speed;
4845 
4846 	switch (type) {
4847 	case PP_SCLK:
4848 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
4849 
4850 		for (i = 0; i < sclk_table->count; i++) {
4851 			if (clock > sclk_table->dpm_levels[i].value)
4852 				continue;
4853 			break;
4854 		}
4855 		now = i;
4856 
4857 		for (i = 0; i < sclk_table->count; i++)
4858 			size += sprintf(buf + size, "%d: %uMhz %s\n",
4859 					i, sclk_table->dpm_levels[i].value / 100,
4860 					(i == now) ? "*" : "");
4861 		break;
4862 	case PP_MCLK:
4863 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock);
4864 
4865 		for (i = 0; i < mclk_table->count; i++) {
4866 			if (clock > mclk_table->dpm_levels[i].value)
4867 				continue;
4868 			break;
4869 		}
4870 		now = i;
4871 
4872 		for (i = 0; i < mclk_table->count; i++)
4873 			size += sprintf(buf + size, "%d: %uMhz %s\n",
4874 					i, mclk_table->dpm_levels[i].value / 100,
4875 					(i == now) ? "*" : "");
4876 		break;
4877 	case PP_PCIE:
4878 		pcie_speed = smu7_get_current_pcie_speed(hwmgr);
4879 		for (i = 0; i < pcie_table->count; i++) {
4880 			if (pcie_speed != pcie_table->dpm_levels[i].value)
4881 				continue;
4882 			break;
4883 		}
4884 		now = i;
4885 
4886 		for (i = 0; i < pcie_table->count; i++)
4887 			size += sprintf(buf + size, "%d: %s %s\n", i,
4888 					(pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
4889 					(pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
4890 					(pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
4891 					(i == now) ? "*" : "");
4892 		break;
4893 	case OD_SCLK:
4894 		if (hwmgr->od_enabled) {
4895 			size = sprintf(buf, "%s:\n", "OD_SCLK");
4896 			for (i = 0; i < odn_sclk_table->num_of_pl; i++)
4897 				size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
4898 					i, odn_sclk_table->entries[i].clock/100,
4899 					odn_sclk_table->entries[i].vddc);
4900 		}
4901 		break;
4902 	case OD_MCLK:
4903 		if (hwmgr->od_enabled) {
4904 			size = sprintf(buf, "%s:\n", "OD_MCLK");
4905 			for (i = 0; i < odn_mclk_table->num_of_pl; i++)
4906 				size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
4907 					i, odn_mclk_table->entries[i].clock/100,
4908 					odn_mclk_table->entries[i].vddc);
4909 		}
4910 		break;
4911 	case OD_RANGE:
4912 		if (hwmgr->od_enabled) {
4913 			size = sprintf(buf, "%s:\n", "OD_RANGE");
4914 			size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
4915 				data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
4916 				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
4917 			size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
4918 				data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
4919 				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
4920 			size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
4921 				data->odn_dpm_table.min_vddc,
4922 				data->odn_dpm_table.max_vddc);
4923 		}
4924 		break;
4925 	default:
4926 		break;
4927 	}
4928 	return size;
4929 }
4930 
4931 static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
4932 {
4933 	switch (mode) {
4934 	case AMD_FAN_CTRL_NONE:
4935 		smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
4936 		break;
4937 	case AMD_FAN_CTRL_MANUAL:
4938 		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4939 			PHM_PlatformCaps_MicrocodeFanControl))
4940 			smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
4941 		break;
4942 	case AMD_FAN_CTRL_AUTO:
4943 		if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode))
4944 			smu7_fan_ctrl_start_smc_fan_control(hwmgr);
4945 		break;
4946 	default:
4947 		break;
4948 	}
4949 }
4950 
4951 static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
4952 {
4953 	return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL;
4954 }
4955 
4956 static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
4957 {
4958 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4959 	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4960 	struct smu7_single_dpm_table *golden_sclk_table =
4961 			&(data->golden_dpm_table.sclk_table);
4962 	int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
4963 	int golden_value = golden_sclk_table->dpm_levels
4964 			[golden_sclk_table->count - 1].value;
4965 
4966 	value -= golden_value;
4967 	value = DIV_ROUND_UP(value * 100, golden_value);
4968 
4969 	return value;
4970 }
4971 
4972 static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4973 {
4974 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4975 	struct smu7_single_dpm_table *golden_sclk_table =
4976 			&(data->golden_dpm_table.sclk_table);
4977 	struct pp_power_state  *ps;
4978 	struct smu7_power_state  *smu7_ps;
4979 
4980 	if (value > 20)
4981 		value = 20;
4982 
4983 	ps = hwmgr->request_ps;
4984 
4985 	if (ps == NULL)
4986 		return -EINVAL;
4987 
4988 	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
4989 
4990 	smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
4991 			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
4992 			value / 100 +
4993 			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
4994 
4995 	return 0;
4996 }
4997 
4998 static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
4999 {
5000 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5001 	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
5002 	struct smu7_single_dpm_table *golden_mclk_table =
5003 			&(data->golden_dpm_table.mclk_table);
5004         int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
5005 	int golden_value = golden_mclk_table->dpm_levels
5006 			[golden_mclk_table->count - 1].value;
5007 
5008 	value -= golden_value;
5009 	value = DIV_ROUND_UP(value * 100, golden_value);
5010 
5011 	return value;
5012 }
5013 
5014 static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5015 {
5016 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5017 	struct smu7_single_dpm_table *golden_mclk_table =
5018 			&(data->golden_dpm_table.mclk_table);
5019 	struct pp_power_state  *ps;
5020 	struct smu7_power_state  *smu7_ps;
5021 
5022 	if (value > 20)
5023 		value = 20;
5024 
5025 	ps = hwmgr->request_ps;
5026 
5027 	if (ps == NULL)
5028 		return -EINVAL;
5029 
5030 	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
5031 
5032 	smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
5033 			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
5034 			value / 100 +
5035 			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
5036 
5037 	return 0;
5038 }
5039 
5040 
5041 static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
5042 {
5043 	struct phm_ppt_v1_information *table_info =
5044 			(struct phm_ppt_v1_information *)hwmgr->pptable;
5045 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
5046 	struct phm_clock_voltage_dependency_table *sclk_table;
5047 	int i;
5048 
5049 	if (hwmgr->pp_table_version == PP_TABLE_V1) {
5050 		if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
5051 			return -EINVAL;
5052 		dep_sclk_table = table_info->vdd_dep_on_sclk;
5053 		for (i = 0; i < dep_sclk_table->count; i++)
5054 			clocks->clock[i] = dep_sclk_table->entries[i].clk * 10;
5055 		clocks->count = dep_sclk_table->count;
5056 	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
5057 		sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
5058 		for (i = 0; i < sclk_table->count; i++)
5059 			clocks->clock[i] = sclk_table->entries[i].clk * 10;
5060 		clocks->count = sclk_table->count;
5061 	}
5062 
5063 	return 0;
5064 }
5065 
5066 static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
5067 {
5068 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5069 
5070 	if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
5071 		return data->mem_latency_high;
5072 	else if (clk >= MEM_FREQ_HIGH_LATENCY)
5073 		return data->mem_latency_low;
5074 	else
5075 		return MEM_LATENCY_ERR;
5076 }
5077 
5078 static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
5079 {
5080 	struct phm_ppt_v1_information *table_info =
5081 			(struct phm_ppt_v1_information *)hwmgr->pptable;
5082 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
5083 	int i;
5084 	struct phm_clock_voltage_dependency_table *mclk_table;
5085 
5086 	if (hwmgr->pp_table_version == PP_TABLE_V1) {
5087 		if (table_info == NULL)
5088 			return -EINVAL;
5089 		dep_mclk_table = table_info->vdd_dep_on_mclk;
5090 		for (i = 0; i < dep_mclk_table->count; i++) {
5091 			clocks->clock[i] = dep_mclk_table->entries[i].clk * 10;
5092 			clocks->latency[i] = smu7_get_mem_latency(hwmgr,
5093 						dep_mclk_table->entries[i].clk);
5094 		}
5095 		clocks->count = dep_mclk_table->count;
5096 	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
5097 		mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
5098 		for (i = 0; i < mclk_table->count; i++)
5099 			clocks->clock[i] = mclk_table->entries[i].clk * 10;
5100 		clocks->count = mclk_table->count;
5101 	}
5102 	return 0;
5103 }
5104 
5105 static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
5106 						struct amd_pp_clocks *clocks)
5107 {
5108 	switch (type) {
5109 	case amd_pp_sys_clock:
5110 		smu7_get_sclks(hwmgr, clocks);
5111 		break;
5112 	case amd_pp_mem_clock:
5113 		smu7_get_mclks(hwmgr, clocks);
5114 		break;
5115 	default:
5116 		return -EINVAL;
5117 	}
5118 
5119 	return 0;
5120 }
5121 
5122 static int smu7_get_sclks_with_latency(struct pp_hwmgr *hwmgr,
5123 				       struct pp_clock_levels_with_latency *clocks)
5124 {
5125 	struct phm_ppt_v1_information *table_info =
5126 			(struct phm_ppt_v1_information *)hwmgr->pptable;
5127 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
5128 			table_info->vdd_dep_on_sclk;
5129 	int i;
5130 
5131 	clocks->num_levels = 0;
5132 	for (i = 0; i < dep_sclk_table->count; i++) {
5133 		if (dep_sclk_table->entries[i].clk) {
5134 			clocks->data[clocks->num_levels].clocks_in_khz =
5135 				dep_sclk_table->entries[i].clk * 10;
5136 			clocks->num_levels++;
5137 		}
5138 	}
5139 
5140 	return 0;
5141 }
5142 
5143 static int smu7_get_mclks_with_latency(struct pp_hwmgr *hwmgr,
5144 				       struct pp_clock_levels_with_latency *clocks)
5145 {
5146 	struct phm_ppt_v1_information *table_info =
5147 			(struct phm_ppt_v1_information *)hwmgr->pptable;
5148 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
5149 			table_info->vdd_dep_on_mclk;
5150 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5151 	int i;
5152 
5153 	clocks->num_levels = 0;
5154 	data->mclk_latency_table.count = 0;
5155 	for (i = 0; i < dep_mclk_table->count; i++) {
5156 		if (dep_mclk_table->entries[i].clk) {
5157 			clocks->data[clocks->num_levels].clocks_in_khz =
5158 					dep_mclk_table->entries[i].clk * 10;
5159 			data->mclk_latency_table.entries[data->mclk_latency_table.count].frequency =
5160 					dep_mclk_table->entries[i].clk;
5161 			clocks->data[clocks->num_levels].latency_in_us =
5162 				data->mclk_latency_table.entries[data->mclk_latency_table.count].latency =
5163 					smu7_get_mem_latency(hwmgr, dep_mclk_table->entries[i].clk);
5164 			clocks->num_levels++;
5165 			data->mclk_latency_table.count++;
5166 		}
5167 	}
5168 
5169 	return 0;
5170 }
5171 
5172 static int smu7_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
5173 					       enum amd_pp_clock_type type,
5174 					       struct pp_clock_levels_with_latency *clocks)
5175 {
5176 	if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
5177 	      hwmgr->chip_id <= CHIP_VEGAM))
5178 		return -EINVAL;
5179 
5180 	switch (type) {
5181 	case amd_pp_sys_clock:
5182 		smu7_get_sclks_with_latency(hwmgr, clocks);
5183 		break;
5184 	case amd_pp_mem_clock:
5185 		smu7_get_mclks_with_latency(hwmgr, clocks);
5186 		break;
5187 	default:
5188 		return -EINVAL;
5189 	}
5190 
5191 	return 0;
5192 }
5193 
5194 static int smu7_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
5195 						 void *clock_range)
5196 {
5197 	struct phm_ppt_v1_information *table_info =
5198 			(struct phm_ppt_v1_information *)hwmgr->pptable;
5199 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
5200 			table_info->vdd_dep_on_mclk;
5201 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
5202 			table_info->vdd_dep_on_sclk;
5203 	struct polaris10_smumgr *smu_data =
5204 			(struct polaris10_smumgr *)(hwmgr->smu_backend);
5205 	SMU74_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
5206 	struct dm_pp_wm_sets_with_clock_ranges *watermarks =
5207 			(struct dm_pp_wm_sets_with_clock_ranges *)clock_range;
5208 	uint32_t i, j, k;
5209 	bool valid_entry;
5210 
5211 	if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
5212 	      hwmgr->chip_id <= CHIP_VEGAM))
5213 		return -EINVAL;
5214 
5215 	for (i = 0; i < dep_mclk_table->count; i++) {
5216 		for (j = 0; j < dep_sclk_table->count; j++) {
5217 			valid_entry = false;
5218 			for (k = 0; k < watermarks->num_wm_sets; k++) {
5219 				if (dep_sclk_table->entries[i].clk / 10 >= watermarks->wm_clk_ranges[k].wm_min_eng_clk_in_khz &&
5220 				    dep_sclk_table->entries[i].clk / 10 < watermarks->wm_clk_ranges[k].wm_max_eng_clk_in_khz &&
5221 				    dep_mclk_table->entries[i].clk / 10 >= watermarks->wm_clk_ranges[k].wm_min_mem_clk_in_khz &&
5222 				    dep_mclk_table->entries[i].clk / 10 < watermarks->wm_clk_ranges[k].wm_max_mem_clk_in_khz) {
5223 					valid_entry = true;
5224 					table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k].wm_set_id;
5225 					break;
5226 				}
5227 			}
5228 			PP_ASSERT_WITH_CODE(valid_entry,
5229 					"Clock is not in range of specified clock range for watermark from DAL!  Using highest water mark set.",
5230 					table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k - 1].wm_set_id);
5231 		}
5232 	}
5233 
5234 	return smu7_copy_bytes_to_smc(hwmgr,
5235 				      smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, DisplayWatermark),
5236 				      (uint8_t *)table->DisplayWatermark,
5237 				      sizeof(uint8_t) * SMU74_MAX_LEVELS_MEMORY * SMU74_MAX_LEVELS_GRAPHICS,
5238 				      SMC_RAM_END);
5239 }
5240 
5241 static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
5242 					uint32_t virtual_addr_low,
5243 					uint32_t virtual_addr_hi,
5244 					uint32_t mc_addr_low,
5245 					uint32_t mc_addr_hi,
5246 					uint32_t size)
5247 {
5248 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5249 
5250 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5251 					data->soft_regs_start +
5252 					smum_get_offsetof(hwmgr,
5253 					SMU_SoftRegisters, DRAM_LOG_ADDR_H),
5254 					mc_addr_hi);
5255 
5256 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5257 					data->soft_regs_start +
5258 					smum_get_offsetof(hwmgr,
5259 					SMU_SoftRegisters, DRAM_LOG_ADDR_L),
5260 					mc_addr_low);
5261 
5262 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5263 					data->soft_regs_start +
5264 					smum_get_offsetof(hwmgr,
5265 					SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
5266 					virtual_addr_hi);
5267 
5268 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5269 					data->soft_regs_start +
5270 					smum_get_offsetof(hwmgr,
5271 					SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
5272 					virtual_addr_low);
5273 
5274 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5275 					data->soft_regs_start +
5276 					smum_get_offsetof(hwmgr,
5277 					SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
5278 					size);
5279 	return 0;
5280 }
5281 
5282 static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
5283 					struct amd_pp_simple_clock_info *clocks)
5284 {
5285 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5286 	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
5287 	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
5288 
5289 	if (clocks == NULL)
5290 		return -EINVAL;
5291 
5292 	clocks->memory_max_clock = mclk_table->count > 1 ?
5293 				mclk_table->dpm_levels[mclk_table->count-1].value :
5294 				mclk_table->dpm_levels[0].value;
5295 	clocks->engine_max_clock = sclk_table->count > 1 ?
5296 				sclk_table->dpm_levels[sclk_table->count-1].value :
5297 				sclk_table->dpm_levels[0].value;
5298 	return 0;
5299 }
5300 
5301 static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
5302 		struct PP_TemperatureRange *thermal_data)
5303 {
5304 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5305 	struct phm_ppt_v1_information *table_info =
5306 			(struct phm_ppt_v1_information *)hwmgr->pptable;
5307 
5308 	memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
5309 
5310 	if (hwmgr->pp_table_version == PP_TABLE_V1)
5311 		thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
5312 			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
5313 	else if (hwmgr->pp_table_version == PP_TABLE_V0)
5314 		thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
5315 			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
5316 
5317 	return 0;
5318 }
5319 
5320 static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
5321 					enum PP_OD_DPM_TABLE_COMMAND type,
5322 					uint32_t clk,
5323 					uint32_t voltage)
5324 {
5325 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5326 
5327 	if (voltage < data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) {
5328 		pr_info("OD voltage is out of range [%d - %d] mV\n",
5329 						data->odn_dpm_table.min_vddc,
5330 						data->odn_dpm_table.max_vddc);
5331 		return false;
5332 	}
5333 
5334 	if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
5335 		if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk ||
5336 			hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
5337 			pr_info("OD engine clock is out of range [%d - %d] MHz\n",
5338 				data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
5339 				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
5340 			return false;
5341 		}
5342 	} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
5343 		if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk ||
5344 			hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
5345 			pr_info("OD memory clock is out of range [%d - %d] MHz\n",
5346 				data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
5347 				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
5348 			return false;
5349 		}
5350 	} else {
5351 		return false;
5352 	}
5353 
5354 	return true;
5355 }
5356 
5357 static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
5358 					enum PP_OD_DPM_TABLE_COMMAND type,
5359 					long *input, uint32_t size)
5360 {
5361 	uint32_t i;
5362 	struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL;
5363 	struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL;
5364 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5365 
5366 	uint32_t input_clk;
5367 	uint32_t input_vol;
5368 	uint32_t input_level;
5369 
5370 	PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
5371 				return -EINVAL);
5372 
5373 	if (!hwmgr->od_enabled) {
5374 		pr_info("OverDrive feature not enabled\n");
5375 		return -EINVAL;
5376 	}
5377 
5378 	if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
5379 		podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels;
5380 		podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_sclk;
5381 		PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
5382 				"Failed to get ODN SCLK and Voltage tables",
5383 				return -EINVAL);
5384 	} else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
5385 		podn_dpm_table_in_backend = &data->odn_dpm_table.odn_memory_clock_dpm_levels;
5386 		podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_mclk;
5387 
5388 		PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
5389 			"Failed to get ODN MCLK and Voltage tables",
5390 			return -EINVAL);
5391 	} else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
5392 		smu7_odn_initial_default_setting(hwmgr);
5393 		return 0;
5394 	} else if (PP_OD_COMMIT_DPM_TABLE == type) {
5395 		smu7_check_dpm_table_updated(hwmgr);
5396 		return 0;
5397 	} else {
5398 		return -EINVAL;
5399 	}
5400 
5401 	for (i = 0; i < size; i += 3) {
5402 		if (i + 3 > size || input[i] >= podn_dpm_table_in_backend->num_of_pl) {
5403 			pr_info("invalid clock voltage input \n");
5404 			return 0;
5405 		}
5406 		input_level = input[i];
5407 		input_clk = input[i+1] * 100;
5408 		input_vol = input[i+2];
5409 
5410 		if (smu7_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
5411 			podn_dpm_table_in_backend->entries[input_level].clock = input_clk;
5412 			podn_vdd_dep_in_backend->entries[input_level].clk = input_clk;
5413 			podn_dpm_table_in_backend->entries[input_level].vddc = input_vol;
5414 			podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol;
5415 			podn_vdd_dep_in_backend->entries[input_level].vddgfx = input_vol;
5416 		} else {
5417 			return -EINVAL;
5418 		}
5419 	}
5420 
5421 	return 0;
5422 }
5423 
5424 static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
5425 {
5426 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5427 	uint32_t i, size = 0;
5428 	uint32_t len;
5429 
5430 	static const char *profile_name[7] = {"BOOTUP_DEFAULT",
5431 					"3D_FULL_SCREEN",
5432 					"POWER_SAVING",
5433 					"VIDEO",
5434 					"VR",
5435 					"COMPUTE",
5436 					"CUSTOM"};
5437 
5438 	static const char *title[8] = {"NUM",
5439 			"MODE_NAME",
5440 			"SCLK_UP_HYST",
5441 			"SCLK_DOWN_HYST",
5442 			"SCLK_ACTIVE_LEVEL",
5443 			"MCLK_UP_HYST",
5444 			"MCLK_DOWN_HYST",
5445 			"MCLK_ACTIVE_LEVEL"};
5446 
5447 	if (!buf)
5448 		return -EINVAL;
5449 
5450 	size += sprintf(buf + size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
5451 			title[0], title[1], title[2], title[3],
5452 			title[4], title[5], title[6], title[7]);
5453 
5454 	len = ARRAY_SIZE(smu7_profiling);
5455 
5456 	for (i = 0; i < len; i++) {
5457 		if (i == hwmgr->power_profile_mode) {
5458 			size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
5459 			i, profile_name[i], "*",
5460 			data->current_profile_setting.sclk_up_hyst,
5461 			data->current_profile_setting.sclk_down_hyst,
5462 			data->current_profile_setting.sclk_activity,
5463 			data->current_profile_setting.mclk_up_hyst,
5464 			data->current_profile_setting.mclk_down_hyst,
5465 			data->current_profile_setting.mclk_activity);
5466 			continue;
5467 		}
5468 		if (smu7_profiling[i].bupdate_sclk)
5469 			size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
5470 			i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
5471 			smu7_profiling[i].sclk_down_hyst,
5472 			smu7_profiling[i].sclk_activity);
5473 		else
5474 			size += sprintf(buf + size, "%3d %16s: %8s %16s %16s ",
5475 			i, profile_name[i], "-", "-", "-");
5476 
5477 		if (smu7_profiling[i].bupdate_mclk)
5478 			size += sprintf(buf + size, "%16d %16d %16d\n",
5479 			smu7_profiling[i].mclk_up_hyst,
5480 			smu7_profiling[i].mclk_down_hyst,
5481 			smu7_profiling[i].mclk_activity);
5482 		else
5483 			size += sprintf(buf + size, "%16s %16s %16s\n",
5484 			"-", "-", "-");
5485 	}
5486 
5487 	return size;
5488 }
5489 
5490 static void smu7_patch_compute_profile_mode(struct pp_hwmgr *hwmgr,
5491 					enum PP_SMC_POWER_PROFILE requst)
5492 {
5493 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5494 	uint32_t tmp, level;
5495 
5496 	if (requst == PP_SMC_POWER_PROFILE_COMPUTE) {
5497 		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
5498 			level = 0;
5499 			tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
5500 			while (tmp >>= 1)
5501 				level++;
5502 			if (level > 0)
5503 				smu7_force_clock_level(hwmgr, PP_SCLK, 3 << (level-1));
5504 		}
5505 	} else if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
5506 		smu7_force_clock_level(hwmgr, PP_SCLK, data->dpm_level_enable_mask.sclk_dpm_enable_mask);
5507 	}
5508 }
5509 
5510 static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
5511 {
5512 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5513 	struct profile_mode_setting tmp;
5514 	enum PP_SMC_POWER_PROFILE mode;
5515 
5516 	if (input == NULL)
5517 		return -EINVAL;
5518 
5519 	mode = input[size];
5520 	switch (mode) {
5521 	case PP_SMC_POWER_PROFILE_CUSTOM:
5522 		if (size < 8 && size != 0)
5523 			return -EINVAL;
5524 		/* If only CUSTOM is passed in, use the saved values. Check
5525 		 * that we actually have a CUSTOM profile by ensuring that
5526 		 * the "use sclk" or the "use mclk" bits are set
5527 		 */
5528 		tmp = smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM];
5529 		if (size == 0) {
5530 			if (tmp.bupdate_sclk == 0 && tmp.bupdate_mclk == 0)
5531 				return -EINVAL;
5532 		} else {
5533 			tmp.bupdate_sclk = input[0];
5534 			tmp.sclk_up_hyst = input[1];
5535 			tmp.sclk_down_hyst = input[2];
5536 			tmp.sclk_activity = input[3];
5537 			tmp.bupdate_mclk = input[4];
5538 			tmp.mclk_up_hyst = input[5];
5539 			tmp.mclk_down_hyst = input[6];
5540 			tmp.mclk_activity = input[7];
5541 			smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM] = tmp;
5542 		}
5543 		if (!smum_update_dpm_settings(hwmgr, &tmp)) {
5544 			memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
5545 			hwmgr->power_profile_mode = mode;
5546 		}
5547 		break;
5548 	case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
5549 	case PP_SMC_POWER_PROFILE_POWERSAVING:
5550 	case PP_SMC_POWER_PROFILE_VIDEO:
5551 	case PP_SMC_POWER_PROFILE_VR:
5552 	case PP_SMC_POWER_PROFILE_COMPUTE:
5553 		if (mode == hwmgr->power_profile_mode)
5554 			return 0;
5555 
5556 		memcpy(&tmp, &smu7_profiling[mode], sizeof(struct profile_mode_setting));
5557 		if (!smum_update_dpm_settings(hwmgr, &tmp)) {
5558 			if (tmp.bupdate_sclk) {
5559 				data->current_profile_setting.bupdate_sclk = tmp.bupdate_sclk;
5560 				data->current_profile_setting.sclk_up_hyst = tmp.sclk_up_hyst;
5561 				data->current_profile_setting.sclk_down_hyst = tmp.sclk_down_hyst;
5562 				data->current_profile_setting.sclk_activity = tmp.sclk_activity;
5563 			}
5564 			if (tmp.bupdate_mclk) {
5565 				data->current_profile_setting.bupdate_mclk = tmp.bupdate_mclk;
5566 				data->current_profile_setting.mclk_up_hyst = tmp.mclk_up_hyst;
5567 				data->current_profile_setting.mclk_down_hyst = tmp.mclk_down_hyst;
5568 				data->current_profile_setting.mclk_activity = tmp.mclk_activity;
5569 			}
5570 			smu7_patch_compute_profile_mode(hwmgr, mode);
5571 			hwmgr->power_profile_mode = mode;
5572 		}
5573 		break;
5574 	default:
5575 		return -EINVAL;
5576 	}
5577 
5578 	return 0;
5579 }
5580 
5581 static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
5582 				PHM_PerformanceLevelDesignation designation, uint32_t index,
5583 				PHM_PerformanceLevel *level)
5584 {
5585 	const struct smu7_power_state *ps;
5586 	uint32_t i;
5587 
5588 	if (level == NULL || hwmgr == NULL || state == NULL)
5589 		return -EINVAL;
5590 
5591 	ps = cast_const_phw_smu7_power_state(state);
5592 
5593 	i = index > ps->performance_level_count - 1 ?
5594 			ps->performance_level_count - 1 : index;
5595 
5596 	level->coreClock = ps->performance_levels[i].engine_clock;
5597 	level->memory_clock = ps->performance_levels[i].memory_clock;
5598 
5599 	return 0;
5600 }
5601 
5602 static int smu7_power_off_asic(struct pp_hwmgr *hwmgr)
5603 {
5604 	int result;
5605 
5606 	result = smu7_disable_dpm_tasks(hwmgr);
5607 	PP_ASSERT_WITH_CODE((0 == result),
5608 			"[disable_dpm_tasks] Failed to disable DPM!",
5609 			);
5610 
5611 	return result;
5612 }
5613 
5614 static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
5615 	.backend_init = &smu7_hwmgr_backend_init,
5616 	.backend_fini = &smu7_hwmgr_backend_fini,
5617 	.asic_setup = &smu7_setup_asic_task,
5618 	.dynamic_state_management_enable = &smu7_enable_dpm_tasks,
5619 	.apply_state_adjust_rules = smu7_apply_state_adjust_rules,
5620 	.force_dpm_level = &smu7_force_dpm_level,
5621 	.power_state_set = smu7_set_power_state_tasks,
5622 	.get_power_state_size = smu7_get_power_state_size,
5623 	.get_mclk = smu7_dpm_get_mclk,
5624 	.get_sclk = smu7_dpm_get_sclk,
5625 	.patch_boot_state = smu7_dpm_patch_boot_state,
5626 	.get_pp_table_entry = smu7_get_pp_table_entry,
5627 	.get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
5628 	.powerdown_uvd = smu7_powerdown_uvd,
5629 	.powergate_uvd = smu7_powergate_uvd,
5630 	.powergate_vce = smu7_powergate_vce,
5631 	.disable_clock_power_gating = smu7_disable_clock_power_gating,
5632 	.update_clock_gatings = smu7_update_clock_gatings,
5633 	.notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
5634 	.display_config_changed = smu7_display_configuration_changed_task,
5635 	.set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
5636 	.set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
5637 	.stop_thermal_controller = smu7_thermal_stop_thermal_controller,
5638 	.get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
5639 	.get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
5640 	.set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
5641 	.reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
5642 	.get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
5643 	.set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
5644 	.uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
5645 	.register_irq_handlers = smu7_register_irq_handlers,
5646 	.check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
5647 	.check_states_equal = smu7_check_states_equal,
5648 	.set_fan_control_mode = smu7_set_fan_control_mode,
5649 	.get_fan_control_mode = smu7_get_fan_control_mode,
5650 	.force_clock_level = smu7_force_clock_level,
5651 	.print_clock_levels = smu7_print_clock_levels,
5652 	.powergate_gfx = smu7_powergate_gfx,
5653 	.get_sclk_od = smu7_get_sclk_od,
5654 	.set_sclk_od = smu7_set_sclk_od,
5655 	.get_mclk_od = smu7_get_mclk_od,
5656 	.set_mclk_od = smu7_set_mclk_od,
5657 	.get_clock_by_type = smu7_get_clock_by_type,
5658 	.get_clock_by_type_with_latency = smu7_get_clock_by_type_with_latency,
5659 	.set_watermarks_for_clocks_ranges = smu7_set_watermarks_for_clocks_ranges,
5660 	.read_sensor = smu7_read_sensor,
5661 	.dynamic_state_management_disable = smu7_disable_dpm_tasks,
5662 	.avfs_control = smu7_avfs_control,
5663 	.disable_smc_firmware_ctf = smu7_thermal_disable_alert,
5664 	.start_thermal_controller = smu7_start_thermal_controller,
5665 	.notify_cac_buffer_info = smu7_notify_cac_buffer_info,
5666 	.get_max_high_clocks = smu7_get_max_high_clocks,
5667 	.get_thermal_temperature_range = smu7_get_thermal_temperature_range,
5668 	.odn_edit_dpm_table = smu7_odn_edit_dpm_table,
5669 	.set_power_limit = smu7_set_power_limit,
5670 	.get_power_profile_mode = smu7_get_power_profile_mode,
5671 	.set_power_profile_mode = smu7_set_power_profile_mode,
5672 	.get_performance_level = smu7_get_performance_level,
5673 	.get_asic_baco_capability = smu7_baco_get_capability,
5674 	.get_asic_baco_state = smu7_baco_get_state,
5675 	.set_asic_baco_state = smu7_baco_set_state,
5676 	.power_off_asic = smu7_power_off_asic,
5677 };
5678 
5679 uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
5680 		uint32_t clock_insr)
5681 {
5682 	uint8_t i;
5683 	uint32_t temp;
5684 	uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);
5685 
5686 	PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
5687 	for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
5688 		temp = clock >> i;
5689 
5690 		if (temp >= min || i == 0)
5691 			break;
5692 	}
5693 	return i;
5694 }
5695 
5696 int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
5697 {
5698 	hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
5699 	if (hwmgr->pp_table_version == PP_TABLE_V0)
5700 		hwmgr->pptable_func = &pptable_funcs;
5701 	else if (hwmgr->pp_table_version == PP_TABLE_V1)
5702 		hwmgr->pptable_func = &pptable_v1_0_funcs;
5703 
5704 	return 0;
5705 }
5706