1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/delay.h>
25 #include <linux/module.h>
26 #include <linux/slab.h>
27 
28 #include "hwmgr.h"
29 #include "amd_powerplay.h"
30 #include "vega20_smumgr.h"
31 #include "hardwaremanager.h"
32 #include "ppatomfwctrl.h"
33 #include "atomfirmware.h"
34 #include "cgs_common.h"
35 #include "vega20_powertune.h"
36 #include "vega20_inc.h"
37 #include "pppcielanes.h"
38 #include "vega20_hwmgr.h"
39 #include "vega20_processpptables.h"
40 #include "vega20_pptable.h"
41 #include "vega20_thermal.h"
42 #include "vega20_ppsmc.h"
43 #include "pp_debug.h"
44 #include "amd_pcie_helpers.h"
45 #include "ppinterrupt.h"
46 #include "pp_overdriver.h"
47 #include "pp_thermal.h"
48 #include "soc15_common.h"
49 #include "vega20_baco.h"
50 #include "smuio/smuio_9_0_offset.h"
51 #include "smuio/smuio_9_0_sh_mask.h"
52 #include "nbio/nbio_7_4_sh_mask.h"
53 
54 #define smnPCIE_LC_SPEED_CNTL			0x11140290
55 #define smnPCIE_LC_LINK_WIDTH_CNTL		0x11140288
56 
57 #define LINK_WIDTH_MAX				6
58 #define LINK_SPEED_MAX				3
59 static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
60 static const int link_speed[] = {25, 50, 80, 160};
61 
62 static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
63 {
64 	struct vega20_hwmgr *data =
65 			(struct vega20_hwmgr *)(hwmgr->backend);
66 
67 	data->gfxclk_average_alpha = PPVEGA20_VEGA20GFXCLKAVERAGEALPHA_DFLT;
68 	data->socclk_average_alpha = PPVEGA20_VEGA20SOCCLKAVERAGEALPHA_DFLT;
69 	data->uclk_average_alpha = PPVEGA20_VEGA20UCLKCLKAVERAGEALPHA_DFLT;
70 	data->gfx_activity_average_alpha = PPVEGA20_VEGA20GFXACTIVITYAVERAGEALPHA_DFLT;
71 	data->lowest_uclk_reserved_for_ulv = PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT;
72 
73 	data->display_voltage_mode = PPVEGA20_VEGA20DISPLAYVOLTAGEMODE_DFLT;
74 	data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
75 	data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
76 	data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
77 	data->disp_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
78 	data->disp_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
79 	data->disp_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
80 	data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
81 	data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
82 	data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
83 	data->phy_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
84 	data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
85 	data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
86 
87 	/*
88 	 * Disable the following features for now:
89 	 *   GFXCLK DS
90 	 *   SOCLK DS
91 	 *   LCLK DS
92 	 *   DCEFCLK DS
93 	 *   FCLK DS
94 	 *   MP1CLK DS
95 	 *   MP0CLK DS
96 	 */
97 	data->registry_data.disallowed_features = 0xE0041C00;
98 	/* ECC feature should be disabled on old SMUs */
99 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version);
100 	if (hwmgr->smu_version < 0x282100)
101 		data->registry_data.disallowed_features |= FEATURE_ECC_MASK;
102 
103 	if (!(hwmgr->feature_mask & PP_PCIE_DPM_MASK))
104 		data->registry_data.disallowed_features |= FEATURE_DPM_LINK_MASK;
105 
106 	if (!(hwmgr->feature_mask & PP_SCLK_DPM_MASK))
107 		data->registry_data.disallowed_features |= FEATURE_DPM_GFXCLK_MASK;
108 
109 	if (!(hwmgr->feature_mask & PP_SOCCLK_DPM_MASK))
110 		data->registry_data.disallowed_features |= FEATURE_DPM_SOCCLK_MASK;
111 
112 	if (!(hwmgr->feature_mask & PP_MCLK_DPM_MASK))
113 		data->registry_data.disallowed_features |= FEATURE_DPM_UCLK_MASK;
114 
115 	if (!(hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK))
116 		data->registry_data.disallowed_features |= FEATURE_DPM_DCEFCLK_MASK;
117 
118 	if (!(hwmgr->feature_mask & PP_ULV_MASK))
119 		data->registry_data.disallowed_features |= FEATURE_ULV_MASK;
120 
121 	if (!(hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK))
122 		data->registry_data.disallowed_features |= FEATURE_DS_GFXCLK_MASK;
123 
124 	data->registry_data.od_state_in_dc_support = 0;
125 	data->registry_data.thermal_support = 1;
126 	data->registry_data.skip_baco_hardware = 0;
127 
128 	data->registry_data.log_avfs_param = 0;
129 	data->registry_data.sclk_throttle_low_notification = 1;
130 	data->registry_data.force_dpm_high = 0;
131 	data->registry_data.stable_pstate_sclk_dpm_percentage = 75;
132 
133 	data->registry_data.didt_support = 0;
134 	if (data->registry_data.didt_support) {
135 		data->registry_data.didt_mode = 6;
136 		data->registry_data.sq_ramping_support = 1;
137 		data->registry_data.db_ramping_support = 0;
138 		data->registry_data.td_ramping_support = 0;
139 		data->registry_data.tcp_ramping_support = 0;
140 		data->registry_data.dbr_ramping_support = 0;
141 		data->registry_data.edc_didt_support = 1;
142 		data->registry_data.gc_didt_support = 0;
143 		data->registry_data.psm_didt_support = 0;
144 	}
145 
146 	data->registry_data.pcie_lane_override = 0xff;
147 	data->registry_data.pcie_speed_override = 0xff;
148 	data->registry_data.pcie_clock_override = 0xffffffff;
149 	data->registry_data.regulator_hot_gpio_support = 1;
150 	data->registry_data.ac_dc_switch_gpio_support = 0;
151 	data->registry_data.quick_transition_support = 0;
152 	data->registry_data.zrpm_start_temp = 0xffff;
153 	data->registry_data.zrpm_stop_temp = 0xffff;
154 	data->registry_data.od8_feature_enable = 1;
155 	data->registry_data.disable_water_mark = 0;
156 	data->registry_data.disable_pp_tuning = 0;
157 	data->registry_data.disable_xlpp_tuning = 0;
158 	data->registry_data.disable_workload_policy = 0;
159 	data->registry_data.perf_ui_tuning_profile_turbo = 0x19190F0F;
160 	data->registry_data.perf_ui_tuning_profile_powerSave = 0x19191919;
161 	data->registry_data.perf_ui_tuning_profile_xl = 0x00000F0A;
162 	data->registry_data.force_workload_policy_mask = 0;
163 	data->registry_data.disable_3d_fs_detection = 0;
164 	data->registry_data.fps_support = 1;
165 	data->registry_data.disable_auto_wattman = 1;
166 	data->registry_data.auto_wattman_debug = 0;
167 	data->registry_data.auto_wattman_sample_period = 100;
168 	data->registry_data.fclk_gfxclk_ratio = 0;
169 	data->registry_data.auto_wattman_threshold = 50;
170 	data->registry_data.gfxoff_controlled_by_driver = 1;
171 	data->gfxoff_allowed = false;
172 	data->counter_gfxoff = 0;
173 	data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
174 }
175 
176 static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
177 {
178 	struct vega20_hwmgr *data =
179 			(struct vega20_hwmgr *)(hwmgr->backend);
180 	struct amdgpu_device *adev = hwmgr->adev;
181 
182 	if (data->vddci_control == VEGA20_VOLTAGE_CONTROL_NONE)
183 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
184 				PHM_PlatformCaps_ControlVDDCI);
185 
186 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
187 			PHM_PlatformCaps_TablelessHardwareInterface);
188 
189 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
190 			PHM_PlatformCaps_BACO);
191 
192 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
193 			PHM_PlatformCaps_EnableSMU7ThermalManagement);
194 
195 	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
196 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
197 				PHM_PlatformCaps_UVDPowerGating);
198 
199 	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
200 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
201 				PHM_PlatformCaps_VCEPowerGating);
202 
203 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
204 			PHM_PlatformCaps_UnTabledHardwareInterface);
205 
206 	if (data->registry_data.od8_feature_enable)
207 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
208 				PHM_PlatformCaps_OD8inACSupport);
209 
210 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
211 			PHM_PlatformCaps_ActivityReporting);
212 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
213 			PHM_PlatformCaps_FanSpeedInTableIsRPM);
214 
215 	if (data->registry_data.od_state_in_dc_support) {
216 		if (data->registry_data.od8_feature_enable)
217 			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
218 					PHM_PlatformCaps_OD8inDCSupport);
219 	}
220 
221 	if (data->registry_data.thermal_support &&
222 	    data->registry_data.fuzzy_fan_control_support &&
223 	    hwmgr->thermal_controller.advanceFanControlParameters.usTMax)
224 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
225 				PHM_PlatformCaps_ODFuzzyFanControlSupport);
226 
227 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
228 			PHM_PlatformCaps_DynamicPowerManagement);
229 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
230 			PHM_PlatformCaps_SMC);
231 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
232 			PHM_PlatformCaps_ThermalPolicyDelay);
233 
234 	if (data->registry_data.force_dpm_high)
235 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
236 				PHM_PlatformCaps_ExclusiveModeAlwaysHigh);
237 
238 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
239 			PHM_PlatformCaps_DynamicUVDState);
240 
241 	if (data->registry_data.sclk_throttle_low_notification)
242 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
243 				PHM_PlatformCaps_SclkThrottleLowNotification);
244 
	/* power tune caps */
	/* assume all disabled; the relevant caps are re-set below when didt_support is enabled */
247 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
248 			PHM_PlatformCaps_PowerContainment);
249 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
250 			PHM_PlatformCaps_DiDtSupport);
251 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
252 			PHM_PlatformCaps_SQRamping);
253 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
254 			PHM_PlatformCaps_DBRamping);
255 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
256 			PHM_PlatformCaps_TDRamping);
257 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
258 			PHM_PlatformCaps_TCPRamping);
259 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
260 			PHM_PlatformCaps_DBRRamping);
261 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
262 			PHM_PlatformCaps_DiDtEDCEnable);
263 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
264 			PHM_PlatformCaps_GCEDC);
265 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
266 			PHM_PlatformCaps_PSM);
267 
268 	if (data->registry_data.didt_support) {
269 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
270 				PHM_PlatformCaps_DiDtSupport);
271 		if (data->registry_data.sq_ramping_support)
272 			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
273 					PHM_PlatformCaps_SQRamping);
274 		if (data->registry_data.db_ramping_support)
275 			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
276 					PHM_PlatformCaps_DBRamping);
277 		if (data->registry_data.td_ramping_support)
278 			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
279 					PHM_PlatformCaps_TDRamping);
280 		if (data->registry_data.tcp_ramping_support)
281 			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
282 					PHM_PlatformCaps_TCPRamping);
283 		if (data->registry_data.dbr_ramping_support)
284 			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
285 					PHM_PlatformCaps_DBRRamping);
286 		if (data->registry_data.edc_didt_support)
287 			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
288 					PHM_PlatformCaps_DiDtEDCEnable);
289 		if (data->registry_data.gc_didt_support)
290 			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
291 					PHM_PlatformCaps_GCEDC);
292 		if (data->registry_data.psm_didt_support)
293 			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
294 					PHM_PlatformCaps_PSM);
295 	}
296 
297 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
298 			PHM_PlatformCaps_RegulatorHot);
299 
300 	if (data->registry_data.ac_dc_switch_gpio_support) {
301 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
302 				PHM_PlatformCaps_AutomaticDCTransition);
303 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
304 				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
305 	}
306 
307 	if (data->registry_data.quick_transition_support) {
308 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
309 				PHM_PlatformCaps_AutomaticDCTransition);
310 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
311 				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
312 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
313 				PHM_PlatformCaps_Falcon_QuickTransition);
314 	}
315 
316 	if (data->lowest_uclk_reserved_for_ulv != PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT) {
317 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
318 				PHM_PlatformCaps_LowestUclkReservedForUlv);
319 		if (data->lowest_uclk_reserved_for_ulv == 1)
320 			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
321 					PHM_PlatformCaps_LowestUclkReservedForUlv);
322 	}
323 
324 	if (data->registry_data.custom_fan_support)
325 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
326 				PHM_PlatformCaps_CustomFanControlSupport);
327 
328 	return 0;
329 }
330 
331 static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
332 {
333 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
334 	struct amdgpu_device *adev = hwmgr->adev;
335 	uint32_t top32, bottom32;
336 	int i;
337 
338 	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
339 			FEATURE_DPM_PREFETCHER_BIT;
340 	data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
341 			FEATURE_DPM_GFXCLK_BIT;
342 	data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
343 			FEATURE_DPM_UCLK_BIT;
344 	data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
345 			FEATURE_DPM_SOCCLK_BIT;
346 	data->smu_features[GNLD_DPM_UVD].smu_feature_id =
347 			FEATURE_DPM_UVD_BIT;
348 	data->smu_features[GNLD_DPM_VCE].smu_feature_id =
349 			FEATURE_DPM_VCE_BIT;
350 	data->smu_features[GNLD_ULV].smu_feature_id =
351 			FEATURE_ULV_BIT;
352 	data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
353 			FEATURE_DPM_MP0CLK_BIT;
354 	data->smu_features[GNLD_DPM_LINK].smu_feature_id =
355 			FEATURE_DPM_LINK_BIT;
356 	data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
357 			FEATURE_DPM_DCEFCLK_BIT;
358 	data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
359 			FEATURE_DS_GFXCLK_BIT;
360 	data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
361 			FEATURE_DS_SOCCLK_BIT;
362 	data->smu_features[GNLD_DS_LCLK].smu_feature_id =
363 			FEATURE_DS_LCLK_BIT;
364 	data->smu_features[GNLD_PPT].smu_feature_id =
365 			FEATURE_PPT_BIT;
366 	data->smu_features[GNLD_TDC].smu_feature_id =
367 			FEATURE_TDC_BIT;
368 	data->smu_features[GNLD_THERMAL].smu_feature_id =
369 			FEATURE_THERMAL_BIT;
370 	data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
371 			FEATURE_GFX_PER_CU_CG_BIT;
372 	data->smu_features[GNLD_RM].smu_feature_id =
373 			FEATURE_RM_BIT;
374 	data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
375 			FEATURE_DS_DCEFCLK_BIT;
376 	data->smu_features[GNLD_ACDC].smu_feature_id =
377 			FEATURE_ACDC_BIT;
378 	data->smu_features[GNLD_VR0HOT].smu_feature_id =
379 			FEATURE_VR0HOT_BIT;
380 	data->smu_features[GNLD_VR1HOT].smu_feature_id =
381 			FEATURE_VR1HOT_BIT;
382 	data->smu_features[GNLD_FW_CTF].smu_feature_id =
383 			FEATURE_FW_CTF_BIT;
384 	data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
385 			FEATURE_LED_DISPLAY_BIT;
386 	data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
387 			FEATURE_FAN_CONTROL_BIT;
388 	data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
389 	data->smu_features[GNLD_GFXOFF].smu_feature_id = FEATURE_GFXOFF_BIT;
390 	data->smu_features[GNLD_CG].smu_feature_id = FEATURE_CG_BIT;
391 	data->smu_features[GNLD_DPM_FCLK].smu_feature_id = FEATURE_DPM_FCLK_BIT;
392 	data->smu_features[GNLD_DS_FCLK].smu_feature_id = FEATURE_DS_FCLK_BIT;
393 	data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
394 	data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
395 	data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
396 	data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT;
397 
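	/*
	 * The 'allowed' flag is derived from the disallowed_features registry
	 * mask: bit i set means driver feature index i must stay disabled.
	 */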
398 	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
399 		data->smu_features[i].smu_feature_bitmap =
400 			(uint64_t)(1ULL << data->smu_features[i].smu_feature_id);
401 		data->smu_features[i].allowed =
402 			((data->registry_data.disallowed_features >> i) & 1) ?
403 			false : true;
404 	}
405 
406 	/* Get the SN to turn into a Unique ID */
407 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
408 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
409 
410 	adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
411 }
412 
413 static int vega20_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
414 {
415 	return 0;
416 }
417 
418 static int vega20_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
419 {
420 	kfree(hwmgr->backend);
421 	hwmgr->backend = NULL;
422 
423 	return 0;
424 }
425 
426 static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
427 {
428 	struct vega20_hwmgr *data;
429 	struct amdgpu_device *adev = hwmgr->adev;
430 
431 	data = kzalloc(sizeof(struct vega20_hwmgr), GFP_KERNEL);
432 	if (data == NULL)
433 		return -ENOMEM;
434 
435 	hwmgr->backend = data;
436 
437 	hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
438 	hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
439 	hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
440 
441 	vega20_set_default_registry_data(hwmgr);
442 
443 	data->disable_dpm_mask = 0xff;
444 
445 	/* need to set voltage control types before EVV patching */
446 	data->vddc_control = VEGA20_VOLTAGE_CONTROL_NONE;
447 	data->mvdd_control = VEGA20_VOLTAGE_CONTROL_NONE;
448 	data->vddci_control = VEGA20_VOLTAGE_CONTROL_NONE;
449 
450 	data->water_marks_bitmap = 0;
451 	data->avfs_exist = false;
452 
453 	vega20_set_features_platform_caps(hwmgr);
454 
455 	vega20_init_dpm_defaults(hwmgr);
456 
457 	/* Parse pptable data read from VBIOS */
458 	vega20_set_private_data_based_on_pptable(hwmgr);
459 
460 	data->is_tlu_enabled = false;
461 
462 	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
463 			VEGA20_MAX_HARDWARE_POWERLEVELS;
464 	hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
465 	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
466 
467 	hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
	/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5 MHz. */
469 	hwmgr->platform_descriptor.clockStep.engineClock = 500;
470 	hwmgr->platform_descriptor.clockStep.memoryClock = 500;
471 
472 	data->total_active_cus = adev->gfx.cu_info.number;
473 	data->is_custom_profile_set = false;
474 
475 	return 0;
476 }
477 
478 static int vega20_init_sclk_threshold(struct pp_hwmgr *hwmgr)
479 {
480 	struct vega20_hwmgr *data =
481 			(struct vega20_hwmgr *)(hwmgr->backend);
482 
483 	data->low_sclk_interrupt_threshold = 0;
484 
485 	return 0;
486 }
487 
488 static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
489 {
490 	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
491 	int ret = 0;
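	/*
	 * The VDCI flush workaround below is only needed when we get here via a
	 * BACO-based GPU reset or runtime PM on a BACO-capable board.
	 */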
492 	bool use_baco = (amdgpu_in_reset(adev) &&
493 			 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
494 		(adev->in_runpm && amdgpu_asic_supports_baco(adev));
495 
496 	ret = vega20_init_sclk_threshold(hwmgr);
497 	PP_ASSERT_WITH_CODE(!ret,
498 			"Failed to init sclk threshold!",
499 			return ret);
500 
501 	if (use_baco) {
502 		ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
503 		if (ret)
504 			pr_err("Failed to apply vega20 baco workaround!\n");
505 	}
506 
507 	return ret;
508 }
509 
510 /*
511  * @fn vega20_init_dpm_state
 * @brief Initialize all Soft Min/Max and Hard Min/Max levels to their defaults
 *        (0 and VG20_CLOCK_MAX_DEFAULT).
 *
 * @param    dpm_state - the address of the DPM state to initialize.
515  * @return   None.
516  */
517 static void vega20_init_dpm_state(struct vega20_dpm_state *dpm_state)
518 {
519 	dpm_state->soft_min_level = 0x0;
520 	dpm_state->soft_max_level = VG20_CLOCK_MAX_DEFAULT;
521 	dpm_state->hard_min_level = 0x0;
522 	dpm_state->hard_max_level = VG20_CLOCK_MAX_DEFAULT;
523 }
524 
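/*
 * Query the number of DPM levels for a given clock domain. Passing 0xFF as the
 * level index makes PPSMC_MSG_GetDpmFreqByIndex return the level count instead
 * of a frequency.
 */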
525 static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
526 		PPCLK_e clk_id, uint32_t *num_of_levels)
527 {
528 	int ret = 0;
529 
530 	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
531 			PPSMC_MSG_GetDpmFreqByIndex,
532 			(clk_id << 16 | 0xFF),
533 			num_of_levels);
534 	PP_ASSERT_WITH_CODE(!ret,
535 			"[GetNumOfDpmLevel] failed to get dpm levels!",
536 			return ret);
537 
538 	return ret;
539 }
540 
541 static int vega20_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
542 		PPCLK_e clk_id, uint32_t index, uint32_t *clk)
543 {
544 	int ret = 0;
545 
546 	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
547 			PPSMC_MSG_GetDpmFreqByIndex,
548 			(clk_id << 16 | index),
549 			clk);
550 	PP_ASSERT_WITH_CODE(!ret,
551 			"[GetDpmFreqByIndex] failed to get dpm freq by index!",
552 			return ret);
553 
554 	return ret;
555 }
556 
557 static int vega20_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
558 		struct vega20_single_dpm_table *dpm_table, PPCLK_e clk_id)
559 {
560 	int ret = 0;
561 	uint32_t i, num_of_levels, clk;
562 
563 	ret = vega20_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
564 	PP_ASSERT_WITH_CODE(!ret,
565 			"[SetupSingleDpmTable] failed to get clk levels!",
566 			return ret);
567 
568 	dpm_table->count = num_of_levels;
569 
570 	for (i = 0; i < num_of_levels; i++) {
571 		ret = vega20_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
572 		PP_ASSERT_WITH_CODE(!ret,
573 			"[SetupSingleDpmTable] failed to get clk of specific level!",
574 			return ret);
575 		dpm_table->dpm_levels[i].value = clk;
576 		dpm_table->dpm_levels[i].enabled = true;
577 	}
578 
579 	return ret;
580 }
581 
582 static int vega20_setup_gfxclk_dpm_table(struct pp_hwmgr *hwmgr)
583 {
584 	struct vega20_hwmgr *data =
585 			(struct vega20_hwmgr *)(hwmgr->backend);
586 	struct vega20_single_dpm_table *dpm_table;
587 	int ret = 0;
588 
589 	dpm_table = &(data->dpm_table.gfx_table);
590 	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
591 		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
592 		PP_ASSERT_WITH_CODE(!ret,
593 				"[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
594 				return ret);
595 	} else {
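		/*
		 * GFXCLK DPM is disabled: expose a single level at the VBIOS
		 * boot-up frequency (boot clocks are in 10 kHz units, hence
		 * the /100 to get MHz).
		 */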
596 		dpm_table->count = 1;
597 		dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
598 	}
599 
600 	return ret;
601 }
602 
603 static int vega20_setup_memclk_dpm_table(struct pp_hwmgr *hwmgr)
604 {
605 	struct vega20_hwmgr *data =
606 			(struct vega20_hwmgr *)(hwmgr->backend);
607 	struct vega20_single_dpm_table *dpm_table;
608 	int ret = 0;
609 
610 	dpm_table = &(data->dpm_table.mem_table);
611 	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
612 		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
613 		PP_ASSERT_WITH_CODE(!ret,
614 				"[SetupDefaultDpmTable] failed to get memclk dpm levels!",
615 				return ret);
616 	} else {
617 		dpm_table->count = 1;
618 		dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
619 	}
620 
621 	return ret;
622 }
623 
624 /*
 * This function initializes all DPM state tables for the SMU based on the
 * dependency table. The dynamic state patching function will then trim these
 * state tables to the allowed range based on the power policy or external
 * client requests, such as a UVD request.
631  */
632 static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
633 {
634 	struct vega20_hwmgr *data =
635 			(struct vega20_hwmgr *)(hwmgr->backend);
636 	struct vega20_single_dpm_table *dpm_table;
637 	int ret = 0;
638 
639 	memset(&data->dpm_table, 0, sizeof(data->dpm_table));
640 
641 	/* socclk */
642 	dpm_table = &(data->dpm_table.soc_table);
643 	if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
644 		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
645 		PP_ASSERT_WITH_CODE(!ret,
646 				"[SetupDefaultDpmTable] failed to get socclk dpm levels!",
647 				return ret);
648 	} else {
649 		dpm_table->count = 1;
650 		dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
651 	}
652 	vega20_init_dpm_state(&(dpm_table->dpm_state));
653 
654 	/* gfxclk */
655 	dpm_table = &(data->dpm_table.gfx_table);
656 	ret = vega20_setup_gfxclk_dpm_table(hwmgr);
657 	if (ret)
658 		return ret;
659 	vega20_init_dpm_state(&(dpm_table->dpm_state));
660 
661 	/* memclk */
662 	dpm_table = &(data->dpm_table.mem_table);
663 	ret = vega20_setup_memclk_dpm_table(hwmgr);
664 	if (ret)
665 		return ret;
666 	vega20_init_dpm_state(&(dpm_table->dpm_state));
667 
668 	/* eclk */
669 	dpm_table = &(data->dpm_table.eclk_table);
670 	if (data->smu_features[GNLD_DPM_VCE].enabled) {
671 		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
672 		PP_ASSERT_WITH_CODE(!ret,
673 				"[SetupDefaultDpmTable] failed to get eclk dpm levels!",
674 				return ret);
675 	} else {
676 		dpm_table->count = 1;
677 		dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
678 	}
679 	vega20_init_dpm_state(&(dpm_table->dpm_state));
680 
681 	/* vclk */
682 	dpm_table = &(data->dpm_table.vclk_table);
683 	if (data->smu_features[GNLD_DPM_UVD].enabled) {
684 		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
685 		PP_ASSERT_WITH_CODE(!ret,
686 				"[SetupDefaultDpmTable] failed to get vclk dpm levels!",
687 				return ret);
688 	} else {
689 		dpm_table->count = 1;
690 		dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
691 	}
692 	vega20_init_dpm_state(&(dpm_table->dpm_state));
693 
694 	/* dclk */
695 	dpm_table = &(data->dpm_table.dclk_table);
696 	if (data->smu_features[GNLD_DPM_UVD].enabled) {
697 		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
698 		PP_ASSERT_WITH_CODE(!ret,
699 				"[SetupDefaultDpmTable] failed to get dclk dpm levels!",
700 				return ret);
701 	} else {
702 		dpm_table->count = 1;
703 		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
704 	}
705 	vega20_init_dpm_state(&(dpm_table->dpm_state));
706 
707 	/* dcefclk */
708 	dpm_table = &(data->dpm_table.dcef_table);
709 	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
710 		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
711 		PP_ASSERT_WITH_CODE(!ret,
712 				"[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
713 				return ret);
714 	} else {
715 		dpm_table->count = 1;
716 		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
717 	}
718 	vega20_init_dpm_state(&(dpm_table->dpm_state));
719 
720 	/* pixclk */
721 	dpm_table = &(data->dpm_table.pixel_table);
722 	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
723 		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
724 		PP_ASSERT_WITH_CODE(!ret,
725 				"[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
726 				return ret);
727 	} else
728 		dpm_table->count = 0;
729 	vega20_init_dpm_state(&(dpm_table->dpm_state));
730 
731 	/* dispclk */
732 	dpm_table = &(data->dpm_table.display_table);
733 	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
734 		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
735 		PP_ASSERT_WITH_CODE(!ret,
736 				"[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
737 				return ret);
738 	} else
739 		dpm_table->count = 0;
740 	vega20_init_dpm_state(&(dpm_table->dpm_state));
741 
742 	/* phyclk */
743 	dpm_table = &(data->dpm_table.phy_table);
744 	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
745 		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
746 		PP_ASSERT_WITH_CODE(!ret,
747 				"[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
748 				return ret);
749 	} else
750 		dpm_table->count = 0;
751 	vega20_init_dpm_state(&(dpm_table->dpm_state));
752 
753 	/* fclk */
754 	dpm_table = &(data->dpm_table.fclk_table);
755 	if (data->smu_features[GNLD_DPM_FCLK].enabled) {
756 		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_FCLK);
757 		PP_ASSERT_WITH_CODE(!ret,
758 				"[SetupDefaultDpmTable] failed to get fclk dpm levels!",
759 				return ret);
760 	} else {
761 		dpm_table->count = 1;
762 		dpm_table->dpm_levels[0].value = data->vbios_boot_state.fclock / 100;
763 	}
764 	vega20_init_dpm_state(&(dpm_table->dpm_state));
765 
766 	/* save a copy of the default DPM table */
767 	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
768 			sizeof(struct vega20_dpm_table));
769 
770 	return 0;
771 }
772 
773 /**
774  * vega20_init_smc_table - Initializes the SMC table and uploads it
775  *
776  * @hwmgr:  the address of the powerplay hardware manager.
 * Return: 0 on success, an error code otherwise
778  */
779 static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
780 {
781 	int result;
782 	struct vega20_hwmgr *data =
783 			(struct vega20_hwmgr *)(hwmgr->backend);
784 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
785 	struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
786 	struct phm_ppt_v3_information *pptable_information =
787 		(struct phm_ppt_v3_information *)hwmgr->pptable;
788 
789 	result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
790 	PP_ASSERT_WITH_CODE(!result,
791 			"[InitSMCTable] Failed to get vbios bootup values!",
792 			return result);
793 
794 	data->vbios_boot_state.vddc     = boot_up_values.usVddc;
795 	data->vbios_boot_state.vddci    = boot_up_values.usVddci;
796 	data->vbios_boot_state.mvddc    = boot_up_values.usMvddc;
797 	data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
798 	data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
799 	data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
800 	data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
801 	data->vbios_boot_state.eclock = boot_up_values.ulEClk;
802 	data->vbios_boot_state.vclock = boot_up_values.ulVClk;
803 	data->vbios_boot_state.dclock = boot_up_values.ulDClk;
804 	data->vbios_boot_state.fclock = boot_up_values.ulFClk;
805 	data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
806 
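	/*
	 * Report the boot-up DCEF clock (converted from 10 kHz units to MHz)
	 * to the SMU as the minimum deep-sleep DCEFCLK.
	 */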
807 	smum_send_msg_to_smc_with_parameter(hwmgr,
808 			PPSMC_MSG_SetMinDeepSleepDcefclk,
809 		(uint32_t)(data->vbios_boot_state.dcef_clock / 100),
810 			NULL);
811 
812 	memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
813 
814 	result = smum_smc_table_manager(hwmgr,
815 					(uint8_t *)pp_table, TABLE_PPTABLE, false);
816 	PP_ASSERT_WITH_CODE(!result,
817 			"[InitSMCTable] Failed to upload PPtable!",
818 			return result);
819 
820 	return 0;
821 }
822 
823 /*
824  * Override PCIe link speed and link width for DPM Level 1. PPTable entries
 * reflect the ASIC capabilities and not the system capabilities. For example,
 * with a Vega20 board in a PCIe Gen3 system, when the SMU tries to switch to
 * DPM1 it fails because the system doesn't support Gen4.
828  */
829 static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
830 {
831 	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
832 	struct vega20_hwmgr *data =
833 			(struct vega20_hwmgr *)(hwmgr->backend);
834 	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg;
835 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
836 	int i;
837 	int ret;
838 
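	/* Pick the highest PCIe gen and link width the platform supports */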
839 	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
840 		pcie_gen = 3;
841 	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
842 		pcie_gen = 2;
843 	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
844 		pcie_gen = 1;
845 	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
846 		pcie_gen = 0;
847 
848 	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
849 		pcie_width = 6;
850 	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
851 		pcie_width = 5;
852 	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
853 		pcie_width = 4;
854 	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
855 		pcie_width = 3;
856 	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
857 		pcie_width = 2;
858 	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
859 		pcie_width = 1;
860 
861 	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
862 	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
864 	 */
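	/*
	 * Clamp each pptable link level to what the platform actually supports
	 * and only send an override message when the entry had to be lowered.
	 */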
865 	for (i = 0; i < NUM_LINK_LEVELS; i++) {
866 		pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen :
867 			pp_table->PcieGenSpeed[i];
868 		pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ? pcie_width :
869 			pp_table->PcieLaneCount[i];
870 
871 		if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg !=
872 		    pp_table->PcieLaneCount[i]) {
873 			smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg;
874 			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
875 				PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
876 				NULL);
877 			PP_ASSERT_WITH_CODE(!ret,
878 				"[OverridePcieParameters] Attempt to override pcie params failed!",
879 				return ret);
880 		}
881 
882 		/* update the pptable */
883 		pp_table->PcieGenSpeed[i] = pcie_gen_arg;
884 		pp_table->PcieLaneCount[i] = pcie_width_arg;
885 	}
886 
	/* override to the highest if PCIe DPM is disabled via ppfeaturemask */
888 	if (data->registry_data.pcie_dpm_key_disabled) {
889 		for (i = 0; i < NUM_LINK_LEVELS; i++) {
890 			smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
891 			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
892 				PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
893 				NULL);
894 			PP_ASSERT_WITH_CODE(!ret,
895 				"[OverridePcieParameters] Attempt to override pcie params failed!",
896 				return ret);
897 
898 			pp_table->PcieGenSpeed[i] = pcie_gen;
899 			pp_table->PcieLaneCount[i] = pcie_width;
900 		}
901 		ret = vega20_enable_smc_features(hwmgr,
902 				false,
903 				data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
904 		PP_ASSERT_WITH_CODE(!ret,
905 				"Attempt to Disable DPM LINK Failed!",
906 				return ret);
907 		data->smu_features[GNLD_DPM_LINK].enabled = false;
908 		data->smu_features[GNLD_DPM_LINK].supported = false;
909 	}
910 
911 	return 0;
912 }
913 
914 static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
915 {
916 	struct vega20_hwmgr *data =
917 			(struct vega20_hwmgr *)(hwmgr->backend);
918 	uint32_t allowed_features_low = 0, allowed_features_high = 0;
919 	int i;
920 	int ret = 0;
921 
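	/*
	 * Split the 64-bit allowed-feature bitmap into the low/high 32-bit
	 * words expected by the SMU messages below.
	 */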
	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		if (!data->smu_features[i].allowed)
			continue;

		if (data->smu_features[i].smu_feature_id > 31)
			allowed_features_high |=
				(data->smu_features[i].smu_feature_bitmap >>
				 SMU_FEATURES_HIGH_SHIFT) & 0xFFFFFFFF;
		else
			allowed_features_low |=
				(data->smu_features[i].smu_feature_bitmap >>
				 SMU_FEATURES_LOW_SHIFT) & 0xFFFFFFFF;
	}
931 
932 	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
933 		PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high, NULL);
934 	PP_ASSERT_WITH_CODE(!ret,
935 		"[SetAllowedFeaturesMask] Attempt to set allowed features mask(high) failed!",
936 		return ret);
937 
938 	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
939 		PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low, NULL);
940 	PP_ASSERT_WITH_CODE(!ret,
941 		"[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
942 		return ret);
943 
944 	return 0;
945 }
946 
947 static int vega20_run_btc(struct pp_hwmgr *hwmgr)
948 {
949 	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc, NULL);
950 }
951 
952 static int vega20_run_btc_afll(struct pp_hwmgr *hwmgr)
953 {
954 	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc, NULL);
955 }
956 
957 static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
958 {
959 	struct vega20_hwmgr *data =
960 			(struct vega20_hwmgr *)(hwmgr->backend);
961 	uint64_t features_enabled;
962 	int i;
963 	bool enabled;
964 	int ret = 0;
965 
966 	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
967 			PPSMC_MSG_EnableAllSmuFeatures,
968 			NULL)) == 0,
969 			"[EnableAllSMUFeatures] Failed to enable all smu features!",
970 			return ret);
971 
972 	ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
973 	PP_ASSERT_WITH_CODE(!ret,
974 			"[EnableAllSmuFeatures] Failed to get enabled smc features!",
975 			return ret);
976 
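	/* Cache which features the SMU actually enabled */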
977 	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
978 		enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
979 			true : false;
980 		data->smu_features[i].enabled = enabled;
981 		data->smu_features[i].supported = enabled;
982 
983 #if 0
984 		if (data->smu_features[i].allowed && !enabled)
985 			pr_info("[EnableAllSMUFeatures] feature %d is expected enabled!", i);
986 		else if (!data->smu_features[i].allowed && enabled)
987 			pr_info("[EnableAllSMUFeatures] feature %d is expected disabled!", i);
988 #endif
989 	}
990 
991 	return 0;
992 }
993 
994 static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr)
995 {
996 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
997 
998 	if (data->smu_features[GNLD_DPM_UCLK].enabled)
999 		return smum_send_msg_to_smc_with_parameter(hwmgr,
1000 			PPSMC_MSG_SetUclkFastSwitch,
1001 			1,
1002 			NULL);
1003 
1004 	return 0;
1005 }
1006 
1007 static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr)
1008 {
1009 	struct vega20_hwmgr *data =
1010 			(struct vega20_hwmgr *)(hwmgr->backend);
1011 
1012 	return smum_send_msg_to_smc_with_parameter(hwmgr,
1013 			PPSMC_MSG_SetFclkGfxClkRatio,
1014 			data->registry_data.fclk_gfxclk_ratio,
1015 			NULL);
1016 }
1017 
1018 static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
1019 {
1020 	struct vega20_hwmgr *data =
1021 			(struct vega20_hwmgr *)(hwmgr->backend);
1022 	int i, ret = 0;
1023 
1024 	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
1025 			PPSMC_MSG_DisableAllSmuFeatures,
1026 			NULL)) == 0,
1027 			"[DisableAllSMUFeatures] Failed to disable all smu features!",
1028 			return ret);
1029 
1030 	for (i = 0; i < GNLD_FEATURES_MAX; i++)
1031 		data->smu_features[i].enabled = 0;
1032 
1033 	return 0;
1034 }
1035 
1036 static int vega20_od8_set_feature_capabilities(
1037 		struct pp_hwmgr *hwmgr)
1038 {
1039 	struct phm_ppt_v3_information *pptable_information =
1040 		(struct phm_ppt_v3_information *)hwmgr->pptable;
1041 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1042 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1043 	struct vega20_od8_settings *od_settings = &(data->od8_settings);
1044 
1045 	od_settings->overdrive8_capabilities = 0;
1046 
1047 	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
1048 		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_LIMITS] &&
1049 		    pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] > 0 &&
1050 		    pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN] > 0 &&
1051 		    (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] >=
1052 		    pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN]))
1053 			od_settings->overdrive8_capabilities |= OD8_GFXCLK_LIMITS;
1054 
1055 		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_CURVE] &&
1056 		    (pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1] >=
1057 		     pp_table->MinVoltageGfx / VOLTAGE_SCALE) &&
1058 		    (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] <=
1059 		     pp_table->MaxVoltageGfx / VOLTAGE_SCALE) &&
1060 		    (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] >=
1061 		     pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1]))
1062 			od_settings->overdrive8_capabilities |= OD8_GFXCLK_CURVE;
1063 	}
1064 
1065 	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
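		/*
		 * Floor the user-settable UCLK Fmax at the second-highest
		 * default memclk DPM level.
		 */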
1066 		pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] =
1067 			data->dpm_table.mem_table.dpm_levels[data->dpm_table.mem_table.count - 2].value;
1068 		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] &&
1069 		    pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 &&
1070 		    pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 &&
1071 		    (pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] >=
1072 		    pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX]))
1073 			od_settings->overdrive8_capabilities |= OD8_UCLK_MAX;
1074 	}
1075 
1076 	if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_POWER_LIMIT] &&
1077 	    pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
1078 	    pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] <= 100 &&
1079 	    pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
1080 	    pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] <= 100)
1081 		od_settings->overdrive8_capabilities |= OD8_POWER_LIMIT;
1082 
1083 	if (data->smu_features[GNLD_FAN_CONTROL].enabled) {
1084 		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ACOUSTIC_LIMIT] &&
1085 		    pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
1086 		    pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
1087 		    (pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] >=
1088 		     pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT]))
1089 			od_settings->overdrive8_capabilities |= OD8_ACOUSTIC_LIMIT_SCLK;
1090 
1091 		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_SPEED_MIN] &&
1092 		    (pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED] >=
1093 		    (pp_table->FanPwmMin * pp_table->FanMaximumRpm / 100)) &&
1094 		    pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] > 0 &&
1095 		    (pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] >=
1096 		     pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED]))
1097 			od_settings->overdrive8_capabilities |= OD8_FAN_SPEED_MIN;
1098 	}
1099 
1100 	if (data->smu_features[GNLD_THERMAL].enabled) {
1101 		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_FAN] &&
1102 		    pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
1103 		    pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
1104 		    (pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] >=
1105 		     pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP]))
1106 			od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_FAN;
1107 
1108 		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_SYSTEM] &&
1109 		    pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
1110 		    pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
1111 		    (pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] >=
1112 		     pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX]))
1113 			od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_SYSTEM;
1114 	}
1115 
1116 	if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_MEMORY_TIMING_TUNE])
1117 		od_settings->overdrive8_capabilities |= OD8_MEMORY_TIMING_TUNE;
1118 
1119 	if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ZERO_RPM_CONTROL] &&
1120 	    pp_table->FanZeroRpmEnable)
1121 		od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL;
1122 
1123 	if (!od_settings->overdrive8_capabilities)
1124 		hwmgr->od_enabled = false;
1125 
1126 	return 0;
1127 }
1128 
1129 static int vega20_od8_set_feature_id(
1130 		struct pp_hwmgr *hwmgr)
1131 {
1132 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1133 	struct vega20_od8_settings *od_settings = &(data->od8_settings);
1134 
1135 	if (od_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) {
1136 		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
1137 			OD8_GFXCLK_LIMITS;
1138 		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
1139 			OD8_GFXCLK_LIMITS;
1140 	} else {
1141 		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
1142 			0;
1143 		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
1144 			0;
1145 	}
1146 
1147 	if (od_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
1148 		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
1149 			OD8_GFXCLK_CURVE;
1150 		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
1151 			OD8_GFXCLK_CURVE;
1152 		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
1153 			OD8_GFXCLK_CURVE;
1154 		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
1155 			OD8_GFXCLK_CURVE;
1156 		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
1157 			OD8_GFXCLK_CURVE;
1158 		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
1159 			OD8_GFXCLK_CURVE;
1160 	} else {
1161 		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
1162 			0;
1163 		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
1164 			0;
1165 		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
1166 			0;
1167 		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
1168 			0;
1169 		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
1170 			0;
1171 		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
1172 			0;
1173 	}
1174 
1175 	if (od_settings->overdrive8_capabilities & OD8_UCLK_MAX)
1176 		od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = OD8_UCLK_MAX;
1177 	else
1178 		od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = 0;
1179 
1180 	if (od_settings->overdrive8_capabilities & OD8_POWER_LIMIT)
1181 		od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = OD8_POWER_LIMIT;
1182 	else
1183 		od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = 0;
1184 
1185 	if (od_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK)
1186 		od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
1187 			OD8_ACOUSTIC_LIMIT_SCLK;
1188 	else
1189 		od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
1190 			0;
1191 
1192 	if (od_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
1193 		od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
1194 			OD8_FAN_SPEED_MIN;
1195 	else
1196 		od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
1197 			0;
1198 
1199 	if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN)
1200 		od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
1201 			OD8_TEMPERATURE_FAN;
1202 	else
1203 		od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
1204 			0;
1205 
1206 	if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM)
1207 		od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
1208 			OD8_TEMPERATURE_SYSTEM;
1209 	else
1210 		od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
1211 			0;
1212 
1213 	return 0;
1214 }
1215 
1216 static int vega20_od8_get_gfx_clock_base_voltage(
1217 		struct pp_hwmgr *hwmgr,
1218 		uint32_t *voltage,
1219 		uint32_t freq)
1220 {
1221 	int ret = 0;
1222 
1223 	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
1224 			PPSMC_MSG_GetAVFSVoltageByDpm,
1225 			((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq),
1226 			voltage);
1227 	PP_ASSERT_WITH_CODE(!ret,
1228 			"[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!",
1229 			return ret);
1230 
1231 	*voltage = *voltage / VOLTAGE_SCALE;
1232 
1233 	return 0;
1234 }
1235 
1236 static int vega20_od8_initialize_default_settings(
1237 		struct pp_hwmgr *hwmgr)
1238 {
1239 	struct phm_ppt_v3_information *pptable_information =
1240 		(struct phm_ppt_v3_information *)hwmgr->pptable;
1241 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1242 	struct vega20_od8_settings *od8_settings = &(data->od8_settings);
1243 	OverDriveTable_t *od_table = &(data->smc_state_table.overdrive_table);
1244 	int i, ret = 0;
1245 
1246 	/* Set Feature Capabilities */
1247 	vega20_od8_set_feature_capabilities(hwmgr);
1248 
1249 	/* Map FeatureID to individual settings */
1250 	vega20_od8_set_feature_id(hwmgr);
1251 
1252 	/* Set default values */
1253 	ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, true);
1254 	PP_ASSERT_WITH_CODE(!ret,
1255 			"Failed to export over drive table!",
1256 			return ret);
1257 
1258 	if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) {
1259 		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
1260 			od_table->GfxclkFmin;
1261 		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
1262 			od_table->GfxclkFmax;
1263 	} else {
1264 		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
1265 			0;
1266 		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
1267 			0;
1268 	}
1269 
1270 	if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
1271 		od_table->GfxclkFreq1 = od_table->GfxclkFmin;
1272 		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
1273 			od_table->GfxclkFreq1;
1274 
1275 		od_table->GfxclkFreq3 = od_table->GfxclkFmax;
1276 		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
1277 			od_table->GfxclkFreq3;
1278 
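		/* Default the middle curve point to the midpoint of Fmin and Fmax */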
1279 		od_table->GfxclkFreq2 = (od_table->GfxclkFreq1 + od_table->GfxclkFreq3) / 2;
1280 		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
1281 			od_table->GfxclkFreq2;
1282 
1283 		PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
1284 				   &(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value),
1285 				     od_table->GfxclkFreq1),
1286 				"[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
1287 				od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value = 0);
1288 		od_table->GfxclkVolt1 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value
1289 			* VOLTAGE_SCALE;
1290 
1291 		PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
1292 				   &(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value),
1293 				     od_table->GfxclkFreq2),
1294 				"[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
1295 				od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value = 0);
1296 		od_table->GfxclkVolt2 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value
1297 			* VOLTAGE_SCALE;
1298 
1299 		PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
1300 				   &(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value),
1301 				     od_table->GfxclkFreq3),
1302 				"[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
1303 				od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value = 0);
1304 		od_table->GfxclkVolt3 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value
1305 			* VOLTAGE_SCALE;
1306 	} else {
1307 		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
1308 			0;
1309 		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value =
1310 			0;
1311 		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
1312 			0;
1313 		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value =
1314 			0;
1315 		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
1316 			0;
1317 		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value =
1318 			0;
1319 	}
1320 
1321 	if (od8_settings->overdrive8_capabilities & OD8_UCLK_MAX)
1322 		od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
1323 			od_table->UclkFmax;
1324 	else
1325 		od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
1326 			0;
1327 
1328 	if (od8_settings->overdrive8_capabilities & OD8_POWER_LIMIT)
1329 		od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
1330 			od_table->OverDrivePct;
1331 	else
1332 		od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
1333 			0;
1334 
1335 	if (od8_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK)
1336 		od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
1337 			od_table->FanMaximumRpm;
1338 	else
1339 		od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
1340 			0;
1341 
1342 	if (od8_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
1343 		od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
1344 			od_table->FanMinimumPwm * data->smc_state_table.pp_table.FanMaximumRpm / 100;
1345 	else
1346 		od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
1347 			0;
1348 
1349 	if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN)
1350 		od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
1351 			od_table->FanTargetTemperature;
1352 	else
1353 		od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
1354 			0;
1355 
1356 	if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM)
1357 		od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
1358 			od_table->MaxOpTemp;
1359 	else
1360 		od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
1361 			0;
1362 
1363 	for (i = 0; i < OD8_SETTING_COUNT; i++) {
1364 		if (od8_settings->od8_settings_array[i].feature_id) {
1365 			od8_settings->od8_settings_array[i].min_value =
1366 				pptable_information->od_settings_min[i];
1367 			od8_settings->od8_settings_array[i].max_value =
1368 				pptable_information->od_settings_max[i];
1369 			od8_settings->od8_settings_array[i].current_value =
1370 				od8_settings->od8_settings_array[i].default_value;
1371 		} else {
1372 			od8_settings->od8_settings_array[i].min_value =
1373 				0;
1374 			od8_settings->od8_settings_array[i].max_value =
1375 				0;
1376 			od8_settings->od8_settings_array[i].current_value =
1377 				0;
1378 		}
1379 	}
1380 
1381 	ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, false);
1382 	PP_ASSERT_WITH_CODE(!ret,
1383 			"Failed to import over drive table!",
1384 			return ret);
1385 
1386 	return 0;
1387 }
1388 
1389 static int vega20_od8_set_settings(
1390 		struct pp_hwmgr *hwmgr,
1391 		uint32_t index,
1392 		uint32_t value)
1393 {
1394 	OverDriveTable_t od_table;
1395 	int ret = 0;
1396 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1397 	struct vega20_od8_single_setting *od8_settings =
1398 			data->od8_settings.od8_settings_array;
1399 
1400 	ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, true);
1401 	PP_ASSERT_WITH_CODE(!ret,
1402 			"Failed to export over drive table!",
1403 			return ret);
1404 
	switch (index) {
1406 	case OD8_SETTING_GFXCLK_FMIN:
1407 		od_table.GfxclkFmin = (uint16_t)value;
1408 		break;
1409 	case OD8_SETTING_GFXCLK_FMAX:
1410 		if (value < od8_settings[OD8_SETTING_GFXCLK_FMAX].min_value ||
1411 		    value > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value)
1412 			return -EINVAL;
1413 
1414 		od_table.GfxclkFmax = (uint16_t)value;
1415 		break;
1416 	case OD8_SETTING_GFXCLK_FREQ1:
1417 		od_table.GfxclkFreq1 = (uint16_t)value;
1418 		break;
1419 	case OD8_SETTING_GFXCLK_VOLTAGE1:
1420 		od_table.GfxclkVolt1 = (uint16_t)value;
1421 		break;
1422 	case OD8_SETTING_GFXCLK_FREQ2:
1423 		od_table.GfxclkFreq2 = (uint16_t)value;
1424 		break;
1425 	case OD8_SETTING_GFXCLK_VOLTAGE2:
1426 		od_table.GfxclkVolt2 = (uint16_t)value;
1427 		break;
1428 	case OD8_SETTING_GFXCLK_FREQ3:
1429 		od_table.GfxclkFreq3 = (uint16_t)value;
1430 		break;
1431 	case OD8_SETTING_GFXCLK_VOLTAGE3:
1432 		od_table.GfxclkVolt3 = (uint16_t)value;
1433 		break;
1434 	case OD8_SETTING_UCLK_FMAX:
1435 		if (value < od8_settings[OD8_SETTING_UCLK_FMAX].min_value ||
1436 		    value > od8_settings[OD8_SETTING_UCLK_FMAX].max_value)
1437 			return -EINVAL;
1438 		od_table.UclkFmax = (uint16_t)value;
1439 		break;
1440 	case OD8_SETTING_POWER_PERCENTAGE:
1441 		od_table.OverDrivePct = (int16_t)value;
1442 		break;
1443 	case OD8_SETTING_FAN_ACOUSTIC_LIMIT:
1444 		od_table.FanMaximumRpm = (uint16_t)value;
1445 		break;
1446 	case OD8_SETTING_FAN_MIN_SPEED:
1447 		od_table.FanMinimumPwm = (uint16_t)value;
1448 		break;
1449 	case OD8_SETTING_FAN_TARGET_TEMP:
1450 		od_table.FanTargetTemperature = (uint16_t)value;
1451 		break;
1452 	case OD8_SETTING_OPERATING_TEMP_MAX:
1453 		od_table.MaxOpTemp = (uint16_t)value;
1454 		break;
1455 	}
1456 
1457 	ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, false);
1458 	PP_ASSERT_WITH_CODE(!ret,
1459 			"Failed to import overdrive table!",
1460 			return ret);
1461 
1462 	return 0;
1463 }
1464 
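/*
 * Report the current sclk overdrive as the percentage by which the top
 * gfxclk DPM level exceeds the golden (default) top level, rounded up.
 * For example, 1700 MHz against a golden 1600 MHz gives
 * DIV_ROUND_UP((1700 - 1600) * 100, 1600) = 7.
 */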
1465 static int vega20_get_sclk_od(
1466 		struct pp_hwmgr *hwmgr)
1467 {
1468 	struct vega20_hwmgr *data = hwmgr->backend;
1469 	struct vega20_single_dpm_table *sclk_table =
1470 			&(data->dpm_table.gfx_table);
1471 	struct vega20_single_dpm_table *golden_sclk_table =
1472 			&(data->golden_dpm_table.gfx_table);
1473 	int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
1474 	int golden_value = golden_sclk_table->dpm_levels
1475 			[golden_sclk_table->count - 1].value;
1476 
1477 	/* od percentage */
1478 	value -= golden_value;
1479 	value = DIV_ROUND_UP(value * 100, golden_value);
1480 
1481 	return value;
1482 }
1483 
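/*
 * Inverse of vega20_get_sclk_od(): convert the requested percentage into an
 * absolute gfxclk Fmax (golden top level plus that percentage of it), push
 * it through the OD8 path, then rebuild the cached gfxclk DPM table so the
 * new ceiling is visible to later queries.
 */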
1484 static int vega20_set_sclk_od(
1485 		struct pp_hwmgr *hwmgr, uint32_t value)
1486 {
1487 	struct vega20_hwmgr *data = hwmgr->backend;
1488 	struct vega20_single_dpm_table *golden_sclk_table =
1489 			&(data->golden_dpm_table.gfx_table);
1490 	uint32_t od_sclk;
1491 	int ret = 0;
1492 
1493 	od_sclk = golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * value;
1494 	od_sclk /= 100;
1495 	od_sclk += golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
1496 
1497 	ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_GFXCLK_FMAX, od_sclk);
1498 	PP_ASSERT_WITH_CODE(!ret,
1499 			"[SetSclkOD] failed to set od gfxclk!",
1500 			return ret);
1501 
1502 	/* retrieve updated gfxclk table */
1503 	ret = vega20_setup_gfxclk_dpm_table(hwmgr);
1504 	PP_ASSERT_WITH_CODE(!ret,
1505 			"[SetSclkOD] failed to refresh gfxclk table!",
1506 			return ret);
1507 
1508 	return 0;
1509 }
1510 
1511 static int vega20_get_mclk_od(
1512 		struct pp_hwmgr *hwmgr)
1513 {
1514 	struct vega20_hwmgr *data = hwmgr->backend;
1515 	struct vega20_single_dpm_table *mclk_table =
1516 			&(data->dpm_table.mem_table);
1517 	struct vega20_single_dpm_table *golden_mclk_table =
1518 			&(data->golden_dpm_table.mem_table);
1519 	int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
1520 	int golden_value = golden_mclk_table->dpm_levels
1521 			[golden_mclk_table->count - 1].value;
1522 
1523 	/* od percentage */
1524 	value -= golden_value;
1525 	value = DIV_ROUND_UP(value * 100, golden_value);
1526 
1527 	return value;
1528 }
1529 
1530 static int vega20_set_mclk_od(
1531 		struct pp_hwmgr *hwmgr, uint32_t value)
1532 {
1533 	struct vega20_hwmgr *data = hwmgr->backend;
1534 	struct vega20_single_dpm_table *golden_mclk_table =
1535 			&(data->golden_dpm_table.mem_table);
1536 	uint32_t od_mclk;
1537 	int ret = 0;
1538 
1539 	od_mclk = golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * value;
1540 	od_mclk /= 100;
1541 	od_mclk += golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
1542 
1543 	ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_UCLK_FMAX, od_mclk);
1544 	PP_ASSERT_WITH_CODE(!ret,
1545 			"[SetMclkOD] failed to set od memclk!",
1546 			return ret);
1547 
1548 	/* retrieve updated memclk table */
1549 	ret = vega20_setup_memclk_dpm_table(hwmgr);
1550 	PP_ASSERT_WITH_CODE(!ret,
1551 			"[SetMclkOD] failed to refresh memclk table!",
1552 			return ret);
1553 
1554 	return 0;
1555 }
1556 
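/*
 * Cache the clocks advertised for the UMD pstates: a fixed intermediate DPM
 * level for sclk/mclk when the tables are deep enough (level 0 otherwise),
 * plus the top level of each table as the peak values.
 */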
1557 static void vega20_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
1558 {
1559 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1560 	struct vega20_single_dpm_table *gfx_table = &(data->dpm_table.gfx_table);
1561 	struct vega20_single_dpm_table *mem_table = &(data->dpm_table.mem_table);
1562 
1563 	if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
1564 	    mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) {
1565 		hwmgr->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
1566 		hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
1567 	} else {
1568 		hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value;
1569 		hwmgr->pstate_mclk = mem_table->dpm_levels[0].value;
1570 	}
1571 
1572 	hwmgr->pstate_sclk_peak = gfx_table->dpm_levels[gfx_table->count - 1].value;
1573 	hwmgr->pstate_mclk_peak = mem_table->dpm_levels[mem_table->count - 1].value;
1574 }
1575 
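/*
 * Query the highest sustainable frequency of @clock_select from the SMU.
 * The DC-mode limit (presumably the battery power-source cap) is tried
 * first; if the SMU reports zero, the AC limit is used instead.
 */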
1576 static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
1577 		PP_Clock *clock, PPCLK_e clock_select)
1578 {
1579 	int ret = 0;
1580 
1581 	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
1582 			PPSMC_MSG_GetDcModeMaxDpmFreq,
1583 			(clock_select << 16),
1584 			clock)) == 0,
1585 			"[GetMaxSustainableClock] Failed to get max DC clock from SMC!",
1586 			return ret);
1587 
1588 	/* if DC limit is zero, return AC limit */
1589 	if (*clock == 0) {
1590 		PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
1591 			PPSMC_MSG_GetMaxDpmFreq,
1592 			(clock_select << 16),
1593 			clock)) == 0,
1594 			"[GetMaxSustainableClock] failed to get max AC clock from SMC!",
1595 			return ret);
1596 	}
1597 
1598 	return 0;
1599 }
1600 
1601 static int vega20_init_max_sustainable_clocks(struct pp_hwmgr *hwmgr)
1602 {
1603 	struct vega20_hwmgr *data =
1604 		(struct vega20_hwmgr *)(hwmgr->backend);
1605 	struct vega20_max_sustainable_clocks *max_sustainable_clocks =
1606 		&(data->max_sustainable_clocks);
1607 	int ret = 0;
1608 
1609 	max_sustainable_clocks->uclock = data->vbios_boot_state.mem_clock / 100;
1610 	max_sustainable_clocks->soc_clock = data->vbios_boot_state.soc_clock / 100;
1611 	max_sustainable_clocks->dcef_clock = data->vbios_boot_state.dcef_clock / 100;
1612 	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
1613 	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
1614 	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
1615 
1616 	if (data->smu_features[GNLD_DPM_UCLK].enabled)
1617 		PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1618 				&(max_sustainable_clocks->uclock),
1619 				PPCLK_UCLK)) == 0,
1620 				"[InitMaxSustainableClocks] failed to get max UCLK from SMC!",
1621 				return ret);
1622 
1623 	if (data->smu_features[GNLD_DPM_SOCCLK].enabled)
1624 		PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1625 				&(max_sustainable_clocks->soc_clock),
1626 				PPCLK_SOCCLK)) == 0,
1627 				"[InitMaxSustainableClocks] failed to get max SOCCLK from SMC!",
1628 				return ret);
1629 
1630 	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
1631 		PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1632 				&(max_sustainable_clocks->dcef_clock),
1633 				PPCLK_DCEFCLK)) == 0,
1634 				"[InitMaxSustainableClocks] failed to get max DCEFCLK from SMC!",
1635 				return ret);
1636 		PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1637 				&(max_sustainable_clocks->display_clock),
1638 				PPCLK_DISPCLK)) == 0,
1639 				"[InitMaxSustainableClocks] failed to get max DISPCLK from SMC!",
1640 				return ret);
1641 		PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1642 				&(max_sustainable_clocks->phy_clock),
1643 				PPCLK_PHYCLK)) == 0,
1644 				"[InitMaxSustainableClocks] failed to get max PHYCLK from SMC!",
1645 				return ret);
1646 		PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1647 				&(max_sustainable_clocks->pixel_clock),
1648 				PPCLK_PIXCLK)) == 0,
1649 				"[InitMaxSustainableClocks] failed to get max PIXCLK from SMC!",
1650 				return ret);
1651 	}
1652 
1653 	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
1654 		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;
1655 
1656 	return 0;
1657 }
1658 
1659 static int vega20_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr)
1660 {
1661 	int result;
1662 
1663 	result = smum_send_msg_to_smc(hwmgr,
1664 		PPSMC_MSG_SetMGpuFanBoostLimitRpm,
1665 		NULL);
1666 	PP_ASSERT_WITH_CODE(!result,
1667 			"[EnableMgpuFan] Failed to enable mgpu fan boost!",
1668 			return result);
1669 
1670 	return 0;
1671 }
1672 
1673 static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr)
1674 {
1675 	struct vega20_hwmgr *data =
1676 		(struct vega20_hwmgr *)(hwmgr->backend);
1677 
1678 	data->uvd_power_gated = true;
1679 	data->vce_power_gated = true;
1680 }
1681 
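/*
 * Main DPM bring-up sequence: program the allowed feature mask and the SMC
 * tables, run the BTC calibration passes, enable all supported SMU features,
 * configure PCIe and display parameters, and then derive the driver-side
 * state (default DPM tables, max sustainable clocks, OD8 defaults, UMD
 * pstate clocks) before caching the default PPT power limit.
 */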
1682 static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1683 {
1684 	int result = 0;
1685 
1686 	smum_send_msg_to_smc_with_parameter(hwmgr,
1687 			PPSMC_MSG_NumOfDisplays, 0, NULL);
1688 
1689 	result = vega20_set_allowed_featuresmask(hwmgr);
1690 	PP_ASSERT_WITH_CODE(!result,
1691 			"[EnableDPMTasks] Failed to set allowed featuresmask!",
1692 			return result);
1693 
1694 	result = vega20_init_smc_table(hwmgr);
1695 	PP_ASSERT_WITH_CODE(!result,
1696 			"[EnableDPMTasks] Failed to initialize SMC table!",
1697 			return result);
1698 
1699 	result = vega20_run_btc(hwmgr);
1700 	PP_ASSERT_WITH_CODE(!result,
1701 			"[EnableDPMTasks] Failed to run btc!",
1702 			return result);
1703 
1704 	result = vega20_run_btc_afll(hwmgr);
1705 	PP_ASSERT_WITH_CODE(!result,
1706 			"[EnableDPMTasks] Failed to run btc afll!",
1707 			return result);
1708 
1709 	result = vega20_enable_all_smu_features(hwmgr);
1710 	PP_ASSERT_WITH_CODE(!result,
1711 			"[EnableDPMTasks] Failed to enable all smu features!",
1712 			return result);
1713 
1714 	result = vega20_override_pcie_parameters(hwmgr);
1715 	PP_ASSERT_WITH_CODE(!result,
1716 			"[EnableDPMTasks] Failed to override pcie parameters!",
1717 			return result);
1718 
1719 	result = vega20_notify_smc_display_change(hwmgr);
1720 	PP_ASSERT_WITH_CODE(!result,
1721 			"[EnableDPMTasks] Failed to notify smc display change!",
1722 			return result);
1723 
1724 	result = vega20_send_clock_ratio(hwmgr);
1725 	PP_ASSERT_WITH_CODE(!result,
1726 			"[EnableDPMTasks] Failed to send clock ratio!",
1727 			return result);
1728 
1729 	/* Initialize UVD/VCE powergating state */
1730 	vega20_init_powergate_state(hwmgr);
1731 
1732 	result = vega20_setup_default_dpm_tables(hwmgr);
1733 	PP_ASSERT_WITH_CODE(!result,
1734 			"[EnableDPMTasks] Failed to setup default DPM tables!",
1735 			return result);
1736 
1737 	result = vega20_init_max_sustainable_clocks(hwmgr);
1738 	PP_ASSERT_WITH_CODE(!result,
1739 			"[EnableDPMTasks] Failed to get maximum sustainable clocks!",
1740 			return result);
1741 
1742 	result = vega20_power_control_set_level(hwmgr);
1743 	PP_ASSERT_WITH_CODE(!result,
1744 			"[EnableDPMTasks] Failed to power control set level!",
1745 			return result);
1746 
1747 	result = vega20_od8_initialize_default_settings(hwmgr);
1748 	PP_ASSERT_WITH_CODE(!result,
1749 			"[EnableDPMTasks] Failed to initialize OD8 default settings!",
1750 			return result);
1751 
1752 	vega20_populate_umdpstate_clocks(hwmgr);
1753 
1754 	result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit,
1755 			POWER_SOURCE_AC << 16, &hwmgr->default_power_limit);
1756 	PP_ASSERT_WITH_CODE(!result,
1757 			"[GetPptLimit] get default PPT limit failed!",
1758 			return result);
1759 	hwmgr->power_limit =
1760 		hwmgr->default_power_limit;
1761 
1762 	return 0;
1763 }
1764 
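/*
 * Return the index of the lowest enabled entry of a DPM table
 * (vega20_find_highest_dpm_level() below scans from the top).  If nothing
 * is enabled, level 0 is force-enabled so callers always get a usable index.
 */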
1765 static uint32_t vega20_find_lowest_dpm_level(
1766 		struct vega20_single_dpm_table *table)
1767 {
1768 	uint32_t i;
1769 
1770 	for (i = 0; i < table->count; i++) {
1771 		if (table->dpm_levels[i].enabled)
1772 			break;
1773 	}
1774 	if (i >= table->count) {
1775 		i = 0;
1776 		table->dpm_levels[i].enabled = true;
1777 	}
1778 
1779 	return i;
1780 }
1781 
1782 static uint32_t vega20_find_highest_dpm_level(
1783 		struct vega20_single_dpm_table *table)
1784 {
1785 	int i = 0;
1786 
1787 	PP_ASSERT_WITH_CODE(table != NULL,
1788 			"[FindHighestDPMLevel] DPM Table does not exist!",
1789 			return 0);
1790 	PP_ASSERT_WITH_CODE(table->count > 0,
1791 			"[FindHighestDPMLevel] DPM Table has no entry!",
1792 			return 0);
1793 	PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
1794 			"[FindHighestDPMLevel] DPM Table has too many entries!",
1795 			return MAX_REGULAR_DPM_NUMBER - 1);
1796 
1797 	for (i = table->count - 1; i >= 0; i--) {
1798 		if (table->dpm_levels[i].enabled)
1799 			break;
1800 	}
1801 	if (i < 0) {
1802 		i = 0;
1803 		table->dpm_levels[i].enabled = true;
1804 	}
1805 
1806 	return i;
1807 }
1808 
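/*
 * Push the cached soft-minimum frequencies (hard minimum for DCEFCLK) to the
 * SMU for every DPM domain selected in @feature_mask.  The message parameter
 * packs the clock selector in the upper 16 bits and the frequency in MHz in
 * the lower 16 bits.
 */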
1809 static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
1810 {
1811 	struct vega20_hwmgr *data =
1812 			(struct vega20_hwmgr *)(hwmgr->backend);
1813 	uint32_t min_freq;
1814 	int ret = 0;
1815 
1816 	if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
1817 	   (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
1818 		min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
1819 		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1820 					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1821 					(PPCLK_GFXCLK << 16) | (min_freq & 0xffff),
1822 					NULL)),
1823 					"Failed to set soft min gfxclk !",
1824 					return ret);
1825 	}
1826 
1827 	if (data->smu_features[GNLD_DPM_UCLK].enabled &&
1828 	   (feature_mask & FEATURE_DPM_UCLK_MASK)) {
1829 		min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
1830 		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1831 					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1832 					(PPCLK_UCLK << 16) | (min_freq & 0xffff),
1833 					NULL)),
1834 					"Failed to set soft min memclk !",
1835 					return ret);
1836 	}
1837 
1838 	if (data->smu_features[GNLD_DPM_UVD].enabled &&
1839 	   (feature_mask & FEATURE_DPM_UVD_MASK)) {
1840 		min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
1841 
1842 		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1843 					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1844 					(PPCLK_VCLK << 16) | (min_freq & 0xffff),
1845 					NULL)),
1846 					"Failed to set soft min vclk!",
1847 					return ret);
1848 
1849 		min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level;
1850 
1851 		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1852 					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1853 					(PPCLK_DCLK << 16) | (min_freq & 0xffff),
1854 					NULL)),
1855 					"Failed to set soft min dclk!",
1856 					return ret);
1857 	}
1858 
1859 	if (data->smu_features[GNLD_DPM_VCE].enabled &&
1860 	   (feature_mask & FEATURE_DPM_VCE_MASK)) {
1861 		min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
1862 
1863 		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1864 					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1865 					(PPCLK_ECLK << 16) | (min_freq & 0xffff),
1866 					NULL)),
1867 					"Failed to set soft min eclk!",
1868 					return ret);
1869 	}
1870 
1871 	if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
1872 	   (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
1873 		min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
1874 
1875 		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1876 					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1877 					(PPCLK_SOCCLK << 16) | (min_freq & 0xffff),
1878 					NULL)),
1879 					"Failed to set soft min socclk!",
1880 					return ret);
1881 	}
1882 
1883 	if (data->smu_features[GNLD_DPM_FCLK].enabled &&
1884 	   (feature_mask & FEATURE_DPM_FCLK_MASK)) {
1885 		min_freq = data->dpm_table.fclk_table.dpm_state.soft_min_level;
1886 
1887 		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1888 					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1889 					(PPCLK_FCLK << 16) | (min_freq & 0xffff),
1890 					NULL)),
1891 					"Failed to set soft min fclk!",
1892 					return ret);
1893 	}
1894 
1895 	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled &&
1896 	   (feature_mask & FEATURE_DPM_DCEFCLK_MASK)) {
1897 		min_freq = data->dpm_table.dcef_table.dpm_state.hard_min_level;
1898 
1899 		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1900 					hwmgr, PPSMC_MSG_SetHardMinByFreq,
1901 					(PPCLK_DCEFCLK << 16) | (min_freq & 0xffff),
1902 					NULL)),
1903 					"Failed to set hard min dcefclk!",
1904 					return ret);
1905 	}
1906 
1907 	return ret;
1908 }
1909 
1910 static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
1911 {
1912 	struct vega20_hwmgr *data =
1913 			(struct vega20_hwmgr *)(hwmgr->backend);
1914 	uint32_t max_freq;
1915 	int ret = 0;
1916 
1917 	if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
1918 	   (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
1919 		max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
1920 
1921 		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1922 					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1923 					(PPCLK_GFXCLK << 16) | (max_freq & 0xffff),
1924 					NULL)),
1925 					"Failed to set soft max gfxclk!",
1926 					return ret);
1927 	}
1928 
1929 	if (data->smu_features[GNLD_DPM_UCLK].enabled &&
1930 	   (feature_mask & FEATURE_DPM_UCLK_MASK)) {
1931 		max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
1932 
1933 		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1934 					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1935 					(PPCLK_UCLK << 16) | (max_freq & 0xffff),
1936 					NULL)),
1937 					"Failed to set soft max memclk!",
1938 					return ret);
1939 	}
1940 
1941 	if (data->smu_features[GNLD_DPM_UVD].enabled &&
1942 	   (feature_mask & FEATURE_DPM_UVD_MASK)) {
1943 		max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
1944 
1945 		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1946 					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1947 					(PPCLK_VCLK << 16) | (max_freq & 0xffff),
1948 					NULL)),
1949 					"Failed to set soft max vclk!",
1950 					return ret);
1951 
1952 		max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
1953 		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1954 					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1955 					(PPCLK_DCLK << 16) | (max_freq & 0xffff),
1956 					NULL)),
1957 					"Failed to set soft max dclk!",
1958 					return ret);
1959 	}
1960 
1961 	if (data->smu_features[GNLD_DPM_VCE].enabled &&
1962 	   (feature_mask & FEATURE_DPM_VCE_MASK)) {
1963 		max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
1964 
1965 		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1966 					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1967 					(PPCLK_ECLK << 16) | (max_freq & 0xffff),
1968 					NULL)),
1969 					"Failed to set soft max eclk!",
1970 					return ret);
1971 	}
1972 
1973 	if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
1974 	   (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
1975 		max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
1976 
1977 		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1978 					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1979 					(PPCLK_SOCCLK << 16) | (max_freq & 0xffff),
1980 					NULL)),
1981 					"Failed to set soft max socclk!",
1982 					return ret);
1983 	}
1984 
1985 	if (data->smu_features[GNLD_DPM_FCLK].enabled &&
1986 	   (feature_mask & FEATURE_DPM_FCLK_MASK)) {
1987 		max_freq = data->dpm_table.fclk_table.dpm_state.soft_max_level;
1988 
1989 		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1990 					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1991 					(PPCLK_FCLK << 16) | (max_freq & 0xffff),
1992 					NULL)),
1993 					"Failed to set soft max fclk!",
1994 					return ret);
1995 	}
1996 
1997 	return ret;
1998 }
1999 
2000 static int vega20_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
2001 {
2002 	struct vega20_hwmgr *data =
2003 			(struct vega20_hwmgr *)(hwmgr->backend);
2004 	int ret = 0;
2005 
2006 	if (data->smu_features[GNLD_DPM_VCE].supported) {
2007 		if (data->smu_features[GNLD_DPM_VCE].enabled == enable) {
2008 			if (enable)
2009 				PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already enabled!\n");
2010 			else
2011 				PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already disabled!\n");
2012 		}
2013 
2014 		ret = vega20_enable_smc_features(hwmgr,
2015 				enable,
2016 				data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap);
2017 		PP_ASSERT_WITH_CODE(!ret,
2018 				"Attempt to Enable/Disable DPM VCE Failed!",
2019 				return ret);
2020 		data->smu_features[GNLD_DPM_VCE].enabled = enable;
2021 	}
2022 
2023 	return 0;
2024 }
2025 
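/*
 * Ask the SMU for the minimum or maximum DPM frequency of @clock_select,
 * with the clock selector encoded in the upper 16 bits of the message
 * parameter.
 */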
2026 static int vega20_get_clock_ranges(struct pp_hwmgr *hwmgr,
2027 		uint32_t *clock,
2028 		PPCLK_e clock_select,
2029 		bool max)
2030 {
2031 	int ret;
2032 	*clock = 0;
2033 
2034 	if (max) {
2035 		PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2036 				PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16),
2037 				clock)) == 0,
2038 				"[GetClockRanges] Failed to get max clock from SMC!",
2039 				return ret);
2040 	} else {
2041 		PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2042 				PPSMC_MSG_GetMinDpmFreq,
2043 				(clock_select << 16),
2044 				clock)) == 0,
2045 				"[GetClockRanges] Failed to get min clock from SMC!",
2046 				return ret);
2047 	}
2048 
2049 	return 0;
2050 }
2051 
2052 static uint32_t vega20_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
2053 {
2054 	struct vega20_hwmgr *data =
2055 			(struct vega20_hwmgr *)(hwmgr->backend);
2056 	uint32_t gfx_clk;
2057 	int ret = 0;
2058 
2059 	PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled,
2060 			"[GetSclks]: gfxclk dpm not enabled!\n",
2061 			return -EPERM);
2062 
2063 	if (low) {
2064 		ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, false);
2065 		PP_ASSERT_WITH_CODE(!ret,
2066 			"[GetSclks]: failed to get min PPCLK_GFXCLK\n",
2067 			return ret);
2068 	} else {
2069 		ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, true);
2070 		PP_ASSERT_WITH_CODE(!ret,
2071 			"[GetSclks]: failed to get max PPCLK_GFXCLK\n",
2072 			return ret);
2073 	}
2074 
2075 	return (gfx_clk * 100);
2076 }
2077 
2078 static uint32_t vega20_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
2079 {
2080 	struct vega20_hwmgr *data =
2081 			(struct vega20_hwmgr *)(hwmgr->backend);
2082 	uint32_t mem_clk;
2083 	int ret = 0;
2084 
2085 	PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled,
2086 			"[GetMclks]: memclk dpm not enabled!\n",
2087 			return -EPERM);
2088 
2089 	if (low) {
2090 		ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, false);
2091 		PP_ASSERT_WITH_CODE(!ret,
2092 			"[GetMclks]: failed to get min PPCLK_UCLK\n",
2093 			return ret);
2094 	} else {
2095 		ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, true);
2096 		PP_ASSERT_WITH_CODE(!ret,
2097 			"[GetMclks]: failed to get max PPCLK_UCLK\n",
2098 			return ret);
2099 	}
2100 
2101 	return (mem_clk * 100);
2102 }
2103 
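/*
 * Fetch the SMU metrics table, serving repeated queries from a cached copy
 * that is treated as stale after 1 ms (or when @bypass_cache is set).
 */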
2104 static int vega20_get_metrics_table(struct pp_hwmgr *hwmgr,
2105 				    SmuMetrics_t *metrics_table,
2106 				    bool bypass_cache)
2107 {
2108 	struct vega20_hwmgr *data =
2109 			(struct vega20_hwmgr *)(hwmgr->backend);
2110 	int ret = 0;
2111 
2112 	if (bypass_cache ||
2113 	    !data->metrics_time ||
2114 	    time_after(jiffies, data->metrics_time + msecs_to_jiffies(1))) {
2115 		ret = smum_smc_table_manager(hwmgr,
2116 					     (uint8_t *)(&data->metrics_table),
2117 					     TABLE_SMU_METRICS,
2118 					     true);
2119 		if (ret) {
2120 			pr_info("Failed to export SMU metrics table!\n");
2121 			return ret;
2122 		}
2123 		data->metrics_time = jiffies;
2124 	}
2125 
2126 	if (metrics_table)
2127 		memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t));
2128 
2129 	return ret;
2130 }
2131 
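/*
 * Read the socket power from the metrics table.  The value is shifted left
 * by 8, presumably to express it in the 24.8 fixed-point watt format the
 * sensor interface expects.
 */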
2132 static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
2133 		uint32_t *query)
2134 {
2135 	int ret = 0;
2136 	SmuMetrics_t metrics_table;
2137 
2138 	ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
2139 	if (ret)
2140 		return ret;
2141 
2142 	/* SMU firmware 40.46 (version 0x282e00) reports this value as AverageSocketPower */
2143 	if (hwmgr->smu_version == 0x282e00)
2144 		*query = metrics_table.AverageSocketPower << 8;
2145 	else
2146 		*query = metrics_table.CurrSocketPower << 8;
2147 
2148 	return ret;
2149 }
2150 
2151 static int vega20_get_current_clk_freq(struct pp_hwmgr *hwmgr,
2152 		PPCLK_e clk_id, uint32_t *clk_freq)
2153 {
2154 	int ret = 0;
2155 
2156 	*clk_freq = 0;
2157 
2158 	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2159 			PPSMC_MSG_GetDpmClockFreq, (clk_id << 16),
2160 			clk_freq)) == 0,
2161 			"[GetCurrentClkFreq] Attempt to get Current Frequency Failed!",
2162 			return ret);
2163 
2164 	*clk_freq = *clk_freq * 100;
2165 
2166 	return 0;
2167 }
2168 
2169 static int vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr,
2170 		int idx,
2171 		uint32_t *activity_percent)
2172 {
2173 	int ret = 0;
2174 	SmuMetrics_t metrics_table;
2175 
2176 	ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
2177 	if (ret)
2178 		return ret;
2179 
2180 	switch (idx) {
2181 	case AMDGPU_PP_SENSOR_GPU_LOAD:
2182 		*activity_percent = metrics_table.AverageGfxActivity;
2183 		break;
2184 	case AMDGPU_PP_SENSOR_MEM_LOAD:
2185 		*activity_percent = metrics_table.AverageUclkActivity;
2186 		break;
2187 	default:
2188 		pr_err("Invalid index for retrieving clock activity\n");
2189 		return -EINVAL;
2190 	}
2191 
2192 	return ret;
2193 }
2194 
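/*
 * Sensor entry point: map an AMDGPU_PP_SENSOR_* query onto the matching
 * metrics-table field, SMU message or register read, and report the result
 * size in bytes.
 */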
2195 static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
2196 			      void *value, int *size)
2197 {
2198 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2199 	struct amdgpu_device *adev = hwmgr->adev;
2200 	SmuMetrics_t metrics_table;
2201 	uint32_t val_vid;
2202 	int ret = 0;
2203 
2204 	switch (idx) {
2205 	case AMDGPU_PP_SENSOR_GFX_SCLK:
2206 		ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
2207 		if (ret)
2208 			return ret;
2209 
2210 		*((uint32_t *)value) = metrics_table.AverageGfxclkFrequency * 100;
2211 		*size = 4;
2212 		break;
2213 	case AMDGPU_PP_SENSOR_GFX_MCLK:
2214 		ret = vega20_get_current_clk_freq(hwmgr,
2215 				PPCLK_UCLK,
2216 				(uint32_t *)value);
2217 		if (!ret)
2218 			*size = 4;
2219 		break;
2220 	case AMDGPU_PP_SENSOR_GPU_LOAD:
2221 	case AMDGPU_PP_SENSOR_MEM_LOAD:
2222 		ret = vega20_get_current_activity_percent(hwmgr, idx, (uint32_t *)value);
2223 		if (!ret)
2224 			*size = 4;
2225 		break;
2226 	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
2227 		*((uint32_t *)value) = vega20_thermal_get_temperature(hwmgr);
2228 		*size = 4;
2229 		break;
2230 	case AMDGPU_PP_SENSOR_EDGE_TEMP:
2231 		ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
2232 		if (ret)
2233 			return ret;
2234 
2235 		*((uint32_t *)value) = metrics_table.TemperatureEdge *
2236 			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2237 		*size = 4;
2238 		break;
2239 	case AMDGPU_PP_SENSOR_MEM_TEMP:
2240 		ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
2241 		if (ret)
2242 			return ret;
2243 
2244 		*((uint32_t *)value) = metrics_table.TemperatureHBM *
2245 			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2246 		*size = 4;
2247 		break;
2248 	case AMDGPU_PP_SENSOR_UVD_POWER:
2249 		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
2250 		*size = 4;
2251 		break;
2252 	case AMDGPU_PP_SENSOR_VCE_POWER:
2253 		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
2254 		*size = 4;
2255 		break;
2256 	case AMDGPU_PP_SENSOR_GPU_POWER:
2257 		*size = 16;
2258 		ret = vega20_get_gpu_power(hwmgr, (uint32_t *)value);
2259 		break;
2260 	case AMDGPU_PP_SENSOR_VDDGFX:
2261 		val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
2262 			SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
2263 			SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;
2264 		*((uint32_t *)value) =
2265 			(uint32_t)convert_to_vddc((uint8_t)val_vid);
2266 		break;
2267 	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
2268 		ret = vega20_get_enabled_smc_features(hwmgr, (uint64_t *)value);
2269 		if (!ret)
2270 			*size = 8;
2271 		break;
2272 	default:
2273 		ret = -EOPNOTSUPP;
2274 		break;
2275 	}
2276 	return ret;
2277 }
2278 
2279 static int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
2280 		struct pp_display_clock_request *clock_req)
2281 {
2282 	int result = 0;
2283 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2284 	enum amd_pp_clock_type clk_type = clock_req->clock_type;
2285 	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
2286 	PPCLK_e clk_select = 0;
2287 	uint32_t clk_request = 0;
2288 
2289 	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
2290 		switch (clk_type) {
2291 		case amd_pp_dcef_clock:
2292 			clk_select = PPCLK_DCEFCLK;
2293 			break;
2294 		case amd_pp_disp_clock:
2295 			clk_select = PPCLK_DISPCLK;
2296 			break;
2297 		case amd_pp_pixel_clock:
2298 			clk_select = PPCLK_PIXCLK;
2299 			break;
2300 		case amd_pp_phy_clock:
2301 			clk_select = PPCLK_PHYCLK;
2302 			break;
2303 		default:
2304 			pr_info("[DisplayClockVoltageRequest] Invalid clock type!\n");
2305 			result = -EINVAL;
2306 			break;
2307 		}
2308 
2309 		if (!result) {
2310 			clk_request = (clk_select << 16) | clk_freq;
2311 			result = smum_send_msg_to_smc_with_parameter(hwmgr,
2312 					PPSMC_MSG_SetHardMinByFreq,
2313 					clk_request,
2314 					NULL);
2315 		}
2316 	}
2317 
2318 	return result;
2319 }
2320 
2321 static int vega20_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
2322 				PHM_PerformanceLevelDesignation designation, uint32_t index,
2323 				PHM_PerformanceLevel *level)
2324 {
2325 	return 0;
2326 }
2327 
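/*
 * After a power-state adjustment, re-apply the display-driven floors:
 * request a hard minimum DCEFCLK (plus a deep-sleep DCEFCLK floor when
 * supported) and a hard minimum UCLK matching the current display config.
 */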
2328 static int vega20_notify_smc_display_config_after_ps_adjustment(
2329 		struct pp_hwmgr *hwmgr)
2330 {
2331 	struct vega20_hwmgr *data =
2332 			(struct vega20_hwmgr *)(hwmgr->backend);
2333 	struct vega20_single_dpm_table *dpm_table =
2334 			&data->dpm_table.mem_table;
2335 	struct PP_Clocks min_clocks = {0};
2336 	struct pp_display_clock_request clock_req;
2337 	int ret = 0;
2338 
2339 	min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
2340 	min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
2341 	min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
2342 
2343 	if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
2344 		clock_req.clock_type = amd_pp_dcef_clock;
2345 		clock_req.clock_freq_in_khz = min_clocks.dcefClock * 10;
2346 		if (!vega20_display_clock_voltage_request(hwmgr, &clock_req)) {
2347 			if (data->smu_features[GNLD_DS_DCEFCLK].supported)
2348 				PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(
2349 					hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
2350 					min_clocks.dcefClockInSR / 100,
2351 					NULL)) == 0,
2352 					"Attempt to set divider for DCEFCLK Failed!",
2353 					return ret);
2354 		} else {
2355 			pr_info("Attempt to set hard min for DCEFCLK failed!\n");
2356 		}
2357 	}
2358 
2359 	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
2360 		dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100;
2361 		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2362 				PPSMC_MSG_SetHardMinByFreq,
2363 				(PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level,
2364 				NULL)),
2365 				"[SetHardMinFreq] Set hard min uclk failed!",
2366 				return ret);
2367 	}
2368 
2369 	return 0;
2370 }
2371 
2372 static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
2373 {
2374 	struct vega20_hwmgr *data =
2375 			(struct vega20_hwmgr *)(hwmgr->backend);
2376 	uint32_t soft_level;
2377 	int ret = 0;
2378 
2379 	soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
2380 
2381 	data->dpm_table.gfx_table.dpm_state.soft_min_level =
2382 		data->dpm_table.gfx_table.dpm_state.soft_max_level =
2383 		data->dpm_table.gfx_table.dpm_levels[soft_level].value;
2384 
2385 	soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
2386 
2387 	data->dpm_table.mem_table.dpm_state.soft_min_level =
2388 		data->dpm_table.mem_table.dpm_state.soft_max_level =
2389 		data->dpm_table.mem_table.dpm_levels[soft_level].value;
2390 
2391 	soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
2392 
2393 	data->dpm_table.soc_table.dpm_state.soft_min_level =
2394 		data->dpm_table.soc_table.dpm_state.soft_max_level =
2395 		data->dpm_table.soc_table.dpm_levels[soft_level].value;
2396 
2397 	ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2398 						 FEATURE_DPM_UCLK_MASK |
2399 						 FEATURE_DPM_SOCCLK_MASK);
2400 	PP_ASSERT_WITH_CODE(!ret,
2401 			"Failed to upload boot level to highest!",
2402 			return ret);
2403 
2404 	ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2405 						 FEATURE_DPM_UCLK_MASK |
2406 						 FEATURE_DPM_SOCCLK_MASK);
2407 	PP_ASSERT_WITH_CODE(!ret,
2408 			"Failed to upload dpm max level to highest!",
2409 			return ret);
2410 
2411 	return 0;
2412 }
2413 
2414 static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2415 {
2416 	struct vega20_hwmgr *data =
2417 			(struct vega20_hwmgr *)(hwmgr->backend);
2418 	uint32_t soft_level;
2419 	int ret = 0;
2420 
2421 	soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
2422 
2423 	data->dpm_table.gfx_table.dpm_state.soft_min_level =
2424 		data->dpm_table.gfx_table.dpm_state.soft_max_level =
2425 		data->dpm_table.gfx_table.dpm_levels[soft_level].value;
2426 
2427 	soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
2428 
2429 	data->dpm_table.mem_table.dpm_state.soft_min_level =
2430 		data->dpm_table.mem_table.dpm_state.soft_max_level =
2431 		data->dpm_table.mem_table.dpm_levels[soft_level].value;
2432 
2433 	soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
2434 
2435 	data->dpm_table.soc_table.dpm_state.soft_min_level =
2436 		data->dpm_table.soc_table.dpm_state.soft_max_level =
2437 		data->dpm_table.soc_table.dpm_levels[soft_level].value;
2438 
2439 	ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2440 						 FEATURE_DPM_UCLK_MASK |
2441 						 FEATURE_DPM_SOCCLK_MASK);
2442 	PP_ASSERT_WITH_CODE(!ret,
2443 			"Failed to upload boot level to lowest!",
2444 			return ret);
2445 
2446 	ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2447 						 FEATURE_DPM_UCLK_MASK |
2448 						 FEATURE_DPM_SOCCLK_MASK);
2449 	PP_ASSERT_WITH_CODE(!ret,
2450 			"Failed to upload dpm max level to lowest!",
2451 			return ret);
2452 
2453 	return 0;
2455 }
2456 
2457 static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2458 {
2459 	struct vega20_hwmgr *data =
2460 			(struct vega20_hwmgr *)(hwmgr->backend);
2461 	uint32_t soft_min_level, soft_max_level;
2462 	int ret = 0;
2463 
2464 	/* gfxclk soft min/max settings */
2465 	soft_min_level =
2466 		vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
2467 	soft_max_level =
2468 		vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
2469 
2470 	data->dpm_table.gfx_table.dpm_state.soft_min_level =
2471 		data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
2472 	data->dpm_table.gfx_table.dpm_state.soft_max_level =
2473 		data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
2474 
2475 	/* uclk soft min/max settings */
2476 	soft_min_level =
2477 		vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
2478 	soft_max_level =
2479 		vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
2480 
2481 	data->dpm_table.mem_table.dpm_state.soft_min_level =
2482 		data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
2483 	data->dpm_table.mem_table.dpm_state.soft_max_level =
2484 		data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
2485 
2486 	/* socclk soft min/max settings */
2487 	soft_min_level =
2488 		vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
2489 	soft_max_level =
2490 		vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
2491 
2492 	data->dpm_table.soc_table.dpm_state.soft_min_level =
2493 		data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
2494 	data->dpm_table.soc_table.dpm_state.soft_max_level =
2495 		data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
2496 
2497 	ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2498 						 FEATURE_DPM_UCLK_MASK |
2499 						 FEATURE_DPM_SOCCLK_MASK);
2500 	PP_ASSERT_WITH_CODE(!ret,
2501 			"Failed to upload DPM Bootup Levels!",
2502 			return ret);
2503 
2504 	ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2505 						 FEATURE_DPM_UCLK_MASK |
2506 						 FEATURE_DPM_SOCCLK_MASK);
2507 	PP_ASSERT_WITH_CODE(!ret,
2508 			"Failed to upload DPM Max Levels!",
2509 			return ret);
2510 
2511 	return 0;
2512 }
2513 
2514 static int vega20_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
2515 				uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
2516 {
2517 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2518 	struct vega20_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
2519 	struct vega20_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
2520 	struct vega20_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table);
2521 
2522 	*sclk_mask = 0;
2523 	*mclk_mask = 0;
2524 	*soc_mask  = 0;
2525 
2526 	if (gfx_dpm_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
2527 	    mem_dpm_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL &&
2528 	    soc_dpm_table->count > VEGA20_UMD_PSTATE_SOCCLK_LEVEL) {
2529 		*sclk_mask = VEGA20_UMD_PSTATE_GFXCLK_LEVEL;
2530 		*mclk_mask = VEGA20_UMD_PSTATE_MCLK_LEVEL;
2531 		*soc_mask  = VEGA20_UMD_PSTATE_SOCCLK_LEVEL;
2532 	}
2533 
2534 	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
2535 		*sclk_mask = 0;
2536 	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
2537 		*mclk_mask = 0;
2538 	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
2539 		*sclk_mask = gfx_dpm_table->count - 1;
2540 		*mclk_mask = mem_dpm_table->count - 1;
2541 		*soc_mask  = soc_dpm_table->count - 1;
2542 	}
2543 
2544 	return 0;
2545 }
2546 
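/*
 * Force a clock domain to the levels selected in @mask: the lowest set bit
 * becomes the soft minimum and the highest set bit the soft maximum (hard
 * minimum for DCEFCLK, minimum link level for PCIe), and the result is
 * uploaded to the SMU.
 */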
2547 static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
2548 		enum pp_clock_type type, uint32_t mask)
2549 {
2550 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2551 	uint32_t soft_min_level, soft_max_level, hard_min_level;
2552 	int ret = 0;
2553 
2554 	switch (type) {
2555 	case PP_SCLK:
2556 		soft_min_level = mask ? (ffs(mask) - 1) : 0;
2557 		soft_max_level = mask ? (fls(mask) - 1) : 0;
2558 
2559 		if (soft_max_level >= data->dpm_table.gfx_table.count) {
2560 			pr_err("Clock level specified %d is over max allowed %d\n",
2561 					soft_max_level,
2562 					data->dpm_table.gfx_table.count - 1);
2563 			return -EINVAL;
2564 		}
2565 
2566 		data->dpm_table.gfx_table.dpm_state.soft_min_level =
2567 			data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
2568 		data->dpm_table.gfx_table.dpm_state.soft_max_level =
2569 			data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
2570 
2571 		ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
2572 		PP_ASSERT_WITH_CODE(!ret,
2573 			"Failed to upload boot level to lowest!",
2574 			return ret);
2575 
2576 		ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
2577 		PP_ASSERT_WITH_CODE(!ret,
2578 			"Failed to upload dpm max level to highest!",
2579 			return ret);
2580 		break;
2581 
2582 	case PP_MCLK:
2583 		soft_min_level = mask ? (ffs(mask) - 1) : 0;
2584 		soft_max_level = mask ? (fls(mask) - 1) : 0;
2585 
2586 		if (soft_max_level >= data->dpm_table.mem_table.count) {
2587 			pr_err("Clock level specified %d is over max allowed %d\n",
2588 					soft_max_level,
2589 					data->dpm_table.mem_table.count - 1);
2590 			return -EINVAL;
2591 		}
2592 
2593 		data->dpm_table.mem_table.dpm_state.soft_min_level =
2594 			data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
2595 		data->dpm_table.mem_table.dpm_state.soft_max_level =
2596 			data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
2597 
2598 		ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_UCLK_MASK);
2599 		PP_ASSERT_WITH_CODE(!ret,
2600 			"Failed to upload boot level to lowest!",
2601 			return ret);
2602 
2603 		ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_UCLK_MASK);
2604 		PP_ASSERT_WITH_CODE(!ret,
2605 			"Failed to upload dpm max level to highest!",
2606 			return ret);
2607 
2608 		break;
2609 
2610 	case PP_SOCCLK:
2611 		soft_min_level = mask ? (ffs(mask) - 1) : 0;
2612 		soft_max_level = mask ? (fls(mask) - 1) : 0;
2613 
2614 		if (soft_max_level >= data->dpm_table.soc_table.count) {
2615 			pr_err("Clock level specified %d is over max allowed %d\n",
2616 					soft_max_level,
2617 					data->dpm_table.soc_table.count - 1);
2618 			return -EINVAL;
2619 		}
2620 
2621 		data->dpm_table.soc_table.dpm_state.soft_min_level =
2622 			data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
2623 		data->dpm_table.soc_table.dpm_state.soft_max_level =
2624 			data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
2625 
2626 		ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_SOCCLK_MASK);
2627 		PP_ASSERT_WITH_CODE(!ret,
2628 			"Failed to upload boot level to lowest!",
2629 			return ret);
2630 
2631 		ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_SOCCLK_MASK);
2632 		PP_ASSERT_WITH_CODE(!ret,
2633 			"Failed to upload dpm max level to highest!",
2634 			return ret);
2635 
2636 		break;
2637 
2638 	case PP_FCLK:
2639 		soft_min_level = mask ? (ffs(mask) - 1) : 0;
2640 		soft_max_level = mask ? (fls(mask) - 1) : 0;
2641 
2642 		if (soft_max_level >= data->dpm_table.fclk_table.count) {
2643 			pr_err("Clock level specified %d is over max allowed %d\n",
2644 					soft_max_level,
2645 					data->dpm_table.fclk_table.count - 1);
2646 			return -EINVAL;
2647 		}
2648 
2649 		data->dpm_table.fclk_table.dpm_state.soft_min_level =
2650 			data->dpm_table.fclk_table.dpm_levels[soft_min_level].value;
2651 		data->dpm_table.fclk_table.dpm_state.soft_max_level =
2652 			data->dpm_table.fclk_table.dpm_levels[soft_max_level].value;
2653 
2654 		ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_FCLK_MASK);
2655 		PP_ASSERT_WITH_CODE(!ret,
2656 			"Failed to upload boot level to lowest!",
2657 			return ret);
2658 
2659 		ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_FCLK_MASK);
2660 		PP_ASSERT_WITH_CODE(!ret,
2661 			"Failed to upload dpm max level to highest!",
2662 			return ret);
2663 
2664 		break;
2665 
2666 	case PP_DCEFCLK:
2667 		hard_min_level = mask ? (ffs(mask) - 1) : 0;
2668 
2669 		if (hard_min_level >= data->dpm_table.dcef_table.count) {
2670 			pr_err("Clock level specified %d is over max allowed %d\n",
2671 					hard_min_level,
2672 					data->dpm_table.dcef_table.count - 1);
2673 			return -EINVAL;
2674 		}
2675 
2676 		data->dpm_table.dcef_table.dpm_state.hard_min_level =
2677 			data->dpm_table.dcef_table.dpm_levels[hard_min_level].value;
2678 
2679 		ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_DCEFCLK_MASK);
2680 		PP_ASSERT_WITH_CODE(!ret,
2681 			"Failed to upload boot level to lowest!",
2682 			return ret);
2683 
2684 		/* TODO: Setting DCEFCLK max dpm level is not supported */
2685 
2686 		break;
2687 
2688 	case PP_PCIE:
2689 		soft_min_level = mask ? (ffs(mask) - 1) : 0;
2690 		soft_max_level = mask ? (fls(mask) - 1) : 0;
2691 		if (soft_min_level >= NUM_LINK_LEVELS ||
2692 		    soft_max_level >= NUM_LINK_LEVELS)
2693 			return -EINVAL;
2694 
2695 		ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2696 			PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level,
2697 			NULL);
2698 		PP_ASSERT_WITH_CODE(!ret,
2699 			"Failed to set min link dpm level!",
2700 			return ret);
2701 
2702 		break;
2703 
2704 	default:
2705 		break;
2706 	}
2707 
2708 	return 0;
2709 }
2710 
2711 static int vega20_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
2712 				enum amd_dpm_forced_level level)
2713 {
2714 	int ret = 0;
2715 	uint32_t sclk_mask, mclk_mask, soc_mask;
2716 
2717 	switch (level) {
2718 	case AMD_DPM_FORCED_LEVEL_HIGH:
2719 		ret = vega20_force_dpm_highest(hwmgr);
2720 		break;
2721 
2722 	case AMD_DPM_FORCED_LEVEL_LOW:
2723 		ret = vega20_force_dpm_lowest(hwmgr);
2724 		break;
2725 
2726 	case AMD_DPM_FORCED_LEVEL_AUTO:
2727 		ret = vega20_unforce_dpm_levels(hwmgr);
2728 		break;
2729 
2730 	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
2731 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
2732 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
2733 	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
2734 		ret = vega20_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
2735 		if (ret)
2736 			return ret;
2737 		vega20_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
2738 		vega20_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
2739 		vega20_force_clock_level(hwmgr, PP_SOCCLK, 1 << soc_mask);
2740 		break;
2741 
2742 	case AMD_DPM_FORCED_LEVEL_MANUAL:
2743 	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
2744 	default:
2745 		break;
2746 	}
2747 
2748 	return ret;
2749 }
2750 
2751 static uint32_t vega20_get_fan_control_mode(struct pp_hwmgr *hwmgr)
2752 {
2753 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2754 
2755 	if (!data->smu_features[GNLD_FAN_CONTROL].enabled)
2756 		return AMD_FAN_CTRL_MANUAL;
2757 	else
2758 		return AMD_FAN_CTRL_AUTO;
2759 }
2760 
2761 static void vega20_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
2762 {
2763 	switch (mode) {
2764 	case AMD_FAN_CTRL_NONE:
2765 		vega20_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
2766 		break;
2767 	case AMD_FAN_CTRL_MANUAL:
2768 		if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
2769 			vega20_fan_ctrl_stop_smc_fan_control(hwmgr);
2770 		break;
2771 	case AMD_FAN_CTRL_AUTO:
2772 		if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
2773 			vega20_fan_ctrl_start_smc_fan_control(hwmgr);
2774 		break;
2775 	default:
2776 		break;
2777 	}
2778 }
2779 
2780 static int vega20_get_dal_power_level(struct pp_hwmgr *hwmgr,
2781 		struct amd_pp_simple_clock_info *info)
2782 {
2783 #if 0
2784 	struct phm_ppt_v2_information *table_info =
2785 			(struct phm_ppt_v2_information *)hwmgr->pptable;
2786 	struct phm_clock_and_voltage_limits *max_limits =
2787 			&table_info->max_clock_voltage_on_ac;
2788 
2789 	info->engine_max_clock = max_limits->sclk;
2790 	info->memory_max_clock = max_limits->mclk;
2791 #endif
2792 	return 0;
2793 }
2794 
2795 
2796 static int vega20_get_sclks(struct pp_hwmgr *hwmgr,
2797 		struct pp_clock_levels_with_latency *clocks)
2798 {
2799 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2800 	struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
2801 	int i, count;
2802 
2803 	if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
2804 		return -1;
2805 
2806 	count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2807 	clocks->num_levels = count;
2808 
2809 	for (i = 0; i < count; i++) {
2810 		clocks->data[i].clocks_in_khz =
2811 			dpm_table->dpm_levels[i].value * 1000;
2812 		clocks->data[i].latency_in_us = 0;
2813 	}
2814 
2815 	return 0;
2816 }
2817 
2818 static uint32_t vega20_get_mem_latency(struct pp_hwmgr *hwmgr,
2819 		uint32_t clock)
2820 {
2821 	return 25;
2822 }
2823 
2824 static int vega20_get_memclocks(struct pp_hwmgr *hwmgr,
2825 		struct pp_clock_levels_with_latency *clocks)
2826 {
2827 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2828 	struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.mem_table);
2829 	int i, count;
2830 
2831 	if (!data->smu_features[GNLD_DPM_UCLK].enabled)
2832 		return -1;
2833 
2834 	count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2835 	clocks->num_levels = data->mclk_latency_table.count = count;
2836 
2837 	for (i = 0; i < count; i++) {
2838 		clocks->data[i].clocks_in_khz =
2839 			data->mclk_latency_table.entries[i].frequency =
2840 			dpm_table->dpm_levels[i].value * 1000;
2841 		clocks->data[i].latency_in_us =
2842 			data->mclk_latency_table.entries[i].latency =
2843 			vega20_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
2844 	}
2845 
2846 	return 0;
2847 }
2848 
2849 static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr,
2850 		struct pp_clock_levels_with_latency *clocks)
2851 {
2852 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2853 	struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.dcef_table);
2854 	int i, count;
2855 
2856 	if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled)
2857 		return -1;
2858 
2859 	count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2860 	clocks->num_levels = count;
2861 
2862 	for (i = 0; i < count; i++) {
2863 		clocks->data[i].clocks_in_khz =
2864 			dpm_table->dpm_levels[i].value * 1000;
2865 		clocks->data[i].latency_in_us = 0;
2866 	}
2867 
2868 	return 0;
2869 }
2870 
2871 static int vega20_get_socclocks(struct pp_hwmgr *hwmgr,
2872 		struct pp_clock_levels_with_latency *clocks)
2873 {
2874 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2875 	struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.soc_table);
2876 	int i, count;
2877 
2878 	if (!data->smu_features[GNLD_DPM_SOCCLK].enabled)
2879 		return -1;
2880 
2881 	count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2882 	clocks->num_levels = count;
2883 
2884 	for (i = 0; i < count; i++) {
2885 		clocks->data[i].clocks_in_khz =
2886 			dpm_table->dpm_levels[i].value * 1000;
2887 		clocks->data[i].latency_in_us = 0;
2888 	}
2889 
2890 	return 0;
2892 }
2893 
2894 static int vega20_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
2895 		enum amd_pp_clock_type type,
2896 		struct pp_clock_levels_with_latency *clocks)
2897 {
2898 	int ret;
2899 
2900 	switch (type) {
2901 	case amd_pp_sys_clock:
2902 		ret = vega20_get_sclks(hwmgr, clocks);
2903 		break;
2904 	case amd_pp_mem_clock:
2905 		ret = vega20_get_memclocks(hwmgr, clocks);
2906 		break;
2907 	case amd_pp_dcef_clock:
2908 		ret = vega20_get_dcefclocks(hwmgr, clocks);
2909 		break;
2910 	case amd_pp_soc_clock:
2911 		ret = vega20_get_socclocks(hwmgr, clocks);
2912 		break;
2913 	default:
2914 		return -EINVAL;
2915 	}
2916 
2917 	return ret;
2918 }
2919 
2920 static int vega20_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
2921 		enum amd_pp_clock_type type,
2922 		struct pp_clock_levels_with_voltage *clocks)
2923 {
2924 	clocks->num_levels = 0;
2925 
2926 	return 0;
2927 }
2928 
2929 static int vega20_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
2930 						   void *clock_ranges)
2931 {
2932 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2933 	Watermarks_t *table = &(data->smc_state_table.water_marks_table);
2934 	struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
2935 
2936 	if (!data->registry_data.disable_water_mark &&
2937 	    data->smu_features[GNLD_DPM_DCEFCLK].supported &&
2938 	    data->smu_features[GNLD_DPM_SOCCLK].supported) {
2939 		smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
2940 		data->water_marks_bitmap |= WaterMarksExist;
2941 		data->water_marks_bitmap &= ~WaterMarksLoaded;
2942 	}
2943 
2944 	return 0;
2945 }
2946 
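/*
 * Back end for the user overdrive edit interface (pp_od_clk_voltage):
 * validate the requested sclk/mclk limits or voltage-curve points against
 * the OD8 ranges and stage them in the cached overdrive table.  Nothing is
 * sent to the SMU until PP_OD_COMMIT_DPM_TABLE, which also rebuilds any DPM
 * table whose limits changed; PP_OD_RESTORE_DEFAULT_TABLE re-reads the
 * table from the SMU.
 */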
2947 static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
2948 					enum PP_OD_DPM_TABLE_COMMAND type,
2949 					long *input, uint32_t size)
2950 {
2951 	struct vega20_hwmgr *data =
2952 			(struct vega20_hwmgr *)(hwmgr->backend);
2953 	struct vega20_od8_single_setting *od8_settings =
2954 			data->od8_settings.od8_settings_array;
2955 	OverDriveTable_t *od_table =
2956 			&(data->smc_state_table.overdrive_table);
2957 	int32_t input_clk, input_vol, i;
2958 	uint32_t input_index;
2959 	int od8_id;
2960 	int ret;
2961 
2962 	PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
2963 				return -EINVAL);
2964 
2965 	switch (type) {
2966 	case PP_OD_EDIT_SCLK_VDDC_TABLE:
2967 		if (!(od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
2968 		      od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id)) {
2969 			pr_info("Sclk min/max frequency overdrive not supported\n");
2970 			return -EOPNOTSUPP;
2971 		}
2972 
2973 		for (i = 0; i < size; i += 2) {
2974 			if (i + 2 > size) {
2975 				pr_info("invalid number of input parameters %d\n",
2976 					size);
2977 				return -EINVAL;
2978 			}
2979 
2980 			input_index = input[i];
2981 			input_clk = input[i + 1];
2982 
2983 			if (input_index != 0 && input_index != 1) {
2984 				pr_info("Invalid index %d\n", input_index);
2985 				pr_info("Only min/max sclk frequency settings, indexed by 0/1, are supported\n");
2986 				return -EINVAL;
2987 			}
2988 
2989 			if (input_clk < od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value ||
2990 			    input_clk > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value) {
2991 				pr_info("clock freq %d is not within allowed range [%d - %d]\n",
2992 					input_clk,
2993 					od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
2994 					od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
2995 				return -EINVAL;
2996 			}
2997 
2998 			if ((input_index == 0 && od_table->GfxclkFmin != input_clk) ||
2999 			    (input_index == 1 && od_table->GfxclkFmax != input_clk))
3000 				data->gfxclk_overdrive = true;
3001 
3002 			if (input_index == 0)
3003 				od_table->GfxclkFmin = input_clk;
3004 			else
3005 				od_table->GfxclkFmax = input_clk;
3006 		}
3007 
3008 		break;
3009 
3010 	case PP_OD_EDIT_MCLK_VDDC_TABLE:
3011 		if (!od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
3012 			pr_info("Mclk max frequency overdrive not supported\n");
3013 			return -EOPNOTSUPP;
3014 		}
3015 
3016 		for (i = 0; i < size; i += 2) {
3017 			if (i + 2 > size) {
3018 				pr_info("invalid number of input parameters %d\n",
3019 					size);
3020 				return -EINVAL;
3021 			}
3022 
3023 			input_index = input[i];
3024 			input_clk = input[i + 1];
3025 
3026 			if (input_index != 1) {
3027 				pr_info("Invalid index %d\n", input_index);
3028 				pr_info("Only the max mclk frequency setting, indexed by 1, is supported\n");
3029 				return -EINVAL;
3030 			}
3031 
3032 			if (input_clk < od8_settings[OD8_SETTING_UCLK_FMAX].min_value ||
3033 			    input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) {
3034 				pr_info("clock freq %d is not within allowed range [%d - %d]\n",
3035 					input_clk,
3036 					od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
3037 					od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
3038 				return -EINVAL;
3039 			}
3040 
3041 			if (input_index == 1 && od_table->UclkFmax != input_clk)
3042 				data->memclk_overdrive = true;
3043 
3044 			od_table->UclkFmax = input_clk;
3045 		}
3046 
3047 		break;
3048 
3049 	case PP_OD_EDIT_VDDC_CURVE:
3050 		if (!(od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
3051 		    od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
3052 		    od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
3053 		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
3054 		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
3055 		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id)) {
3056 			pr_info("Voltage curve calibration not supported\n");
3057 			return -EOPNOTSUPP;
3058 		}
3059 
3060 		for (i = 0; i < size; i += 3) {
3061 			if (i + 3 > size) {
3062 				pr_info("invalid number of input parameters %d\n",
3063 					size);
3064 				return -EINVAL;
3065 			}
3066 
3067 			input_index = input[i];
3068 			input_clk = input[i + 1];
3069 			input_vol = input[i + 2];
3070 
3071 			if (input_index > 2) {
3072 				pr_info("Setting for point %d is not supported\n",
3073 						input_index + 1);
3074 				pr_info("Three points are supported, indexed by 0, 1, 2\n");
3075 				return -EINVAL;
3076 			}
3077 
3078 			od8_id = OD8_SETTING_GFXCLK_FREQ1 + 2 * input_index;
3079 			if (input_clk < od8_settings[od8_id].min_value ||
3080 			    input_clk > od8_settings[od8_id].max_value) {
3081 				pr_info("clock freq %d is not within allowed range [%d - %d]\n",
3082 					input_clk,
3083 					od8_settings[od8_id].min_value,
3084 					od8_settings[od8_id].max_value);
3085 				return -EINVAL;
3086 			}
3087 
3088 			od8_id = OD8_SETTING_GFXCLK_VOLTAGE1 + 2 * input_index;
3089 			if (input_vol < od8_settings[od8_id].min_value ||
3090 			    input_vol > od8_settings[od8_id].max_value) {
3091 				pr_info("clock voltage %d is not within allowed range [%d - %d]\n",
3092 					input_vol,
3093 					od8_settings[od8_id].min_value,
3094 					od8_settings[od8_id].max_value);
3095 				return -EINVAL;
3096 			}
3097 
3098 			switch (input_index) {
3099 			case 0:
3100 				od_table->GfxclkFreq1 = input_clk;
3101 				od_table->GfxclkVolt1 = input_vol * VOLTAGE_SCALE;
3102 				break;
3103 			case 1:
3104 				od_table->GfxclkFreq2 = input_clk;
3105 				od_table->GfxclkVolt2 = input_vol * VOLTAGE_SCALE;
3106 				break;
3107 			case 2:
3108 				od_table->GfxclkFreq3 = input_clk;
3109 				od_table->GfxclkVolt3 = input_vol * VOLTAGE_SCALE;
3110 				break;
3111 			}
3112 		}
3113 		break;
3114 
3115 	case PP_OD_RESTORE_DEFAULT_TABLE:
3116 		data->gfxclk_overdrive = false;
3117 		data->memclk_overdrive = false;
3118 
3119 		ret = smum_smc_table_manager(hwmgr,
3120 					     (uint8_t *)od_table,
3121 					     TABLE_OVERDRIVE, true);
3122 		PP_ASSERT_WITH_CODE(!ret,
3123 				"Failed to export overdrive table!",
3124 				return ret);
3125 		break;
3126 
3127 	case PP_OD_COMMIT_DPM_TABLE:
3128 		ret = smum_smc_table_manager(hwmgr,
3129 					     (uint8_t *)od_table,
3130 					     TABLE_OVERDRIVE, false);
3131 		PP_ASSERT_WITH_CODE(!ret,
3132 				"Failed to import overdrive table!",
3133 				return ret);
3134 
3135 		/* retrieve updated gfxclk table */
3136 		if (data->gfxclk_overdrive) {
3137 			data->gfxclk_overdrive = false;
3138 
3139 			ret = vega20_setup_gfxclk_dpm_table(hwmgr);
3140 			if (ret)
3141 				return ret;
3142 		}
3143 
3144 		/* retrieve updated memclk table */
3145 		if (data->memclk_overdrive) {
3146 			data->memclk_overdrive = false;
3147 
3148 			ret = vega20_setup_memclk_dpm_table(hwmgr);
3149 			if (ret)
3150 				return ret;
3151 		}
3152 		break;
3153 
3154 	default:
3155 		return -EINVAL;
3156 	}
3157 
3158 	return 0;
3159 }
3160 
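/*
 * Tell the SMU (MP1) to prepare for a shutdown, driver unload or reset by
 * sending the matching PPSMC message; PP_MP1_STATE_NONE needs no preparation.
 */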
3161 static int vega20_set_mp1_state(struct pp_hwmgr *hwmgr,
3162 				enum pp_mp1_state mp1_state)
3163 {
3164 	uint16_t msg;
3165 	int ret;
3166 
3167 	switch (mp1_state) {
3168 	case PP_MP1_STATE_SHUTDOWN:
3169 		msg = PPSMC_MSG_PrepareMp1ForShutdown;
3170 		break;
3171 	case PP_MP1_STATE_UNLOAD:
3172 		msg = PPSMC_MSG_PrepareMp1ForUnload;
3173 		break;
3174 	case PP_MP1_STATE_RESET:
3175 		msg = PPSMC_MSG_PrepareMp1ForReset;
3176 		break;
3177 	case PP_MP1_STATE_NONE:
3178 	default:
3179 		return 0;
3180 	}
3181 
3182 	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
3183 			    "[PrepareMp1] Failed!",
3184 			    return ret);
3185 
3186 	return 0;
3187 }
3188 
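/*
 * Report the SMU feature enablement state through sysfs: print the raw
 * 64-bit feature mask, then one line per feature with its bit and a Y/N flag.
 */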
3189 static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
3190 {
3191 	static const char *ppfeature_name[] = {
3192 				"DPM_PREFETCHER",
3193 				"GFXCLK_DPM",
3194 				"UCLK_DPM",
3195 				"SOCCLK_DPM",
3196 				"UVD_DPM",
3197 				"VCE_DPM",
3198 				"ULV",
3199 				"MP0CLK_DPM",
3200 				"LINK_DPM",
3201 				"DCEFCLK_DPM",
3202 				"GFXCLK_DS",
3203 				"SOCCLK_DS",
3204 				"LCLK_DS",
3205 				"PPT",
3206 				"TDC",
3207 				"THERMAL",
3208 				"GFX_PER_CU_CG",
3209 				"RM",
3210 				"DCEFCLK_DS",
3211 				"ACDC",
3212 				"VR0HOT",
3213 				"VR1HOT",
3214 				"FW_CTF",
3215 				"LED_DISPLAY",
3216 				"FAN_CONTROL",
3217 				"GFX_EDC",
3218 				"GFXOFF",
3219 				"CG",
3220 				"FCLK_DPM",
3221 				"FCLK_DS",
3222 				"MP1CLK_DS",
3223 				"MP0CLK_DS",
3224 				"XGMI",
3225 				"ECC"};
3226 	static const char *output_title[] = {
3227 				"FEATURES",
3228 				"BITMASK",
3229 				"ENABLEMENT"};
3230 	uint64_t features_enabled;
3231 	int i;
3232 	int ret = 0;
3233 	int size = 0;
3234 
3235 	phm_get_sysfs_buf(&buf, &size);
3236 
3237 	ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
3238 	PP_ASSERT_WITH_CODE(!ret,
3239 			"[GetPpfeatureStatus] Failed to get enabled smc features!",
3240 			return ret);
3241 
3242 	size += sysfs_emit_at(buf, size, "Current ppfeatures: 0x%016llx\n", features_enabled);
3243 	size += sysfs_emit_at(buf, size, "%-19s %-22s %s\n",
3244 				output_title[0],
3245 				output_title[1],
3246 				output_title[2]);
3247 	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
3248 		size += sysfs_emit_at(buf, size, "%-19s 0x%016llx %6s\n",
3249 					ppfeature_name[i],
3250 					1ULL << i,
3251 					(features_enabled & (1ULL << i)) ? "Y" : "N");
3252 	}
3253 
3254 	return size;
3255 }
3256 
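/*
 * Apply a user requested feature mask: disable features that are enabled but
 * cleared in the mask, enable features that are set but currently disabled,
 * then refresh the cached per-feature enablement state.
 */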
3257 static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
3258 {
3259 	struct vega20_hwmgr *data =
3260 			(struct vega20_hwmgr *)(hwmgr->backend);
3261 	uint64_t features_enabled, features_to_enable, features_to_disable;
3262 	int i, ret = 0;
3263 	bool enabled;
3264 
3265 	if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
3266 		return -EINVAL;
3267 
3268 	ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
3269 	if (ret)
3270 		return ret;
3271 
3272 	features_to_disable =
3273 		features_enabled & ~new_ppfeature_masks;
3274 	features_to_enable =
3275 		~features_enabled & new_ppfeature_masks;
3276 
3277 	pr_debug("features_to_disable 0x%llx\n", features_to_disable);
3278 	pr_debug("features_to_enable 0x%llx\n", features_to_enable);
3279 
3280 	if (features_to_disable) {
3281 		ret = vega20_enable_smc_features(hwmgr, false, features_to_disable);
3282 		if (ret)
3283 			return ret;
3284 	}
3285 
3286 	if (features_to_enable) {
3287 		ret = vega20_enable_smc_features(hwmgr, true, features_to_enable);
3288 		if (ret)
3289 			return ret;
3290 	}
3291 
3292 	/* Update the cached feature enablement state */
3293 	ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
3294 	if (ret)
3295 		return ret;
3296 
3297 	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
3298 		enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
3299 			true : false;
3300 		data->smu_features[i].enabled = enabled;
3301 	}
3302 
3303 	return 0;
3304 }
3305 
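/* Read the current PCIe link width level from the LC_LINK_WIDTH_CNTL register */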
3306 static int vega20_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
3307 {
3308 	struct amdgpu_device *adev = hwmgr->adev;
3309 
3310 	return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
3311 		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
3312 		>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
3313 }
3314 
3315 static int vega20_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
3316 {
3317 	uint32_t width_level;
3318 
3319 	width_level = vega20_get_current_pcie_link_width_level(hwmgr);
3320 	if (width_level > LINK_WIDTH_MAX)
3321 		width_level = 0;
3322 
3323 	return link_width[width_level];
3324 }
3325 
3326 static int vega20_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
3327 {
3328 	struct amdgpu_device *adev = hwmgr->adev;
3329 
3330 	return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
3331 		PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
3332 		>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
3333 }
3334 
3335 static int vega20_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
3336 {
3337 	uint32_t speed_level;
3338 
3339 	speed_level = vega20_get_current_pcie_link_speed_level(hwmgr);
3340 	if (speed_level > LINK_SPEED_MAX)
3341 		speed_level = 0;
3342 
3343 	return link_speed[speed_level];
3344 }
3345 
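/*
 * Print the DPM levels, or the overdrive settings/ranges, of the requested
 * clock type into the sysfs buffer, marking the current level with '*'.
 */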
3346 static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
3347 		enum pp_clock_type type, char *buf)
3348 {
3349 	struct vega20_hwmgr *data =
3350 			(struct vega20_hwmgr *)(hwmgr->backend);
3351 	struct vega20_od8_single_setting *od8_settings =
3352 			data->od8_settings.od8_settings_array;
3353 	OverDriveTable_t *od_table =
3354 			&(data->smc_state_table.overdrive_table);
3355 	PPTable_t *pptable = &(data->smc_state_table.pp_table);
3356 	struct pp_clock_levels_with_latency clocks;
3357 	struct vega20_single_dpm_table *fclk_dpm_table =
3358 			&(data->dpm_table.fclk_table);
3359 	int i, now, size = 0;
3360 	int ret = 0;
3361 	uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
3362 
3363 	switch (type) {
3364 	case PP_SCLK:
3365 		ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now);
3366 		PP_ASSERT_WITH_CODE(!ret,
3367 				"Attempt to get current gfx clk freq failed!",
3368 				return ret);
3369 
3370 		if (vega20_get_sclks(hwmgr, &clocks)) {
3371 			size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
3372 				now / 100);
3373 			break;
3374 		}
3375 
3376 		for (i = 0; i < clocks.num_levels; i++)
3377 			size += sprintf(buf + size, "%d: %uMhz %s\n",
3378 				i, clocks.data[i].clocks_in_khz / 1000,
3379 				(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
3380 		break;
3381 
3382 	case PP_MCLK:
3383 		ret = vega20_get_current_clk_freq(hwmgr, PPCLK_UCLK, &now);
3384 		PP_ASSERT_WITH_CODE(!ret,
3385 				"Attempt to get current mclk freq failed!",
3386 				return ret);
3387 
3388 		if (vega20_get_memclocks(hwmgr, &clocks)) {
3389 			size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
3390 				now / 100);
3391 			break;
3392 		}
3393 
3394 		for (i = 0; i < clocks.num_levels; i++)
3395 			size += sprintf(buf + size, "%d: %uMhz %s\n",
3396 				i, clocks.data[i].clocks_in_khz / 1000,
3397 				(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
3398 		break;
3399 
3400 	case PP_SOCCLK:
3401 		ret = vega20_get_current_clk_freq(hwmgr, PPCLK_SOCCLK, &now);
3402 		PP_ASSERT_WITH_CODE(!ret,
3403 				"Attempt to get current socclk freq failed!",
3404 				return ret);
3405 
3406 		if (vega20_get_socclocks(hwmgr, &clocks)) {
3407 			size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
3408 				now / 100);
3409 			break;
3410 		}
3411 
3412 		for (i = 0; i < clocks.num_levels; i++)
3413 			size += sprintf(buf + size, "%d: %uMhz %s\n",
3414 				i, clocks.data[i].clocks_in_khz / 1000,
3415 				(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
3416 		break;
3417 
3418 	case PP_FCLK:
3419 		ret = vega20_get_current_clk_freq(hwmgr, PPCLK_FCLK, &now);
3420 		PP_ASSERT_WITH_CODE(!ret,
3421 				"Attempt to get current fclk freq failed!",
3422 				return ret);
3423 
3424 		for (i = 0; i < fclk_dpm_table->count; i++)
3425 			size += sprintf(buf + size, "%d: %uMhz %s\n",
3426 				i, fclk_dpm_table->dpm_levels[i].value,
3427 				fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : "");
3428 		break;
3429 
3430 	case PP_DCEFCLK:
3431 		ret = vega20_get_current_clk_freq(hwmgr, PPCLK_DCEFCLK, &now);
3432 		PP_ASSERT_WITH_CODE(!ret,
3433 				"Attempt to get current dcefclk freq failed!",
3434 				return ret);
3435 
3436 		if (vega20_get_dcefclocks(hwmgr, &clocks)) {
3437 			size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
3438 				now / 100);
3439 			break;
3440 		}
3441 
3442 		for (i = 0; i < clocks.num_levels; i++)
3443 			size += sprintf(buf + size, "%d: %uMhz %s\n",
3444 				i, clocks.data[i].clocks_in_khz / 1000,
3445 				(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
3446 		break;
3447 
3448 	case PP_PCIE:
3449 		current_gen_speed =
3450 			vega20_get_current_pcie_link_speed_level(hwmgr);
3451 		current_lane_width =
3452 			vega20_get_current_pcie_link_width_level(hwmgr);
3453 		for (i = 0; i < NUM_LINK_LEVELS; i++) {
3454 			gen_speed = pptable->PcieGenSpeed[i];
3455 			lane_width = pptable->PcieLaneCount[i];
3456 
3457 			size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
3458 					(gen_speed == 0) ? "2.5GT/s," :
3459 					(gen_speed == 1) ? "5.0GT/s," :
3460 					(gen_speed == 2) ? "8.0GT/s," :
3461 					(gen_speed == 3) ? "16.0GT/s," : "",
3462 					(lane_width == 1) ? "x1" :
3463 					(lane_width == 2) ? "x2" :
3464 					(lane_width == 3) ? "x4" :
3465 					(lane_width == 4) ? "x8" :
3466 					(lane_width == 5) ? "x12" :
3467 					(lane_width == 6) ? "x16" : "",
3468 					pptable->LclkFreq[i],
3469 					(current_gen_speed == gen_speed) &&
3470 					(current_lane_width == lane_width) ?
3471 					"*" : "");
3472 		}
3473 		break;
3474 
3475 	case OD_SCLK:
3476 		if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
3477 		    od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
3478 			size += sprintf(buf + size, "%s:\n", "OD_SCLK");
3479 			size += sprintf(buf + size, "0: %10uMhz\n",
3480 				od_table->GfxclkFmin);
3481 			size += sprintf(buf + size, "1: %10uMhz\n",
3482 				od_table->GfxclkFmax);
3483 		}
3484 		break;
3485 
3486 	case OD_MCLK:
3487 		if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
3488 			size += sprintf(buf + size, "%s:\n", "OD_MCLK");
3489 			size += sprintf(buf + size, "1: %10uMhz\n",
3490 				od_table->UclkFmax);
3491 		}
3492 
3493 		break;
3494 
3495 	case OD_VDDC_CURVE:
3496 		if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
3497 		    od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
3498 		    od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
3499 		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
3500 		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
3501 		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
3502 			size += sprintf(buf + size, "%s:\n", "OD_VDDC_CURVE");
3503 			size += sprintf(buf + size, "0: %10uMhz %10dmV\n",
3504 				od_table->GfxclkFreq1,
3505 				od_table->GfxclkVolt1 / VOLTAGE_SCALE);
3506 			size += sprintf(buf + size, "1: %10uMhz %10dmV\n",
3507 				od_table->GfxclkFreq2,
3508 				od_table->GfxclkVolt2 / VOLTAGE_SCALE);
3509 			size += sprintf(buf + size, "2: %10uMhz %10dmV\n",
3510 				od_table->GfxclkFreq3,
3511 				od_table->GfxclkVolt3 / VOLTAGE_SCALE);
3512 		}
3513 
3514 		break;
3515 
3516 	case OD_RANGE:
3517 		size += sprintf(buf + size, "%s:\n", "OD_RANGE");
3518 
3519 		if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
3520 		    od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
3521 			size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
3522 				od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
3523 				od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
3524 		}
3525 
3526 		if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
3527 			size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
3528 				od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
3529 				od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
3530 		}
3531 
3532 		if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
3533 		    od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
3534 		    od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
3535 		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
3536 		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
3537 		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
3538 			size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
3539 				od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value,
3540 				od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value);
3541 			size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
3542 				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
3543 				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
3544 			size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
3545 				od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value,
3546 				od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value);
3547 			size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
3548 				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value,
3549 				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value);
3550 			size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
3551 				od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value,
3552 				od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value);
3553 			size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
3554 				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value,
3555 				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value);
3556 		}
3557 
3558 		break;
3559 	default:
3560 		break;
3561 	}
3562 	return size;
3563 }
3564 
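/*
 * Pin UCLK to its highest DPM level by raising the hard min frequency; used
 * in the pre display-config-change path below.
 */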
3565 static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
3566 		struct vega20_single_dpm_table *dpm_table)
3567 {
3568 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3569 	int ret = 0;
3570 
3571 	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
3572 		PP_ASSERT_WITH_CODE(dpm_table->count > 0,
3573 				"[SetUclkToHighestDpmLevel] Dpm table has no entry!",
3574 				return -EINVAL);
3575 		PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS,
3576 				"[SetUclkToHighestDpmLevel] Dpm table has too many entries!",
3577 				return -EINVAL);
3578 
3579 		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3580 		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
3581 				PPSMC_MSG_SetHardMinByFreq,
3582 				(PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level,
3583 				NULL)),
3584 				"[SetUclkToHighestDpmLevel] Set hard min uclk failed!",
3585 				return ret);
3586 	}
3587 
3588 	return ret;
3589 }
3590 
3591 static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr)
3592 {
3593 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3594 	struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.fclk_table);
3595 	int ret = 0;
3596 
3597 	if (data->smu_features[GNLD_DPM_FCLK].enabled) {
3598 		PP_ASSERT_WITH_CODE(dpm_table->count > 0,
3599 				"[SetFclkToHighestDpmLevel] Dpm table has no entry!",
3600 				return -EINVAL);
3601 		PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_FCLK_DPM_LEVELS,
3602 				"[SetFclkToHighestDpmLevel] Dpm table has too many entries!",
3603 				return -EINVAL);
3604 
3605 		dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3606 		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
3607 				PPSMC_MSG_SetSoftMinByFreq,
3608 				(PPCLK_FCLK << 16) | dpm_table->dpm_state.soft_min_level,
3609 				NULL)),
3610 				"[SetFclkToHighestDpmLevel] Set soft min fclk failed!",
3611 				return ret);
3612 	}
3613 
3614 	return ret;
3615 }
3616 
3617 static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
3618 {
3619 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3620 	int ret = 0;
3621 
3622 	smum_send_msg_to_smc_with_parameter(hwmgr,
3623 			PPSMC_MSG_NumOfDisplays, 0, NULL);
3624 
3625 	ret = vega20_set_uclk_to_highest_dpm_level(hwmgr,
3626 			&data->dpm_table.mem_table);
3627 	if (ret)
3628 		return ret;
3629 
3630 	return vega20_set_fclk_to_highest_dpm_level(hwmgr);
3631 }
3632 
3633 static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
3634 {
3635 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3636 	int result = 0;
3637 	Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
3638 
3639 	if ((data->water_marks_bitmap & WaterMarksExist) &&
3640 	    !(data->water_marks_bitmap & WaterMarksLoaded)) {
3641 		result = smum_smc_table_manager(hwmgr,
3642 						(uint8_t *)wm_table, TABLE_WATERMARKS, false);
3643 		PP_ASSERT_WITH_CODE(!result,
3644 				"Failed to update WMTABLE!",
3645 				return result);
3646 		data->water_marks_bitmap |= WaterMarksLoaded;
3647 	}
3648 
3649 	if ((data->water_marks_bitmap & WaterMarksExist) &&
3650 	    data->smu_features[GNLD_DPM_DCEFCLK].supported &&
3651 	    data->smu_features[GNLD_DPM_SOCCLK].supported) {
3652 		result = smum_send_msg_to_smc_with_parameter(hwmgr,
3653 			PPSMC_MSG_NumOfDisplays,
3654 			hwmgr->display_config->num_display,
3655 			NULL);
3656 	}
3657 
3658 	return result;
3659 }
3660 
3661 static int vega20_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
3662 {
3663 	struct vega20_hwmgr *data =
3664 			(struct vega20_hwmgr *)(hwmgr->backend);
3665 	int ret = 0;
3666 
3667 	if (data->smu_features[GNLD_DPM_UVD].supported) {
3668 		if (data->smu_features[GNLD_DPM_UVD].enabled == enable) {
3669 			if (enable)
3670 				PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already enabled!\n");
3671 			else
3672 				PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already disabled!\n");
3673 		}
3674 
3675 		ret = vega20_enable_smc_features(hwmgr,
3676 				enable,
3677 				data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap);
3678 		PP_ASSERT_WITH_CODE(!ret,
3679 				"[EnableDisableUVDDPM] Attempt to Enable/Disable DPM UVD Failed!",
3680 				return ret);
3681 		data->smu_features[GNLD_DPM_UVD].enabled = enable;
3682 	}
3683 
3684 	return 0;
3685 }
3686 
3687 static void vega20_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
3688 {
3689 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3690 
3691 	if (data->vce_power_gated == bgate)
3692 		return;
3693 
3694 	data->vce_power_gated = bgate;
3695 	if (bgate) {
3696 		vega20_enable_disable_vce_dpm(hwmgr, !bgate);
3697 		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
3698 						AMD_IP_BLOCK_TYPE_VCE,
3699 						AMD_PG_STATE_GATE);
3700 	} else {
3701 		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
3702 						AMD_IP_BLOCK_TYPE_VCE,
3703 						AMD_PG_STATE_UNGATE);
3704 		vega20_enable_disable_vce_dpm(hwmgr, !bgate);
3705 	}
3706 
3707 }
3708 
3709 static void vega20_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
3710 {
3711 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3712 
3713 	if (data->uvd_power_gated == bgate)
3714 		return;
3715 
3716 	data->uvd_power_gated = bgate;
3717 	vega20_enable_disable_uvd_dpm(hwmgr, !bgate);
3718 }
3719 
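/*
 * Recompute the soft/hard min/max limits of each DPM table (gfxclk, memclk,
 * fclk, vclk, dclk, socclk, eclk) from the current display configuration,
 * UMD pstate caps and forced DPM level.
 */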
3720 static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
3721 {
3722 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3723 	struct vega20_single_dpm_table *dpm_table;
3724 	bool vblank_too_short = false;
3725 	bool disable_mclk_switching;
3726 	bool disable_fclk_switching;
3727 	uint32_t i, latency;
3728 
3729 	disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
3730 				  !hwmgr->display_config->multi_monitor_in_sync) ||
3731 				  vblank_too_short;
3732 	latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
3733 
3734 	/* gfxclk */
3735 	dpm_table = &(data->dpm_table.gfx_table);
3736 	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3737 	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3738 	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3739 	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3740 
3741 	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3742 		if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
3743 			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
3744 			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
3745 		}
3746 
3747 		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
3748 			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3749 			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
3750 		}
3751 
3752 		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3753 			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3754 			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3755 		}
3756 	}
3757 
3758 	/* memclk */
3759 	dpm_table = &(data->dpm_table.mem_table);
3760 	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3761 	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3762 	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3763 	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3764 
3765 	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3766 		if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
3767 			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
3768 			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
3769 		}
3770 
3771 		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
3772 			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3773 			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
3774 		}
3775 
3776 		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3777 			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3778 			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3779 		}
3780 	}
3781 
3782 	/* honour DAL's UCLK Hardmin */
3783 	if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
3784 		dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;
3785 
3786 	/* Hard min is dependent on the display config */
3787 	if (disable_mclk_switching) {
3788 		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3789 		for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
3790 			if (data->mclk_latency_table.entries[i].latency <= latency) {
3791 				if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
3792 					dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
3793 					break;
3794 				}
3795 			}
3796 		}
3797 	}
3798 
3799 	if (hwmgr->display_config->nb_pstate_switch_disable)
3800 		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3801 
3802 	if ((disable_mclk_switching &&
3803 	    (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) ||
3804 	     hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value)
3805 		disable_fclk_switching = true;
3806 	else
3807 		disable_fclk_switching = false;
3808 
3809 	/* fclk */
3810 	dpm_table = &(data->dpm_table.fclk_table);
3811 	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3812 	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3813 	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3814 	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3815 	if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching)
3816 		dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3817 
3818 	/* vclk */
3819 	dpm_table = &(data->dpm_table.vclk_table);
3820 	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3821 	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3822 	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3823 	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3824 
3825 	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3826 		if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
3827 			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
3828 			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
3829 		}
3830 
3831 		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3832 			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3833 			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3834 		}
3835 	}
3836 
3837 	/* dclk */
3838 	dpm_table = &(data->dpm_table.dclk_table);
3839 	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3840 	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3841 	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3842 	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3843 
3844 	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3845 		if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
3846 			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
3847 			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
3848 		}
3849 
3850 		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3851 			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3852 			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3853 		}
3854 	}
3855 
3856 	/* socclk */
3857 	dpm_table = &(data->dpm_table.soc_table);
3858 	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3859 	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3860 	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3861 	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3862 
3863 	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3864 		if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
3865 			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
3866 			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
3867 		}
3868 
3869 		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3870 			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3871 			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3872 		}
3873 	}
3874 
3875 	/* eclk */
3876 	dpm_table = &(data->dpm_table.eclk_table);
3877 	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3878 	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3879 	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3880 	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3881 
3882 	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3883 		if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
3884 			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
3885 			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
3886 		}
3887 
3888 		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3889 			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3890 			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3891 		}
3892 	}
3893 
3894 	return 0;
3895 }
3896 
3897 static bool
3898 vega20_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
3899 {
3900 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3901 	bool is_update_required = false;
3902 
3903 	if (data->display_timing.num_existing_displays !=
3904 			hwmgr->display_config->num_display)
3905 		is_update_required = true;
3906 
3907 	if (data->registry_data.gfx_clk_deep_sleep_support &&
3908 	   (data->display_timing.min_clock_in_sr !=
3909 	    hwmgr->display_config->min_core_set_clock_in_sr))
3910 		is_update_required = true;
3911 
3912 	return is_update_required;
3913 }
3914 
3915 static int vega20_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
3916 {
3917 	int ret = 0;
3918 
3919 	ret = vega20_disable_all_smu_features(hwmgr);
3920 	PP_ASSERT_WITH_CODE(!ret,
3921 			"[DisableDpmTasks] Failed to disable all smu features!",
3922 			return ret);
3923 
3924 	return 0;
3925 }
3926 
3927 static int vega20_power_off_asic(struct pp_hwmgr *hwmgr)
3928 {
3929 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3930 	int result;
3931 
3932 	result = vega20_disable_dpm_tasks(hwmgr);
3933 	PP_ASSERT_WITH_CODE((0 == result),
3934 			"[PowerOffAsic] Failed to disable DPM!",
3935 			);
3936 	data->water_marks_bitmap &= ~(WaterMarksLoaded);
3937 
3938 	return result;
3939 }
3940 
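/* Map a PP_SMC_POWER_PROFILE_* index to the matching WORKLOAD_PPLIB_*_BIT */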
3941 static int conv_power_profile_to_pplib_workload(int power_profile)
3942 {
3943 	int pplib_workload = 0;
3944 
3945 	switch (power_profile) {
3946 	case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
3947 		pplib_workload = WORKLOAD_DEFAULT_BIT;
3948 		break;
3949 	case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
3950 		pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
3951 		break;
3952 	case PP_SMC_POWER_PROFILE_POWERSAVING:
3953 		pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
3954 		break;
3955 	case PP_SMC_POWER_PROFILE_VIDEO:
3956 		pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
3957 		break;
3958 	case PP_SMC_POWER_PROFILE_VR:
3959 		pplib_workload = WORKLOAD_PPLIB_VR_BIT;
3960 		break;
3961 	case PP_SMC_POWER_PROFILE_COMPUTE:
3962 		pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
3963 		break;
3964 	case PP_SMC_POWER_PROFILE_CUSTOM:
3965 		pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
3966 		break;
3967 	}
3968 
3969 	return pplib_workload;
3970 }
3971 
3972 static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
3973 {
3974 	DpmActivityMonitorCoeffInt_t activity_monitor;
3975 	uint32_t i, size = 0;
3976 	uint16_t workload_type = 0;
3977 	static const char *title[] = {
3978 			"PROFILE_INDEX(NAME)",
3979 			"CLOCK_TYPE(NAME)",
3980 			"FPS",
3981 			"UseRlcBusy",
3982 			"MinActiveFreqType",
3983 			"MinActiveFreq",
3984 			"BoosterFreqType",
3985 			"BoosterFreq",
3986 			"PD_Data_limit_c",
3987 			"PD_Data_error_coeff",
3988 			"PD_Data_error_rate_coeff"};
3989 	int result = 0;
3990 
3991 	if (!buf)
3992 		return -EINVAL;
3993 
3994 	phm_get_sysfs_buf(&buf, &size);
3995 
3996 	size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
3997 			title[0], title[1], title[2], title[3], title[4], title[5],
3998 			title[6], title[7], title[8], title[9], title[10]);
3999 
4000 	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
4001 		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
4002 		workload_type = conv_power_profile_to_pplib_workload(i);
4003 		result = vega20_get_activity_monitor_coeff(hwmgr,
4004 				(uint8_t *)(&activity_monitor), workload_type);
4005 		PP_ASSERT_WITH_CODE(!result,
4006 				"[GetPowerProfile] Failed to get activity monitor!",
4007 				return result);
4008 
4009 		size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
4010 			i, amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ");
4011 
4012 		size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
4013 			" ",
4014 			0,
4015 			"GFXCLK",
4016 			activity_monitor.Gfx_FPS,
4017 			activity_monitor.Gfx_UseRlcBusy,
4018 			activity_monitor.Gfx_MinActiveFreqType,
4019 			activity_monitor.Gfx_MinActiveFreq,
4020 			activity_monitor.Gfx_BoosterFreqType,
4021 			activity_monitor.Gfx_BoosterFreq,
4022 			activity_monitor.Gfx_PD_Data_limit_c,
4023 			activity_monitor.Gfx_PD_Data_error_coeff,
4024 			activity_monitor.Gfx_PD_Data_error_rate_coeff);
4025 
4026 		size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
4027 			" ",
4028 			1,
4029 			"SOCCLK",
4030 			activity_monitor.Soc_FPS,
4031 			activity_monitor.Soc_UseRlcBusy,
4032 			activity_monitor.Soc_MinActiveFreqType,
4033 			activity_monitor.Soc_MinActiveFreq,
4034 			activity_monitor.Soc_BoosterFreqType,
4035 			activity_monitor.Soc_BoosterFreq,
4036 			activity_monitor.Soc_PD_Data_limit_c,
4037 			activity_monitor.Soc_PD_Data_error_coeff,
4038 			activity_monitor.Soc_PD_Data_error_rate_coeff);
4039 
4040 		size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
4041 			" ",
4042 			2,
4043 			"UCLK",
4044 			activity_monitor.Mem_FPS,
4045 			activity_monitor.Mem_UseRlcBusy,
4046 			activity_monitor.Mem_MinActiveFreqType,
4047 			activity_monitor.Mem_MinActiveFreq,
4048 			activity_monitor.Mem_BoosterFreqType,
4049 			activity_monitor.Mem_BoosterFreq,
4050 			activity_monitor.Mem_PD_Data_limit_c,
4051 			activity_monitor.Mem_PD_Data_error_coeff,
4052 			activity_monitor.Mem_PD_Data_error_rate_coeff);
4053 
4054 		size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
4055 			" ",
4056 			3,
4057 			"FCLK",
4058 			activity_monitor.Fclk_FPS,
4059 			activity_monitor.Fclk_UseRlcBusy,
4060 			activity_monitor.Fclk_MinActiveFreqType,
4061 			activity_monitor.Fclk_MinActiveFreq,
4062 			activity_monitor.Fclk_BoosterFreqType,
4063 			activity_monitor.Fclk_BoosterFreq,
4064 			activity_monitor.Fclk_PD_Data_limit_c,
4065 			activity_monitor.Fclk_PD_Data_error_coeff,
4066 			activity_monitor.Fclk_PD_Data_error_rate_coeff);
4067 	}
4068 
4069 	return size;
4070 }
4071 
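/*
 * Select a power profile: validate the requested mode, program a custom
 * activity monitor table for the CUSTOM profile if new coefficients were
 * passed in, then send the workload mask to the SMU and cache the mode.
 */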
4072 static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
4073 {
4074 	DpmActivityMonitorCoeffInt_t activity_monitor;
4075 	int workload_type, result = 0;
4076 	uint32_t power_profile_mode = input[size];
4077 
4078 	if (power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
4079 		pr_err("Invalid power profile mode %u\n", power_profile_mode);
4080 		return -EINVAL;
4081 	}
4082 
4083 	if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
4084 		struct vega20_hwmgr *data =
4085 			(struct vega20_hwmgr *)(hwmgr->backend);
4086 		if (size == 0 && !data->is_custom_profile_set)
4087 			return -EINVAL;
4088 		if (size < 10 && size != 0)
4089 			return -EINVAL;
4090 
4091 		result = vega20_get_activity_monitor_coeff(hwmgr,
4092 				(uint8_t *)(&activity_monitor),
4093 				WORKLOAD_PPLIB_CUSTOM_BIT);
4094 		PP_ASSERT_WITH_CODE(!result,
4095 				"[SetPowerProfile] Failed to get activity monitor!",
4096 				return result);
4097 
4098 		/* If size==0, then we want to apply the already-configured
4099 		 * CUSTOM profile again. Just apply it, since we checked its
4100 		 * validity above
4101 		 */
4102 		if (size == 0)
4103 			goto out;
4104 
4105 		switch (input[0]) {
4106 		case 0: /* Gfxclk */
4107 			activity_monitor.Gfx_FPS = input[1];
4108 			activity_monitor.Gfx_UseRlcBusy = input[2];
4109 			activity_monitor.Gfx_MinActiveFreqType = input[3];
4110 			activity_monitor.Gfx_MinActiveFreq = input[4];
4111 			activity_monitor.Gfx_BoosterFreqType = input[5];
4112 			activity_monitor.Gfx_BoosterFreq = input[6];
4113 			activity_monitor.Gfx_PD_Data_limit_c = input[7];
4114 			activity_monitor.Gfx_PD_Data_error_coeff = input[8];
4115 			activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
4116 			break;
4117 		case 1: /* Socclk */
4118 			activity_monitor.Soc_FPS = input[1];
4119 			activity_monitor.Soc_UseRlcBusy = input[2];
4120 			activity_monitor.Soc_MinActiveFreqType = input[3];
4121 			activity_monitor.Soc_MinActiveFreq = input[4];
4122 			activity_monitor.Soc_BoosterFreqType = input[5];
4123 			activity_monitor.Soc_BoosterFreq = input[6];
4124 			activity_monitor.Soc_PD_Data_limit_c = input[7];
4125 			activity_monitor.Soc_PD_Data_error_coeff = input[8];
4126 			activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
4127 			break;
4128 		case 2: /* Uclk */
4129 			activity_monitor.Mem_FPS = input[1];
4130 			activity_monitor.Mem_UseRlcBusy = input[2];
4131 			activity_monitor.Mem_MinActiveFreqType = input[3];
4132 			activity_monitor.Mem_MinActiveFreq = input[4];
4133 			activity_monitor.Mem_BoosterFreqType = input[5];
4134 			activity_monitor.Mem_BoosterFreq = input[6];
4135 			activity_monitor.Mem_PD_Data_limit_c = input[7];
4136 			activity_monitor.Mem_PD_Data_error_coeff = input[8];
4137 			activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
4138 			break;
4139 		case 3: /* Fclk */
4140 			activity_monitor.Fclk_FPS = input[1];
4141 			activity_monitor.Fclk_UseRlcBusy = input[2];
4142 			activity_monitor.Fclk_MinActiveFreqType = input[3];
4143 			activity_monitor.Fclk_MinActiveFreq = input[4];
4144 			activity_monitor.Fclk_BoosterFreqType = input[5];
4145 			activity_monitor.Fclk_BoosterFreq = input[6];
4146 			activity_monitor.Fclk_PD_Data_limit_c = input[7];
4147 			activity_monitor.Fclk_PD_Data_error_coeff = input[8];
4148 			activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
4149 			break;
4150 		}
4151 
4152 		result = vega20_set_activity_monitor_coeff(hwmgr,
4153 				(uint8_t *)(&activity_monitor),
4154 				WORKLOAD_PPLIB_CUSTOM_BIT);
4155 		data->is_custom_profile_set = true;
4156 		PP_ASSERT_WITH_CODE(!result,
4157 				"[SetPowerProfile] Failed to set activity monitor!",
4158 				return result);
4159 	}
4160 
4161 out:
4162 	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
4163 	workload_type =
4164 		conv_power_profile_to_pplib_workload(power_profile_mode);
4165 	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
4166 						1 << workload_type,
4167 						NULL);
4168 
4169 	hwmgr->power_profile_mode = power_profile_mode;
4170 
4171 	return 0;
4172 }
4173 
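/*
 * Pass the virtual and MC addresses and the size of the CAC/DRAM log buffer
 * to the SMU via the VirtualDramAddr and DramLog messages.
 */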
4174 static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
4175 					uint32_t virtual_addr_low,
4176 					uint32_t virtual_addr_hi,
4177 					uint32_t mc_addr_low,
4178 					uint32_t mc_addr_hi,
4179 					uint32_t size)
4180 {
4181 	smum_send_msg_to_smc_with_parameter(hwmgr,
4182 					PPSMC_MSG_SetSystemVirtualDramAddrHigh,
4183 					virtual_addr_hi,
4184 					NULL);
4185 	smum_send_msg_to_smc_with_parameter(hwmgr,
4186 					PPSMC_MSG_SetSystemVirtualDramAddrLow,
4187 					virtual_addr_low,
4188 					NULL);
4189 	smum_send_msg_to_smc_with_parameter(hwmgr,
4190 					PPSMC_MSG_DramLogSetDramAddrHigh,
4191 					mc_addr_hi,
4192 					NULL);
4193 
4194 	smum_send_msg_to_smc_with_parameter(hwmgr,
4195 					PPSMC_MSG_DramLogSetDramAddrLow,
4196 					mc_addr_low,
4197 					NULL);
4198 
4199 	smum_send_msg_to_smc_with_parameter(hwmgr,
4200 					PPSMC_MSG_DramLogSetDramSize,
4201 					size,
4202 					NULL);
4203 	return 0;
4204 }
4205 
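/*
 * Fill in the thermal trip points (edge, hotspot and HBM limits plus the
 * software CTF threshold) from the SMC pptable, scaled by
 * PP_TEMPERATURE_UNITS_PER_CENTIGRADES.
 */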
4206 static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
4207 		struct PP_TemperatureRange *thermal_data)
4208 {
4209 	struct phm_ppt_v3_information *pptable_information =
4210 		(struct phm_ppt_v3_information *)hwmgr->pptable;
4211 	struct vega20_hwmgr *data =
4212 			(struct vega20_hwmgr *)(hwmgr->backend);
4213 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
4214 
4215 	memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
4216 
4217 	thermal_data->max = pp_table->TedgeLimit *
4218 		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4219 	thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) *
4220 		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4221 	thermal_data->hotspot_crit_max = pp_table->ThotspotLimit *
4222 		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4223 	thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
4224 		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4225 	thermal_data->mem_crit_max = pp_table->ThbmLimit *
4226 		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4227 	thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
4228 		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4229 	thermal_data->sw_ctf_threshold = pptable_information->us_software_shutdown_temp *
4230 		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4231 
4232 	return 0;
4233 }
4234 
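/* Request or release SMU side arbitration of the I2C bus */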
4235 static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire)
4236 {
4237 	int res;
4238 
4239 	/* I2C bus access can happen very early, when the SMU is not loaded yet */
4240 	if (!vega20_is_smc_ram_running(hwmgr))
4241 		return 0;
4242 
4243 	res = smum_send_msg_to_smc_with_parameter(hwmgr,
4244 						  (acquire ?
4245 						  PPSMC_MSG_RequestI2CBus :
4246 						  PPSMC_MSG_ReleaseI2CBus),
4247 						  0,
4248 						  NULL);
4249 
4250 	PP_ASSERT_WITH_CODE(!res, "[SmuI2CAccessBus] Failed to access bus!", return res);
4251 	return res;
4252 }
4253 
4254 static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr,
4255 				enum pp_df_cstate state)
4256 {
4257 	int ret;
4258 
4259 	/* PPSMC_MSG_DFCstateControl is supported by SMC firmware 40.50 and later */
4260 	if (hwmgr->smu_version < 0x283200) {
4261 		pr_err("DF cstate control is only supported with SMC firmware 40.50 and later!\n");
4262 		return -EINVAL;
4263 	}
4264 
4265 	ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state,
4266 				NULL);
4267 	if (ret)
4268 		pr_err("SetDfCstate failed!\n");
4269 
4270 	return ret;
4271 }
4272 
4273 static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr,
4274 				  uint32_t pstate)
4275 {
4276 	int ret;
4277 
4278 	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
4279 						  PPSMC_MSG_SetXgmiMode,
4280 						  pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
4281 						  NULL);
4282 	if (ret)
4283 		pr_err("SetXgmiPstate failed!\n");
4284 
4285 	return ret;
4286 }
4287 
4288 static void vega20_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics)
4289 {
4290 	memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0));
4291 
4292 	gpu_metrics->common_header.structure_size =
4293 				sizeof(struct gpu_metrics_v1_0);
4294 	gpu_metrics->common_header.format_revision = 1;
4295 	gpu_metrics->common_header.content_revision = 0;
4296 
4297 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
4298 }
4299 
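/*
 * Build a gpu_metrics_v1_0 snapshot (temperatures, activity, power, clocks,
 * throttler status, fan speed, PCIe link state) from the SMU metrics table
 * and return a pointer to the cached structure.
 */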
4300 static ssize_t vega20_get_gpu_metrics(struct pp_hwmgr *hwmgr,
4301 				      void **table)
4302 {
4303 	struct vega20_hwmgr *data =
4304 			(struct vega20_hwmgr *)(hwmgr->backend);
4305 	struct gpu_metrics_v1_0 *gpu_metrics =
4306 			&data->gpu_metrics_table;
4307 	SmuMetrics_t metrics;
4308 	uint32_t fan_speed_rpm;
4309 	int ret;
4310 
4311 	ret = vega20_get_metrics_table(hwmgr, &metrics, true);
4312 	if (ret)
4313 		return ret;
4314 
4315 	vega20_init_gpu_metrics_v1_0(gpu_metrics);
4316 
4317 	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
4318 	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
4319 	gpu_metrics->temperature_mem = metrics.TemperatureHBM;
4320 	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
4321 	gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
4322 	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
4323 
4324 	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
4325 	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
4326 
4327 	gpu_metrics->average_socket_power = metrics.AverageSocketPower;
4328 
4329 	gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
4330 	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
4331 	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
4332 
4333 	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
4334 	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
4335 	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
4336 	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
4337 	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
4338 
4339 	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
4340 
4341 	vega20_fan_ctrl_get_fan_speed_rpm(hwmgr, &fan_speed_rpm);
4342 	gpu_metrics->current_fan_speed = (uint16_t)fan_speed_rpm;
4343 
4344 	gpu_metrics->pcie_link_width =
4345 			vega20_get_current_pcie_link_width(hwmgr);
4346 	gpu_metrics->pcie_link_speed =
4347 			vega20_get_current_pcie_link_speed(hwmgr);
4348 
4349 	*table = (void *)gpu_metrics;
4350 
4351 	return sizeof(struct gpu_metrics_v1_0);
4352 }
4353 
4354 static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
4355 	/* init/fini related */
4356 	.backend_init = vega20_hwmgr_backend_init,
4357 	.backend_fini = vega20_hwmgr_backend_fini,
4358 	.asic_setup = vega20_setup_asic_task,
4359 	.power_off_asic = vega20_power_off_asic,
4360 	.dynamic_state_management_enable = vega20_enable_dpm_tasks,
4361 	.dynamic_state_management_disable = vega20_disable_dpm_tasks,
4362 	/* power state related */
4363 	.apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules,
4364 	.pre_display_config_changed = vega20_pre_display_configuration_changed_task,
4365 	.display_config_changed = vega20_display_configuration_changed_task,
4366 	.check_smc_update_required_for_display_configuration =
4367 		vega20_check_smc_update_required_for_display_configuration,
4368 	.notify_smc_display_config_after_ps_adjustment =
4369 		vega20_notify_smc_display_config_after_ps_adjustment,
4370 	/* export to DAL */
4371 	.get_sclk = vega20_dpm_get_sclk,
4372 	.get_mclk = vega20_dpm_get_mclk,
4373 	.get_dal_power_level = vega20_get_dal_power_level,
4374 	.get_clock_by_type_with_latency = vega20_get_clock_by_type_with_latency,
4375 	.get_clock_by_type_with_voltage = vega20_get_clock_by_type_with_voltage,
4376 	.set_watermarks_for_clocks_ranges = vega20_set_watermarks_for_clocks_ranges,
4377 	.display_clock_voltage_request = vega20_display_clock_voltage_request,
4378 	.get_performance_level = vega20_get_performance_level,
4379 	/* UMD pstate, profile related */
4380 	.force_dpm_level = vega20_dpm_force_dpm_level,
4381 	.get_power_profile_mode = vega20_get_power_profile_mode,
4382 	.set_power_profile_mode = vega20_set_power_profile_mode,
4383 	/* od related */
4384 	.set_power_limit = vega20_set_power_limit,
4385 	.get_sclk_od = vega20_get_sclk_od,
4386 	.set_sclk_od = vega20_set_sclk_od,
4387 	.get_mclk_od = vega20_get_mclk_od,
4388 	.set_mclk_od = vega20_set_mclk_od,
4389 	.odn_edit_dpm_table = vega20_odn_edit_dpm_table,
4390 	/* for sysfs to retrieve/set gfxclk/memclk */
4391 	.force_clock_level = vega20_force_clock_level,
4392 	.print_clock_levels = vega20_print_clock_levels,
4393 	.read_sensor = vega20_read_sensor,
4394 	.get_ppfeature_status = vega20_get_ppfeature_status,
4395 	.set_ppfeature_status = vega20_set_ppfeature_status,
4396 	/* powergate related */
4397 	.powergate_uvd = vega20_power_gate_uvd,
4398 	.powergate_vce = vega20_power_gate_vce,
4399 	/* thermal related */
4400 	.start_thermal_controller = vega20_start_thermal_controller,
4401 	.stop_thermal_controller = vega20_thermal_stop_thermal_controller,
4402 	.get_thermal_temperature_range = vega20_get_thermal_temperature_range,
4403 	.register_irq_handlers = smu9_register_irq_handlers,
4404 	.disable_smc_firmware_ctf = vega20_thermal_disable_alert,
4405 	/* fan control related */
4406 	.get_fan_speed_pwm = vega20_fan_ctrl_get_fan_speed_pwm,
4407 	.set_fan_speed_pwm = vega20_fan_ctrl_set_fan_speed_pwm,
4408 	.get_fan_speed_info = vega20_fan_ctrl_get_fan_speed_info,
4409 	.get_fan_speed_rpm = vega20_fan_ctrl_get_fan_speed_rpm,
4410 	.set_fan_speed_rpm = vega20_fan_ctrl_set_fan_speed_rpm,
4411 	.get_fan_control_mode = vega20_get_fan_control_mode,
4412 	.set_fan_control_mode = vega20_set_fan_control_mode,
4413 	/* smu memory related */
4414 	.notify_cac_buffer_info = vega20_notify_cac_buffer_info,
4415 	.enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost,
4416 	/* BACO related */
4417 	.get_asic_baco_capability = vega20_baco_get_capability,
4418 	.get_asic_baco_state = vega20_baco_get_state,
4419 	.set_asic_baco_state = vega20_baco_set_state,
4420 	.set_mp1_state = vega20_set_mp1_state,
4421 	.smu_i2c_bus_access = vega20_smu_i2c_bus_access,
4422 	.set_df_cstate = vega20_set_df_cstate,
4423 	.set_xgmi_pstate = vega20_set_xgmi_pstate,
4424 	.get_gpu_metrics = vega20_get_gpu_metrics,
4425 };
4426 
4427 int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
4428 {
4429 	hwmgr->hwmgr_func = &vega20_hwmgr_funcs;
4430 	hwmgr->pptable_func = &vega20_pptable_funcs;
4431 
4432 	return 0;
4433 }
4434