1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #define SWSMU_CODE_LAYER_L2
25 
26 #include <linux/firmware.h>
27 #include <linux/pci.h>
28 #include <linux/i2c.h>
29 #include "amdgpu.h"
30 #include "amdgpu_smu.h"
31 #include "atomfirmware.h"
32 #include "amdgpu_atomfirmware.h"
33 #include "amdgpu_atombios.h"
34 #include "soc15_common.h"
35 #include "smu_v11_0.h"
36 #include "smu11_driver_if_navi10.h"
37 #include "atom.h"
38 #include "navi10_ppt.h"
39 #include "smu_v11_0_pptable.h"
40 #include "smu_v11_0_ppsmc.h"
41 #include "nbio/nbio_2_3_offset.h"
42 #include "nbio/nbio_2_3_sh_mask.h"
43 #include "thm/thm_11_0_2_offset.h"
44 #include "thm/thm_11_0_2_sh_mask.h"
45 
46 #include "asic_reg/mp/mp_11_0_sh_mask.h"
47 #include "smu_cmn.h"
48 #include "smu_11_0_cdr_table.h"
49 
50 /*
51  * DO NOT use these for err/warn/info/debug messages.
52  * Use dev_err, dev_warn, dev_info and dev_dbg instead.
53  * They are more MGPU friendly.
54  */
55 #undef pr_err
56 #undef pr_warn
57 #undef pr_info
58 #undef pr_debug
59 
60 #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
61 
62 #define FEATURE_MASK(feature) (1ULL << feature)
63 #define SMC_DPM_FEATURE ( \
64 	FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
65 	FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)	 | \
66 	FEATURE_MASK(FEATURE_DPM_GFX_PACE_BIT)	 | \
67 	FEATURE_MASK(FEATURE_DPM_UCLK_BIT)	 | \
68 	FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)	 | \
69 	FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)	 | \
70 	FEATURE_MASK(FEATURE_DPM_LINK_BIT)	 | \
71 	FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
72 
73 #define SMU_11_0_GFX_BUSY_THRESHOLD 15
74 
/*
 * Driver SMU message -> PPSMC firmware message-index mapping.
 * The third MSG_MAP argument flags messages still permitted when the
 * ASIC runs under SR-IOV virtualization (1 = valid in VF mode) --
 * NOTE(review): flag semantics inferred from MSG_MAP usage; confirm
 * against the macro definition in smu_cmn.h.
 */
static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,			PPSMC_MSG_TestMessage,			1),
	MSG_MAP(GetSmuVersion,			PPSMC_MSG_GetSmuVersion,		1),
	MSG_MAP(GetDriverIfVersion,		PPSMC_MSG_GetDriverIfVersion,		1),
	MSG_MAP(SetAllowedFeaturesMaskLow,	PPSMC_MSG_SetAllowedFeaturesMaskLow,	0),
	MSG_MAP(SetAllowedFeaturesMaskHigh,	PPSMC_MSG_SetAllowedFeaturesMaskHigh,	0),
	MSG_MAP(EnableAllSmuFeatures,		PPSMC_MSG_EnableAllSmuFeatures,		0),
	MSG_MAP(DisableAllSmuFeatures,		PPSMC_MSG_DisableAllSmuFeatures,	0),
	MSG_MAP(EnableSmuFeaturesLow,		PPSMC_MSG_EnableSmuFeaturesLow,		1),
	MSG_MAP(EnableSmuFeaturesHigh,		PPSMC_MSG_EnableSmuFeaturesHigh,	1),
	MSG_MAP(DisableSmuFeaturesLow,		PPSMC_MSG_DisableSmuFeaturesLow,	1),
	MSG_MAP(DisableSmuFeaturesHigh,		PPSMC_MSG_DisableSmuFeaturesHigh,	1),
	MSG_MAP(GetEnabledSmuFeaturesLow,	PPSMC_MSG_GetEnabledSmuFeaturesLow,	1),
	MSG_MAP(GetEnabledSmuFeaturesHigh,	PPSMC_MSG_GetEnabledSmuFeaturesHigh,	1),
	MSG_MAP(SetWorkloadMask,		PPSMC_MSG_SetWorkloadMask,		1),
	MSG_MAP(SetPptLimit,			PPSMC_MSG_SetPptLimit,			0),
	MSG_MAP(SetDriverDramAddrHigh,		PPSMC_MSG_SetDriverDramAddrHigh,	0),
	MSG_MAP(SetDriverDramAddrLow,		PPSMC_MSG_SetDriverDramAddrLow,		0),
	MSG_MAP(SetToolsDramAddrHigh,		PPSMC_MSG_SetToolsDramAddrHigh,		0),
	MSG_MAP(SetToolsDramAddrLow,		PPSMC_MSG_SetToolsDramAddrLow,		0),
	MSG_MAP(TransferTableSmu2Dram,		PPSMC_MSG_TransferTableSmu2Dram,	0),
	MSG_MAP(TransferTableDram2Smu,		PPSMC_MSG_TransferTableDram2Smu,	0),
	MSG_MAP(UseDefaultPPTable,		PPSMC_MSG_UseDefaultPPTable,		0),
	MSG_MAP(UseBackupPPTable,		PPSMC_MSG_UseBackupPPTable,		0),
	MSG_MAP(RunBtc,				PPSMC_MSG_RunBtc,			0),
	MSG_MAP(EnterBaco,			PPSMC_MSG_EnterBaco,			0),
	MSG_MAP(SetSoftMinByFreq,		PPSMC_MSG_SetSoftMinByFreq,		0),
	MSG_MAP(SetSoftMaxByFreq,		PPSMC_MSG_SetSoftMaxByFreq,		0),
	MSG_MAP(SetHardMinByFreq,		PPSMC_MSG_SetHardMinByFreq,		1),
	MSG_MAP(SetHardMaxByFreq,		PPSMC_MSG_SetHardMaxByFreq,		0),
	MSG_MAP(GetMinDpmFreq,			PPSMC_MSG_GetMinDpmFreq,		1),
	MSG_MAP(GetMaxDpmFreq,			PPSMC_MSG_GetMaxDpmFreq,		1),
	MSG_MAP(GetDpmFreqByIndex,		PPSMC_MSG_GetDpmFreqByIndex,		1),
	MSG_MAP(SetMemoryChannelConfig,		PPSMC_MSG_SetMemoryChannelConfig,	0),
	MSG_MAP(SetGeminiMode,			PPSMC_MSG_SetGeminiMode,		0),
	MSG_MAP(SetGeminiApertureHigh,		PPSMC_MSG_SetGeminiApertureHigh,	0),
	MSG_MAP(SetGeminiApertureLow,		PPSMC_MSG_SetGeminiApertureLow,		0),
	MSG_MAP(OverridePcieParameters,		PPSMC_MSG_OverridePcieParameters,	0),
	MSG_MAP(SetMinDeepSleepDcefclk,		PPSMC_MSG_SetMinDeepSleepDcefclk,	0),
	MSG_MAP(ReenableAcDcInterrupt,		PPSMC_MSG_ReenableAcDcInterrupt,	0),
	MSG_MAP(NotifyPowerSource,		PPSMC_MSG_NotifyPowerSource,		0),
	MSG_MAP(SetUclkFastSwitch,		PPSMC_MSG_SetUclkFastSwitch,		0),
	MSG_MAP(SetVideoFps,			PPSMC_MSG_SetVideoFps,			0),
	MSG_MAP(PrepareMp1ForUnload,		PPSMC_MSG_PrepareMp1ForUnload,		1),
	MSG_MAP(DramLogSetDramAddrHigh,		PPSMC_MSG_DramLogSetDramAddrHigh,	0),
	MSG_MAP(DramLogSetDramAddrLow,		PPSMC_MSG_DramLogSetDramAddrLow,	0),
	MSG_MAP(DramLogSetDramSize,		PPSMC_MSG_DramLogSetDramSize,		0),
	MSG_MAP(ConfigureGfxDidt,		PPSMC_MSG_ConfigureGfxDidt,		0),
	MSG_MAP(NumOfDisplays,			PPSMC_MSG_NumOfDisplays,		0),
	MSG_MAP(SetSystemVirtualDramAddrHigh,	PPSMC_MSG_SetSystemVirtualDramAddrHigh,	0),
	MSG_MAP(SetSystemVirtualDramAddrLow,	PPSMC_MSG_SetSystemVirtualDramAddrLow,	0),
	MSG_MAP(AllowGfxOff,			PPSMC_MSG_AllowGfxOff,			0),
	MSG_MAP(DisallowGfxOff,			PPSMC_MSG_DisallowGfxOff,		0),
	MSG_MAP(GetPptLimit,			PPSMC_MSG_GetPptLimit,			0),
	MSG_MAP(GetDcModeMaxDpmFreq,		PPSMC_MSG_GetDcModeMaxDpmFreq,		1),
	MSG_MAP(GetDebugData,			PPSMC_MSG_GetDebugData,			0),
	MSG_MAP(ExitBaco,			PPSMC_MSG_ExitBaco,			0),
	MSG_MAP(PrepareMp1ForReset,		PPSMC_MSG_PrepareMp1ForReset,		0),
	MSG_MAP(PrepareMp1ForShutdown,		PPSMC_MSG_PrepareMp1ForShutdown,	0),
	MSG_MAP(PowerUpVcn,			PPSMC_MSG_PowerUpVcn,			0),
	MSG_MAP(PowerDownVcn,			PPSMC_MSG_PowerDownVcn,			0),
	MSG_MAP(PowerUpJpeg,			PPSMC_MSG_PowerUpJpeg,			0),
	MSG_MAP(PowerDownJpeg,			PPSMC_MSG_PowerDownJpeg,		0),
	MSG_MAP(BacoAudioD3PME,			PPSMC_MSG_BacoAudioD3PME,		0),
	MSG_MAP(ArmD3,				PPSMC_MSG_ArmD3,			0),
	MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE,PPSMC_MSG_DALDisableDummyPstateChange,	0),
	MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE,	PPSMC_MSG_DALEnableDummyPstateChange,	0),
	MSG_MAP(GetVoltageByDpm,		PPSMC_MSG_GetVoltageByDpm,		0),
	MSG_MAP(GetVoltageByDpmOverdrive,	PPSMC_MSG_GetVoltageByDpmOverdrive,	0),
	MSG_MAP(SetMGpuFanBoostLimitRpm,	PPSMC_MSG_SetMGpuFanBoostLimitRpm,	0),
	MSG_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH, PPSMC_MSG_SetDriverDummyTableDramAddrHigh, 0),
	MSG_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_LOW, PPSMC_MSG_SetDriverDummyTableDramAddrLow, 0),
	MSG_MAP(GET_UMC_FW_WA,			PPSMC_MSG_GetUMCFWWA,			0),
};
149 
/*
 * Generic SMU clock id -> Navi1x PPCLK index mapping.
 * Several generic clocks intentionally alias one firmware domain:
 * SCLK -> GFXCLK, MCLK -> UCLK, and FCLK -> SOCCLK (Navi1x exposes no
 * independent FCLK DPM domain to the driver).
 */
static struct cmn2asic_mapping navi10_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(GFXCLK, PPCLK_GFXCLK),
	CLK_MAP(SCLK,	PPCLK_GFXCLK),
	CLK_MAP(SOCCLK, PPCLK_SOCCLK),
	CLK_MAP(FCLK, PPCLK_SOCCLK),
	CLK_MAP(UCLK, PPCLK_UCLK),
	CLK_MAP(MCLK, PPCLK_UCLK),
	CLK_MAP(DCLK, PPCLK_DCLK),
	CLK_MAP(VCLK, PPCLK_VCLK),
	CLK_MAP(DCEFCLK, PPCLK_DCEFCLK),
	CLK_MAP(DISPCLK, PPCLK_DISPCLK),
	CLK_MAP(PIXCLK, PPCLK_PIXCLK),
	CLK_MAP(PHYCLK, PPCLK_PHYCLK),
};
164 
/*
 * Generic SMU feature id -> Navi1x firmware feature bit mapping.
 * FEA_MAP(X) pairs SMU_FEATURE_X_BIT with the ASIC's FEATURE_X_BIT;
 * bits listed here can be queried/toggled through the common feature
 * enable/disable messages.
 */
static struct cmn2asic_mapping navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(DPM_PREFETCHER),
	FEA_MAP(DPM_GFXCLK),
	FEA_MAP(DPM_GFX_PACE),
	FEA_MAP(DPM_UCLK),
	FEA_MAP(DPM_SOCCLK),
	FEA_MAP(DPM_MP0CLK),
	FEA_MAP(DPM_LINK),
	FEA_MAP(DPM_DCEFCLK),
	FEA_MAP(MEM_VDDCI_SCALING),
	FEA_MAP(MEM_MVDD_SCALING),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_DCEFCLK),
	FEA_MAP(DS_UCLK),
	FEA_MAP(GFX_ULV),
	FEA_MAP(FW_DSTATE),
	FEA_MAP(GFXOFF),
	FEA_MAP(BACO),
	FEA_MAP(VCN_PG),
	FEA_MAP(JPEG_PG),
	FEA_MAP(USB_PG),
	FEA_MAP(RSMU_SMN_CG),
	FEA_MAP(PPT),
	FEA_MAP(TDC),
	FEA_MAP(GFX_EDC),
	FEA_MAP(APCC_PLUS),
	FEA_MAP(GTHR),
	FEA_MAP(ACDC),
	FEA_MAP(VR0HOT),
	FEA_MAP(VR1HOT),
	FEA_MAP(FW_CTF),
	FEA_MAP(FAN_CONTROL),
	FEA_MAP(THERMAL),
	FEA_MAP(GFX_DCS),
	FEA_MAP(RM),
	FEA_MAP(LED_DISPLAY),
	FEA_MAP(GFX_SS),
	FEA_MAP(OUT_OF_BAND_MONITOR),
	FEA_MAP(TEMP_DEPENDENT_VMIN),
	FEA_MAP(MMHUB_PG),
	FEA_MAP(ATHUB_PG),
	FEA_MAP(APCC_DFLL),
};
210 
/*
 * Generic SMU table id -> Navi1x firmware table id mapping, used when
 * transferring tables between driver DRAM and SMU memory.
 */
static struct cmn2asic_mapping navi10_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(PPTABLE),
	TAB_MAP(WATERMARKS),
	TAB_MAP(AVFS),
	TAB_MAP(AVFS_PSM_DEBUG),
	TAB_MAP(AVFS_FUSE_OVERRIDE),
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(DRIVER_SMU_CONFIG),
	TAB_MAP(ACTIVITY_MONITOR_COEFF),
	TAB_MAP(OVERDRIVE),
	TAB_MAP(I2C_COMMANDS),
	TAB_MAP(PACE),
};
225 
/* Power-source (AC adapter vs. battery/DC) id mapping for NotifyPowerSource. */
static struct cmn2asic_mapping navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
	PWR_MAP(AC),
	PWR_MAP(DC),
};
230 
/*
 * Power-profile mode -> firmware workload bit mapping, consumed by
 * SetWorkloadMask when the user selects a power profile.
 */
static struct cmn2asic_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT,	WORKLOAD_PPLIB_DEFAULT_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,		WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,		WORKLOAD_PPLIB_POWER_SAVING_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,		WORKLOAD_PPLIB_VIDEO_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,			WORKLOAD_PPLIB_VR_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,		WORKLOAD_PPLIB_COMPUTE_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,		WORKLOAD_PPLIB_CUSTOM_BIT),
};
240 
241 static bool is_asic_secure(struct smu_context *smu)
242 {
243 	struct amdgpu_device *adev = smu->adev;
244 	bool is_secure = true;
245 	uint32_t mp0_fw_intf;
246 
247 	mp0_fw_intf = RREG32_PCIE(MP0_Public |
248 				   (smnMP0_FW_INTF & 0xffffffff));
249 
250 	if (!(mp0_fw_intf & (1 << 19)))
251 		is_secure = false;
252 
253 	return is_secure;
254 }
255 
/*
 * Build the 64-bit mask of SMU features the driver allows the firmware
 * to enable, based on pp_feature module options, powergating flags and
 * board quirks.
 *
 * @feature_mask: output buffer of @num 32-bit words (callers pass 2;
 *                the mask is written through a uint64_t pointer, so the
 *                buffer must hold two words and be suitably aligned).
 * @num:          number of 32-bit words in @feature_mask (max 2).
 *
 * Returns 0 on success, -EINVAL if @num is larger than 2.
 */
static int
navi10_get_allowed_feature_mask(struct smu_context *smu,
				  uint32_t *feature_mask, uint32_t num)
{
	struct amdgpu_device *adev = smu->adev;

	if (num > 2)
		return -EINVAL;

	memset(feature_mask, 0, sizeof(uint32_t) * num);

	/* Features that are always permitted, independent of user options */
	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT)
				| FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)
				| FEATURE_MASK(FEATURE_RSMU_SMN_CG_BIT)
				| FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
				| FEATURE_MASK(FEATURE_PPT_BIT)
				| FEATURE_MASK(FEATURE_TDC_BIT)
				| FEATURE_MASK(FEATURE_GFX_EDC_BIT)
				| FEATURE_MASK(FEATURE_APCC_PLUS_BIT)
				| FEATURE_MASK(FEATURE_VR0HOT_BIT)
				| FEATURE_MASK(FEATURE_FAN_CONTROL_BIT)
				| FEATURE_MASK(FEATURE_THERMAL_BIT)
				| FEATURE_MASK(FEATURE_LED_DISPLAY_BIT)
				| FEATURE_MASK(FEATURE_DS_LCLK_BIT)
				| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
				| FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
				| FEATURE_MASK(FEATURE_BACO_BIT)
				| FEATURE_MASK(FEATURE_GFX_SS_BIT)
				| FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
				| FEATURE_MASK(FEATURE_FW_CTF_BIT)
				| FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);

	/* DPM/powersaving features gated by the pp_feature module parameter */
	if (adev->pm.pp_feature & PP_SCLK_DPM_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);

	if (adev->pm.pp_feature & PP_PCIE_DPM_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);

	if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT);

	if (adev->pm.pp_feature & PP_ULV_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);

	if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);

	if (adev->pm.pp_feature & PP_GFXOFF_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);

	/* Powergating features gated by the ASIC's pg_flags */
	if (smu->adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MMHUB_PG_BIT);

	if (smu->adev->pg_flags & AMD_PG_SUPPORT_ATHUB)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT);

	if (smu->adev->pg_flags & AMD_PG_SUPPORT_VCN)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT);

	if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT);

	/* AC/DC switching handled in hardware via GPIO (see check_powerplay_table) */
	if (smu->dc_controlled_by_gpio)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);

	if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);

	/* DPM UCLK enablement should be skipped for navi10 A0 secure board */
	if (!(is_asic_secure(smu) &&
	     (adev->asic_type == CHIP_NAVI10) &&
	     (adev->rev_id == 0)) &&
	    (adev->pm.pp_feature & PP_MCLK_DPM_MASK))
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
				| FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
				| FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);

	/* DS SOCCLK enablement should be skipped for navi10 A0 secure board */
	if (is_asic_secure(smu) &&
	    (adev->asic_type == CHIP_NAVI10) &&
	    (adev->rev_id == 0))
		*(uint64_t *)feature_mask &=
				~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);

	return 0;
}
342 
343 static void navi10_check_bxco_support(struct smu_context *smu)
344 {
345 	struct smu_table_context *table_context = &smu->smu_table;
346 	struct smu_11_0_powerplay_table *powerplay_table =
347 		table_context->power_play_table;
348 	struct smu_baco_context *smu_baco = &smu->smu_baco;
349 	struct amdgpu_device *adev = smu->adev;
350 	uint32_t val;
351 
352 	if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
353 	    powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO) {
354 		val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
355 		smu_baco->platform_support =
356 			(val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
357 									false;
358 	}
359 }
360 
361 static int navi10_check_powerplay_table(struct smu_context *smu)
362 {
363 	struct smu_table_context *table_context = &smu->smu_table;
364 	struct smu_11_0_powerplay_table *powerplay_table =
365 		table_context->power_play_table;
366 
367 	if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC)
368 		smu->dc_controlled_by_gpio = true;
369 
370 	navi10_check_bxco_support(smu);
371 
372 	table_context->thermal_controller_type =
373 		powerplay_table->thermal_controller_type;
374 
375 	/*
376 	 * Instead of having its own buffer space and get overdrive_table copied,
377 	 * smu->od_settings just points to the actual overdrive_table
378 	 */
379 	smu->od_settings = &powerplay_table->overdrive_table;
380 
381 	return 0;
382 }
383 
/*
 * Merge board-specific DPM data from the atombios smc_dpm_info table
 * into the driver PPTable_t, overwriting everything from the
 * I2cControllers member onward.
 *
 * The smc_dpm_info table must be format revision 4; content revision
 * selects the layout: 5 for navi10/navi14, 7 for navi12.
 *
 * Returns 0 on success, -EINVAL for an unsupported table revision, or
 * the error from amdgpu_atombios_get_data_table().
 */
static int navi10_append_powerplay_table(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	struct atom_smc_dpm_info_v4_5 *smc_dpm_table;
	struct atom_smc_dpm_info_v4_7 *smc_dpm_table_v4_7;
	int index, ret;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					   smc_dpm_info);

	ret = amdgpu_atombios_get_data_table(adev, index, NULL, NULL, NULL,
				      (uint8_t **)&smc_dpm_table);
	if (ret)
		return ret;

	dev_info(adev->dev, "smc_dpm_info table revision(format.content): %d.%d\n",
			smc_dpm_table->table_header.format_revision,
			smc_dpm_table->table_header.content_revision);

	if (smc_dpm_table->table_header.format_revision != 4) {
		dev_err(adev->dev, "smc_dpm_info table format revision is not 4!\n");
		return -EINVAL;
	}

	switch (smc_dpm_table->table_header.content_revision) {
	case 5: /* nv10 and nv14 */
		/* Copy everything after the header (I2cControllers is the first
		 * post-header member, so it sets both source and destination offset). */
		memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
			sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header));
		break;
	case 7: /* nv12 */
		/* Re-fetch through the v4_7 layout before copying */
		ret = amdgpu_atombios_get_data_table(adev, index, NULL, NULL, NULL,
					      (uint8_t **)&smc_dpm_table_v4_7);
		if (ret)
			return ret;
		memcpy(smc_pptable->I2cControllers, smc_dpm_table_v4_7->I2cControllers,
			sizeof(*smc_dpm_table_v4_7) - sizeof(smc_dpm_table_v4_7->table_header));
		break;
	default:
		dev_err(smu->adev->dev, "smc_dpm_info with unsupported content revision %d!\n",
				smc_dpm_table->table_header.content_revision);
		return -EINVAL;
	}

	if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
		/* TODO: remove it once SMU fw fix it */
		smc_pptable->DebugOverrides |= DPM_OVERRIDE_DISABLE_DFLL_PLL_SHUTDOWN;
	}

	return 0;
}
436 
437 static int navi10_store_powerplay_table(struct smu_context *smu)
438 {
439 	struct smu_table_context *table_context = &smu->smu_table;
440 	struct smu_11_0_powerplay_table *powerplay_table =
441 		table_context->power_play_table;
442 
443 	memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
444 	       sizeof(PPTable_t));
445 
446 	return 0;
447 }
448 
449 static int navi10_set_mp1_state(struct smu_context *smu,
450 				enum pp_mp1_state mp1_state)
451 {
452 	struct amdgpu_device *adev = smu->adev;
453 	uint32_t mp1_fw_flags;
454 	int ret = 0;
455 
456 	ret = smu_cmn_set_mp1_state(smu, mp1_state);
457 	if (ret)
458 		return ret;
459 
460 	if (mp1_state == PP_MP1_STATE_UNLOAD) {
461 		mp1_fw_flags = RREG32_PCIE(MP1_Public |
462 					   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
463 
464 		mp1_fw_flags &= ~MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK;
465 
466 		WREG32_PCIE(MP1_Public |
467 			    (smnMP1_FIRMWARE_FLAGS & 0xffffffff), mp1_fw_flags);
468 	}
469 
470 	return 0;
471 }
472 
/*
 * Full pptable bring-up sequence: fetch the powerplay table, copy its
 * embedded SMC pptable, merge the atombios board data, then consume
 * platform-level settings.  Stops at the first failing step.
 */
static int navi10_setup_pptable(struct smu_context *smu)
{
	int ret;

	ret = smu_v11_0_setup_pptable(smu);
	if (ret)
		return ret;

	ret = navi10_store_powerplay_table(smu);
	if (ret)
		return ret;

	ret = navi10_append_powerplay_table(smu);
	if (ret)
		return ret;

	return navi10_check_powerplay_table(smu);
}
495 
496 static int navi10_tables_init(struct smu_context *smu)
497 {
498 	struct smu_table_context *smu_table = &smu->smu_table;
499 	struct smu_table *tables = smu_table->tables;
500 
501 	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
502 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
503 	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
504 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
505 	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_NV1X_t),
506 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
507 	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
508 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
509 	SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
510 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
511 	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
512 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
513 	SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
514 		       sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
515 		       AMDGPU_GEM_DOMAIN_VRAM);
516 
517 	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_NV1X_t),
518 					   GFP_KERNEL);
519 	if (!smu_table->metrics_table)
520 		goto err0_out;
521 	smu_table->metrics_time = 0;
522 
523 	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
524 	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
525 	if (!smu_table->gpu_metrics_table)
526 		goto err1_out;
527 
528 	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
529 	if (!smu_table->watermarks_table)
530 		goto err2_out;
531 
532 	return 0;
533 
534 err2_out:
535 	kfree(smu_table->gpu_metrics_table);
536 err1_out:
537 	kfree(smu_table->metrics_table);
538 err0_out:
539 	return -ENOMEM;
540 }
541 
/*
 * Fetch one metric from the navi10/navi14 *legacy* SmuMetrics layout
 * (older SMU firmware; see navi1x_get_smu_metrics_data for the version
 * cutoff).
 *
 * @member: which metric to read.
 * @value:  output; set to UINT_MAX for unrecognized members (still
 *          returns 0 in that case).
 *
 * Returns 0 on success or the error from refreshing the metrics table.
 */
static int navi10_get_legacy_smu_metrics_data(struct smu_context *smu,
					      MetricsMember_t member,
					      uint32_t *value)
{
	struct smu_table_context *smu_table= &smu->smu_table;
	SmuMetrics_legacy_t *metrics =
		(SmuMetrics_legacy_t *)smu_table->metrics_table;
	int ret = 0;

	mutex_lock(&smu->metrics_lock);

	/* Refresh the cached metrics table from the SMU if needed */
	ret = smu_cmn_get_metrics_table_locked(smu,
					       NULL,
					       false);
	if (ret) {
		mutex_unlock(&smu->metrics_lock);
		return ret;
	}

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->CurrClock[PPCLK_GFXCLK];
		break;
	case METRICS_CURR_SOCCLK:
		*value = metrics->CurrClock[PPCLK_SOCCLK];
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->CurrClock[PPCLK_UCLK];
		break;
	case METRICS_CURR_VCLK:
		*value = metrics->CurrClock[PPCLK_VCLK];
		break;
	case METRICS_CURR_DCLK:
		*value = metrics->CurrClock[PPCLK_DCLK];
		break;
	case METRICS_CURR_DCEFCLK:
		*value = metrics->CurrClock[PPCLK_DCEFCLK];
		break;
	case METRICS_AVERAGE_GFXCLK:
		*value = metrics->AverageGfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->AverageSocclkFrequency;
		break;
	case METRICS_AVERAGE_UCLK:
		*value = metrics->AverageUclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = metrics->AverageUclkActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* NOTE(review): << 8 looks like a fixed-point (1/256) unit
		 * conversion expected by callers -- confirm against consumers. */
		*value = metrics->AverageSocketPower << 8;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->TemperatureEdge *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->TemperatureHotspot *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = metrics->TemperatureMem *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRGFX:
		*value = metrics->TemperatureVrGfx *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRSOC:
		*value = metrics->TemperatureVrSoc *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->ThrottlerStatus;
		break;
	case METRICS_CURR_FANSPEED:
		*value = metrics->CurrFanSpeed;
		break;
	default:
		/* Unknown member: sentinel value, but not an error */
		*value = UINT_MAX;
		break;
	}

	mutex_unlock(&smu->metrics_lock);

	return ret;
}
633 
/*
 * Fetch one metric from the navi10/navi14 current SmuMetrics layout
 * (newer SMU firmware; see navi1x_get_smu_metrics_data for the version
 * cutoff).  Same contract as the legacy variant: unknown members yield
 * UINT_MAX with a 0 return.
 */
static int navi10_get_smu_metrics_data(struct smu_context *smu,
				       MetricsMember_t member,
				       uint32_t *value)
{
	struct smu_table_context *smu_table= &smu->smu_table;
	SmuMetrics_t *metrics =
		(SmuMetrics_t *)smu_table->metrics_table;
	int ret = 0;

	mutex_lock(&smu->metrics_lock);

	/* Refresh the cached metrics table from the SMU if needed */
	ret = smu_cmn_get_metrics_table_locked(smu,
					       NULL,
					       false);
	if (ret) {
		mutex_unlock(&smu->metrics_lock);
		return ret;
	}

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->CurrClock[PPCLK_GFXCLK];
		break;
	case METRICS_CURR_SOCCLK:
		*value = metrics->CurrClock[PPCLK_SOCCLK];
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->CurrClock[PPCLK_UCLK];
		break;
	case METRICS_CURR_VCLK:
		*value = metrics->CurrClock[PPCLK_VCLK];
		break;
	case METRICS_CURR_DCLK:
		*value = metrics->CurrClock[PPCLK_DCLK];
		break;
	case METRICS_CURR_DCEFCLK:
		*value = metrics->CurrClock[PPCLK_DCEFCLK];
		break;
	case METRICS_AVERAGE_GFXCLK:
		/*
		 * When GFX is mostly idle, report the post-deep-sleep average;
		 * under load (> SMU_11_0_GFX_BUSY_THRESHOLD %), the pre-DS
		 * frequency is the more meaningful number.
		 */
		if (metrics->AverageGfxActivity > SMU_11_0_GFX_BUSY_THRESHOLD)
			*value = metrics->AverageGfxclkFrequencyPreDs;
		else
			*value = metrics->AverageGfxclkFrequencyPostDs;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->AverageSocclkFrequency;
		break;
	case METRICS_AVERAGE_UCLK:
		*value = metrics->AverageUclkFrequencyPostDs;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = metrics->AverageUclkActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* NOTE(review): << 8 looks like a fixed-point (1/256) unit
		 * conversion expected by callers -- confirm against consumers. */
		*value = metrics->AverageSocketPower << 8;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->TemperatureEdge *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->TemperatureHotspot *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = metrics->TemperatureMem *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRGFX:
		*value = metrics->TemperatureVrGfx *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRSOC:
		*value = metrics->TemperatureVrSoc *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->ThrottlerStatus;
		break;
	case METRICS_CURR_FANSPEED:
		*value = metrics->CurrFanSpeed;
		break;
	default:
		/* Unknown member: sentinel value, but not an error */
		*value = UINT_MAX;
		break;
	}

	mutex_unlock(&smu->metrics_lock);

	return ret;
}
728 
/*
 * Fetch one metric from the navi12 *legacy* SmuMetrics layout (SMU fw
 * <= 0x00341C00; see navi1x_get_smu_metrics_data).  Same contract as
 * the navi10 variants: unknown members yield UINT_MAX with a 0 return.
 */
static int navi12_get_legacy_smu_metrics_data(struct smu_context *smu,
					      MetricsMember_t member,
					      uint32_t *value)
{
	struct smu_table_context *smu_table= &smu->smu_table;
	SmuMetrics_NV12_legacy_t *metrics =
		(SmuMetrics_NV12_legacy_t *)smu_table->metrics_table;
	int ret = 0;

	mutex_lock(&smu->metrics_lock);

	/* Refresh the cached metrics table from the SMU if needed */
	ret = smu_cmn_get_metrics_table_locked(smu,
					       NULL,
					       false);
	if (ret) {
		mutex_unlock(&smu->metrics_lock);
		return ret;
	}

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->CurrClock[PPCLK_GFXCLK];
		break;
	case METRICS_CURR_SOCCLK:
		*value = metrics->CurrClock[PPCLK_SOCCLK];
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->CurrClock[PPCLK_UCLK];
		break;
	case METRICS_CURR_VCLK:
		*value = metrics->CurrClock[PPCLK_VCLK];
		break;
	case METRICS_CURR_DCLK:
		*value = metrics->CurrClock[PPCLK_DCLK];
		break;
	case METRICS_CURR_DCEFCLK:
		*value = metrics->CurrClock[PPCLK_DCEFCLK];
		break;
	case METRICS_AVERAGE_GFXCLK:
		*value = metrics->AverageGfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->AverageSocclkFrequency;
		break;
	case METRICS_AVERAGE_UCLK:
		*value = metrics->AverageUclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = metrics->AverageUclkActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* NOTE(review): << 8 looks like a fixed-point (1/256) unit
		 * conversion expected by callers -- confirm against consumers. */
		*value = metrics->AverageSocketPower << 8;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->TemperatureEdge *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->TemperatureHotspot *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = metrics->TemperatureMem *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRGFX:
		*value = metrics->TemperatureVrGfx *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRSOC:
		*value = metrics->TemperatureVrSoc *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->ThrottlerStatus;
		break;
	case METRICS_CURR_FANSPEED:
		*value = metrics->CurrFanSpeed;
		break;
	default:
		/* Unknown member: sentinel value, but not an error */
		*value = UINT_MAX;
		break;
	}

	mutex_unlock(&smu->metrics_lock);

	return ret;
}
820 
/*
 * Fetch one metric from the navi12 current SmuMetrics layout (SMU fw
 * > 0x00341C00; see navi1x_get_smu_metrics_data).  Same contract as the
 * navi10 variants: unknown members yield UINT_MAX with a 0 return.
 */
static int navi12_get_smu_metrics_data(struct smu_context *smu,
				       MetricsMember_t member,
				       uint32_t *value)
{
	struct smu_table_context *smu_table= &smu->smu_table;
	SmuMetrics_NV12_t *metrics =
		(SmuMetrics_NV12_t *)smu_table->metrics_table;
	int ret = 0;

	mutex_lock(&smu->metrics_lock);

	/* Refresh the cached metrics table from the SMU if needed */
	ret = smu_cmn_get_metrics_table_locked(smu,
					       NULL,
					       false);
	if (ret) {
		mutex_unlock(&smu->metrics_lock);
		return ret;
	}

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->CurrClock[PPCLK_GFXCLK];
		break;
	case METRICS_CURR_SOCCLK:
		*value = metrics->CurrClock[PPCLK_SOCCLK];
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->CurrClock[PPCLK_UCLK];
		break;
	case METRICS_CURR_VCLK:
		*value = metrics->CurrClock[PPCLK_VCLK];
		break;
	case METRICS_CURR_DCLK:
		*value = metrics->CurrClock[PPCLK_DCLK];
		break;
	case METRICS_CURR_DCEFCLK:
		*value = metrics->CurrClock[PPCLK_DCEFCLK];
		break;
	case METRICS_AVERAGE_GFXCLK:
		/*
		 * When GFX is mostly idle, report the post-deep-sleep average;
		 * under load (> SMU_11_0_GFX_BUSY_THRESHOLD %), the pre-DS
		 * frequency is the more meaningful number.
		 */
		if (metrics->AverageGfxActivity > SMU_11_0_GFX_BUSY_THRESHOLD)
			*value = metrics->AverageGfxclkFrequencyPreDs;
		else
			*value = metrics->AverageGfxclkFrequencyPostDs;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->AverageSocclkFrequency;
		break;
	case METRICS_AVERAGE_UCLK:
		*value = metrics->AverageUclkFrequencyPostDs;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = metrics->AverageUclkActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* NOTE(review): << 8 looks like a fixed-point (1/256) unit
		 * conversion expected by callers -- confirm against consumers. */
		*value = metrics->AverageSocketPower << 8;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->TemperatureEdge *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->TemperatureHotspot *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = metrics->TemperatureMem *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRGFX:
		*value = metrics->TemperatureVrGfx *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRSOC:
		*value = metrics->TemperatureVrSoc *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->ThrottlerStatus;
		break;
	case METRICS_CURR_FANSPEED:
		*value = metrics->CurrFanSpeed;
		break;
	default:
		/* Unknown member: sentinel value, but not an error */
		*value = UINT_MAX;
		break;
	}

	mutex_unlock(&smu->metrics_lock);

	return ret;
}
915 
916 static int navi1x_get_smu_metrics_data(struct smu_context *smu,
917 				       MetricsMember_t member,
918 				       uint32_t *value)
919 {
920 	struct amdgpu_device *adev = smu->adev;
921 	uint32_t smu_version;
922 	int ret = 0;
923 
924 	ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
925 	if (ret) {
926 		dev_err(adev->dev, "Failed to get smu version!\n");
927 		return ret;
928 	}
929 
930 	switch (adev->asic_type) {
931 	case CHIP_NAVI12:
932 		if (smu_version > 0x00341C00)
933 			ret = navi12_get_smu_metrics_data(smu, member, value);
934 		else
935 			ret = navi12_get_legacy_smu_metrics_data(smu, member, value);
936 		break;
937 	case CHIP_NAVI10:
938 	case CHIP_NAVI14:
939 	default:
940 		if (((adev->asic_type == CHIP_NAVI14) && smu_version > 0x00351F00) ||
941 		      ((adev->asic_type == CHIP_NAVI10) && smu_version > 0x002A3B00))
942 			ret = navi10_get_smu_metrics_data(smu, member, value);
943 		else
944 			ret = navi10_get_legacy_smu_metrics_data(smu, member, value);
945 		break;
946 	}
947 
948 	return ret;
949 }
950 
951 static int navi10_allocate_dpm_context(struct smu_context *smu)
952 {
953 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
954 
955 	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
956 				       GFP_KERNEL);
957 	if (!smu_dpm->dpm_context)
958 		return -ENOMEM;
959 
960 	smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context);
961 
962 	return 0;
963 }
964 
/*
 * Initialize SMC tables: ASIC-specific table metadata first, then the
 * dpm context, then the common smu_v11_0 setup. Stops at the first error.
 */
static int navi10_init_smc_tables(struct smu_context *smu)
{
	int ret;

	ret = navi10_tables_init(smu);
	if (!ret)
		ret = navi10_allocate_dpm_context(smu);
	if (!ret)
		ret = smu_v11_0_init_smc_tables(smu);

	return ret;
}
979 
980 static int navi10_set_default_dpm_table(struct smu_context *smu)
981 {
982 	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
983 	PPTable_t *driver_ppt = smu->smu_table.driver_pptable;
984 	struct smu_11_0_dpm_table *dpm_table;
985 	int ret = 0;
986 
987 	/* socclk dpm table setup */
988 	dpm_table = &dpm_context->dpm_tables.soc_table;
989 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
990 		ret = smu_v11_0_set_single_dpm_table(smu,
991 						     SMU_SOCCLK,
992 						     dpm_table);
993 		if (ret)
994 			return ret;
995 		dpm_table->is_fine_grained =
996 			!driver_ppt->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete;
997 	} else {
998 		dpm_table->count = 1;
999 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
1000 		dpm_table->dpm_levels[0].enabled = true;
1001 		dpm_table->min = dpm_table->dpm_levels[0].value;
1002 		dpm_table->max = dpm_table->dpm_levels[0].value;
1003 	}
1004 
1005 	/* gfxclk dpm table setup */
1006 	dpm_table = &dpm_context->dpm_tables.gfx_table;
1007 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
1008 		ret = smu_v11_0_set_single_dpm_table(smu,
1009 						     SMU_GFXCLK,
1010 						     dpm_table);
1011 		if (ret)
1012 			return ret;
1013 		dpm_table->is_fine_grained =
1014 			!driver_ppt->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete;
1015 	} else {
1016 		dpm_table->count = 1;
1017 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
1018 		dpm_table->dpm_levels[0].enabled = true;
1019 		dpm_table->min = dpm_table->dpm_levels[0].value;
1020 		dpm_table->max = dpm_table->dpm_levels[0].value;
1021 	}
1022 
1023 	/* uclk dpm table setup */
1024 	dpm_table = &dpm_context->dpm_tables.uclk_table;
1025 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
1026 		ret = smu_v11_0_set_single_dpm_table(smu,
1027 						     SMU_UCLK,
1028 						     dpm_table);
1029 		if (ret)
1030 			return ret;
1031 		dpm_table->is_fine_grained =
1032 			!driver_ppt->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete;
1033 	} else {
1034 		dpm_table->count = 1;
1035 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
1036 		dpm_table->dpm_levels[0].enabled = true;
1037 		dpm_table->min = dpm_table->dpm_levels[0].value;
1038 		dpm_table->max = dpm_table->dpm_levels[0].value;
1039 	}
1040 
1041 	/* vclk dpm table setup */
1042 	dpm_table = &dpm_context->dpm_tables.vclk_table;
1043 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
1044 		ret = smu_v11_0_set_single_dpm_table(smu,
1045 						     SMU_VCLK,
1046 						     dpm_table);
1047 		if (ret)
1048 			return ret;
1049 		dpm_table->is_fine_grained =
1050 			!driver_ppt->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete;
1051 	} else {
1052 		dpm_table->count = 1;
1053 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
1054 		dpm_table->dpm_levels[0].enabled = true;
1055 		dpm_table->min = dpm_table->dpm_levels[0].value;
1056 		dpm_table->max = dpm_table->dpm_levels[0].value;
1057 	}
1058 
1059 	/* dclk dpm table setup */
1060 	dpm_table = &dpm_context->dpm_tables.dclk_table;
1061 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
1062 		ret = smu_v11_0_set_single_dpm_table(smu,
1063 						     SMU_DCLK,
1064 						     dpm_table);
1065 		if (ret)
1066 			return ret;
1067 		dpm_table->is_fine_grained =
1068 			!driver_ppt->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete;
1069 	} else {
1070 		dpm_table->count = 1;
1071 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
1072 		dpm_table->dpm_levels[0].enabled = true;
1073 		dpm_table->min = dpm_table->dpm_levels[0].value;
1074 		dpm_table->max = dpm_table->dpm_levels[0].value;
1075 	}
1076 
1077 	/* dcefclk dpm table setup */
1078 	dpm_table = &dpm_context->dpm_tables.dcef_table;
1079 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
1080 		ret = smu_v11_0_set_single_dpm_table(smu,
1081 						     SMU_DCEFCLK,
1082 						     dpm_table);
1083 		if (ret)
1084 			return ret;
1085 		dpm_table->is_fine_grained =
1086 			!driver_ppt->DpmDescriptor[PPCLK_DCEFCLK].SnapToDiscrete;
1087 	} else {
1088 		dpm_table->count = 1;
1089 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
1090 		dpm_table->dpm_levels[0].enabled = true;
1091 		dpm_table->min = dpm_table->dpm_levels[0].value;
1092 		dpm_table->max = dpm_table->dpm_levels[0].value;
1093 	}
1094 
1095 	/* pixelclk dpm table setup */
1096 	dpm_table = &dpm_context->dpm_tables.pixel_table;
1097 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
1098 		ret = smu_v11_0_set_single_dpm_table(smu,
1099 						     SMU_PIXCLK,
1100 						     dpm_table);
1101 		if (ret)
1102 			return ret;
1103 		dpm_table->is_fine_grained =
1104 			!driver_ppt->DpmDescriptor[PPCLK_PIXCLK].SnapToDiscrete;
1105 	} else {
1106 		dpm_table->count = 1;
1107 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
1108 		dpm_table->dpm_levels[0].enabled = true;
1109 		dpm_table->min = dpm_table->dpm_levels[0].value;
1110 		dpm_table->max = dpm_table->dpm_levels[0].value;
1111 	}
1112 
1113 	/* displayclk dpm table setup */
1114 	dpm_table = &dpm_context->dpm_tables.display_table;
1115 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
1116 		ret = smu_v11_0_set_single_dpm_table(smu,
1117 						     SMU_DISPCLK,
1118 						     dpm_table);
1119 		if (ret)
1120 			return ret;
1121 		dpm_table->is_fine_grained =
1122 			!driver_ppt->DpmDescriptor[PPCLK_DISPCLK].SnapToDiscrete;
1123 	} else {
1124 		dpm_table->count = 1;
1125 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
1126 		dpm_table->dpm_levels[0].enabled = true;
1127 		dpm_table->min = dpm_table->dpm_levels[0].value;
1128 		dpm_table->max = dpm_table->dpm_levels[0].value;
1129 	}
1130 
1131 	/* phyclk dpm table setup */
1132 	dpm_table = &dpm_context->dpm_tables.phy_table;
1133 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
1134 		ret = smu_v11_0_set_single_dpm_table(smu,
1135 						     SMU_PHYCLK,
1136 						     dpm_table);
1137 		if (ret)
1138 			return ret;
1139 		dpm_table->is_fine_grained =
1140 			!driver_ppt->DpmDescriptor[PPCLK_PHYCLK].SnapToDiscrete;
1141 	} else {
1142 		dpm_table->count = 1;
1143 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
1144 		dpm_table->dpm_levels[0].enabled = true;
1145 		dpm_table->min = dpm_table->dpm_levels[0].value;
1146 		dpm_table->max = dpm_table->dpm_levels[0].value;
1147 	}
1148 
1149 	return 0;
1150 }
1151 
1152 static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
1153 {
1154 	int ret = 0;
1155 
1156 	if (enable) {
1157 		/* vcn dpm on is a prerequisite for vcn power gate messages */
1158 		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
1159 			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL);
1160 			if (ret)
1161 				return ret;
1162 		}
1163 	} else {
1164 		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
1165 			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
1166 			if (ret)
1167 				return ret;
1168 		}
1169 	}
1170 
1171 	return ret;
1172 }
1173 
1174 static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
1175 {
1176 	int ret = 0;
1177 
1178 	if (enable) {
1179 		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
1180 			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL);
1181 			if (ret)
1182 				return ret;
1183 		}
1184 	} else {
1185 		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
1186 			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
1187 			if (ret)
1188 				return ret;
1189 		}
1190 	}
1191 
1192 	return ret;
1193 }
1194 
1195 static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
1196 				       enum smu_clk_type clk_type,
1197 				       uint32_t *value)
1198 {
1199 	MetricsMember_t member_type;
1200 	int clk_id = 0;
1201 
1202 	clk_id = smu_cmn_to_asic_specific_index(smu,
1203 						CMN2ASIC_MAPPING_CLK,
1204 						clk_type);
1205 	if (clk_id < 0)
1206 		return clk_id;
1207 
1208 	switch (clk_id) {
1209 	case PPCLK_GFXCLK:
1210 		member_type = METRICS_CURR_GFXCLK;
1211 		break;
1212 	case PPCLK_UCLK:
1213 		member_type = METRICS_CURR_UCLK;
1214 		break;
1215 	case PPCLK_SOCCLK:
1216 		member_type = METRICS_CURR_SOCCLK;
1217 		break;
1218 	case PPCLK_VCLK:
1219 		member_type = METRICS_CURR_VCLK;
1220 		break;
1221 	case PPCLK_DCLK:
1222 		member_type = METRICS_CURR_DCLK;
1223 		break;
1224 	case PPCLK_DCEFCLK:
1225 		member_type = METRICS_CURR_DCEFCLK;
1226 		break;
1227 	default:
1228 		return -EINVAL;
1229 	}
1230 
1231 	return navi1x_get_smu_metrics_data(smu,
1232 					   member_type,
1233 					   value);
1234 }
1235 
1236 static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
1237 {
1238 	PPTable_t *pptable = smu->smu_table.driver_pptable;
1239 	DpmDescriptor_t *dpm_desc = NULL;
1240 	uint32_t clk_index = 0;
1241 
1242 	clk_index = smu_cmn_to_asic_specific_index(smu,
1243 						   CMN2ASIC_MAPPING_CLK,
1244 						   clk_type);
1245 	dpm_desc = &pptable->DpmDescriptor[clk_index];
1246 
1247 	/* 0 - Fine grained DPM, 1 - Discrete DPM */
1248 	return dpm_desc->SnapToDiscrete == 0;
1249 }
1250 
1251 static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
1252 {
1253 	return od_table->cap[cap];
1254 }
1255 
1256 static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_table,
1257 					enum SMU_11_0_ODSETTING_ID setting,
1258 					uint32_t *min, uint32_t *max)
1259 {
1260 	if (min)
1261 		*min = od_table->min[setting];
1262 	if (max)
1263 		*max = od_table->max[setting];
1264 }
1265 
/*
 * Format the levels of a clock domain (or OD settings/ranges) into a sysfs
 * buffer. Returns the number of bytes written; on mid-print errors the
 * partially-written size so far is returned rather than an error code.
 */
static int navi10_print_clk_levels(struct smu_context *smu,
			enum smu_clk_type clk_type, char *buf)
{
	uint16_t *curve_settings;
	int i, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	uint32_t freq_values[3] = {0};
	uint32_t mark_index = 0;
	struct smu_table_context *table_context = &smu->smu_table;
	uint32_t gen_speed, lane_width;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
	OverDriveTable_t *od_table =
		(OverDriveTable_t *)table_context->overdrive_table;
	struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
	uint32_t min_value, max_value;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
	case SMU_SOCCLK:
	case SMU_MCLK:
	case SMU_UCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_DCLK:
	case SMU_DCEFCLK:
		ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
		if (ret)
			return size;

		ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count);
		if (ret)
			return size;

		/* Discrete DPM: list every level, star the current one. */
		if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
			for (i = 0; i < count; i++) {
				ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
				if (ret)
					return size;

				size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
						cur_value == value ? "*" : "");
			}
		} else {
			/*
			 * Fine grained DPM: synthesize a three-level view
			 * (min, current, max). When the current frequency
			 * equals an endpoint, the middle slot shows the
			 * midpoint instead and the endpoint gets the star.
			 */
			ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
			if (ret)
				return size;
			ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
			if (ret)
				return size;

			freq_values[1] = cur_value;
			mark_index = cur_value == freq_values[0] ? 0 :
				     cur_value == freq_values[2] ? 2 : 1;
			if (mark_index != 1)
				freq_values[1] = (freq_values[0] + freq_values[2]) / 2;

			for (i = 0; i < 3; i++) {
				size += sprintf(buf + size, "%d: %uMhz %s\n", i, freq_values[i],
						i == mark_index ? "*" : "");
			}

		}
		break;
	case SMU_PCIE:
		/*
		 * One line per PCIe link level: gen speed, lane width, LCLK;
		 * star the level matching the current link configuration.
		 */
		gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
		lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
		for (i = 0; i < NUM_LINK_LEVELS; i++)
			size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 3) ? "16.0GT/s," : "",
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 1) ? "x1" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 2) ? "x2" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 3) ? "x4" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 4) ? "x8" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 5) ? "x12" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 6) ? "x16" : "",
					pptable->LclkFreq[i],
					(gen_speed == dpm_context->dpm_tables.pcie_table.pcie_gen[i]) &&
					(lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
					"*" : "");
		break;
	case SMU_OD_SCLK:
		/* OD sections print nothing unless OD is enabled and capable. */
		if (!smu->od_enabled || !od_table || !od_settings)
			break;
		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS))
			break;
		size += sprintf(buf + size, "OD_SCLK:\n");
		size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
		break;
	case SMU_OD_MCLK:
		if (!smu->od_enabled || !od_table || !od_settings)
			break;
		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX))
			break;
		size += sprintf(buf + size, "OD_MCLK:\n");
		size += sprintf(buf + size, "1: %uMHz\n", od_table->UclkFmax);
		break;
	case SMU_OD_VDDC_CURVE:
		if (!smu->od_enabled || !od_table || !od_settings)
			break;
		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE))
			break;
		size += sprintf(buf + size, "OD_VDDC_CURVE:\n");
		/*
		 * The three curve anchor points are adjacent (freq, volt)
		 * uint16_t pairs in the OD table; curve_settings[0] is the
		 * frequency and curve_settings[1] the raw voltage.
		 */
		for (i = 0; i < 3; i++) {
			switch (i) {
			case 0:
				curve_settings = &od_table->GfxclkFreq1;
				break;
			case 1:
				curve_settings = &od_table->GfxclkFreq2;
				break;
			case 2:
				curve_settings = &od_table->GfxclkFreq3;
				break;
			default:
				break;
			}
			size += sprintf(buf + size, "%d: %uMHz %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE);
		}
		break;
	case SMU_OD_RANGE:
		if (!smu->od_enabled || !od_table || !od_settings)
			break;
		size = sprintf(buf, "%s:\n", "OD_RANGE");

		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
						    &min_value, NULL);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
						    NULL, &max_value);
			size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
					min_value, max_value);
		}

		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
						    &min_value, &max_value);
			size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
					min_value, max_value);
		}

		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
					min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
					min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
					min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
					min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
					min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
					min_value, max_value);
		}

		break;
	default:
		break;
	}

	return size;
}
1446 
1447 static int navi10_force_clk_levels(struct smu_context *smu,
1448 				   enum smu_clk_type clk_type, uint32_t mask)
1449 {
1450 
1451 	int ret = 0, size = 0;
1452 	uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;
1453 
1454 	soft_min_level = mask ? (ffs(mask) - 1) : 0;
1455 	soft_max_level = mask ? (fls(mask) - 1) : 0;
1456 
1457 	switch (clk_type) {
1458 	case SMU_GFXCLK:
1459 	case SMU_SCLK:
1460 	case SMU_SOCCLK:
1461 	case SMU_MCLK:
1462 	case SMU_UCLK:
1463 	case SMU_FCLK:
1464 		/* There is only 2 levels for fine grained DPM */
1465 		if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
1466 			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
1467 			soft_min_level = (soft_min_level >= 1 ? 1 : 0);
1468 		}
1469 
1470 		ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
1471 		if (ret)
1472 			return size;
1473 
1474 		ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
1475 		if (ret)
1476 			return size;
1477 
1478 		ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
1479 		if (ret)
1480 			return size;
1481 		break;
1482 	case SMU_DCEFCLK:
1483 		dev_info(smu->adev->dev,"Setting DCEFCLK min/max dpm level is not supported!\n");
1484 		break;
1485 
1486 	default:
1487 		break;
1488 	}
1489 
1490 	return size;
1491 }
1492 
1493 static int navi10_populate_umd_state_clk(struct smu_context *smu)
1494 {
1495 	struct smu_11_0_dpm_context *dpm_context =
1496 				smu->smu_dpm.dpm_context;
1497 	struct smu_11_0_dpm_table *gfx_table =
1498 				&dpm_context->dpm_tables.gfx_table;
1499 	struct smu_11_0_dpm_table *mem_table =
1500 				&dpm_context->dpm_tables.uclk_table;
1501 	struct smu_11_0_dpm_table *soc_table =
1502 				&dpm_context->dpm_tables.soc_table;
1503 	struct smu_umd_pstate_table *pstate_table =
1504 				&smu->pstate_table;
1505 	struct amdgpu_device *adev = smu->adev;
1506 	uint32_t sclk_freq;
1507 
1508 	pstate_table->gfxclk_pstate.min = gfx_table->min;
1509 	switch (adev->asic_type) {
1510 	case CHIP_NAVI10:
1511 		switch (adev->pdev->revision) {
1512 		case 0xf0: /* XTX */
1513 		case 0xc0:
1514 			sclk_freq = NAVI10_PEAK_SCLK_XTX;
1515 			break;
1516 		case 0xf1: /* XT */
1517 		case 0xc1:
1518 			sclk_freq = NAVI10_PEAK_SCLK_XT;
1519 			break;
1520 		default: /* XL */
1521 			sclk_freq = NAVI10_PEAK_SCLK_XL;
1522 			break;
1523 		}
1524 		break;
1525 	case CHIP_NAVI14:
1526 		switch (adev->pdev->revision) {
1527 		case 0xc7: /* XT */
1528 		case 0xf4:
1529 			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK;
1530 			break;
1531 		case 0xc1: /* XTM */
1532 		case 0xf2:
1533 			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK;
1534 			break;
1535 		case 0xc3: /* XLM */
1536 		case 0xf3:
1537 			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK;
1538 			break;
1539 		case 0xc5: /* XTX */
1540 		case 0xf6:
1541 			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK;
1542 			break;
1543 		default: /* XL */
1544 			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK;
1545 			break;
1546 		}
1547 		break;
1548 	case CHIP_NAVI12:
1549 		sclk_freq = NAVI12_UMD_PSTATE_PEAK_GFXCLK;
1550 		break;
1551 	default:
1552 		sclk_freq = gfx_table->dpm_levels[gfx_table->count - 1].value;
1553 		break;
1554 	}
1555 	pstate_table->gfxclk_pstate.peak = sclk_freq;
1556 
1557 	pstate_table->uclk_pstate.min = mem_table->min;
1558 	pstate_table->uclk_pstate.peak = mem_table->max;
1559 
1560 	pstate_table->socclk_pstate.min = soc_table->min;
1561 	pstate_table->socclk_pstate.peak = soc_table->max;
1562 
1563 	if (gfx_table->max > NAVI10_UMD_PSTATE_PROFILING_GFXCLK &&
1564 	    mem_table->max > NAVI10_UMD_PSTATE_PROFILING_MEMCLK &&
1565 	    soc_table->max > NAVI10_UMD_PSTATE_PROFILING_SOCCLK) {
1566 		pstate_table->gfxclk_pstate.standard =
1567 			NAVI10_UMD_PSTATE_PROFILING_GFXCLK;
1568 		pstate_table->uclk_pstate.standard =
1569 			NAVI10_UMD_PSTATE_PROFILING_MEMCLK;
1570 		pstate_table->socclk_pstate.standard =
1571 			NAVI10_UMD_PSTATE_PROFILING_SOCCLK;
1572 	} else {
1573 		pstate_table->gfxclk_pstate.standard =
1574 			pstate_table->gfxclk_pstate.min;
1575 		pstate_table->uclk_pstate.standard =
1576 			pstate_table->uclk_pstate.min;
1577 		pstate_table->socclk_pstate.standard =
1578 			pstate_table->socclk_pstate.min;
1579 	}
1580 
1581 	return 0;
1582 }
1583 
1584 static int navi10_get_clock_by_type_with_latency(struct smu_context *smu,
1585 						 enum smu_clk_type clk_type,
1586 						 struct pp_clock_levels_with_latency *clocks)
1587 {
1588 	int ret = 0, i = 0;
1589 	uint32_t level_count = 0, freq = 0;
1590 
1591 	switch (clk_type) {
1592 	case SMU_GFXCLK:
1593 	case SMU_DCEFCLK:
1594 	case SMU_SOCCLK:
1595 	case SMU_MCLK:
1596 	case SMU_UCLK:
1597 		ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &level_count);
1598 		if (ret)
1599 			return ret;
1600 
1601 		level_count = min(level_count, (uint32_t)MAX_NUM_CLOCKS);
1602 		clocks->num_levels = level_count;
1603 
1604 		for (i = 0; i < level_count; i++) {
1605 			ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &freq);
1606 			if (ret)
1607 				return ret;
1608 
1609 			clocks->data[i].clocks_in_khz = freq * 1000;
1610 			clocks->data[i].latency_in_us = 0;
1611 		}
1612 		break;
1613 	default:
1614 		break;
1615 	}
1616 
1617 	return ret;
1618 }
1619 
1620 static int navi10_pre_display_config_changed(struct smu_context *smu)
1621 {
1622 	int ret = 0;
1623 	uint32_t max_freq = 0;
1624 
1625 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
1626 	if (ret)
1627 		return ret;
1628 
1629 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
1630 		ret = smu_v11_0_get_dpm_ultimate_freq(smu, SMU_UCLK, NULL, &max_freq);
1631 		if (ret)
1632 			return ret;
1633 		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, max_freq);
1634 		if (ret)
1635 			return ret;
1636 	}
1637 
1638 	return ret;
1639 }
1640 
1641 static int navi10_display_config_changed(struct smu_context *smu)
1642 {
1643 	int ret = 0;
1644 
1645 	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1646 	    smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
1647 	    smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
1648 		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
1649 						  smu->display_config->num_display,
1650 						  NULL);
1651 		if (ret)
1652 			return ret;
1653 	}
1654 
1655 	return ret;
1656 }
1657 
1658 static bool navi10_is_dpm_running(struct smu_context *smu)
1659 {
1660 	int ret = 0;
1661 	uint32_t feature_mask[2];
1662 	uint64_t feature_enabled;
1663 
1664 	ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
1665 	if (ret)
1666 		return false;
1667 
1668 	feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
1669 
1670 	return !!(feature_enabled & SMC_DPM_FEATURE);
1671 }
1672 
1673 static int navi10_get_fan_speed_percent(struct smu_context *smu,
1674 					uint32_t *speed)
1675 {
1676 	int ret;
1677 	u32 rpm;
1678 
1679 	if (!speed)
1680 		return -EINVAL;
1681 
1682 	switch (smu_v11_0_get_fan_control_mode(smu)) {
1683 	case AMD_FAN_CTRL_AUTO:
1684 		ret = navi1x_get_smu_metrics_data(smu,
1685 						  METRICS_CURR_FANSPEED,
1686 						  &rpm);
1687 		if (!ret && smu->fan_max_rpm)
1688 			*speed = rpm * 100 / smu->fan_max_rpm;
1689 		return ret;
1690 	default:
1691 		*speed = smu->user_dpm_profile.fan_speed_percent;
1692 		return 0;
1693 	}
1694 }
1695 
1696 static int navi10_get_fan_parameters(struct smu_context *smu)
1697 {
1698 	PPTable_t *pptable = smu->smu_table.driver_pptable;
1699 
1700 	smu->fan_max_rpm = pptable->FanMaximumRpm;
1701 
1702 	return 0;
1703 }
1704 
/*
 * Print every power profile's activity monitor coefficients (GFXCLK,
 * SOCCLK and MEMLK rows per profile) into a sysfs buffer, starring the
 * currently active profile. Returns bytes written, or a negative error.
 */
static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
{
	DpmActivityMonitorCoeffInt_t activity_monitor;
	uint32_t i, size = 0;
	int16_t workload_type = 0;
	/* Index order mirrors the PP_SMC_POWER_PROFILE_* enum values. */
	static const char *profile_name[] = {
					"BOOTUP_DEFAULT",
					"3D_FULL_SCREEN",
					"POWER_SAVING",
					"VIDEO",
					"VR",
					"COMPUTE",
					"CUSTOM"};
	static const char *title[] = {
			"PROFILE_INDEX(NAME)",
			"CLOCK_TYPE(NAME)",
			"FPS",
			"MinFreqType",
			"MinActiveFreqType",
			"MinActiveFreq",
			"BoosterFreqType",
			"BoosterFreq",
			"PD_Data_limit_c",
			"PD_Data_error_coeff",
			"PD_Data_error_rate_coeff"};
	int result = 0;

	if (!buf)
		return -EINVAL;

	size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
			title[0], title[1], title[2], title[3], title[4], title[5],
			title[6], title[7], title[8], title[9], title[10]);

	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
		workload_type = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_WORKLOAD,
							       i);
		if (workload_type < 0)
			return -EINVAL;

		/* Pull this profile's coefficient table from the SMU. */
		result = smu_cmn_update_table(smu,
					  SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
					  (void *)(&activity_monitor), false);
		if (result) {
			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
			return result;
		}

		size += sprintf(buf + size, "%2d %14s%s:\n",
			i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");

		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			0,
			"GFXCLK",
			activity_monitor.Gfx_FPS,
			activity_monitor.Gfx_MinFreqStep,
			activity_monitor.Gfx_MinActiveFreqType,
			activity_monitor.Gfx_MinActiveFreq,
			activity_monitor.Gfx_BoosterFreqType,
			activity_monitor.Gfx_BoosterFreq,
			activity_monitor.Gfx_PD_Data_limit_c,
			activity_monitor.Gfx_PD_Data_error_coeff,
			activity_monitor.Gfx_PD_Data_error_rate_coeff);

		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			1,
			"SOCCLK",
			activity_monitor.Soc_FPS,
			activity_monitor.Soc_MinFreqStep,
			activity_monitor.Soc_MinActiveFreqType,
			activity_monitor.Soc_MinActiveFreq,
			activity_monitor.Soc_BoosterFreqType,
			activity_monitor.Soc_BoosterFreq,
			activity_monitor.Soc_PD_Data_limit_c,
			activity_monitor.Soc_PD_Data_error_coeff,
			activity_monitor.Soc_PD_Data_error_rate_coeff);

		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			2,
			"MEMLK",
			activity_monitor.Mem_FPS,
			activity_monitor.Mem_MinFreqStep,
			activity_monitor.Mem_MinActiveFreqType,
			activity_monitor.Mem_MinActiveFreq,
			activity_monitor.Mem_BoosterFreqType,
			activity_monitor.Mem_BoosterFreq,
			activity_monitor.Mem_PD_Data_limit_c,
			activity_monitor.Mem_PD_Data_error_coeff,
			activity_monitor.Mem_PD_Data_error_rate_coeff);
	}

	return size;
}
1803 
1804 static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
1805 {
1806 	DpmActivityMonitorCoeffInt_t activity_monitor;
1807 	int workload_type, ret = 0;
1808 
1809 	smu->power_profile_mode = input[size];
1810 
1811 	if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
1812 		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
1813 		return -EINVAL;
1814 	}
1815 
1816 	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1817 
1818 		ret = smu_cmn_update_table(smu,
1819 				       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
1820 				       (void *)(&activity_monitor), false);
1821 		if (ret) {
1822 			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
1823 			return ret;
1824 		}
1825 
1826 		switch (input[0]) {
1827 		case 0: /* Gfxclk */
1828 			activity_monitor.Gfx_FPS = input[1];
1829 			activity_monitor.Gfx_MinFreqStep = input[2];
1830 			activity_monitor.Gfx_MinActiveFreqType = input[3];
1831 			activity_monitor.Gfx_MinActiveFreq = input[4];
1832 			activity_monitor.Gfx_BoosterFreqType = input[5];
1833 			activity_monitor.Gfx_BoosterFreq = input[6];
1834 			activity_monitor.Gfx_PD_Data_limit_c = input[7];
1835 			activity_monitor.Gfx_PD_Data_error_coeff = input[8];
1836 			activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
1837 			break;
1838 		case 1: /* Socclk */
1839 			activity_monitor.Soc_FPS = input[1];
1840 			activity_monitor.Soc_MinFreqStep = input[2];
1841 			activity_monitor.Soc_MinActiveFreqType = input[3];
1842 			activity_monitor.Soc_MinActiveFreq = input[4];
1843 			activity_monitor.Soc_BoosterFreqType = input[5];
1844 			activity_monitor.Soc_BoosterFreq = input[6];
1845 			activity_monitor.Soc_PD_Data_limit_c = input[7];
1846 			activity_monitor.Soc_PD_Data_error_coeff = input[8];
1847 			activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
1848 			break;
1849 		case 2: /* Memlk */
1850 			activity_monitor.Mem_FPS = input[1];
1851 			activity_monitor.Mem_MinFreqStep = input[2];
1852 			activity_monitor.Mem_MinActiveFreqType = input[3];
1853 			activity_monitor.Mem_MinActiveFreq = input[4];
1854 			activity_monitor.Mem_BoosterFreqType = input[5];
1855 			activity_monitor.Mem_BoosterFreq = input[6];
1856 			activity_monitor.Mem_PD_Data_limit_c = input[7];
1857 			activity_monitor.Mem_PD_Data_error_coeff = input[8];
1858 			activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
1859 			break;
1860 		}
1861 
1862 		ret = smu_cmn_update_table(smu,
1863 				       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
1864 				       (void *)(&activity_monitor), true);
1865 		if (ret) {
1866 			dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
1867 			return ret;
1868 		}
1869 	}
1870 
1871 	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
1872 	workload_type = smu_cmn_to_asic_specific_index(smu,
1873 						       CMN2ASIC_MAPPING_WORKLOAD,
1874 						       smu->power_profile_mode);
1875 	if (workload_type < 0)
1876 		return -EINVAL;
1877 	smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
1878 				    1 << workload_type, NULL);
1879 
1880 	return ret;
1881 }
1882 
1883 static int navi10_notify_smc_display_config(struct smu_context *smu)
1884 {
1885 	struct smu_clocks min_clocks = {0};
1886 	struct pp_display_clock_request clock_req;
1887 	int ret = 0;
1888 
1889 	min_clocks.dcef_clock = smu->display_config->min_dcef_set_clk;
1890 	min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
1891 	min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
1892 
1893 	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
1894 		clock_req.clock_type = amd_pp_dcef_clock;
1895 		clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
1896 
1897 		ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
1898 		if (!ret) {
1899 			if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
1900 				ret = smu_cmn_send_smc_msg_with_param(smu,
1901 								  SMU_MSG_SetMinDeepSleepDcefclk,
1902 								  min_clocks.dcef_clock_in_sr/100,
1903 								  NULL);
1904 				if (ret) {
1905 					dev_err(smu->adev->dev, "Attempt to set divider for DCEFCLK Failed!");
1906 					return ret;
1907 				}
1908 			}
1909 		} else {
1910 			dev_info(smu->adev->dev, "Attempt to set Hard Min for DCEFCLK Failed!");
1911 		}
1912 	}
1913 
1914 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
1915 		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_clocks.memory_clock/100, 0);
1916 		if (ret) {
1917 			dev_err(smu->adev->dev, "[%s] Set hard min uclk failed!", __func__);
1918 			return ret;
1919 		}
1920 	}
1921 
1922 	return 0;
1923 }
1924 
1925 static int navi10_set_watermarks_table(struct smu_context *smu,
1926 				       struct pp_smu_wm_range_sets *clock_ranges)
1927 {
1928 	Watermarks_t *table = smu->smu_table.watermarks_table;
1929 	int ret = 0;
1930 	int i;
1931 
1932 	if (clock_ranges) {
1933 		if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
1934 		    clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
1935 			return -EINVAL;
1936 
1937 		for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
1938 			table->WatermarkRow[WM_DCEFCLK][i].MinClock =
1939 				clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
1940 			table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
1941 				clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
1942 			table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
1943 				clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
1944 			table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
1945 				clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
1946 
1947 			table->WatermarkRow[WM_DCEFCLK][i].WmSetting =
1948 				clock_ranges->reader_wm_sets[i].wm_inst;
1949 		}
1950 
1951 		for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
1952 			table->WatermarkRow[WM_SOCCLK][i].MinClock =
1953 				clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
1954 			table->WatermarkRow[WM_SOCCLK][i].MaxClock =
1955 				clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
1956 			table->WatermarkRow[WM_SOCCLK][i].MinUclk =
1957 				clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
1958 			table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
1959 				clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
1960 
1961 			table->WatermarkRow[WM_SOCCLK][i].WmSetting =
1962 				clock_ranges->writer_wm_sets[i].wm_inst;
1963 		}
1964 
1965 		smu->watermarks_bitmap |= WATERMARKS_EXIST;
1966 	}
1967 
1968 	/* pass data to smu controller */
1969 	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1970 	     !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1971 		ret = smu_cmn_write_watermarks_table(smu);
1972 		if (ret) {
1973 			dev_err(smu->adev->dev, "Failed to update WMTABLE!");
1974 			return ret;
1975 		}
1976 		smu->watermarks_bitmap |= WATERMARKS_LOADED;
1977 	}
1978 
1979 	return 0;
1980 }
1981 
1982 static int navi10_read_sensor(struct smu_context *smu,
1983 				 enum amd_pp_sensors sensor,
1984 				 void *data, uint32_t *size)
1985 {
1986 	int ret = 0;
1987 	struct smu_table_context *table_context = &smu->smu_table;
1988 	PPTable_t *pptable = table_context->driver_pptable;
1989 
1990 	if(!data || !size)
1991 		return -EINVAL;
1992 
1993 	mutex_lock(&smu->sensor_lock);
1994 	switch (sensor) {
1995 	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
1996 		*(uint32_t *)data = pptable->FanMaximumRpm;
1997 		*size = 4;
1998 		break;
1999 	case AMDGPU_PP_SENSOR_MEM_LOAD:
2000 		ret = navi1x_get_smu_metrics_data(smu,
2001 						  METRICS_AVERAGE_MEMACTIVITY,
2002 						  (uint32_t *)data);
2003 		*size = 4;
2004 		break;
2005 	case AMDGPU_PP_SENSOR_GPU_LOAD:
2006 		ret = navi1x_get_smu_metrics_data(smu,
2007 						  METRICS_AVERAGE_GFXACTIVITY,
2008 						  (uint32_t *)data);
2009 		*size = 4;
2010 		break;
2011 	case AMDGPU_PP_SENSOR_GPU_POWER:
2012 		ret = navi1x_get_smu_metrics_data(smu,
2013 						  METRICS_AVERAGE_SOCKETPOWER,
2014 						  (uint32_t *)data);
2015 		*size = 4;
2016 		break;
2017 	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
2018 		ret = navi1x_get_smu_metrics_data(smu,
2019 						  METRICS_TEMPERATURE_HOTSPOT,
2020 						  (uint32_t *)data);
2021 		*size = 4;
2022 		break;
2023 	case AMDGPU_PP_SENSOR_EDGE_TEMP:
2024 		ret = navi1x_get_smu_metrics_data(smu,
2025 						  METRICS_TEMPERATURE_EDGE,
2026 						  (uint32_t *)data);
2027 		*size = 4;
2028 		break;
2029 	case AMDGPU_PP_SENSOR_MEM_TEMP:
2030 		ret = navi1x_get_smu_metrics_data(smu,
2031 						  METRICS_TEMPERATURE_MEM,
2032 						  (uint32_t *)data);
2033 		*size = 4;
2034 		break;
2035 	case AMDGPU_PP_SENSOR_GFX_MCLK:
2036 		ret = navi10_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data);
2037 		*(uint32_t *)data *= 100;
2038 		*size = 4;
2039 		break;
2040 	case AMDGPU_PP_SENSOR_GFX_SCLK:
2041 		ret = navi1x_get_smu_metrics_data(smu, METRICS_AVERAGE_GFXCLK, (uint32_t *)data);
2042 		*(uint32_t *)data *= 100;
2043 		*size = 4;
2044 		break;
2045 	case AMDGPU_PP_SENSOR_VDDGFX:
2046 		ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
2047 		*size = 4;
2048 		break;
2049 	default:
2050 		ret = -EOPNOTSUPP;
2051 		break;
2052 	}
2053 	mutex_unlock(&smu->sensor_lock);
2054 
2055 	return ret;
2056 }
2057 
2058 static int navi10_get_uclk_dpm_states(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states)
2059 {
2060 	uint32_t num_discrete_levels = 0;
2061 	uint16_t *dpm_levels = NULL;
2062 	uint16_t i = 0;
2063 	struct smu_table_context *table_context = &smu->smu_table;
2064 	PPTable_t *driver_ppt = NULL;
2065 
2066 	if (!clocks_in_khz || !num_states || !table_context->driver_pptable)
2067 		return -EINVAL;
2068 
2069 	driver_ppt = table_context->driver_pptable;
2070 	num_discrete_levels = driver_ppt->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels;
2071 	dpm_levels = driver_ppt->FreqTableUclk;
2072 
2073 	if (num_discrete_levels == 0 || dpm_levels == NULL)
2074 		return -EINVAL;
2075 
2076 	*num_states = num_discrete_levels;
2077 	for (i = 0; i < num_discrete_levels; i++) {
2078 		/* convert to khz */
2079 		*clocks_in_khz = (*dpm_levels) * 1000;
2080 		clocks_in_khz++;
2081 		dpm_levels++;
2082 	}
2083 
2084 	return 0;
2085 }
2086 
2087 static int navi10_get_thermal_temperature_range(struct smu_context *smu,
2088 						struct smu_temperature_range *range)
2089 {
2090 	struct smu_table_context *table_context = &smu->smu_table;
2091 	struct smu_11_0_powerplay_table *powerplay_table =
2092 				table_context->power_play_table;
2093 	PPTable_t *pptable = smu->smu_table.driver_pptable;
2094 
2095 	if (!range)
2096 		return -EINVAL;
2097 
2098 	memcpy(range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));
2099 
2100 	range->max = pptable->TedgeLimit *
2101 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2102 	range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
2103 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2104 	range->hotspot_crit_max = pptable->ThotspotLimit *
2105 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2106 	range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
2107 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2108 	range->mem_crit_max = pptable->TmemLimit *
2109 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2110 	range->mem_emergency_max = (pptable->TmemLimit + CTF_OFFSET_MEM)*
2111 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2112 	range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
2113 
2114 	return 0;
2115 }
2116 
2117 static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
2118 						bool disable_memory_clock_switch)
2119 {
2120 	int ret = 0;
2121 	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks =
2122 		(struct smu_11_0_max_sustainable_clocks *)
2123 			smu->smu_table.max_sustainable_clocks;
2124 	uint32_t min_memory_clock = smu->hard_min_uclk_req_from_dal;
2125 	uint32_t max_memory_clock = max_sustainable_clocks->uclock;
2126 
2127 	if(smu->disable_uclk_switch == disable_memory_clock_switch)
2128 		return 0;
2129 
2130 	if(disable_memory_clock_switch)
2131 		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, max_memory_clock, 0);
2132 	else
2133 		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_memory_clock, 0);
2134 
2135 	if(!ret)
2136 		smu->disable_uclk_switch = disable_memory_clock_switch;
2137 
2138 	return ret;
2139 }
2140 
2141 static int navi10_get_power_limit(struct smu_context *smu)
2142 {
2143 	struct smu_11_0_powerplay_table *powerplay_table =
2144 		(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
2145 	struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
2146 	PPTable_t *pptable = smu->smu_table.driver_pptable;
2147 	uint32_t power_limit, od_percent;
2148 
2149 	if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
2150 		/* the last hope to figure out the ppt limit */
2151 		if (!pptable) {
2152 			dev_err(smu->adev->dev, "Cannot get PPT limit due to pptable missing!");
2153 			return -EINVAL;
2154 		}
2155 		power_limit =
2156 			pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
2157 	}
2158 	smu->current_power_limit = smu->default_power_limit = power_limit;
2159 
2160 	if (smu->od_enabled &&
2161 	    navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
2162 		od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
2163 
2164 		dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
2165 
2166 		power_limit *= (100 + od_percent);
2167 		power_limit /= 100;
2168 	}
2169 	smu->max_power_limit = power_limit;
2170 
2171 	return 0;
2172 }
2173 
2174 static int navi10_update_pcie_parameters(struct smu_context *smu,
2175 				     uint32_t pcie_gen_cap,
2176 				     uint32_t pcie_width_cap)
2177 {
2178 	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
2179 	PPTable_t *pptable = smu->smu_table.driver_pptable;
2180 	uint32_t smu_pcie_arg;
2181 	int ret, i;
2182 
2183 	/* lclk dpm table setup */
2184 	for (i = 0; i < MAX_PCIE_CONF; i++) {
2185 		dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pptable->PcieGenSpeed[i];
2186 		dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pptable->PcieLaneCount[i];
2187 	}
2188 
2189 	for (i = 0; i < NUM_LINK_LEVELS; i++) {
2190 		smu_pcie_arg = (i << 16) |
2191 			((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
2192 				(pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
2193 					pptable->PcieLaneCount[i] : pcie_width_cap);
2194 		ret = smu_cmn_send_smc_msg_with_param(smu,
2195 					  SMU_MSG_OverridePcieParameters,
2196 					  smu_pcie_arg,
2197 					  NULL);
2198 
2199 		if (ret)
2200 			return ret;
2201 
2202 		if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
2203 			dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
2204 		if (pptable->PcieLaneCount[i] > pcie_width_cap)
2205 			dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
2206 	}
2207 
2208 	return 0;
2209 }
2210 
/* Debug-log the current overdrive table: gfxclk min/max, the three
 * freq/voltage curve points, uclk max and the overdrive percentage. */
static inline void navi10_dump_od_table(struct smu_context *smu,
					OverDriveTable_t *od_table)
{
	dev_dbg(smu->adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
	dev_dbg(smu->adev->dev, "OD: Gfx1: (%d, %d)\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1);
	dev_dbg(smu->adev->dev, "OD: Gfx2: (%d, %d)\n", od_table->GfxclkFreq2, od_table->GfxclkVolt2);
	dev_dbg(smu->adev->dev, "OD: Gfx3: (%d, %d)\n", od_table->GfxclkFreq3, od_table->GfxclkVolt3);
	dev_dbg(smu->adev->dev, "OD: UclkFmax: %d\n", od_table->UclkFmax);
	dev_dbg(smu->adev->dev, "OD: OverDrivePct: %d\n", od_table->OverDrivePct);
}
2221 
2222 static int navi10_od_setting_check_range(struct smu_context *smu,
2223 					 struct smu_11_0_overdrive_table *od_table,
2224 					 enum SMU_11_0_ODSETTING_ID setting,
2225 					 uint32_t value)
2226 {
2227 	if (value < od_table->min[setting]) {
2228 		dev_warn(smu->adev->dev, "OD setting (%d, %d) is less than the minimum allowed (%d)\n", setting, value, od_table->min[setting]);
2229 		return -EINVAL;
2230 	}
2231 	if (value > od_table->max[setting]) {
2232 		dev_warn(smu->adev->dev, "OD setting (%d, %d) is greater than the maximum allowed (%d)\n", setting, value, od_table->max[setting]);
2233 		return -EINVAL;
2234 	}
2235 	return 0;
2236 }
2237 
2238 static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
2239 						     uint16_t *voltage,
2240 						     uint32_t freq)
2241 {
2242 	uint32_t param = (freq & 0xFFFF) | (PPCLK_GFXCLK << 16);
2243 	uint32_t value = 0;
2244 	int ret;
2245 
2246 	ret = smu_cmn_send_smc_msg_with_param(smu,
2247 					  SMU_MSG_GetVoltageByDpm,
2248 					  param,
2249 					  &value);
2250 	if (ret) {
2251 		dev_err(smu->adev->dev, "[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
2252 		return ret;
2253 	}
2254 
2255 	*voltage = (uint16_t)value;
2256 
2257 	return 0;
2258 }
2259 
2260 static int navi10_set_default_od_settings(struct smu_context *smu)
2261 {
2262 	OverDriveTable_t *od_table =
2263 		(OverDriveTable_t *)smu->smu_table.overdrive_table;
2264 	OverDriveTable_t *boot_od_table =
2265 		(OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
2266 	int ret = 0;
2267 
2268 	ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, false);
2269 	if (ret) {
2270 		dev_err(smu->adev->dev, "Failed to get overdrive table!\n");
2271 		return ret;
2272 	}
2273 
2274 	if (!od_table->GfxclkVolt1) {
2275 		ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
2276 								&od_table->GfxclkVolt1,
2277 								od_table->GfxclkFreq1);
2278 		if (ret)
2279 			return ret;
2280 	}
2281 
2282 	if (!od_table->GfxclkVolt2) {
2283 		ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
2284 								&od_table->GfxclkVolt2,
2285 								od_table->GfxclkFreq2);
2286 		if (ret)
2287 			return ret;
2288 	}
2289 
2290 	if (!od_table->GfxclkVolt3) {
2291 		ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
2292 								&od_table->GfxclkVolt3,
2293 								od_table->GfxclkFreq3);
2294 		if (ret)
2295 			return ret;
2296 	}
2297 
2298 	memcpy(boot_od_table, od_table, sizeof(OverDriveTable_t));
2299 
2300 	navi10_dump_od_table(smu, od_table);
2301 
2302 	return 0;
2303 }
2304 
2305 static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) {
2306 	int i;
2307 	int ret = 0;
2308 	struct smu_table_context *table_context = &smu->smu_table;
2309 	OverDriveTable_t *od_table;
2310 	struct smu_11_0_overdrive_table *od_settings;
2311 	enum SMU_11_0_ODSETTING_ID freq_setting, voltage_setting;
2312 	uint16_t *freq_ptr, *voltage_ptr;
2313 	od_table = (OverDriveTable_t *)table_context->overdrive_table;
2314 
2315 	if (!smu->od_enabled) {
2316 		dev_warn(smu->adev->dev, "OverDrive is not enabled!\n");
2317 		return -EINVAL;
2318 	}
2319 
2320 	if (!smu->od_settings) {
2321 		dev_err(smu->adev->dev, "OD board limits are not set!\n");
2322 		return -ENOENT;
2323 	}
2324 
2325 	od_settings = smu->od_settings;
2326 
2327 	switch (type) {
2328 	case PP_OD_EDIT_SCLK_VDDC_TABLE:
2329 		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
2330 			dev_warn(smu->adev->dev, "GFXCLK_LIMITS not supported!\n");
2331 			return -ENOTSUPP;
2332 		}
2333 		if (!table_context->overdrive_table) {
2334 			dev_err(smu->adev->dev, "Overdrive is not initialized\n");
2335 			return -EINVAL;
2336 		}
2337 		for (i = 0; i < size; i += 2) {
2338 			if (i + 2 > size) {
2339 				dev_info(smu->adev->dev, "invalid number of input parameters %d\n", size);
2340 				return -EINVAL;
2341 			}
2342 			switch (input[i]) {
2343 			case 0:
2344 				freq_setting = SMU_11_0_ODSETTING_GFXCLKFMIN;
2345 				freq_ptr = &od_table->GfxclkFmin;
2346 				if (input[i + 1] > od_table->GfxclkFmax) {
2347 					dev_info(smu->adev->dev, "GfxclkFmin (%ld) must be <= GfxclkFmax (%u)!\n",
2348 						input[i + 1],
2349 						od_table->GfxclkFmin);
2350 					return -EINVAL;
2351 				}
2352 				break;
2353 			case 1:
2354 				freq_setting = SMU_11_0_ODSETTING_GFXCLKFMAX;
2355 				freq_ptr = &od_table->GfxclkFmax;
2356 				if (input[i + 1] < od_table->GfxclkFmin) {
2357 					dev_info(smu->adev->dev, "GfxclkFmax (%ld) must be >= GfxclkFmin (%u)!\n",
2358 						input[i + 1],
2359 						od_table->GfxclkFmax);
2360 					return -EINVAL;
2361 				}
2362 				break;
2363 			default:
2364 				dev_info(smu->adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
2365 				dev_info(smu->adev->dev, "Supported indices: [0:min,1:max]\n");
2366 				return -EINVAL;
2367 			}
2368 			ret = navi10_od_setting_check_range(smu, od_settings, freq_setting, input[i + 1]);
2369 			if (ret)
2370 				return ret;
2371 			*freq_ptr = input[i + 1];
2372 		}
2373 		break;
2374 	case PP_OD_EDIT_MCLK_VDDC_TABLE:
2375 		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
2376 			dev_warn(smu->adev->dev, "UCLK_MAX not supported!\n");
2377 			return -ENOTSUPP;
2378 		}
2379 		if (size < 2) {
2380 			dev_info(smu->adev->dev, "invalid number of parameters: %d\n", size);
2381 			return -EINVAL;
2382 		}
2383 		if (input[0] != 1) {
2384 			dev_info(smu->adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[0]);
2385 			dev_info(smu->adev->dev, "Supported indices: [1:max]\n");
2386 			return -EINVAL;
2387 		}
2388 		ret = navi10_od_setting_check_range(smu, od_settings, SMU_11_0_ODSETTING_UCLKFMAX, input[1]);
2389 		if (ret)
2390 			return ret;
2391 		od_table->UclkFmax = input[1];
2392 		break;
2393 	case PP_OD_RESTORE_DEFAULT_TABLE:
2394 		if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) {
2395 			dev_err(smu->adev->dev, "Overdrive table was not initialized!\n");
2396 			return -EINVAL;
2397 		}
2398 		memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t));
2399 		break;
2400 	case PP_OD_COMMIT_DPM_TABLE:
2401 		navi10_dump_od_table(smu, od_table);
2402 		ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
2403 		if (ret) {
2404 			dev_err(smu->adev->dev, "Failed to import overdrive table!\n");
2405 			return ret;
2406 		}
2407 		break;
2408 	case PP_OD_EDIT_VDDC_CURVE:
2409 		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
2410 			dev_warn(smu->adev->dev, "GFXCLK_CURVE not supported!\n");
2411 			return -ENOTSUPP;
2412 		}
2413 		if (size < 3) {
2414 			dev_info(smu->adev->dev, "invalid number of parameters: %d\n", size);
2415 			return -EINVAL;
2416 		}
2417 		if (!od_table) {
2418 			dev_info(smu->adev->dev, "Overdrive is not initialized\n");
2419 			return -EINVAL;
2420 		}
2421 
2422 		switch (input[0]) {
2423 		case 0:
2424 			freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1;
2425 			voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1;
2426 			freq_ptr = &od_table->GfxclkFreq1;
2427 			voltage_ptr = &od_table->GfxclkVolt1;
2428 			break;
2429 		case 1:
2430 			freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2;
2431 			voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2;
2432 			freq_ptr = &od_table->GfxclkFreq2;
2433 			voltage_ptr = &od_table->GfxclkVolt2;
2434 			break;
2435 		case 2:
2436 			freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3;
2437 			voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3;
2438 			freq_ptr = &od_table->GfxclkFreq3;
2439 			voltage_ptr = &od_table->GfxclkVolt3;
2440 			break;
2441 		default:
2442 			dev_info(smu->adev->dev, "Invalid VDDC_CURVE index: %ld\n", input[0]);
2443 			dev_info(smu->adev->dev, "Supported indices: [0, 1, 2]\n");
2444 			return -EINVAL;
2445 		}
2446 		ret = navi10_od_setting_check_range(smu, od_settings, freq_setting, input[1]);
2447 		if (ret)
2448 			return ret;
2449 		// Allow setting zero to disable the OverDrive VDDC curve
2450 		if (input[2] != 0) {
2451 			ret = navi10_od_setting_check_range(smu, od_settings, voltage_setting, input[2]);
2452 			if (ret)
2453 				return ret;
2454 			*freq_ptr = input[1];
2455 			*voltage_ptr = ((uint16_t)input[2]) * NAVI10_VOLTAGE_SCALE;
2456 			dev_dbg(smu->adev->dev, "OD: set curve %ld: (%d, %d)\n", input[0], *freq_ptr, *voltage_ptr);
2457 		} else {
2458 			// If setting 0, disable all voltage curve settings
2459 			od_table->GfxclkVolt1 = 0;
2460 			od_table->GfxclkVolt2 = 0;
2461 			od_table->GfxclkVolt3 = 0;
2462 		}
2463 		navi10_dump_od_table(smu, od_table);
2464 		break;
2465 	default:
2466 		return -ENOSYS;
2467 	}
2468 	return ret;
2469 }
2470 
2471 static int navi10_run_btc(struct smu_context *smu)
2472 {
2473 	int ret = 0;
2474 
2475 	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
2476 	if (ret)
2477 		dev_err(smu->adev->dev, "RunBtc failed!\n");
2478 
2479 	return ret;
2480 }
2481 
2482 static bool navi10_need_umc_cdr_workaround(struct smu_context *smu)
2483 {
2484 	struct amdgpu_device *adev = smu->adev;
2485 
2486 	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
2487 		return false;
2488 
2489 	if (adev->asic_type == CHIP_NAVI10 ||
2490 	    adev->asic_type == CHIP_NAVI14)
2491 		return true;
2492 
2493 	return false;
2494 }
2495 
2496 static int navi10_umc_hybrid_cdr_workaround(struct smu_context *smu)
2497 {
2498 	uint32_t uclk_count, uclk_min, uclk_max;
2499 	int ret = 0;
2500 
2501 	/* This workaround can be applied only with uclk dpm enabled */
2502 	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
2503 		return 0;
2504 
2505 	ret = smu_v11_0_get_dpm_level_count(smu, SMU_UCLK, &uclk_count);
2506 	if (ret)
2507 		return ret;
2508 
2509 	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max);
2510 	if (ret)
2511 		return ret;
2512 
2513 	/*
2514 	 * The NAVI10_UMC_HYBRID_CDR_WORKAROUND_UCLK_THRESHOLD is 750Mhz.
2515 	 * This workaround is needed only when the max uclk frequency
2516 	 * not greater than that.
2517 	 */
2518 	if (uclk_max > 0x2EE)
2519 		return 0;
2520 
2521 	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min);
2522 	if (ret)
2523 		return ret;
2524 
2525 	/* Force UCLK out of the highest DPM */
2526 	ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, uclk_min);
2527 	if (ret)
2528 		return ret;
2529 
2530 	/* Revert the UCLK Hardmax */
2531 	ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, uclk_max);
2532 	if (ret)
2533 		return ret;
2534 
2535 	/*
2536 	 * In this case, SMU already disabled dummy pstate during enablement
2537 	 * of UCLK DPM, we have to re-enabled it.
2538 	 */
2539 	return smu_cmn_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
2540 }
2541 
2542 static int navi10_set_dummy_pstates_table_location(struct smu_context *smu)
2543 {
2544 	struct smu_table_context *smu_table = &smu->smu_table;
2545 	struct smu_table *dummy_read_table =
2546 				&smu_table->dummy_read_1_table;
2547 	char *dummy_table = dummy_read_table->cpu_addr;
2548 	int ret = 0;
2549 	uint32_t i;
2550 
2551 	for (i = 0; i < 0x40000; i += 0x1000 * 2) {
2552 		memcpy(dummy_table, &NoDbiPrbs7[0], 0x1000);
2553 		dummy_table += 0x1000;
2554 		memcpy(dummy_table, &DbiPrbs7[0], 0x1000);
2555 		dummy_table += 0x1000;
2556 	}
2557 
2558 	amdgpu_asic_flush_hdp(smu->adev, NULL);
2559 
2560 	ret = smu_cmn_send_smc_msg_with_param(smu,
2561 					      SMU_MSG_SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH,
2562 					      upper_32_bits(dummy_read_table->mc_address),
2563 					      NULL);
2564 	if (ret)
2565 		return ret;
2566 
2567 	return smu_cmn_send_smc_msg_with_param(smu,
2568 					       SMU_MSG_SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_LOW,
2569 					       lower_32_bits(dummy_read_table->mc_address),
2570 					       NULL);
2571 }
2572 
/*
 * Apply the UMC CDR workaround appropriate for this ASIC and firmware
 * combination. On new-enough PMFW the UMC f/w state is queried first and
 * either the hybrid-CDR bounce or the dummy-pstate table is used; on older
 * PMFW the hybrid-CDR bounce is applied unconditionally (Navi10 only).
 */
static int navi10_run_umc_cdr_workaround(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint8_t umc_fw_greater_than_v136 = false;
	uint8_t umc_fw_disable_cdr = false;
	uint32_t pmfw_version;
	uint32_t param;
	int ret = 0;

	/* only Navi10/Navi14 with UCLK DPM enabled need any of this */
	if (!navi10_need_umc_cdr_workaround(smu))
		return 0;

	ret = smu_cmn_get_smc_version(smu, NULL, &pmfw_version);
	if (ret) {
		dev_err(adev->dev, "Failed to get smu version!\n");
		return ret;
	}

	/*
	 * The messages below are only supported by Navi10 42.53.0 and later
	 * PMFWs and Navi14 53.29.0 and later PMFWs.
	 * - PPSMC_MSG_SetDriverDummyTableDramAddrHigh
	 * - PPSMC_MSG_SetDriverDummyTableDramAddrLow
	 * - PPSMC_MSG_GetUMCFWWA
	 */
	/* version encoding: 0x2a3500 = 42.53.0, 0x351D00 = 53.29.0 */
	if (((adev->asic_type == CHIP_NAVI10) && (pmfw_version >= 0x2a3500)) ||
	    ((adev->asic_type == CHIP_NAVI14) && (pmfw_version >= 0x351D00))) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_GET_UMC_FW_WA,
						      0,
						      &param);
		if (ret)
			return ret;

		/* First bit indicates if the UMC f/w is above v137 */
		umc_fw_greater_than_v136 = param & 0x1;

		/* Second bit indicates if hybrid-cdr is disabled */
		umc_fw_disable_cdr = param & 0x2;

		/* w/a only allowed if UMC f/w is <= 136 */
		if (umc_fw_greater_than_v136)
			return 0;

		if (umc_fw_disable_cdr) {
			if (adev->asic_type == CHIP_NAVI10)
				return navi10_umc_hybrid_cdr_workaround(smu);
		} else {
			return navi10_set_dummy_pstates_table_location(smu);
		}
	} else {
		/* older PMFW cannot report UMC f/w state - apply the bounce on Navi10 */
		if (adev->asic_type == CHIP_NAVI10)
			return navi10_umc_hybrid_cdr_workaround(smu);
	}

	return 0;
}
2630 
/*
 * Fill the cached gpu_metrics_v1_3 table from a fresh (legacy-layout) SMU
 * metrics snapshot and hand back a pointer to it via @table. Returns the
 * size of the metrics structure, or a negative error code.
 */
static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
					     void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_3 *gpu_metrics =
		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
	SmuMetrics_legacy_t metrics;
	int ret = 0;

	mutex_lock(&smu->metrics_lock);

	/* force a refresh of the cached metrics table (bypass staleness check) */
	ret = smu_cmn_get_metrics_table_locked(smu,
					       NULL,
					       true);
	if (ret) {
		mutex_unlock(&smu->metrics_lock);
		return ret;
	}

	/* take a local copy so the lock can be dropped while translating */
	memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_legacy_t));

	mutex_unlock(&smu->metrics_lock);

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
	gpu_metrics->temperature_mem = metrics.TemperatureMem;
	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
	gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;

	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;

	gpu_metrics->average_socket_power = metrics.AverageSocketPower;

	gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;

	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;

	gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;

	/* legacy metrics lack PCIe fields - read the link state directly */
	gpu_metrics->pcie_link_width =
			smu_v11_0_get_current_pcie_link_width(smu);
	gpu_metrics->pcie_link_speed =
			smu_v11_0_get_current_pcie_link_speed(smu);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	/* convert the reported VID offsets to mV; zero means "not reported" */
	/* NOTE(review): encoding assumed to be SVI2-style (155000 - 625*offset)/100 - confirm */
	if (metrics.CurrGfxVoltageOffset)
		gpu_metrics->voltage_gfx =
			(155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
	if (metrics.CurrMemVidOffset)
		gpu_metrics->voltage_mem =
			(155000 - 625 * metrics.CurrMemVidOffset) / 100;
	if (metrics.CurrSocVoltageOffset)
		gpu_metrics->voltage_soc =
			(155000 - 625 * metrics.CurrSocVoltageOffset) / 100;

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_3);
}
2703 
/*
 * Fill the cached gpu_metrics_v1_3 table from a fresh SMU metrics snapshot
 * (current layout) and hand back a pointer to it via @table. Returns the
 * size of the metrics structure, or a negative error code.
 */
static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
				      void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_3 *gpu_metrics =
		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int ret = 0;

	mutex_lock(&smu->metrics_lock);

	/* force a refresh of the cached metrics table (bypass staleness check) */
	ret = smu_cmn_get_metrics_table_locked(smu,
					       NULL,
					       true);
	if (ret) {
		mutex_unlock(&smu->metrics_lock);
		return ret;
	}

	/* take a local copy so the lock can be dropped while translating */
	memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_t));

	mutex_unlock(&smu->metrics_lock);

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
	gpu_metrics->temperature_mem = metrics.TemperatureMem;
	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
	gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;

	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;

	gpu_metrics->average_socket_power = metrics.AverageSocketPower;

	/* pick the pre- or post-deep-sleep average depending on gfx busyness */
	if (metrics.AverageGfxActivity > SMU_11_0_GFX_BUSY_THRESHOLD)
		gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPreDs;
	else
		gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPostDs;

	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequencyPostDs;

	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;

	gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;

	/* PCIe state is reported directly in this metrics layout */
	gpu_metrics->pcie_link_width = metrics.PcieWidth;
	gpu_metrics->pcie_link_speed = link_speed[metrics.PcieRate];

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	/* convert the reported VID offsets to mV; zero means "not reported" */
	/* NOTE(review): encoding assumed to be SVI2-style (155000 - 625*offset)/100 - confirm */
	if (metrics.CurrGfxVoltageOffset)
		gpu_metrics->voltage_gfx =
			(155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
	if (metrics.CurrMemVidOffset)
		gpu_metrics->voltage_mem =
			(155000 - 625 * metrics.CurrMemVidOffset) / 100;
	if (metrics.CurrSocVoltageOffset)
		gpu_metrics->voltage_soc =
			(155000 - 625 * metrics.CurrSocVoltageOffset) / 100;

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_3);
}
2778 
/*
 * navi12_get_legacy_gpu_metrics - export a v1.3 gpu_metrics table for
 * Navi12 parts running older SMU firmware with the legacy metrics layout
 * (SmuMetrics_NV12_legacy_t); see navi1x_get_gpu_metrics() for the
 * firmware-version dispatch.
 *
 * Returns the table size on success (with *table pointing at the
 * context-owned buffer) or a negative errno if the metrics table could
 * not be refreshed.
 */
static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
					     void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_3 *gpu_metrics =
		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
	SmuMetrics_NV12_legacy_t metrics;
	int ret = 0;

	mutex_lock(&smu->metrics_lock);

	/* Force-refresh the cached metrics table from the SMU. */
	ret = smu_cmn_get_metrics_table_locked(smu,
					       NULL,
					       true);
	if (ret) {
		mutex_unlock(&smu->metrics_lock);
		return ret;
	}

	/* Snapshot locally so the lock is not held during translation. */
	memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_legacy_t));

	mutex_unlock(&smu->metrics_lock);

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
	gpu_metrics->temperature_mem = metrics.TemperatureMem;
	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
	gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;

	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;

	gpu_metrics->average_socket_power = metrics.AverageSocketPower;

	/* Legacy layout has a single gfxclk average (no Pre/PostDs split). */
	gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;

	gpu_metrics->energy_accumulator = metrics.EnergyAccumulator;
	gpu_metrics->average_vclk0_frequency = metrics.AverageVclkFrequency;
	gpu_metrics->average_dclk0_frequency = metrics.AverageDclkFrequency;
	gpu_metrics->average_mm_activity = metrics.VcnActivityPercentage;

	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;

	gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;

	/* Legacy metrics carry no PCIe state; query the driver helpers. */
	gpu_metrics->pcie_link_width =
			smu_v11_0_get_current_pcie_link_width(smu);
	gpu_metrics->pcie_link_speed =
			smu_v11_0_get_current_pcie_link_speed(smu);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	/*
	 * Convert the reported VID offsets when non-zero; result is
	 * presumably mV (1.55 V base, 6.25 mV per step) -- TODO confirm
	 * against the SMU interface spec.
	 */
	if (metrics.CurrGfxVoltageOffset)
		gpu_metrics->voltage_gfx =
			(155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
	if (metrics.CurrMemVidOffset)
		gpu_metrics->voltage_mem =
			(155000 - 625 * metrics.CurrMemVidOffset) / 100;
	if (metrics.CurrSocVoltageOffset)
		gpu_metrics->voltage_soc =
			(155000 - 625 * metrics.CurrSocVoltageOffset) / 100;

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_3);
}
2856 
/*
 * navi12_get_gpu_metrics - export a v1.3 gpu_metrics table for Navi12
 * parts running SMU firmware with the current (non-legacy) metrics
 * layout (SmuMetrics_NV12_t); see navi1x_get_gpu_metrics() for the
 * firmware-version dispatch.
 *
 * Returns the table size on success (with *table pointing at the
 * context-owned buffer) or a negative errno if the metrics table could
 * not be refreshed.
 */
static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
				      void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_3 *gpu_metrics =
		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
	SmuMetrics_NV12_t metrics;
	int ret = 0;

	mutex_lock(&smu->metrics_lock);

	/* Force-refresh the cached metrics table from the SMU. */
	ret = smu_cmn_get_metrics_table_locked(smu,
					       NULL,
					       true);
	if (ret) {
		mutex_unlock(&smu->metrics_lock);
		return ret;
	}

	/* Snapshot locally so the lock is not held during translation. */
	memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_t));

	mutex_unlock(&smu->metrics_lock);

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
	gpu_metrics->temperature_mem = metrics.TemperatureMem;
	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
	gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;

	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;

	gpu_metrics->average_socket_power = metrics.AverageSocketPower;

	/*
	 * Report the pre-DS gfxclk average while the GPU is busy, the
	 * post-DS one otherwise (PreDs/PostDs presumably = before/after
	 * deep-sleep gating -- confirm against the SMU interface header).
	 */
	if (metrics.AverageGfxActivity > SMU_11_0_GFX_BUSY_THRESHOLD)
		gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPreDs;
	else
		gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPostDs;

	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequencyPostDs;

	gpu_metrics->energy_accumulator = metrics.EnergyAccumulator;
	gpu_metrics->average_vclk0_frequency = metrics.AverageVclkFrequency;
	gpu_metrics->average_dclk0_frequency = metrics.AverageDclkFrequency;
	gpu_metrics->average_mm_activity = metrics.VcnActivityPercentage;

	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;

	gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;

	/* This metrics layout reports PCIe state directly from the SMU. */
	gpu_metrics->pcie_link_width = metrics.PcieWidth;
	gpu_metrics->pcie_link_speed = link_speed[metrics.PcieRate];

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	/*
	 * Convert the reported VID offsets when non-zero; result is
	 * presumably mV (1.55 V base, 6.25 mV per step) -- TODO confirm
	 * against the SMU interface spec.
	 */
	if (metrics.CurrGfxVoltageOffset)
		gpu_metrics->voltage_gfx =
			(155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
	if (metrics.CurrMemVidOffset)
		gpu_metrics->voltage_mem =
			(155000 - 625 * metrics.CurrMemVidOffset) / 100;
	if (metrics.CurrSocVoltageOffset)
		gpu_metrics->voltage_soc =
			(155000 - 625 * metrics.CurrSocVoltageOffset) / 100;

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_3);
}
2936 
2937 static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
2938 				      void **table)
2939 {
2940 	struct amdgpu_device *adev = smu->adev;
2941 	uint32_t smu_version;
2942 	int ret = 0;
2943 
2944 	ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
2945 	if (ret) {
2946 		dev_err(adev->dev, "Failed to get smu version!\n");
2947 		return ret;
2948 	}
2949 
2950 	switch (adev->asic_type) {
2951 	case CHIP_NAVI12:
2952 		if (smu_version > 0x00341C00)
2953 			ret = navi12_get_gpu_metrics(smu, table);
2954 		else
2955 			ret = navi12_get_legacy_gpu_metrics(smu, table);
2956 		break;
2957 	case CHIP_NAVI10:
2958 	case CHIP_NAVI14:
2959 	default:
2960 		if (((adev->asic_type == CHIP_NAVI14) && smu_version > 0x00351F00) ||
2961 		      ((adev->asic_type == CHIP_NAVI10) && smu_version > 0x002A3B00))
2962 			ret = navi10_get_gpu_metrics(smu, table);
2963 		else
2964 			ret =navi10_get_legacy_gpu_metrics(smu, table);
2965 		break;
2966 	}
2967 
2968 	return ret;
2969 }
2970 
2971 static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
2972 {
2973 	struct smu_table_context *table_context = &smu->smu_table;
2974 	PPTable_t *smc_pptable = table_context->driver_pptable;
2975 	struct amdgpu_device *adev = smu->adev;
2976 	uint32_t param = 0;
2977 
2978 	/* Navi12 does not support this */
2979 	if (adev->asic_type == CHIP_NAVI12)
2980 		return 0;
2981 
2982 	/*
2983 	 * Skip the MGpuFanBoost setting for those ASICs
2984 	 * which do not support it
2985 	 */
2986 	if (!smc_pptable->MGpuFanBoostLimitRpm)
2987 		return 0;
2988 
2989 	/* Workaround for WS SKU */
2990 	if (adev->pdev->device == 0x7312 &&
2991 	    adev->pdev->revision == 0)
2992 		param = 0xD188;
2993 
2994 	return smu_cmn_send_smc_msg_with_param(smu,
2995 					       SMU_MSG_SetMGpuFanBoostLimitRpm,
2996 					       param,
2997 					       NULL);
2998 }
2999 
3000 static int navi10_post_smu_init(struct smu_context *smu)
3001 {
3002 	struct amdgpu_device *adev = smu->adev;
3003 	int ret = 0;
3004 
3005 	if (amdgpu_sriov_vf(adev))
3006 		return 0;
3007 
3008 	ret = navi10_run_umc_cdr_workaround(smu);
3009 	if (ret) {
3010 		dev_err(adev->dev, "Failed to apply umc cdr workaround!\n");
3011 		return ret;
3012 	}
3013 
3014 	if (!smu->dc_controlled_by_gpio) {
3015 		/*
3016 		 * For Navi1X, manually switch it to AC mode as PMFW
3017 		 * may boot it with DC mode.
3018 		 */
3019 		ret = smu_v11_0_set_power_source(smu,
3020 						 adev->pm.ac_power ?
3021 						 SMU_POWER_SOURCE_AC :
3022 						 SMU_POWER_SOURCE_DC);
3023 		if (ret) {
3024 			dev_err(adev->dev, "Failed to switch to %s mode!\n",
3025 					adev->pm.ac_power ? "AC" : "DC");
3026 			return ret;
3027 		}
3028 	}
3029 
3030 	return ret;
3031 }
3032 
/*
 * Navi10/12/14 swSMU callback table: ASIC-specific handlers implemented in
 * this file, plus shared smu_v11_0_* and smu_cmn_* helpers.
 */
static const struct pptable_funcs navi10_ppt_funcs = {
	/* Feature masks, DPM tables and power-gating */
	.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
	.set_default_dpm_table = navi10_set_default_dpm_table,
	.dpm_set_vcn_enable = navi10_dpm_set_vcn_enable,
	.dpm_set_jpeg_enable = navi10_dpm_set_jpeg_enable,
	/* Clock levels, display interaction and power profiles */
	.print_clk_levels = navi10_print_clk_levels,
	.force_clk_levels = navi10_force_clk_levels,
	.populate_umd_state_clk = navi10_populate_umd_state_clk,
	.get_clock_by_type_with_latency = navi10_get_clock_by_type_with_latency,
	.pre_display_config_changed = navi10_pre_display_config_changed,
	.display_config_changed = navi10_display_config_changed,
	.notify_smc_display_config = navi10_notify_smc_display_config,
	.is_dpm_running = navi10_is_dpm_running,
	.get_fan_speed_percent = navi10_get_fan_speed_percent,
	.get_power_profile_mode = navi10_get_power_profile_mode,
	.set_power_profile_mode = navi10_set_power_profile_mode,
	.set_watermarks_table = navi10_set_watermarks_table,
	.read_sensor = navi10_read_sensor,
	.get_uclk_dpm_states = navi10_get_uclk_dpm_states,
	.set_performance_level = smu_v11_0_set_performance_level,
	.get_thermal_temperature_range = navi10_get_thermal_temperature_range,
	.display_disable_memory_clock_switch = navi10_display_disable_memory_clock_switch,
	.get_power_limit = navi10_get_power_limit,
	.update_pcie_parameters = navi10_update_pcie_parameters,
	/* Firmware / SMC table lifecycle (shared smu_v11_0 implementations) */
	.init_microcode = smu_v11_0_init_microcode,
	.load_microcode = smu_v11_0_load_microcode,
	.fini_microcode = smu_v11_0_fini_microcode,
	.init_smc_tables = navi10_init_smc_tables,
	.fini_smc_tables = smu_v11_0_fini_smc_tables,
	.init_power = smu_v11_0_init_power,
	.fini_power = smu_v11_0_fini_power,
	.check_fw_status = smu_v11_0_check_fw_status,
	.setup_pptable = navi10_setup_pptable,
	.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
	.check_fw_version = smu_v11_0_check_fw_version,
	.write_pptable = smu_cmn_write_pptable,
	.set_driver_table_location = smu_v11_0_set_driver_table_location,
	.set_tool_table_location = smu_v11_0_set_tool_table_location,
	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
	.system_features_control = smu_v11_0_system_features_control,
	/* SMU messaging and feature control (shared smu_cmn helpers) */
	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
	.send_smc_msg = smu_cmn_send_smc_msg,
	.init_display_count = smu_v11_0_init_display_count,
	.set_allowed_mask = smu_v11_0_set_allowed_mask,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
	.notify_display_change = smu_v11_0_notify_display_change,
	.set_power_limit = smu_v11_0_set_power_limit,
	.init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
	/* Thermal, fan and clock-voltage control */
	.enable_thermal_alert = smu_v11_0_enable_thermal_alert,
	.disable_thermal_alert = smu_v11_0_disable_thermal_alert,
	.set_min_dcef_deep_sleep = smu_v11_0_set_min_deep_sleep_dcefclk,
	.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
	.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
	.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
	.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
	.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
	.gfx_off_control = smu_v11_0_gfx_off_control,
	.register_irq_handler = smu_v11_0_register_irq_handler,
	.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
	.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
	/* BACO (bus-active, chip-off) entry/exit */
	.baco_is_support = smu_v11_0_baco_is_support,
	.baco_get_state = smu_v11_0_baco_get_state,
	.baco_set_state = smu_v11_0_baco_set_state,
	.baco_enter = smu_v11_0_baco_enter,
	.baco_exit = smu_v11_0_baco_exit,
	/* Frequency ranges, overdrive and miscellaneous */
	.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
	.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
	.set_default_od_settings = navi10_set_default_od_settings,
	.od_edit_dpm_table = navi10_od_edit_dpm_table,
	.run_btc = navi10_run_btc,
	.set_power_source = smu_v11_0_set_power_source,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
	.get_gpu_metrics = navi1x_get_gpu_metrics,
	.enable_mgpu_fan_boost = navi10_enable_mgpu_fan_boost,
	.gfx_ulv_control = smu_v11_0_gfx_ulv_control,
	.deep_sleep_control = smu_v11_0_deep_sleep_control,
	.get_fan_parameters = navi10_get_fan_parameters,
	.post_init = navi10_post_smu_init,
	.interrupt_work = smu_v11_0_interrupt_work,
	.set_mp1_state = navi10_set_mp1_state,
};
3117 
3118 void navi10_set_ppt_funcs(struct smu_context *smu)
3119 {
3120 	smu->ppt_funcs = &navi10_ppt_funcs;
3121 	smu->message_map = navi10_message_map;
3122 	smu->clock_map = navi10_clk_map;
3123 	smu->feature_map = navi10_feature_mask_map;
3124 	smu->table_map = navi10_table_map;
3125 	smu->pwr_src_map = navi10_pwr_src_map;
3126 	smu->workload_map = navi10_workload_map;
3127 }
3128