1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #define SWSMU_CODE_LAYER_L2
25 
26 #include <linux/firmware.h>
27 #include <linux/pci.h>
28 #include <linux/i2c.h>
29 #include "amdgpu.h"
30 #include "amdgpu_smu.h"
31 #include "atomfirmware.h"
32 #include "amdgpu_atomfirmware.h"
33 #include "amdgpu_atombios.h"
34 #include "soc15_common.h"
35 #include "smu_v11_0.h"
36 #include "smu11_driver_if_navi10.h"
37 #include "atom.h"
38 #include "navi10_ppt.h"
39 #include "smu_v11_0_pptable.h"
40 #include "smu_v11_0_ppsmc.h"
41 #include "nbio/nbio_2_3_offset.h"
42 #include "nbio/nbio_2_3_sh_mask.h"
43 #include "thm/thm_11_0_2_offset.h"
44 #include "thm/thm_11_0_2_sh_mask.h"
45 
46 #include "asic_reg/mp/mp_11_0_sh_mask.h"
47 #include "smu_cmn.h"
48 
49 /*
50  * DO NOT use these for err/warn/info/debug messages.
51  * Use dev_err, dev_warn, dev_info and dev_dbg instead.
52  * They are more MGPU friendly.
53  */
54 #undef pr_err
55 #undef pr_warn
56 #undef pr_info
57 #undef pr_debug
58 
59 #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
60 
61 #define FEATURE_MASK(feature) (1ULL << feature)
62 #define SMC_DPM_FEATURE ( \
63 	FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
64 	FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)	 | \
65 	FEATURE_MASK(FEATURE_DPM_GFX_PACE_BIT)	 | \
66 	FEATURE_MASK(FEATURE_DPM_UCLK_BIT)	 | \
67 	FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)	 | \
68 	FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)	 | \
69 	FEATURE_MASK(FEATURE_DPM_LINK_BIT)	 | \
70 	FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
71 
/*
 * Mapping from generic SMU message IDs (SMU_MSG_*) to the navi10
 * PPSMC firmware message indices.
 * NOTE(review): the third MSG_MAP() argument appears to mark messages
 * that remain valid when running under virtualization (VF) — confirm
 * against the MSG_MAP() definition in smu_cmn.h.
 */
static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,			PPSMC_MSG_TestMessage,			1),
	MSG_MAP(GetSmuVersion,			PPSMC_MSG_GetSmuVersion,		1),
	MSG_MAP(GetDriverIfVersion,		PPSMC_MSG_GetDriverIfVersion,		1),
	MSG_MAP(SetAllowedFeaturesMaskLow,	PPSMC_MSG_SetAllowedFeaturesMaskLow,	0),
	MSG_MAP(SetAllowedFeaturesMaskHigh,	PPSMC_MSG_SetAllowedFeaturesMaskHigh,	0),
	MSG_MAP(EnableAllSmuFeatures,		PPSMC_MSG_EnableAllSmuFeatures,		0),
	MSG_MAP(DisableAllSmuFeatures,		PPSMC_MSG_DisableAllSmuFeatures,	0),
	MSG_MAP(EnableSmuFeaturesLow,		PPSMC_MSG_EnableSmuFeaturesLow,		1),
	MSG_MAP(EnableSmuFeaturesHigh,		PPSMC_MSG_EnableSmuFeaturesHigh,	1),
	MSG_MAP(DisableSmuFeaturesLow,		PPSMC_MSG_DisableSmuFeaturesLow,	1),
	MSG_MAP(DisableSmuFeaturesHigh,		PPSMC_MSG_DisableSmuFeaturesHigh,	1),
	MSG_MAP(GetEnabledSmuFeaturesLow,	PPSMC_MSG_GetEnabledSmuFeaturesLow,	1),
	MSG_MAP(GetEnabledSmuFeaturesHigh,	PPSMC_MSG_GetEnabledSmuFeaturesHigh,	1),
	MSG_MAP(SetWorkloadMask,		PPSMC_MSG_SetWorkloadMask,		1),
	MSG_MAP(SetPptLimit,			PPSMC_MSG_SetPptLimit,			0),
	MSG_MAP(SetDriverDramAddrHigh,		PPSMC_MSG_SetDriverDramAddrHigh,	0),
	MSG_MAP(SetDriverDramAddrLow,		PPSMC_MSG_SetDriverDramAddrLow,		0),
	MSG_MAP(SetToolsDramAddrHigh,		PPSMC_MSG_SetToolsDramAddrHigh,		0),
	MSG_MAP(SetToolsDramAddrLow,		PPSMC_MSG_SetToolsDramAddrLow,		0),
	MSG_MAP(TransferTableSmu2Dram,		PPSMC_MSG_TransferTableSmu2Dram,	0),
	MSG_MAP(TransferTableDram2Smu,		PPSMC_MSG_TransferTableDram2Smu,	0),
	MSG_MAP(UseDefaultPPTable,		PPSMC_MSG_UseDefaultPPTable,		0),
	MSG_MAP(UseBackupPPTable,		PPSMC_MSG_UseBackupPPTable,		0),
	MSG_MAP(RunBtc,				PPSMC_MSG_RunBtc,			0),
	MSG_MAP(EnterBaco,			PPSMC_MSG_EnterBaco,			0),
	MSG_MAP(SetSoftMinByFreq,		PPSMC_MSG_SetSoftMinByFreq,		0),
	MSG_MAP(SetSoftMaxByFreq,		PPSMC_MSG_SetSoftMaxByFreq,		0),
	MSG_MAP(SetHardMinByFreq,		PPSMC_MSG_SetHardMinByFreq,		1),
	MSG_MAP(SetHardMaxByFreq,		PPSMC_MSG_SetHardMaxByFreq,		0),
	MSG_MAP(GetMinDpmFreq,			PPSMC_MSG_GetMinDpmFreq,		1),
	MSG_MAP(GetMaxDpmFreq,			PPSMC_MSG_GetMaxDpmFreq,		1),
	MSG_MAP(GetDpmFreqByIndex,		PPSMC_MSG_GetDpmFreqByIndex,		1),
	MSG_MAP(SetMemoryChannelConfig,		PPSMC_MSG_SetMemoryChannelConfig,	0),
	MSG_MAP(SetGeminiMode,			PPSMC_MSG_SetGeminiMode,		0),
	MSG_MAP(SetGeminiApertureHigh,		PPSMC_MSG_SetGeminiApertureHigh,	0),
	MSG_MAP(SetGeminiApertureLow,		PPSMC_MSG_SetGeminiApertureLow,		0),
	MSG_MAP(OverridePcieParameters,		PPSMC_MSG_OverridePcieParameters,	0),
	MSG_MAP(SetMinDeepSleepDcefclk,		PPSMC_MSG_SetMinDeepSleepDcefclk,	0),
	MSG_MAP(ReenableAcDcInterrupt,		PPSMC_MSG_ReenableAcDcInterrupt,	0),
	MSG_MAP(NotifyPowerSource,		PPSMC_MSG_NotifyPowerSource,		0),
	MSG_MAP(SetUclkFastSwitch,		PPSMC_MSG_SetUclkFastSwitch,		0),
	MSG_MAP(SetVideoFps,			PPSMC_MSG_SetVideoFps,			0),
	MSG_MAP(PrepareMp1ForUnload,		PPSMC_MSG_PrepareMp1ForUnload,		1),
	MSG_MAP(DramLogSetDramAddrHigh,		PPSMC_MSG_DramLogSetDramAddrHigh,	0),
	MSG_MAP(DramLogSetDramAddrLow,		PPSMC_MSG_DramLogSetDramAddrLow,	0),
	MSG_MAP(DramLogSetDramSize,		PPSMC_MSG_DramLogSetDramSize,		0),
	MSG_MAP(ConfigureGfxDidt,		PPSMC_MSG_ConfigureGfxDidt,		0),
	MSG_MAP(NumOfDisplays,			PPSMC_MSG_NumOfDisplays,		0),
	MSG_MAP(SetSystemVirtualDramAddrHigh,	PPSMC_MSG_SetSystemVirtualDramAddrHigh,	0),
	MSG_MAP(SetSystemVirtualDramAddrLow,	PPSMC_MSG_SetSystemVirtualDramAddrLow,	0),
	MSG_MAP(AllowGfxOff,			PPSMC_MSG_AllowGfxOff,			0),
	MSG_MAP(DisallowGfxOff,			PPSMC_MSG_DisallowGfxOff,		0),
	MSG_MAP(GetPptLimit,			PPSMC_MSG_GetPptLimit,			0),
	MSG_MAP(GetDcModeMaxDpmFreq,		PPSMC_MSG_GetDcModeMaxDpmFreq,		1),
	MSG_MAP(GetDebugData,			PPSMC_MSG_GetDebugData,			0),
	MSG_MAP(ExitBaco,			PPSMC_MSG_ExitBaco,			0),
	MSG_MAP(PrepareMp1ForReset,		PPSMC_MSG_PrepareMp1ForReset,		0),
	MSG_MAP(PrepareMp1ForShutdown,		PPSMC_MSG_PrepareMp1ForShutdown,	0),
	MSG_MAP(PowerUpVcn,			PPSMC_MSG_PowerUpVcn,			0),
	MSG_MAP(PowerDownVcn,			PPSMC_MSG_PowerDownVcn,			0),
	MSG_MAP(PowerUpJpeg,			PPSMC_MSG_PowerUpJpeg,			0),
	MSG_MAP(PowerDownJpeg,			PPSMC_MSG_PowerDownJpeg,		0),
	MSG_MAP(BacoAudioD3PME,			PPSMC_MSG_BacoAudioD3PME,		0),
	MSG_MAP(ArmD3,				PPSMC_MSG_ArmD3,			0),
	MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE,PPSMC_MSG_DALDisableDummyPstateChange,	0),
	MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE,	PPSMC_MSG_DALEnableDummyPstateChange,	0),
	MSG_MAP(GetVoltageByDpm,		PPSMC_MSG_GetVoltageByDpm,		0),
	MSG_MAP(GetVoltageByDpmOverdrive,	PPSMC_MSG_GetVoltageByDpmOverdrive,	0),
	MSG_MAP(SetMGpuFanBoostLimitRpm,	PPSMC_MSG_SetMGpuFanBoostLimitRpm,	0),
};
143 
/*
 * Mapping from generic SMU clock IDs to navi10 PPCLK indices.
 * Several generic names alias the same hardware clock:
 * SCLK -> GFXCLK and MCLK -> UCLK.
 * NOTE(review): FCLK maps to PPCLK_SOCCLK rather than a dedicated
 * FCLK index — looks intentional (no separate FCLK domain exposed
 * here), but confirm against the navi10 driver interface header.
 */
static struct cmn2asic_mapping navi10_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(GFXCLK, PPCLK_GFXCLK),
	CLK_MAP(SCLK,	PPCLK_GFXCLK),
	CLK_MAP(SOCCLK, PPCLK_SOCCLK),
	CLK_MAP(FCLK, PPCLK_SOCCLK),
	CLK_MAP(UCLK, PPCLK_UCLK),
	CLK_MAP(MCLK, PPCLK_UCLK),
	CLK_MAP(DCLK, PPCLK_DCLK),
	CLK_MAP(VCLK, PPCLK_VCLK),
	CLK_MAP(DCEFCLK, PPCLK_DCEFCLK),
	CLK_MAP(DISPCLK, PPCLK_DISPCLK),
	CLK_MAP(PIXCLK, PPCLK_PIXCLK),
	CLK_MAP(PHYCLK, PPCLK_PHYCLK),
};
158 
/*
 * Mapping from generic SMU feature bits (SMU_FEATURE_*_BIT) to the
 * navi10 firmware feature bits (FEATURE_*_BIT); FEA_MAP(X) pairs the
 * identically-named generic and ASIC-specific bits.
 */
static struct cmn2asic_mapping navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(DPM_PREFETCHER),
	FEA_MAP(DPM_GFXCLK),
	FEA_MAP(DPM_GFX_PACE),
	FEA_MAP(DPM_UCLK),
	FEA_MAP(DPM_SOCCLK),
	FEA_MAP(DPM_MP0CLK),
	FEA_MAP(DPM_LINK),
	FEA_MAP(DPM_DCEFCLK),
	FEA_MAP(MEM_VDDCI_SCALING),
	FEA_MAP(MEM_MVDD_SCALING),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_DCEFCLK),
	FEA_MAP(DS_UCLK),
	FEA_MAP(GFX_ULV),
	FEA_MAP(FW_DSTATE),
	FEA_MAP(GFXOFF),
	FEA_MAP(BACO),
	FEA_MAP(VCN_PG),
	FEA_MAP(JPEG_PG),
	FEA_MAP(USB_PG),
	FEA_MAP(RSMU_SMN_CG),
	FEA_MAP(PPT),
	FEA_MAP(TDC),
	FEA_MAP(GFX_EDC),
	FEA_MAP(APCC_PLUS),
	FEA_MAP(GTHR),
	FEA_MAP(ACDC),
	FEA_MAP(VR0HOT),
	FEA_MAP(VR1HOT),
	FEA_MAP(FW_CTF),
	FEA_MAP(FAN_CONTROL),
	FEA_MAP(THERMAL),
	FEA_MAP(GFX_DCS),
	FEA_MAP(RM),
	FEA_MAP(LED_DISPLAY),
	FEA_MAP(GFX_SS),
	FEA_MAP(OUT_OF_BAND_MONITOR),
	FEA_MAP(TEMP_DEPENDENT_VMIN),
	FEA_MAP(MMHUB_PG),
	FEA_MAP(ATHUB_PG),
	FEA_MAP(APCC_DFLL),
};
204 
/* Mapping from generic SMU table IDs (SMU_TABLE_*) to navi10 firmware table IDs. */
static struct cmn2asic_mapping navi10_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(PPTABLE),
	TAB_MAP(WATERMARKS),
	TAB_MAP(AVFS),
	TAB_MAP(AVFS_PSM_DEBUG),
	TAB_MAP(AVFS_FUSE_OVERRIDE),
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(DRIVER_SMU_CONFIG),
	TAB_MAP(ACTIVITY_MONITOR_COEFF),
	TAB_MAP(OVERDRIVE),
	TAB_MAP(I2C_COMMANDS),
	TAB_MAP(PACE),
};
219 
/* Mapping from generic power-source IDs to navi10 firmware power-source IDs. */
static struct cmn2asic_mapping navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
	PWR_MAP(AC),
	PWR_MAP(DC),
};
224 
/* Mapping from power-profile modes to the PPLIB workload bits the firmware expects. */
static struct cmn2asic_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT,	WORKLOAD_PPLIB_DEFAULT_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,		WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,		WORKLOAD_PPLIB_POWER_SAVING_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,		WORKLOAD_PPLIB_VIDEO_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,			WORKLOAD_PPLIB_VR_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,		WORKLOAD_PPLIB_COMPUTE_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,		WORKLOAD_PPLIB_CUSTOM_BIT),
};
234 
235 static bool is_asic_secure(struct smu_context *smu)
236 {
237 	struct amdgpu_device *adev = smu->adev;
238 	bool is_secure = true;
239 	uint32_t mp0_fw_intf;
240 
241 	mp0_fw_intf = RREG32_PCIE(MP0_Public |
242 				   (smnMP0_FW_INTF & 0xffffffff));
243 
244 	if (!(mp0_fw_intf & (1 << 19)))
245 		is_secure = false;
246 
247 	return is_secure;
248 }
249 
250 static int
251 navi10_get_allowed_feature_mask(struct smu_context *smu,
252 				  uint32_t *feature_mask, uint32_t num)
253 {
254 	struct amdgpu_device *adev = smu->adev;
255 
256 	if (num > 2)
257 		return -EINVAL;
258 
259 	memset(feature_mask, 0, sizeof(uint32_t) * num);
260 
261 	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT)
262 				| FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)
263 				| FEATURE_MASK(FEATURE_RSMU_SMN_CG_BIT)
264 				| FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
265 				| FEATURE_MASK(FEATURE_PPT_BIT)
266 				| FEATURE_MASK(FEATURE_TDC_BIT)
267 				| FEATURE_MASK(FEATURE_GFX_EDC_BIT)
268 				| FEATURE_MASK(FEATURE_APCC_PLUS_BIT)
269 				| FEATURE_MASK(FEATURE_VR0HOT_BIT)
270 				| FEATURE_MASK(FEATURE_FAN_CONTROL_BIT)
271 				| FEATURE_MASK(FEATURE_THERMAL_BIT)
272 				| FEATURE_MASK(FEATURE_LED_DISPLAY_BIT)
273 				| FEATURE_MASK(FEATURE_DS_LCLK_BIT)
274 				| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
275 				| FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
276 				| FEATURE_MASK(FEATURE_BACO_BIT)
277 				| FEATURE_MASK(FEATURE_GFX_SS_BIT)
278 				| FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
279 				| FEATURE_MASK(FEATURE_FW_CTF_BIT)
280 				| FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
281 
282 	if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
283 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
284 
285 	if (adev->pm.pp_feature & PP_SCLK_DPM_MASK)
286 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
287 
288 	if (adev->pm.pp_feature & PP_PCIE_DPM_MASK)
289 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);
290 
291 	if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
292 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT);
293 
294 	if (adev->pm.pp_feature & PP_MCLK_DPM_MASK)
295 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
296 				| FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
297 				| FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);
298 
299 	if (adev->pm.pp_feature & PP_ULV_MASK)
300 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
301 
302 	if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
303 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
304 
305 	if (adev->pm.pp_feature & PP_GFXOFF_MASK)
306 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
307 
308 	if (smu->adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
309 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MMHUB_PG_BIT);
310 
311 	if (smu->adev->pg_flags & AMD_PG_SUPPORT_ATHUB)
312 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT);
313 
314 	if (smu->adev->pg_flags & AMD_PG_SUPPORT_VCN)
315 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT);
316 
317 	if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
318 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT);
319 
320 	if (smu->dc_controlled_by_gpio)
321 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);
322 
323 	/* disable DPM UCLK and DS SOCCLK on navi10 A0 secure board */
324 	if (is_asic_secure(smu)) {
325 		/* only for navi10 A0 */
326 		if ((adev->asic_type == CHIP_NAVI10) &&
327 			(adev->rev_id == 0)) {
328 			*(uint64_t *)feature_mask &=
329 					~(FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
330 					  | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
331 					  | FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT));
332 			*(uint64_t *)feature_mask &=
333 					~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
334 		}
335 	}
336 
337 	return 0;
338 }
339 
340 static int navi10_check_powerplay_table(struct smu_context *smu)
341 {
342 	struct smu_table_context *table_context = &smu->smu_table;
343 	struct smu_11_0_powerplay_table *powerplay_table =
344 		table_context->power_play_table;
345 	struct smu_baco_context *smu_baco = &smu->smu_baco;
346 
347 	if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC)
348 		smu->dc_controlled_by_gpio = true;
349 
350 	mutex_lock(&smu_baco->mutex);
351 	if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
352 	    powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
353 		smu_baco->platform_support = true;
354 	mutex_unlock(&smu_baco->mutex);
355 
356 	table_context->thermal_controller_type =
357 		powerplay_table->thermal_controller_type;
358 
359 	/*
360 	 * Instead of having its own buffer space and get overdrive_table copied,
361 	 * smu->od_settings just points to the actual overdrive_table
362 	 */
363 	smu->od_settings = &powerplay_table->overdrive_table;
364 
365 	return 0;
366 }
367 
/*
 * Merge the board-specific smc_dpm_info data table from the VBIOS into
 * the driver pptable.  The layout copied starts at the I2cControllers
 * member and spans the rest of the atombios structure (everything after
 * its table header).
 *
 * Returns 0 on success, a negative errno if the atombios table cannot
 * be fetched or has an unsupported revision.
 */
static int navi10_append_powerplay_table(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	struct atom_smc_dpm_info_v4_5 *smc_dpm_table;
	struct atom_smc_dpm_info_v4_7 *smc_dpm_table_v4_7;
	int index, ret;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					   smc_dpm_info);

	ret = amdgpu_atombios_get_data_table(adev, index, NULL, NULL, NULL,
				      (uint8_t **)&smc_dpm_table);
	if (ret)
		return ret;

	dev_info(adev->dev, "smc_dpm_info table revision(format.content): %d.%d\n",
			smc_dpm_table->table_header.format_revision,
			smc_dpm_table->table_header.content_revision);

	/* only format revision 4 is understood here */
	if (smc_dpm_table->table_header.format_revision != 4) {
		dev_err(adev->dev, "smc_dpm_info table format revision is not 4!\n");
		return -EINVAL;
	}

	/* the content revision selects which v4_x struct layout to copy */
	switch (smc_dpm_table->table_header.content_revision) {
	case 5: /* nv10 and nv14 */
		memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
			sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header));
		break;
	case 7: /* nv12 */
		/* re-fetch the same table cast to the v4_7 layout */
		ret = amdgpu_atombios_get_data_table(adev, index, NULL, NULL, NULL,
					      (uint8_t **)&smc_dpm_table_v4_7);
		if (ret)
			return ret;
		memcpy(smc_pptable->I2cControllers, smc_dpm_table_v4_7->I2cControllers,
			sizeof(*smc_dpm_table_v4_7) - sizeof(smc_dpm_table_v4_7->table_header));
		break;
	default:
		dev_err(smu->adev->dev, "smc_dpm_info with unsupported content revision %d!\n",
				smc_dpm_table->table_header.content_revision);
		return -EINVAL;
	}

	if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
		/* TODO: remove it once SMU fw fix it */
		smc_pptable->DebugOverrides |= DPM_OVERRIDE_DISABLE_DFLL_PLL_SHUTDOWN;
	}

	return 0;
}
420 
421 static int navi10_store_powerplay_table(struct smu_context *smu)
422 {
423 	struct smu_table_context *table_context = &smu->smu_table;
424 	struct smu_11_0_powerplay_table *powerplay_table =
425 		table_context->power_play_table;
426 
427 	memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
428 	       sizeof(PPTable_t));
429 
430 	return 0;
431 }
432 
/*
 * Full pptable setup: fetch/parse via the common smu_v11_0 path, then
 * store, append board-specific data, and validate the result.
 */
static int navi10_setup_pptable(struct smu_context *smu)
{
	int ret;

	ret = smu_v11_0_setup_pptable(smu);
	if (ret)
		return ret;

	ret = navi10_store_powerplay_table(smu);
	if (ret)
		return ret;

	ret = navi10_append_powerplay_table(smu);
	if (ret)
		return ret;

	return navi10_check_powerplay_table(smu);
}
455 
456 static int navi10_tables_init(struct smu_context *smu)
457 {
458 	struct smu_table_context *smu_table = &smu->smu_table;
459 	struct smu_table *tables = smu_table->tables;
460 	struct amdgpu_device *adev = smu->adev;
461 
462 	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
463 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
464 	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
465 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
466 	if (adev->asic_type == CHIP_NAVI12)
467 		SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_NV12_t),
468 			       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
469 	else
470 		SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
471 			       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
472 	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
473 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
474 	SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
475 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
476 	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
477 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
478 	SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
479 		       sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
480 		       AMDGPU_GEM_DOMAIN_VRAM);
481 
482 	smu_table->metrics_table = kzalloc(adev->asic_type == CHIP_NAVI12 ?
483 					   sizeof(SmuMetrics_NV12_t) :
484 					   sizeof(SmuMetrics_t), GFP_KERNEL);
485 	if (!smu_table->metrics_table)
486 		goto err0_out;
487 	smu_table->metrics_time = 0;
488 
489 	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_0);
490 	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
491 	if (!smu_table->gpu_metrics_table)
492 		goto err1_out;
493 
494 	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
495 	if (!smu_table->watermarks_table)
496 		goto err2_out;
497 
498 	return 0;
499 
500 err2_out:
501 	kfree(smu_table->gpu_metrics_table);
502 err1_out:
503 	kfree(smu_table->metrics_table);
504 err0_out:
505 	return -ENOMEM;
506 }
507 
508 static int navi10_get_smu_metrics_data(struct smu_context *smu,
509 				       MetricsMember_t member,
510 				       uint32_t *value)
511 {
512 	struct smu_table_context *smu_table= &smu->smu_table;
513 	/*
514 	 * This works for NV12 also. As although NV12 uses a different
515 	 * SmuMetrics structure from other NV1X ASICs, they share the
516 	 * same offsets for the heading parts(those members used here).
517 	 */
518 	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
519 	int ret = 0;
520 
521 	mutex_lock(&smu->metrics_lock);
522 
523 	ret = smu_cmn_get_metrics_table_locked(smu,
524 					       NULL,
525 					       false);
526 	if (ret) {
527 		mutex_unlock(&smu->metrics_lock);
528 		return ret;
529 	}
530 
531 	switch (member) {
532 	case METRICS_CURR_GFXCLK:
533 		*value = metrics->CurrClock[PPCLK_GFXCLK];
534 		break;
535 	case METRICS_CURR_SOCCLK:
536 		*value = metrics->CurrClock[PPCLK_SOCCLK];
537 		break;
538 	case METRICS_CURR_UCLK:
539 		*value = metrics->CurrClock[PPCLK_UCLK];
540 		break;
541 	case METRICS_CURR_VCLK:
542 		*value = metrics->CurrClock[PPCLK_VCLK];
543 		break;
544 	case METRICS_CURR_DCLK:
545 		*value = metrics->CurrClock[PPCLK_DCLK];
546 		break;
547 	case METRICS_CURR_DCEFCLK:
548 		*value = metrics->CurrClock[PPCLK_DCEFCLK];
549 		break;
550 	case METRICS_AVERAGE_GFXCLK:
551 		*value = metrics->AverageGfxclkFrequency;
552 		break;
553 	case METRICS_AVERAGE_SOCCLK:
554 		*value = metrics->AverageSocclkFrequency;
555 		break;
556 	case METRICS_AVERAGE_UCLK:
557 		*value = metrics->AverageUclkFrequency;
558 		break;
559 	case METRICS_AVERAGE_GFXACTIVITY:
560 		*value = metrics->AverageGfxActivity;
561 		break;
562 	case METRICS_AVERAGE_MEMACTIVITY:
563 		*value = metrics->AverageUclkActivity;
564 		break;
565 	case METRICS_AVERAGE_SOCKETPOWER:
566 		*value = metrics->AverageSocketPower << 8;
567 		break;
568 	case METRICS_TEMPERATURE_EDGE:
569 		*value = metrics->TemperatureEdge *
570 			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
571 		break;
572 	case METRICS_TEMPERATURE_HOTSPOT:
573 		*value = metrics->TemperatureHotspot *
574 			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
575 		break;
576 	case METRICS_TEMPERATURE_MEM:
577 		*value = metrics->TemperatureMem *
578 			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
579 		break;
580 	case METRICS_TEMPERATURE_VRGFX:
581 		*value = metrics->TemperatureVrGfx *
582 			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
583 		break;
584 	case METRICS_TEMPERATURE_VRSOC:
585 		*value = metrics->TemperatureVrSoc *
586 			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
587 		break;
588 	case METRICS_THROTTLER_STATUS:
589 		*value = metrics->ThrottlerStatus;
590 		break;
591 	case METRICS_CURR_FANSPEED:
592 		*value = metrics->CurrFanSpeed;
593 		break;
594 	default:
595 		*value = UINT_MAX;
596 		break;
597 	}
598 
599 	mutex_unlock(&smu->metrics_lock);
600 
601 	return ret;
602 }
603 
604 static int navi10_allocate_dpm_context(struct smu_context *smu)
605 {
606 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
607 
608 	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
609 				       GFP_KERNEL);
610 	if (!smu_dpm->dpm_context)
611 		return -ENOMEM;
612 
613 	smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context);
614 
615 	return 0;
616 }
617 
/*
 * Initialize the SMC tables: register/allocate the navi10 tables,
 * allocate the DPM context, then run the common smu_v11_0 init.
 */
static int navi10_init_smc_tables(struct smu_context *smu)
{
	int ret;

	ret = navi10_tables_init(smu);
	if (ret)
		return ret;

	ret = navi10_allocate_dpm_context(smu);
	if (ret)
		return ret;

	return smu_v11_0_init_smc_tables(smu);
}
632 
633 static int navi10_set_default_dpm_table(struct smu_context *smu)
634 {
635 	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
636 	PPTable_t *driver_ppt = smu->smu_table.driver_pptable;
637 	struct smu_11_0_dpm_table *dpm_table;
638 	int ret = 0;
639 
640 	/* socclk dpm table setup */
641 	dpm_table = &dpm_context->dpm_tables.soc_table;
642 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
643 		ret = smu_v11_0_set_single_dpm_table(smu,
644 						     SMU_SOCCLK,
645 						     dpm_table);
646 		if (ret)
647 			return ret;
648 		dpm_table->is_fine_grained =
649 			!driver_ppt->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete;
650 	} else {
651 		dpm_table->count = 1;
652 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
653 		dpm_table->dpm_levels[0].enabled = true;
654 		dpm_table->min = dpm_table->dpm_levels[0].value;
655 		dpm_table->max = dpm_table->dpm_levels[0].value;
656 	}
657 
658 	/* gfxclk dpm table setup */
659 	dpm_table = &dpm_context->dpm_tables.gfx_table;
660 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
661 		ret = smu_v11_0_set_single_dpm_table(smu,
662 						     SMU_GFXCLK,
663 						     dpm_table);
664 		if (ret)
665 			return ret;
666 		dpm_table->is_fine_grained =
667 			!driver_ppt->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete;
668 	} else {
669 		dpm_table->count = 1;
670 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
671 		dpm_table->dpm_levels[0].enabled = true;
672 		dpm_table->min = dpm_table->dpm_levels[0].value;
673 		dpm_table->max = dpm_table->dpm_levels[0].value;
674 	}
675 
676 	/* uclk dpm table setup */
677 	dpm_table = &dpm_context->dpm_tables.uclk_table;
678 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
679 		ret = smu_v11_0_set_single_dpm_table(smu,
680 						     SMU_UCLK,
681 						     dpm_table);
682 		if (ret)
683 			return ret;
684 		dpm_table->is_fine_grained =
685 			!driver_ppt->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete;
686 	} else {
687 		dpm_table->count = 1;
688 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
689 		dpm_table->dpm_levels[0].enabled = true;
690 		dpm_table->min = dpm_table->dpm_levels[0].value;
691 		dpm_table->max = dpm_table->dpm_levels[0].value;
692 	}
693 
694 	/* vclk dpm table setup */
695 	dpm_table = &dpm_context->dpm_tables.vclk_table;
696 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
697 		ret = smu_v11_0_set_single_dpm_table(smu,
698 						     SMU_VCLK,
699 						     dpm_table);
700 		if (ret)
701 			return ret;
702 		dpm_table->is_fine_grained =
703 			!driver_ppt->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete;
704 	} else {
705 		dpm_table->count = 1;
706 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
707 		dpm_table->dpm_levels[0].enabled = true;
708 		dpm_table->min = dpm_table->dpm_levels[0].value;
709 		dpm_table->max = dpm_table->dpm_levels[0].value;
710 	}
711 
712 	/* dclk dpm table setup */
713 	dpm_table = &dpm_context->dpm_tables.dclk_table;
714 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
715 		ret = smu_v11_0_set_single_dpm_table(smu,
716 						     SMU_DCLK,
717 						     dpm_table);
718 		if (ret)
719 			return ret;
720 		dpm_table->is_fine_grained =
721 			!driver_ppt->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete;
722 	} else {
723 		dpm_table->count = 1;
724 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
725 		dpm_table->dpm_levels[0].enabled = true;
726 		dpm_table->min = dpm_table->dpm_levels[0].value;
727 		dpm_table->max = dpm_table->dpm_levels[0].value;
728 	}
729 
730 	/* dcefclk dpm table setup */
731 	dpm_table = &dpm_context->dpm_tables.dcef_table;
732 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
733 		ret = smu_v11_0_set_single_dpm_table(smu,
734 						     SMU_DCEFCLK,
735 						     dpm_table);
736 		if (ret)
737 			return ret;
738 		dpm_table->is_fine_grained =
739 			!driver_ppt->DpmDescriptor[PPCLK_DCEFCLK].SnapToDiscrete;
740 	} else {
741 		dpm_table->count = 1;
742 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
743 		dpm_table->dpm_levels[0].enabled = true;
744 		dpm_table->min = dpm_table->dpm_levels[0].value;
745 		dpm_table->max = dpm_table->dpm_levels[0].value;
746 	}
747 
748 	/* pixelclk dpm table setup */
749 	dpm_table = &dpm_context->dpm_tables.pixel_table;
750 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
751 		ret = smu_v11_0_set_single_dpm_table(smu,
752 						     SMU_PIXCLK,
753 						     dpm_table);
754 		if (ret)
755 			return ret;
756 		dpm_table->is_fine_grained =
757 			!driver_ppt->DpmDescriptor[PPCLK_PIXCLK].SnapToDiscrete;
758 	} else {
759 		dpm_table->count = 1;
760 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
761 		dpm_table->dpm_levels[0].enabled = true;
762 		dpm_table->min = dpm_table->dpm_levels[0].value;
763 		dpm_table->max = dpm_table->dpm_levels[0].value;
764 	}
765 
766 	/* displayclk dpm table setup */
767 	dpm_table = &dpm_context->dpm_tables.display_table;
768 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
769 		ret = smu_v11_0_set_single_dpm_table(smu,
770 						     SMU_DISPCLK,
771 						     dpm_table);
772 		if (ret)
773 			return ret;
774 		dpm_table->is_fine_grained =
775 			!driver_ppt->DpmDescriptor[PPCLK_DISPCLK].SnapToDiscrete;
776 	} else {
777 		dpm_table->count = 1;
778 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
779 		dpm_table->dpm_levels[0].enabled = true;
780 		dpm_table->min = dpm_table->dpm_levels[0].value;
781 		dpm_table->max = dpm_table->dpm_levels[0].value;
782 	}
783 
784 	/* phyclk dpm table setup */
785 	dpm_table = &dpm_context->dpm_tables.phy_table;
786 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
787 		ret = smu_v11_0_set_single_dpm_table(smu,
788 						     SMU_PHYCLK,
789 						     dpm_table);
790 		if (ret)
791 			return ret;
792 		dpm_table->is_fine_grained =
793 			!driver_ppt->DpmDescriptor[PPCLK_PHYCLK].SnapToDiscrete;
794 	} else {
795 		dpm_table->count = 1;
796 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
797 		dpm_table->dpm_levels[0].enabled = true;
798 		dpm_table->min = dpm_table->dpm_levels[0].value;
799 		dpm_table->max = dpm_table->dpm_levels[0].value;
800 	}
801 
802 	return 0;
803 }
804 
805 static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
806 {
807 	int ret = 0;
808 
809 	if (enable) {
810 		/* vcn dpm on is a prerequisite for vcn power gate messages */
811 		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
812 			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL);
813 			if (ret)
814 				return ret;
815 		}
816 	} else {
817 		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
818 			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
819 			if (ret)
820 				return ret;
821 		}
822 	}
823 
824 	return ret;
825 }
826 
827 static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
828 {
829 	int ret = 0;
830 
831 	if (enable) {
832 		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
833 			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL);
834 			if (ret)
835 				return ret;
836 		}
837 	} else {
838 		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
839 			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
840 			if (ret)
841 				return ret;
842 		}
843 	}
844 
845 	return ret;
846 }
847 
848 static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
849 				       enum smu_clk_type clk_type,
850 				       uint32_t *value)
851 {
852 	MetricsMember_t member_type;
853 	int clk_id = 0;
854 
855 	clk_id = smu_cmn_to_asic_specific_index(smu,
856 						CMN2ASIC_MAPPING_CLK,
857 						clk_type);
858 	if (clk_id < 0)
859 		return clk_id;
860 
861 	switch (clk_id) {
862 	case PPCLK_GFXCLK:
863 		member_type = METRICS_CURR_GFXCLK;
864 		break;
865 	case PPCLK_UCLK:
866 		member_type = METRICS_CURR_UCLK;
867 		break;
868 	case PPCLK_SOCCLK:
869 		member_type = METRICS_CURR_SOCCLK;
870 		break;
871 	case PPCLK_VCLK:
872 		member_type = METRICS_CURR_VCLK;
873 		break;
874 	case PPCLK_DCLK:
875 		member_type = METRICS_CURR_DCLK;
876 		break;
877 	case PPCLK_DCEFCLK:
878 		member_type = METRICS_CURR_DCEFCLK;
879 		break;
880 	default:
881 		return -EINVAL;
882 	}
883 
884 	return navi10_get_smu_metrics_data(smu,
885 					   member_type,
886 					   value);
887 }
888 
889 static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
890 {
891 	PPTable_t *pptable = smu->smu_table.driver_pptable;
892 	DpmDescriptor_t *dpm_desc = NULL;
893 	uint32_t clk_index = 0;
894 
895 	clk_index = smu_cmn_to_asic_specific_index(smu,
896 						   CMN2ASIC_MAPPING_CLK,
897 						   clk_type);
898 	dpm_desc = &pptable->DpmDescriptor[clk_index];
899 
900 	/* 0 - Fine grained DPM, 1 - Discrete DPM */
901 	return dpm_desc->SnapToDiscrete == 0 ? true : false;
902 }
903 
904 static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
905 {
906 	return od_table->cap[cap];
907 }
908 
909 static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_table,
910 					enum SMU_11_0_ODSETTING_ID setting,
911 					uint32_t *min, uint32_t *max)
912 {
913 	if (min)
914 		*min = od_table->min[setting];
915 	if (max)
916 		*max = od_table->max[setting];
917 }
918 
/*
 * navi10_print_clk_levels - format the DPM levels (or overdrive settings)
 * of @clk_type into @buf for sysfs consumption.
 *
 * Returns the number of characters written; on internal query failure the
 * partial size accumulated so far is returned (may be 0).
 */
static int navi10_print_clk_levels(struct smu_context *smu,
			enum smu_clk_type clk_type, char *buf)
{
	uint16_t *curve_settings;
	int i, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	uint32_t freq_values[3] = {0};
	uint32_t mark_index = 0;
	struct smu_table_context *table_context = &smu->smu_table;
	uint32_t gen_speed, lane_width;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
	OverDriveTable_t *od_table =
		(OverDriveTable_t *)table_context->overdrive_table;
	struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
	uint32_t min_value, max_value;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
	case SMU_SOCCLK:
	case SMU_MCLK:
	case SMU_UCLK:
	case SMU_FCLK:
	case SMU_DCEFCLK:
		ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
		if (ret)
			return size;

		ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count);
		if (ret)
			return size;

		if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
			/* Discrete DPM: list every level, starring the active one. */
			for (i = 0; i < count; i++) {
				ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
				if (ret)
					return size;

				size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
						cur_value == value ? "*" : "");
			}
		} else {
			/*
			 * Fine grained DPM: synthesize a 3-entry view
			 * (min, current-or-midpoint, max).
			 */
			ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
			if (ret)
				return size;
			ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
			if (ret)
				return size;

			freq_values[1] = cur_value;
			mark_index = cur_value == freq_values[0] ? 0 :
				     cur_value == freq_values[2] ? 2 : 1;
			/* When current matches an endpoint, show the midpoint instead. */
			if (mark_index != 1)
				freq_values[1] = (freq_values[0] + freq_values[2]) / 2;

			for (i = 0; i < 3; i++) {
				size += sprintf(buf + size, "%d: %uMhz %s\n", i, freq_values[i],
						i == mark_index ? "*" : "");
			}

		}
		break;
	case SMU_PCIE:
		/* One line per link level: gen speed, lane width, LCLK, active marker. */
		gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
		lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
		for (i = 0; i < NUM_LINK_LEVELS; i++)
			size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 3) ? "16.0GT/s," : "",
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 1) ? "x1" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 2) ? "x2" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 3) ? "x4" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 4) ? "x8" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 5) ? "x12" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 6) ? "x16" : "",
					pptable->LclkFreq[i],
					(gen_speed == dpm_context->dpm_tables.pcie_table.pcie_gen[i]) &&
					(lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
					"*" : "");
		break;
	case SMU_OD_SCLK:
		/* Overdrive output requires OD enabled and the relevant capability. */
		if (!smu->od_enabled || !od_table || !od_settings)
			break;
		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS))
			break;
		size += sprintf(buf + size, "OD_SCLK:\n");
		size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
		break;
	case SMU_OD_MCLK:
		if (!smu->od_enabled || !od_table || !od_settings)
			break;
		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX))
			break;
		size += sprintf(buf + size, "OD_MCLK:\n");
		size += sprintf(buf + size, "1: %uMHz\n", od_table->UclkFmax);
		break;
	case SMU_OD_VDDC_CURVE:
		if (!smu->od_enabled || !od_table || !od_settings)
			break;
		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE))
			break;
		size += sprintf(buf + size, "OD_VDDC_CURVE:\n");
		for (i = 0; i < 3; i++) {
			/* Each curve point is a (freq, voltage) pair laid out contiguously. */
			switch (i) {
			case 0:
				curve_settings = &od_table->GfxclkFreq1;
				break;
			case 1:
				curve_settings = &od_table->GfxclkFreq2;
				break;
			case 2:
				curve_settings = &od_table->GfxclkFreq3;
				break;
			default:
				break;
			}
			size += sprintf(buf + size, "%d: %uMHz %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE);
		}
		break;
	case SMU_OD_RANGE:
		if (!smu->od_enabled || !od_table || !od_settings)
			break;
		size = sprintf(buf, "%s:\n", "OD_RANGE");

		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
						    &min_value, NULL);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
						    NULL, &max_value);
			size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
					min_value, max_value);
		}

		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
						    &min_value, &max_value);
			size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
					min_value, max_value);
		}

		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
					min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
					min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
					min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
					min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
					min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
					min_value, max_value);
		}

		break;
	default:
		break;
	}

	return size;
}
1097 
1098 static int navi10_force_clk_levels(struct smu_context *smu,
1099 				   enum smu_clk_type clk_type, uint32_t mask)
1100 {
1101 
1102 	int ret = 0, size = 0;
1103 	uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;
1104 
1105 	soft_min_level = mask ? (ffs(mask) - 1) : 0;
1106 	soft_max_level = mask ? (fls(mask) - 1) : 0;
1107 
1108 	switch (clk_type) {
1109 	case SMU_GFXCLK:
1110 	case SMU_SCLK:
1111 	case SMU_SOCCLK:
1112 	case SMU_MCLK:
1113 	case SMU_UCLK:
1114 	case SMU_DCEFCLK:
1115 	case SMU_FCLK:
1116 		/* There is only 2 levels for fine grained DPM */
1117 		if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
1118 			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
1119 			soft_min_level = (soft_min_level >= 1 ? 1 : 0);
1120 		}
1121 
1122 		ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
1123 		if (ret)
1124 			return size;
1125 
1126 		ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
1127 		if (ret)
1128 			return size;
1129 
1130 		ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
1131 		if (ret)
1132 			return size;
1133 		break;
1134 	default:
1135 		break;
1136 	}
1137 
1138 	return size;
1139 }
1140 
1141 static int navi10_populate_umd_state_clk(struct smu_context *smu)
1142 {
1143 	struct smu_11_0_dpm_context *dpm_context =
1144 				smu->smu_dpm.dpm_context;
1145 	struct smu_11_0_dpm_table *gfx_table =
1146 				&dpm_context->dpm_tables.gfx_table;
1147 	struct smu_11_0_dpm_table *mem_table =
1148 				&dpm_context->dpm_tables.uclk_table;
1149 	struct smu_11_0_dpm_table *soc_table =
1150 				&dpm_context->dpm_tables.soc_table;
1151 	struct smu_umd_pstate_table *pstate_table =
1152 				&smu->pstate_table;
1153 	struct amdgpu_device *adev = smu->adev;
1154 	uint32_t sclk_freq;
1155 
1156 	pstate_table->gfxclk_pstate.min = gfx_table->min;
1157 	switch (adev->asic_type) {
1158 	case CHIP_NAVI10:
1159 		switch (adev->pdev->revision) {
1160 		case 0xf0: /* XTX */
1161 		case 0xc0:
1162 			sclk_freq = NAVI10_PEAK_SCLK_XTX;
1163 			break;
1164 		case 0xf1: /* XT */
1165 		case 0xc1:
1166 			sclk_freq = NAVI10_PEAK_SCLK_XT;
1167 			break;
1168 		default: /* XL */
1169 			sclk_freq = NAVI10_PEAK_SCLK_XL;
1170 			break;
1171 		}
1172 		break;
1173 	case CHIP_NAVI14:
1174 		switch (adev->pdev->revision) {
1175 		case 0xc7: /* XT */
1176 		case 0xf4:
1177 			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK;
1178 			break;
1179 		case 0xc1: /* XTM */
1180 		case 0xf2:
1181 			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK;
1182 			break;
1183 		case 0xc3: /* XLM */
1184 		case 0xf3:
1185 			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK;
1186 			break;
1187 		case 0xc5: /* XTX */
1188 		case 0xf6:
1189 			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK;
1190 			break;
1191 		default: /* XL */
1192 			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK;
1193 			break;
1194 		}
1195 		break;
1196 	case CHIP_NAVI12:
1197 		sclk_freq = NAVI12_UMD_PSTATE_PEAK_GFXCLK;
1198 		break;
1199 	default:
1200 		sclk_freq = gfx_table->dpm_levels[gfx_table->count - 1].value;
1201 		break;
1202 	}
1203 	pstate_table->gfxclk_pstate.peak = sclk_freq;
1204 
1205 	pstate_table->uclk_pstate.min = mem_table->min;
1206 	pstate_table->uclk_pstate.peak = mem_table->max;
1207 
1208 	pstate_table->socclk_pstate.min = soc_table->min;
1209 	pstate_table->socclk_pstate.peak = soc_table->max;
1210 
1211 	if (gfx_table->max > NAVI10_UMD_PSTATE_PROFILING_GFXCLK &&
1212 	    mem_table->max > NAVI10_UMD_PSTATE_PROFILING_MEMCLK &&
1213 	    soc_table->max > NAVI10_UMD_PSTATE_PROFILING_SOCCLK) {
1214 		pstate_table->gfxclk_pstate.standard =
1215 			NAVI10_UMD_PSTATE_PROFILING_GFXCLK;
1216 		pstate_table->uclk_pstate.standard =
1217 			NAVI10_UMD_PSTATE_PROFILING_MEMCLK;
1218 		pstate_table->socclk_pstate.standard =
1219 			NAVI10_UMD_PSTATE_PROFILING_SOCCLK;
1220 	} else {
1221 		pstate_table->gfxclk_pstate.standard =
1222 			pstate_table->gfxclk_pstate.min;
1223 		pstate_table->uclk_pstate.standard =
1224 			pstate_table->uclk_pstate.min;
1225 		pstate_table->socclk_pstate.standard =
1226 			pstate_table->socclk_pstate.min;
1227 	}
1228 
1229 	return 0;
1230 }
1231 
1232 static int navi10_get_clock_by_type_with_latency(struct smu_context *smu,
1233 						 enum smu_clk_type clk_type,
1234 						 struct pp_clock_levels_with_latency *clocks)
1235 {
1236 	int ret = 0, i = 0;
1237 	uint32_t level_count = 0, freq = 0;
1238 
1239 	switch (clk_type) {
1240 	case SMU_GFXCLK:
1241 	case SMU_DCEFCLK:
1242 	case SMU_SOCCLK:
1243 	case SMU_MCLK:
1244 	case SMU_UCLK:
1245 		ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &level_count);
1246 		if (ret)
1247 			return ret;
1248 
1249 		level_count = min(level_count, (uint32_t)MAX_NUM_CLOCKS);
1250 		clocks->num_levels = level_count;
1251 
1252 		for (i = 0; i < level_count; i++) {
1253 			ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &freq);
1254 			if (ret)
1255 				return ret;
1256 
1257 			clocks->data[i].clocks_in_khz = freq * 1000;
1258 			clocks->data[i].latency_in_us = 0;
1259 		}
1260 		break;
1261 	default:
1262 		break;
1263 	}
1264 
1265 	return ret;
1266 }
1267 
1268 static int navi10_pre_display_config_changed(struct smu_context *smu)
1269 {
1270 	int ret = 0;
1271 	uint32_t max_freq = 0;
1272 
1273 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
1274 	if (ret)
1275 		return ret;
1276 
1277 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
1278 		ret = smu_v11_0_get_dpm_ultimate_freq(smu, SMU_UCLK, NULL, &max_freq);
1279 		if (ret)
1280 			return ret;
1281 		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, max_freq);
1282 		if (ret)
1283 			return ret;
1284 	}
1285 
1286 	return ret;
1287 }
1288 
1289 static int navi10_display_config_changed(struct smu_context *smu)
1290 {
1291 	int ret = 0;
1292 
1293 	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1294 	    smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
1295 	    smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
1296 		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
1297 						  smu->display_config->num_display,
1298 						  NULL);
1299 		if (ret)
1300 			return ret;
1301 	}
1302 
1303 	return ret;
1304 }
1305 
1306 static int navi10_get_gpu_power(struct smu_context *smu, uint32_t *value)
1307 {
1308 	if (!value)
1309 		return -EINVAL;
1310 
1311 	return navi10_get_smu_metrics_data(smu,
1312 					   METRICS_AVERAGE_SOCKETPOWER,
1313 					   value);
1314 }
1315 
1316 static int navi10_get_current_activity_percent(struct smu_context *smu,
1317 					       enum amd_pp_sensors sensor,
1318 					       uint32_t *value)
1319 {
1320 	int ret = 0;
1321 
1322 	if (!value)
1323 		return -EINVAL;
1324 
1325 	switch (sensor) {
1326 	case AMDGPU_PP_SENSOR_GPU_LOAD:
1327 		ret = navi10_get_smu_metrics_data(smu,
1328 						  METRICS_AVERAGE_GFXACTIVITY,
1329 						  value);
1330 		break;
1331 	case AMDGPU_PP_SENSOR_MEM_LOAD:
1332 		ret = navi10_get_smu_metrics_data(smu,
1333 						  METRICS_AVERAGE_MEMACTIVITY,
1334 						  value);
1335 		break;
1336 	default:
1337 		dev_err(smu->adev->dev, "Invalid sensor for retrieving clock activity\n");
1338 		return -EINVAL;
1339 	}
1340 
1341 	return ret;
1342 }
1343 
1344 static bool navi10_is_dpm_running(struct smu_context *smu)
1345 {
1346 	int ret = 0;
1347 	uint32_t feature_mask[2];
1348 	uint64_t feature_enabled;
1349 
1350 	ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
1351 	if (ret)
1352 		return false;
1353 
1354 	feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
1355 
1356 	return !!(feature_enabled & SMC_DPM_FEATURE);
1357 }
1358 
1359 static int navi10_get_fan_speed_rpm(struct smu_context *smu,
1360 				    uint32_t *speed)
1361 {
1362 	if (!speed)
1363 		return -EINVAL;
1364 
1365 	switch (smu_v11_0_get_fan_control_mode(smu)) {
1366 	case AMD_FAN_CTRL_AUTO:
1367 		return navi10_get_smu_metrics_data(smu,
1368 						   METRICS_CURR_FANSPEED,
1369 						   speed);
1370 	default:
1371 		return smu_v11_0_get_fan_speed_rpm(smu, speed);
1372 	}
1373 }
1374 
1375 static int navi10_get_fan_parameters(struct smu_context *smu)
1376 {
1377 	PPTable_t *pptable = smu->smu_table.driver_pptable;
1378 
1379 	smu->fan_max_rpm = pptable->FanMaximumRpm;
1380 
1381 	return 0;
1382 }
1383 
/*
 * navi10_get_power_profile_mode - print every power profile's activity
 * monitor coefficients (GFXCLK/SOCCLK/MEMLK rows) into @buf; the active
 * profile is starred.  Returns bytes written or a negative error.
 */
static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
{
	DpmActivityMonitorCoeffInt_t activity_monitor;
	uint32_t i, size = 0;
	int16_t workload_type = 0;
	static const char *profile_name[] = {
					"BOOTUP_DEFAULT",
					"3D_FULL_SCREEN",
					"POWER_SAVING",
					"VIDEO",
					"VR",
					"COMPUTE",
					"CUSTOM"};
	static const char *title[] = {
			"PROFILE_INDEX(NAME)",
			"CLOCK_TYPE(NAME)",
			"FPS",
			"MinFreqType",
			"MinActiveFreqType",
			"MinActiveFreq",
			"BoosterFreqType",
			"BoosterFreq",
			"PD_Data_limit_c",
			"PD_Data_error_coeff",
			"PD_Data_error_rate_coeff"};
	int result = 0;

	if (!buf)
		return -EINVAL;

	/* Column header row. */
	size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
			title[0], title[1], title[2], title[3], title[4], title[5],
			title[6], title[7], title[8], title[9], title[10]);

	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
		workload_type = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_WORKLOAD,
							       i);
		if (workload_type < 0)
			return -EINVAL;

		/* Pull this workload's coefficient table from the SMU. */
		result = smu_cmn_update_table(smu,
					  SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
					  (void *)(&activity_monitor), false);
		if (result) {
			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
			return result;
		}

		/* Profile header; '*' marks the currently selected profile. */
		size += sprintf(buf + size, "%2d %14s%s:\n",
			i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");

		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			0,
			"GFXCLK",
			activity_monitor.Gfx_FPS,
			activity_monitor.Gfx_MinFreqStep,
			activity_monitor.Gfx_MinActiveFreqType,
			activity_monitor.Gfx_MinActiveFreq,
			activity_monitor.Gfx_BoosterFreqType,
			activity_monitor.Gfx_BoosterFreq,
			activity_monitor.Gfx_PD_Data_limit_c,
			activity_monitor.Gfx_PD_Data_error_coeff,
			activity_monitor.Gfx_PD_Data_error_rate_coeff);

		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			1,
			"SOCCLK",
			activity_monitor.Soc_FPS,
			activity_monitor.Soc_MinFreqStep,
			activity_monitor.Soc_MinActiveFreqType,
			activity_monitor.Soc_MinActiveFreq,
			activity_monitor.Soc_BoosterFreqType,
			activity_monitor.Soc_BoosterFreq,
			activity_monitor.Soc_PD_Data_limit_c,
			activity_monitor.Soc_PD_Data_error_coeff,
			activity_monitor.Soc_PD_Data_error_rate_coeff);

		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			2,
			"MEMLK",
			activity_monitor.Mem_FPS,
			activity_monitor.Mem_MinFreqStep,
			activity_monitor.Mem_MinActiveFreqType,
			activity_monitor.Mem_MinActiveFreq,
			activity_monitor.Mem_BoosterFreqType,
			activity_monitor.Mem_BoosterFreq,
			activity_monitor.Mem_PD_Data_limit_c,
			activity_monitor.Mem_PD_Data_error_coeff,
			activity_monitor.Mem_PD_Data_error_rate_coeff);
	}

	return size;
}
1482 
1483 static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
1484 {
1485 	DpmActivityMonitorCoeffInt_t activity_monitor;
1486 	int workload_type, ret = 0;
1487 
1488 	smu->power_profile_mode = input[size];
1489 
1490 	if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
1491 		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
1492 		return -EINVAL;
1493 	}
1494 
1495 	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1496 
1497 		ret = smu_cmn_update_table(smu,
1498 				       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
1499 				       (void *)(&activity_monitor), false);
1500 		if (ret) {
1501 			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
1502 			return ret;
1503 		}
1504 
1505 		switch (input[0]) {
1506 		case 0: /* Gfxclk */
1507 			activity_monitor.Gfx_FPS = input[1];
1508 			activity_monitor.Gfx_MinFreqStep = input[2];
1509 			activity_monitor.Gfx_MinActiveFreqType = input[3];
1510 			activity_monitor.Gfx_MinActiveFreq = input[4];
1511 			activity_monitor.Gfx_BoosterFreqType = input[5];
1512 			activity_monitor.Gfx_BoosterFreq = input[6];
1513 			activity_monitor.Gfx_PD_Data_limit_c = input[7];
1514 			activity_monitor.Gfx_PD_Data_error_coeff = input[8];
1515 			activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
1516 			break;
1517 		case 1: /* Socclk */
1518 			activity_monitor.Soc_FPS = input[1];
1519 			activity_monitor.Soc_MinFreqStep = input[2];
1520 			activity_monitor.Soc_MinActiveFreqType = input[3];
1521 			activity_monitor.Soc_MinActiveFreq = input[4];
1522 			activity_monitor.Soc_BoosterFreqType = input[5];
1523 			activity_monitor.Soc_BoosterFreq = input[6];
1524 			activity_monitor.Soc_PD_Data_limit_c = input[7];
1525 			activity_monitor.Soc_PD_Data_error_coeff = input[8];
1526 			activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
1527 			break;
1528 		case 2: /* Memlk */
1529 			activity_monitor.Mem_FPS = input[1];
1530 			activity_monitor.Mem_MinFreqStep = input[2];
1531 			activity_monitor.Mem_MinActiveFreqType = input[3];
1532 			activity_monitor.Mem_MinActiveFreq = input[4];
1533 			activity_monitor.Mem_BoosterFreqType = input[5];
1534 			activity_monitor.Mem_BoosterFreq = input[6];
1535 			activity_monitor.Mem_PD_Data_limit_c = input[7];
1536 			activity_monitor.Mem_PD_Data_error_coeff = input[8];
1537 			activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
1538 			break;
1539 		}
1540 
1541 		ret = smu_cmn_update_table(smu,
1542 				       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
1543 				       (void *)(&activity_monitor), true);
1544 		if (ret) {
1545 			dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
1546 			return ret;
1547 		}
1548 	}
1549 
1550 	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
1551 	workload_type = smu_cmn_to_asic_specific_index(smu,
1552 						       CMN2ASIC_MAPPING_WORKLOAD,
1553 						       smu->power_profile_mode);
1554 	if (workload_type < 0)
1555 		return -EINVAL;
1556 	smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
1557 				    1 << workload_type, NULL);
1558 
1559 	return ret;
1560 }
1561 
/*
 * navi10_notify_smc_display_config - push the display engine's minimum
 * clock requirements (DCEF hard min, DCEF deep-sleep min, UCLK hard min)
 * down to the SMU.
 */
static int navi10_notify_smc_display_config(struct smu_context *smu)
{
	struct smu_clocks min_clocks = {0};
	struct pp_display_clock_request clock_req;
	int ret = 0;

	min_clocks.dcef_clock = smu->display_config->min_dcef_set_clk;
	min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
	min_clocks.memory_clock = smu->display_config->min_mem_set_clock;

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		clock_req.clock_type = amd_pp_dcef_clock;
		/* display config clocks are in 10 kHz units */
		clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;

		ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
		if (!ret) {
			/* Deep-sleep min is only sent after the hard min succeeded. */
			if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
				ret = smu_cmn_send_smc_msg_with_param(smu,
								  SMU_MSG_SetMinDeepSleepDcefclk,
								  min_clocks.dcef_clock_in_sr/100,
								  NULL);
				if (ret) {
					dev_err(smu->adev->dev, "Attempt to set divider for DCEFCLK Failed!");
					return ret;
				}
			}
		} else {
			/* Non-fatal: log and continue to the UCLK request below. */
			dev_info(smu->adev->dev, "Attempt to set Hard Min for DCEFCLK Failed!");
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		/* max = 0 leaves the upper bound untouched. */
		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_clocks.memory_clock/100, 0);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] Set hard min uclk failed!", __func__);
			return ret;
		}
	}

	return 0;
}
1603 
/*
 * navi10_set_watermarks_table - copy the display driver's watermark clock
 * ranges into the SMU watermarks table and upload it once.
 *
 * Row layout: WatermarkRow[1] holds the DMIF (dcfclk) sets,
 * WatermarkRow[0] holds the MCIF (socclk) sets; at most 4 of each.
 * The table is written to the SMU only when watermarks exist and have
 * not been loaded yet (WATERMARKS_EXIST set, WATERMARKS_LOADED clear).
 */
static int navi10_set_watermarks_table(struct smu_context *smu,
				       struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
{
	Watermarks_t *table = smu->smu_table.watermarks_table;
	int ret = 0;
	int i;

	if (clock_ranges) {
		if (clock_ranges->num_wm_dmif_sets > 4 ||
		    clock_ranges->num_wm_mcif_sets > 4)
			return -EINVAL;

		/* DMIF sets: ranges arrive in kHz, the SMU table wants MHz. */
		for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
			table->WatermarkRow[1][i].MinClock =
				cpu_to_le16((uint16_t)
				(clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
				1000));
			table->WatermarkRow[1][i].MaxClock =
				cpu_to_le16((uint16_t)
				(clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
				1000));
			table->WatermarkRow[1][i].MinUclk =
				cpu_to_le16((uint16_t)
				(clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
				1000));
			table->WatermarkRow[1][i].MaxUclk =
				cpu_to_le16((uint16_t)
				(clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
				1000));
			table->WatermarkRow[1][i].WmSetting = (uint8_t)
					clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
		}

		/* MCIF sets: same conversion, socclk instead of dcfclk. */
		for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
			table->WatermarkRow[0][i].MinClock =
				cpu_to_le16((uint16_t)
				(clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
				1000));
			table->WatermarkRow[0][i].MaxClock =
				cpu_to_le16((uint16_t)
				(clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
				1000));
			table->WatermarkRow[0][i].MinUclk =
				cpu_to_le16((uint16_t)
				(clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
				1000));
			table->WatermarkRow[0][i].MaxUclk =
				cpu_to_le16((uint16_t)
				(clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
				1000));
			table->WatermarkRow[0][i].WmSetting = (uint8_t)
					clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
		}

		smu->watermarks_bitmap |= WATERMARKS_EXIST;
	}

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	     !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_cmn_write_watermarks_table(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to update WMTABLE!");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	return 0;
}
1674 
1675 static int navi10_thermal_get_temperature(struct smu_context *smu,
1676 					     enum amd_pp_sensors sensor,
1677 					     uint32_t *value)
1678 {
1679 	int ret = 0;
1680 
1681 	if (!value)
1682 		return -EINVAL;
1683 
1684 	switch (sensor) {
1685 	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
1686 		ret = navi10_get_smu_metrics_data(smu,
1687 						  METRICS_TEMPERATURE_HOTSPOT,
1688 						  value);
1689 		break;
1690 	case AMDGPU_PP_SENSOR_EDGE_TEMP:
1691 		ret = navi10_get_smu_metrics_data(smu,
1692 						  METRICS_TEMPERATURE_EDGE,
1693 						  value);
1694 		break;
1695 	case AMDGPU_PP_SENSOR_MEM_TEMP:
1696 		ret = navi10_get_smu_metrics_data(smu,
1697 						  METRICS_TEMPERATURE_MEM,
1698 						  value);
1699 		break;
1700 	default:
1701 		dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n");
1702 		return -EINVAL;
1703 	}
1704 
1705 	return ret;
1706 }
1707 
/*
 * navi10_read_sensor - amd_pp sensor dispatcher; fills @data (and @size,
 * always 4 bytes here) for the requested sensor under smu->sensor_lock.
 */
static int navi10_read_sensor(struct smu_context *smu,
				 enum amd_pp_sensors sensor,
				 void *data, uint32_t *size)
{
	int ret = 0;
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;

	if(!data || !size)
		return -EINVAL;

	/* Serialize concurrent sensor reads. */
	mutex_lock(&smu->sensor_lock);
	switch (sensor) {
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		*(uint32_t *)data = pptable->FanMaximumRpm;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_LOAD:
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = navi10_get_current_activity_percent(smu, sensor, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		ret = navi10_get_gpu_power(smu, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = navi10_thermal_get_temperature(smu, sensor, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		/* metrics report MHz/100-style units; scale to the expected unit */
		ret = navi10_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = navi10_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
		*size = 4;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&smu->sensor_lock);

	return ret;
}
1762 
1763 static int navi10_get_uclk_dpm_states(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states)
1764 {
1765 	uint32_t num_discrete_levels = 0;
1766 	uint16_t *dpm_levels = NULL;
1767 	uint16_t i = 0;
1768 	struct smu_table_context *table_context = &smu->smu_table;
1769 	PPTable_t *driver_ppt = NULL;
1770 
1771 	if (!clocks_in_khz || !num_states || !table_context->driver_pptable)
1772 		return -EINVAL;
1773 
1774 	driver_ppt = table_context->driver_pptable;
1775 	num_discrete_levels = driver_ppt->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels;
1776 	dpm_levels = driver_ppt->FreqTableUclk;
1777 
1778 	if (num_discrete_levels == 0 || dpm_levels == NULL)
1779 		return -EINVAL;
1780 
1781 	*num_states = num_discrete_levels;
1782 	for (i = 0; i < num_discrete_levels; i++) {
1783 		/* convert to khz */
1784 		*clocks_in_khz = (*dpm_levels) * 1000;
1785 		clocks_in_khz++;
1786 		dpm_levels++;
1787 	}
1788 
1789 	return 0;
1790 }
1791 
1792 static int navi10_get_thermal_temperature_range(struct smu_context *smu,
1793 						struct smu_temperature_range *range)
1794 {
1795 	struct smu_table_context *table_context = &smu->smu_table;
1796 	struct smu_11_0_powerplay_table *powerplay_table =
1797 				table_context->power_play_table;
1798 	PPTable_t *pptable = smu->smu_table.driver_pptable;
1799 
1800 	if (!range)
1801 		return -EINVAL;
1802 
1803 	memcpy(range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));
1804 
1805 	range->max = pptable->TedgeLimit *
1806 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1807 	range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
1808 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1809 	range->hotspot_crit_max = pptable->ThotspotLimit *
1810 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1811 	range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
1812 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1813 	range->mem_crit_max = pptable->TmemLimit *
1814 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1815 	range->mem_emergency_max = (pptable->TmemLimit + CTF_OFFSET_MEM)*
1816 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1817 	range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
1818 
1819 	return 0;
1820 }
1821 
1822 static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
1823 						bool disable_memory_clock_switch)
1824 {
1825 	int ret = 0;
1826 	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks =
1827 		(struct smu_11_0_max_sustainable_clocks *)
1828 			smu->smu_table.max_sustainable_clocks;
1829 	uint32_t min_memory_clock = smu->hard_min_uclk_req_from_dal;
1830 	uint32_t max_memory_clock = max_sustainable_clocks->uclock;
1831 
1832 	if(smu->disable_uclk_switch == disable_memory_clock_switch)
1833 		return 0;
1834 
1835 	if(disable_memory_clock_switch)
1836 		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, max_memory_clock, 0);
1837 	else
1838 		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_memory_clock, 0);
1839 
1840 	if(!ret)
1841 		smu->disable_uclk_switch = disable_memory_clock_switch;
1842 
1843 	return ret;
1844 }
1845 
/*
 * Determine the current and maximum power (PPT) limits.
 *
 * current_power_limit comes from the SMC when available, otherwise from
 * the pptable's AC PPT0 default.  max_power_limit is the same value,
 * optionally scaled up by the board's overdrive power percentage when
 * OD is enabled and supports raising the limit.
 *
 * Returns 0 on success, -EINVAL if neither the SMC nor the pptable can
 * provide a limit.
 */
static int navi10_get_power_limit(struct smu_context *smu)
{
	struct smu_11_0_powerplay_table *powerplay_table =
		(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
	struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	uint32_t power_limit, od_percent;

	if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
		/* the last hope to figure out the ppt limit */
		if (!pptable) {
			dev_err(smu->adev->dev, "Cannot get PPT limit due to pptable missing!");
			return -EINVAL;
		}
		power_limit =
			pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
	}
	smu->current_power_limit = power_limit;

	if (smu->od_enabled &&
	    navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
		od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);

		dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);

		/* Scale the default limit up by the allowed OD percentage. */
		power_limit *= (100 + od_percent);
		power_limit /= 100;
	}
	smu->max_power_limit = power_limit;

	return 0;
}
1878 
1879 static int navi10_update_pcie_parameters(struct smu_context *smu,
1880 				     uint32_t pcie_gen_cap,
1881 				     uint32_t pcie_width_cap)
1882 {
1883 	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
1884 	PPTable_t *pptable = smu->smu_table.driver_pptable;
1885 	uint32_t smu_pcie_arg;
1886 	int ret, i;
1887 
1888 	/* lclk dpm table setup */
1889 	for (i = 0; i < MAX_PCIE_CONF; i++) {
1890 		dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pptable->PcieGenSpeed[i];
1891 		dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pptable->PcieLaneCount[i];
1892 	}
1893 
1894 	for (i = 0; i < NUM_LINK_LEVELS; i++) {
1895 		smu_pcie_arg = (i << 16) |
1896 			((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
1897 				(pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
1898 					pptable->PcieLaneCount[i] : pcie_width_cap);
1899 		ret = smu_cmn_send_smc_msg_with_param(smu,
1900 					  SMU_MSG_OverridePcieParameters,
1901 					  smu_pcie_arg,
1902 					  NULL);
1903 
1904 		if (ret)
1905 			return ret;
1906 
1907 		if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
1908 			dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
1909 		if (pptable->PcieLaneCount[i] > pcie_width_cap)
1910 			dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
1911 	}
1912 
1913 	return 0;
1914 }
1915 
/* Log the current overdrive table contents at debug level. */
static inline void navi10_dump_od_table(struct smu_context *smu,
					OverDriveTable_t *od_table)
{
	dev_dbg(smu->adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
	dev_dbg(smu->adev->dev, "OD: Gfx1: (%d, %d)\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1);
	dev_dbg(smu->adev->dev, "OD: Gfx2: (%d, %d)\n", od_table->GfxclkFreq2, od_table->GfxclkVolt2);
	dev_dbg(smu->adev->dev, "OD: Gfx3: (%d, %d)\n", od_table->GfxclkFreq3, od_table->GfxclkVolt3);
	dev_dbg(smu->adev->dev, "OD: UclkFmax: %d\n", od_table->UclkFmax);
	dev_dbg(smu->adev->dev, "OD: OverDrivePct: %d\n", od_table->OverDrivePct);
}
1926 
1927 static int navi10_od_setting_check_range(struct smu_context *smu,
1928 					 struct smu_11_0_overdrive_table *od_table,
1929 					 enum SMU_11_0_ODSETTING_ID setting,
1930 					 uint32_t value)
1931 {
1932 	if (value < od_table->min[setting]) {
1933 		dev_warn(smu->adev->dev, "OD setting (%d, %d) is less than the minimum allowed (%d)\n", setting, value, od_table->min[setting]);
1934 		return -EINVAL;
1935 	}
1936 	if (value > od_table->max[setting]) {
1937 		dev_warn(smu->adev->dev, "OD setting (%d, %d) is greater than the maximum allowed (%d)\n", setting, value, od_table->max[setting]);
1938 		return -EINVAL;
1939 	}
1940 	return 0;
1941 }
1942 
1943 static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
1944 						     uint16_t *voltage,
1945 						     uint32_t freq)
1946 {
1947 	uint32_t param = (freq & 0xFFFF) | (PPCLK_GFXCLK << 16);
1948 	uint32_t value = 0;
1949 	int ret;
1950 
1951 	ret = smu_cmn_send_smc_msg_with_param(smu,
1952 					  SMU_MSG_GetVoltageByDpm,
1953 					  param,
1954 					  &value);
1955 	if (ret) {
1956 		dev_err(smu->adev->dev, "[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
1957 		return ret;
1958 	}
1959 
1960 	*voltage = (uint16_t)value;
1961 
1962 	return 0;
1963 }
1964 
1965 static bool navi10_is_baco_supported(struct smu_context *smu)
1966 {
1967 	struct amdgpu_device *adev = smu->adev;
1968 	uint32_t val;
1969 
1970 	if (amdgpu_sriov_vf(adev) || (!smu_v11_0_baco_is_support(smu)))
1971 		return false;
1972 
1973 	val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
1974 	return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false;
1975 }
1976 
1977 static int navi10_set_default_od_settings(struct smu_context *smu)
1978 {
1979 	OverDriveTable_t *od_table =
1980 		(OverDriveTable_t *)smu->smu_table.overdrive_table;
1981 	OverDriveTable_t *boot_od_table =
1982 		(OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
1983 	int ret = 0;
1984 
1985 	ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, false);
1986 	if (ret) {
1987 		dev_err(smu->adev->dev, "Failed to get overdrive table!\n");
1988 		return ret;
1989 	}
1990 
1991 	if (!od_table->GfxclkVolt1) {
1992 		ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
1993 								&od_table->GfxclkVolt1,
1994 								od_table->GfxclkFreq1);
1995 		if (ret)
1996 			return ret;
1997 	}
1998 
1999 	if (!od_table->GfxclkVolt2) {
2000 		ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
2001 								&od_table->GfxclkVolt2,
2002 								od_table->GfxclkFreq2);
2003 		if (ret)
2004 			return ret;
2005 	}
2006 
2007 	if (!od_table->GfxclkVolt3) {
2008 		ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
2009 								&od_table->GfxclkVolt3,
2010 								od_table->GfxclkFreq3);
2011 		if (ret)
2012 			return ret;
2013 	}
2014 
2015 	memcpy(boot_od_table, od_table, sizeof(OverDriveTable_t));
2016 
2017 	navi10_dump_od_table(smu, od_table);
2018 
2019 	return 0;
2020 }
2021 
2022 static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) {
2023 	int i;
2024 	int ret = 0;
2025 	struct smu_table_context *table_context = &smu->smu_table;
2026 	OverDriveTable_t *od_table;
2027 	struct smu_11_0_overdrive_table *od_settings;
2028 	enum SMU_11_0_ODSETTING_ID freq_setting, voltage_setting;
2029 	uint16_t *freq_ptr, *voltage_ptr;
2030 	od_table = (OverDriveTable_t *)table_context->overdrive_table;
2031 
2032 	if (!smu->od_enabled) {
2033 		dev_warn(smu->adev->dev, "OverDrive is not enabled!\n");
2034 		return -EINVAL;
2035 	}
2036 
2037 	if (!smu->od_settings) {
2038 		dev_err(smu->adev->dev, "OD board limits are not set!\n");
2039 		return -ENOENT;
2040 	}
2041 
2042 	od_settings = smu->od_settings;
2043 
2044 	switch (type) {
2045 	case PP_OD_EDIT_SCLK_VDDC_TABLE:
2046 		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
2047 			dev_warn(smu->adev->dev, "GFXCLK_LIMITS not supported!\n");
2048 			return -ENOTSUPP;
2049 		}
2050 		if (!table_context->overdrive_table) {
2051 			dev_err(smu->adev->dev, "Overdrive is not initialized\n");
2052 			return -EINVAL;
2053 		}
2054 		for (i = 0; i < size; i += 2) {
2055 			if (i + 2 > size) {
2056 				dev_info(smu->adev->dev, "invalid number of input parameters %d\n", size);
2057 				return -EINVAL;
2058 			}
2059 			switch (input[i]) {
2060 			case 0:
2061 				freq_setting = SMU_11_0_ODSETTING_GFXCLKFMIN;
2062 				freq_ptr = &od_table->GfxclkFmin;
2063 				if (input[i + 1] > od_table->GfxclkFmax) {
2064 					dev_info(smu->adev->dev, "GfxclkFmin (%ld) must be <= GfxclkFmax (%u)!\n",
2065 						input[i + 1],
2066 						od_table->GfxclkFmin);
2067 					return -EINVAL;
2068 				}
2069 				break;
2070 			case 1:
2071 				freq_setting = SMU_11_0_ODSETTING_GFXCLKFMAX;
2072 				freq_ptr = &od_table->GfxclkFmax;
2073 				if (input[i + 1] < od_table->GfxclkFmin) {
2074 					dev_info(smu->adev->dev, "GfxclkFmax (%ld) must be >= GfxclkFmin (%u)!\n",
2075 						input[i + 1],
2076 						od_table->GfxclkFmax);
2077 					return -EINVAL;
2078 				}
2079 				break;
2080 			default:
2081 				dev_info(smu->adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
2082 				dev_info(smu->adev->dev, "Supported indices: [0:min,1:max]\n");
2083 				return -EINVAL;
2084 			}
2085 			ret = navi10_od_setting_check_range(smu, od_settings, freq_setting, input[i + 1]);
2086 			if (ret)
2087 				return ret;
2088 			*freq_ptr = input[i + 1];
2089 		}
2090 		break;
2091 	case PP_OD_EDIT_MCLK_VDDC_TABLE:
2092 		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
2093 			dev_warn(smu->adev->dev, "UCLK_MAX not supported!\n");
2094 			return -ENOTSUPP;
2095 		}
2096 		if (size < 2) {
2097 			dev_info(smu->adev->dev, "invalid number of parameters: %d\n", size);
2098 			return -EINVAL;
2099 		}
2100 		if (input[0] != 1) {
2101 			dev_info(smu->adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[0]);
2102 			dev_info(smu->adev->dev, "Supported indices: [1:max]\n");
2103 			return -EINVAL;
2104 		}
2105 		ret = navi10_od_setting_check_range(smu, od_settings, SMU_11_0_ODSETTING_UCLKFMAX, input[1]);
2106 		if (ret)
2107 			return ret;
2108 		od_table->UclkFmax = input[1];
2109 		break;
2110 	case PP_OD_RESTORE_DEFAULT_TABLE:
2111 		if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) {
2112 			dev_err(smu->adev->dev, "Overdrive table was not initialized!\n");
2113 			return -EINVAL;
2114 		}
2115 		memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t));
2116 		break;
2117 	case PP_OD_COMMIT_DPM_TABLE:
2118 		navi10_dump_od_table(smu, od_table);
2119 		ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
2120 		if (ret) {
2121 			dev_err(smu->adev->dev, "Failed to import overdrive table!\n");
2122 			return ret;
2123 		}
2124 		break;
2125 	case PP_OD_EDIT_VDDC_CURVE:
2126 		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
2127 			dev_warn(smu->adev->dev, "GFXCLK_CURVE not supported!\n");
2128 			return -ENOTSUPP;
2129 		}
2130 		if (size < 3) {
2131 			dev_info(smu->adev->dev, "invalid number of parameters: %d\n", size);
2132 			return -EINVAL;
2133 		}
2134 		if (!od_table) {
2135 			dev_info(smu->adev->dev, "Overdrive is not initialized\n");
2136 			return -EINVAL;
2137 		}
2138 
2139 		switch (input[0]) {
2140 		case 0:
2141 			freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1;
2142 			voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1;
2143 			freq_ptr = &od_table->GfxclkFreq1;
2144 			voltage_ptr = &od_table->GfxclkVolt1;
2145 			break;
2146 		case 1:
2147 			freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2;
2148 			voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2;
2149 			freq_ptr = &od_table->GfxclkFreq2;
2150 			voltage_ptr = &od_table->GfxclkVolt2;
2151 			break;
2152 		case 2:
2153 			freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3;
2154 			voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3;
2155 			freq_ptr = &od_table->GfxclkFreq3;
2156 			voltage_ptr = &od_table->GfxclkVolt3;
2157 			break;
2158 		default:
2159 			dev_info(smu->adev->dev, "Invalid VDDC_CURVE index: %ld\n", input[0]);
2160 			dev_info(smu->adev->dev, "Supported indices: [0, 1, 2]\n");
2161 			return -EINVAL;
2162 		}
2163 		ret = navi10_od_setting_check_range(smu, od_settings, freq_setting, input[1]);
2164 		if (ret)
2165 			return ret;
2166 		// Allow setting zero to disable the OverDrive VDDC curve
2167 		if (input[2] != 0) {
2168 			ret = navi10_od_setting_check_range(smu, od_settings, voltage_setting, input[2]);
2169 			if (ret)
2170 				return ret;
2171 			*freq_ptr = input[1];
2172 			*voltage_ptr = ((uint16_t)input[2]) * NAVI10_VOLTAGE_SCALE;
2173 			dev_dbg(smu->adev->dev, "OD: set curve %ld: (%d, %d)\n", input[0], *freq_ptr, *voltage_ptr);
2174 		} else {
2175 			// If setting 0, disable all voltage curve settings
2176 			od_table->GfxclkVolt1 = 0;
2177 			od_table->GfxclkVolt2 = 0;
2178 			od_table->GfxclkVolt3 = 0;
2179 		}
2180 		navi10_dump_od_table(smu, od_table);
2181 		break;
2182 	default:
2183 		return -ENOSYS;
2184 	}
2185 	return ret;
2186 }
2187 
2188 static int navi10_run_btc(struct smu_context *smu)
2189 {
2190 	int ret = 0;
2191 
2192 	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
2193 	if (ret)
2194 		dev_err(smu->adev->dev, "RunBtc failed!\n");
2195 
2196 	return ret;
2197 }
2198 
2199 static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable)
2200 {
2201 	int result = 0;
2202 
2203 	if (!enable)
2204 		result = smu_cmn_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL);
2205 	else
2206 		result = smu_cmn_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
2207 
2208 	return result;
2209 }
2210 
2211 static inline bool navi10_need_umc_cdr_12gbps_workaround(struct amdgpu_device *adev)
2212 {
2213 	if (adev->asic_type != CHIP_NAVI10)
2214 		return false;
2215 
2216 	if (adev->pdev->device == 0x731f &&
2217 	    (adev->pdev->revision == 0xc2 ||
2218 	     adev->pdev->revision == 0xc3 ||
2219 	     adev->pdev->revision == 0xca ||
2220 	     adev->pdev->revision == 0xcb))
2221 		return true;
2222 	else
2223 		return false;
2224 }
2225 
/*
 * Apply the UMC CDR workaround for affected 12Gbps Navi10 SKUs.
 *
 * The UCLK hard maximum is briefly forced down to the lowest DPM level
 * and then restored to the highest, after which the DAL dummy pstate is
 * re-enabled.  No-op on unaffected SKUs or on SMC firmware older than
 * 42.50.  The message order below matters — do not reorder.
 *
 * Returns 0 on success or when not applicable, negative errno otherwise.
 */
static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
{
	uint32_t uclk_count, uclk_min, uclk_max;
	uint32_t smu_version;
	int ret = 0;

	if (!navi10_need_umc_cdr_12gbps_workaround(smu->adev))
		return 0;

	ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
	if (ret)
		return ret;

	/* This workaround is available only for 42.50 or later SMC firmwares */
	if (smu_version < 0x2A3200)
		return 0;

	ret = smu_v11_0_get_dpm_level_count(smu, SMU_UCLK, &uclk_count);
	if (ret)
		return ret;

	/* Lowest (index 0) and highest (index count-1) UCLK DPM frequencies. */
	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min);
	if (ret)
		return ret;

	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max);
	if (ret)
		return ret;

	/* Force UCLK out of the highest DPM */
	ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, uclk_min);
	if (ret)
		return ret;

	/* Revert the UCLK Hardmax */
	ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, uclk_max);
	if (ret)
		return ret;

	/*
	 * In this case, SMU already disabled dummy pstate during enablement
	 * of UCLK DPM, we have to re-enabled it.
	 * */
	return navi10_dummy_pstate_control(smu, true);
}
2271 
/*
 * Populate a SwI2cRequest_t describing an EEPROM-style transfer.
 *
 * @req:      request to fill; assumed zeroed by the caller, since
 *            CmdConfig bits are OR-ed in rather than assigned
 * @write:    true for a write transfer, false for a read
 * @address:  I2C slave address
 * @numbytes: number of commands, including the two leading EEPROM
 *            address bytes; must not exceed MAX_SW_I2C_COMMANDS
 * @data:     @numbytes bytes; byte i is placed in command i's
 *            RegisterAddr field regardless of transfer direction
 */
static void navi10_fill_i2c_req(SwI2cRequest_t  *req, bool write,
				  uint8_t address, uint32_t numbytes,
				  uint8_t *data)
{
	int i;

	BUG_ON(numbytes > MAX_SW_I2C_COMMANDS);

	req->I2CcontrollerPort = 0;
	/* Speed selector consumed by SMU firmware; exact rate defined there. */
	req->I2CSpeed = 2;
	req->SlaveAddress = address;
	req->NumCmds = numbytes;

	for (i = 0; i < numbytes; i++) {
		SwI2cCmd_t *cmd =  &req->SwI2cCmds[i];

		/* First 2 bytes are always write for lower 2b EEPROM address */
		if (i < 2)
			cmd->Cmd = 1;
		else
			cmd->Cmd = write;


		/* Add RESTART for read  after address filled */
		cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0;

		/* Add STOP in the end */
		cmd->CmdConfig |= (i == (numbytes - 1)) ? CMDCONFIG_STOP_MASK : 0;

		/* Fill with data regardless if read or write to simplify code */
		cmd->RegisterAddr = data[i];
	}
}
2305 
2306 static int navi10_i2c_read_data(struct i2c_adapter *control,
2307 					       uint8_t address,
2308 					       uint8_t *data,
2309 					       uint32_t numbytes)
2310 {
2311 	uint32_t  i, ret = 0;
2312 	SwI2cRequest_t req;
2313 	struct amdgpu_device *adev = to_amdgpu_device(control);
2314 	struct smu_table_context *smu_table = &adev->smu.smu_table;
2315 	struct smu_table *table = &smu_table->driver_table;
2316 
2317 	memset(&req, 0, sizeof(req));
2318 	navi10_fill_i2c_req(&req, false, address, numbytes, data);
2319 
2320 	mutex_lock(&adev->smu.mutex);
2321 	/* Now read data starting with that address */
2322 	ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
2323 				   true);
2324 	mutex_unlock(&adev->smu.mutex);
2325 
2326 	if (!ret) {
2327 		SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr;
2328 
2329 		/* Assume SMU  fills res.SwI2cCmds[i].Data with read bytes */
2330 		for (i = 0; i < numbytes; i++)
2331 			data[i] = res->SwI2cCmds[i].Data;
2332 
2333 		dev_dbg(adev->dev, "navi10_i2c_read_data, address = %x, bytes = %d, data :",
2334 				  (uint16_t)address, numbytes);
2335 
2336 		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
2337 			       8, 1, data, numbytes, false);
2338 	} else
2339 		dev_err(adev->dev, "navi10_i2c_read_data - error occurred :%x", ret);
2340 
2341 	return ret;
2342 }
2343 
2344 static int navi10_i2c_write_data(struct i2c_adapter *control,
2345 						uint8_t address,
2346 						uint8_t *data,
2347 						uint32_t numbytes)
2348 {
2349 	uint32_t ret;
2350 	SwI2cRequest_t req;
2351 	struct amdgpu_device *adev = to_amdgpu_device(control);
2352 
2353 	memset(&req, 0, sizeof(req));
2354 	navi10_fill_i2c_req(&req, true, address, numbytes, data);
2355 
2356 	mutex_lock(&adev->smu.mutex);
2357 	ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
2358 	mutex_unlock(&adev->smu.mutex);
2359 
2360 	if (!ret) {
2361 		dev_dbg(adev->dev, "navi10_i2c_write(), address = %x, bytes = %d , data: ",
2362 					 (uint16_t)address, numbytes);
2363 
2364 		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
2365 			       8, 1, data, numbytes, false);
2366 		/*
2367 		 * According to EEPROM spec there is a MAX of 10 ms required for
2368 		 * EEPROM to flush internal RX buffer after STOP was issued at the
2369 		 * end of write transaction. During this time the EEPROM will not be
2370 		 * responsive to any more commands - so wait a bit more.
2371 		 */
2372 		msleep(10);
2373 
2374 	} else
2375 		dev_err(adev->dev, "navi10_i2c_write- error occurred :%x", ret);
2376 
2377 	return ret;
2378 }
2379 
/*
 * i2c_algorithm.master_xfer hook for the SMU software I2C bus.
 *
 * Each message is treated as a 2-byte big-endian EEPROM destination
 * address followed by payload — NOTE(review): msgs[i].len >= 2 is
 * assumed but not validated here; confirm callers guarantee it.
 * Since an SMU request carries at most MAX_SW_I2C_COMMANDS bytes
 * (including the 2 address bytes), the payload is split into
 * MAX_SW_I2C_COMMANDS-2 byte chunks plus one final partial chunk.
 *
 * Returns the number of messages processed, or -EIO if any chunk fails.
 */
static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	uint32_t  i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0;
	uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 };

	for (i = 0; i < num; i++) {
		/*
		 * SMU interface allows at most MAX_SW_I2C_COMMANDS bytes of data at
		 * once and hence the data needs to be spliced into chunks and sent each
		 * chunk separately
		 */
		data_size = msgs[i].len - 2;
		data_chunk_size = MAX_SW_I2C_COMMANDS - 2;
		next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff);
		data_ptr = msgs[i].buf + 2;

		/* Full-size chunks first. */
		for (j = 0; j < data_size / data_chunk_size; j++) {
			/* Insert the EEPROM dest addess, bits 0-15 */
			data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
			data_chunk[1] = (next_eeprom_addr & 0xff);

			if (msgs[i].flags & I2C_M_RD) {
				ret = navi10_i2c_read_data(i2c_adap,
							     (uint8_t)msgs[i].addr,
							     data_chunk, MAX_SW_I2C_COMMANDS);

				memcpy(data_ptr, data_chunk + 2, data_chunk_size);
			} else {

				memcpy(data_chunk + 2, data_ptr, data_chunk_size);

				ret = navi10_i2c_write_data(i2c_adap,
							      (uint8_t)msgs[i].addr,
							      data_chunk, MAX_SW_I2C_COMMANDS);
			}

			if (ret) {
				num = -EIO;
				goto fail;
			}

			next_eeprom_addr += data_chunk_size;
			data_ptr += data_chunk_size;
		}

		/* Trailing partial chunk, if any. */
		if (data_size % data_chunk_size) {
			data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
			data_chunk[1] = (next_eeprom_addr & 0xff);

			if (msgs[i].flags & I2C_M_RD) {
				ret = navi10_i2c_read_data(i2c_adap,
							     (uint8_t)msgs[i].addr,
							     data_chunk, (data_size % data_chunk_size) + 2);

				memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
			} else {
				memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);

				ret = navi10_i2c_write_data(i2c_adap,
							      (uint8_t)msgs[i].addr,
							      data_chunk, (data_size % data_chunk_size) + 2);
			}

			if (ret) {
				num = -EIO;
				goto fail;
			}
		}
	}

fail:
	return num;
}
2454 
2455 static u32 navi10_i2c_func(struct i2c_adapter *adap)
2456 {
2457 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
2458 }
2459 
2460 
/* i2c algorithm hooks backing the SMU-driven software I2C bus. */
static const struct i2c_algorithm navi10_i2c_algo = {
	.master_xfer = navi10_i2c_xfer,
	.functionality = navi10_i2c_func,
};
2465 
2466 static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
2467 {
2468 	struct amdgpu_device *adev = to_amdgpu_device(control);
2469 	int res;
2470 
2471 	control->owner = THIS_MODULE;
2472 	control->class = I2C_CLASS_SPD;
2473 	control->dev.parent = &adev->pdev->dev;
2474 	control->algo = &navi10_i2c_algo;
2475 	snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
2476 
2477 	res = i2c_add_adapter(control);
2478 	if (res)
2479 		DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
2480 
2481 	return res;
2482 }
2483 
/* Unregister the hw i2c adapter set up by navi10_i2c_control_init(). */
static void navi10_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
{
	i2c_del_adapter(control);
}
2488 
/*
 * Fill the common gpu_metrics_v1_0 structure from the SMU metrics table.
 *
 * The metrics table is refreshed and copied under metrics_lock, then
 * translated field by field.  Navi12 carries extra fields
 * (energy/VCLK/DCLK/VCN activity) in its SmuMetrics_NV12_t layout.
 *
 * On success, *table points at the driver-owned metrics buffer and the
 * size of gpu_metrics_v1_0 is returned; on failure a negative errno.
 */
static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
				      void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_0 *gpu_metrics =
		(struct gpu_metrics_v1_0 *)smu_table->gpu_metrics_table;
	struct amdgpu_device *adev = smu->adev;
	SmuMetrics_NV12_t nv12_metrics = { 0 };
	SmuMetrics_t metrics;
	int ret = 0;

	mutex_lock(&smu->metrics_lock);

	/* Refresh the cached metrics table (bypass_cache = true). */
	ret = smu_cmn_get_metrics_table_locked(smu,
					       NULL,
					       true);
	if (ret) {
		mutex_unlock(&smu->metrics_lock);
		return ret;
	}

	/* Snapshot under the lock; translate after releasing it. */
	memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_t));
	if (adev->asic_type == CHIP_NAVI12)
		memcpy(&nv12_metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_t));

	mutex_unlock(&smu->metrics_lock);

	smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);

	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
	gpu_metrics->temperature_mem = metrics.TemperatureMem;
	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
	gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;

	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;

	gpu_metrics->average_socket_power = metrics.AverageSocketPower;

	gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;

	/* Navi12-only fields from the extended metrics layout. */
	if (adev->asic_type == CHIP_NAVI12) {
		gpu_metrics->energy_accumulator = nv12_metrics.EnergyAccumulator;
		gpu_metrics->average_vclk0_frequency = nv12_metrics.AverageVclkFrequency;
		gpu_metrics->average_dclk0_frequency = nv12_metrics.AverageDclkFrequency;
		gpu_metrics->average_mm_activity = nv12_metrics.VcnActivityPercentage;
	}

	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;

	gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;

	gpu_metrics->pcie_link_width =
			smu_v11_0_get_current_pcie_link_width(smu);
	gpu_metrics->pcie_link_speed =
			smu_v11_0_get_current_pcie_link_speed(smu);

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_0);
}
2560 
2561 static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
2562 {
2563 	struct amdgpu_device *adev = smu->adev;
2564 	uint32_t param = 0;
2565 
2566 	/* Navi12 does not support this */
2567 	if (adev->asic_type == CHIP_NAVI12)
2568 		return 0;
2569 
2570 	/* Workaround for WS SKU */
2571 	if (adev->pdev->device == 0x7312 &&
2572 	    adev->pdev->revision == 0)
2573 		param = 0xD188;
2574 
2575 	return smu_cmn_send_smc_msg_with_param(smu,
2576 					       SMU_MSG_SetMGpuFanBoostLimitRpm,
2577 					       param,
2578 					       NULL);
2579 }
2580 
/*
 * pptable callback table for Navi10/Navi12/Navi14: navi10_* entries are
 * ASIC-specific implementations from this file, smu_v11_0_* are generic
 * smu11 helpers, and smu_cmn_* are common swSMU helpers.
 */
static const struct pptable_funcs navi10_ppt_funcs = {
	.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
	.set_default_dpm_table = navi10_set_default_dpm_table,
	.dpm_set_vcn_enable = navi10_dpm_set_vcn_enable,
	.dpm_set_jpeg_enable = navi10_dpm_set_jpeg_enable,
	.i2c_init = navi10_i2c_control_init,
	.i2c_fini = navi10_i2c_control_fini,
	.print_clk_levels = navi10_print_clk_levels,
	.force_clk_levels = navi10_force_clk_levels,
	.populate_umd_state_clk = navi10_populate_umd_state_clk,
	.get_clock_by_type_with_latency = navi10_get_clock_by_type_with_latency,
	.pre_display_config_changed = navi10_pre_display_config_changed,
	.display_config_changed = navi10_display_config_changed,
	.notify_smc_display_config = navi10_notify_smc_display_config,
	.is_dpm_running = navi10_is_dpm_running,
	.get_fan_speed_rpm = navi10_get_fan_speed_rpm,
	.get_power_profile_mode = navi10_get_power_profile_mode,
	.set_power_profile_mode = navi10_set_power_profile_mode,
	.set_watermarks_table = navi10_set_watermarks_table,
	.read_sensor = navi10_read_sensor,
	.get_uclk_dpm_states = navi10_get_uclk_dpm_states,
	.set_performance_level = smu_v11_0_set_performance_level,
	.get_thermal_temperature_range = navi10_get_thermal_temperature_range,
	.display_disable_memory_clock_switch = navi10_display_disable_memory_clock_switch,
	.get_power_limit = navi10_get_power_limit,
	.update_pcie_parameters = navi10_update_pcie_parameters,
	.init_microcode = smu_v11_0_init_microcode,
	.load_microcode = smu_v11_0_load_microcode,
	.fini_microcode = smu_v11_0_fini_microcode,
	.init_smc_tables = navi10_init_smc_tables,
	.fini_smc_tables = smu_v11_0_fini_smc_tables,
	.init_power = smu_v11_0_init_power,
	.fini_power = smu_v11_0_fini_power,
	.check_fw_status = smu_v11_0_check_fw_status,
	.setup_pptable = navi10_setup_pptable,
	.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
	.check_fw_version = smu_v11_0_check_fw_version,
	.write_pptable = smu_cmn_write_pptable,
	.set_driver_table_location = smu_v11_0_set_driver_table_location,
	.set_tool_table_location = smu_v11_0_set_tool_table_location,
	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
	.system_features_control = smu_v11_0_system_features_control,
	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
	.send_smc_msg = smu_cmn_send_smc_msg,
	.init_display_count = smu_v11_0_init_display_count,
	.set_allowed_mask = smu_v11_0_set_allowed_mask,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
	.notify_display_change = smu_v11_0_notify_display_change,
	.set_power_limit = smu_v11_0_set_power_limit,
	.init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
	.enable_thermal_alert = smu_v11_0_enable_thermal_alert,
	.disable_thermal_alert = smu_v11_0_disable_thermal_alert,
	.set_min_dcef_deep_sleep = smu_v11_0_set_min_deep_sleep_dcefclk,
	.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
	.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
	.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
	.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
	.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
	.gfx_off_control = smu_v11_0_gfx_off_control,
	.register_irq_handler = smu_v11_0_register_irq_handler,
	.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
	.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
	.baco_is_support= navi10_is_baco_supported,
	.baco_get_state = smu_v11_0_baco_get_state,
	.baco_set_state = smu_v11_0_baco_set_state,
	.baco_enter = smu_v11_0_baco_enter,
	.baco_exit = smu_v11_0_baco_exit,
	.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
	.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
	.set_default_od_settings = navi10_set_default_od_settings,
	.od_edit_dpm_table = navi10_od_edit_dpm_table,
	.run_btc = navi10_run_btc,
	.disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround,
	.set_power_source = smu_v11_0_set_power_source,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
	.get_gpu_metrics = navi10_get_gpu_metrics,
	.enable_mgpu_fan_boost = navi10_enable_mgpu_fan_boost,
	.gfx_ulv_control = smu_v11_0_gfx_ulv_control,
	.deep_sleep_control = smu_v11_0_deep_sleep_control,
	.get_fan_parameters = navi10_get_fan_parameters,
};
2665 
2666 void navi10_set_ppt_funcs(struct smu_context *smu)
2667 {
2668 	smu->ppt_funcs = &navi10_ppt_funcs;
2669 	smu->message_map = navi10_message_map;
2670 	smu->clock_map = navi10_clk_map;
2671 	smu->feature_map = navi10_feature_mask_map;
2672 	smu->table_map = navi10_table_map;
2673 	smu->pwr_src_map = navi10_pwr_src_map;
2674 	smu->workload_map = navi10_workload_map;
2675 }
2676