1 /*
2  * Copyright 2020 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #define SWSMU_CODE_LAYER_L2
25 
26 #include "amdgpu.h"
27 #include "amdgpu_smu.h"
28 #include "smu_v11_0.h"
29 #include "smu11_driver_if_vangogh.h"
30 #include "vangogh_ppt.h"
31 #include "smu_v11_5_ppsmc.h"
32 #include "smu_v11_5_pmfw.h"
33 #include "smu_cmn.h"
34 #include "soc15_common.h"
35 #include "asic_reg/gc/gc_10_3_0_offset.h"
36 #include "asic_reg/gc/gc_10_3_0_sh_mask.h"
37 #include <asm/processor.h>
38 
39 /*
40  * DO NOT use these for err/warn/info/debug messages.
41  * Use dev_err, dev_warn, dev_info and dev_dbg instead.
42  * They are more MGPU friendly.
43  */
44 #undef pr_err
45 #undef pr_warn
46 #undef pr_info
47 #undef pr_debug
48 
49 #define FEATURE_MASK(feature) (1ULL << feature)
50 #define SMC_DPM_FEATURE ( \
51 	FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
52 	FEATURE_MASK(FEATURE_VCN_DPM_BIT)	 | \
53 	FEATURE_MASK(FEATURE_FCLK_DPM_BIT)	 | \
54 	FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT)	 | \
55 	FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT)	 | \
56 	FEATURE_MASK(FEATURE_LCLK_DPM_BIT)	 | \
57 	FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT)	 | \
58 	FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \
59 	FEATURE_MASK(FEATURE_GFX_DPM_BIT))
60 
/*
 * Driver-generic SMU message index -> Vangogh PMFW (PPSMC) message index.
 * Third MSG_MAP argument is the per-message flags field (all 0 here).
 */
static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage,			0),
	MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetSmuVersion,		0),
	MSG_MAP(GetDriverIfVersion,             PPSMC_MSG_GetDriverIfVersion,	0),
	MSG_MAP(EnableGfxOff,                   PPSMC_MSG_EnableGfxOff,			0),
	MSG_MAP(AllowGfxOff,                    PPSMC_MSG_AllowGfxOff,          0),
	MSG_MAP(DisallowGfxOff,                 PPSMC_MSG_DisallowGfxOff,		0),
	MSG_MAP(PowerDownIspByTile,             PPSMC_MSG_PowerDownIspByTile,	0),
	MSG_MAP(PowerUpIspByTile,               PPSMC_MSG_PowerUpIspByTile,		0),
	MSG_MAP(PowerDownVcn,                   PPSMC_MSG_PowerDownVcn,			0),
	MSG_MAP(PowerUpVcn,                     PPSMC_MSG_PowerUpVcn,			0),
	MSG_MAP(RlcPowerNotify,                 PPSMC_MSG_RlcPowerNotify,		0),
	MSG_MAP(SetHardMinVcn,                  PPSMC_MSG_SetHardMinVcn,		0),
	MSG_MAP(SetSoftMinGfxclk,               PPSMC_MSG_SetSoftMinGfxclk,		0),
	MSG_MAP(ActiveProcessNotify,            PPSMC_MSG_ActiveProcessNotify,		0),
	MSG_MAP(SetHardMinIspiclkByFreq,        PPSMC_MSG_SetHardMinIspiclkByFreq,	0),
	MSG_MAP(SetHardMinIspxclkByFreq,        PPSMC_MSG_SetHardMinIspxclkByFreq,	0),
	MSG_MAP(SetDriverDramAddrHigh,          PPSMC_MSG_SetDriverDramAddrHigh,	0),
	MSG_MAP(SetDriverDramAddrLow,           PPSMC_MSG_SetDriverDramAddrLow,		0),
	MSG_MAP(TransferTableSmu2Dram,          PPSMC_MSG_TransferTableSmu2Dram,	0),
	MSG_MAP(TransferTableDram2Smu,          PPSMC_MSG_TransferTableDram2Smu,	0),
	MSG_MAP(GfxDeviceDriverReset,           PPSMC_MSG_GfxDeviceDriverReset,		0),
	MSG_MAP(GetEnabledSmuFeatures,          PPSMC_MSG_GetEnabledSmuFeatures,	0),
	MSG_MAP(SetHardMinSocclkByFreq,         PPSMC_MSG_SetHardMinSocclkByFreq,	0),
	MSG_MAP(SetSoftMinFclk,                 PPSMC_MSG_SetSoftMinFclk,		0),
	MSG_MAP(SetSoftMinVcn,                  PPSMC_MSG_SetSoftMinVcn,		0),
	MSG_MAP(EnablePostCode,                 PPSMC_MSG_EnablePostCode,		0),
	MSG_MAP(GetGfxclkFrequency,             PPSMC_MSG_GetGfxclkFrequency,	0),
	MSG_MAP(GetFclkFrequency,               PPSMC_MSG_GetFclkFrequency,		0),
	MSG_MAP(SetSoftMaxGfxClk,               PPSMC_MSG_SetSoftMaxGfxClk,		0),
	MSG_MAP(SetHardMinGfxClk,               PPSMC_MSG_SetHardMinGfxClk,		0),
	MSG_MAP(SetSoftMaxSocclkByFreq,         PPSMC_MSG_SetSoftMaxSocclkByFreq,	0),
	MSG_MAP(SetSoftMaxFclkByFreq,           PPSMC_MSG_SetSoftMaxFclkByFreq,		0),
	MSG_MAP(SetSoftMaxVcn,                  PPSMC_MSG_SetSoftMaxVcn,			0),
	MSG_MAP(SetPowerLimitPercentage,        PPSMC_MSG_SetPowerLimitPercentage,	0),
	MSG_MAP(PowerDownJpeg,                  PPSMC_MSG_PowerDownJpeg,			0),
	MSG_MAP(PowerUpJpeg,                    PPSMC_MSG_PowerUpJpeg,				0),
	MSG_MAP(SetHardMinFclkByFreq,           PPSMC_MSG_SetHardMinFclkByFreq,		0),
	MSG_MAP(SetSoftMinSocclkByFreq,         PPSMC_MSG_SetSoftMinSocclkByFreq,	0),
	MSG_MAP(PowerUpCvip,                    PPSMC_MSG_PowerUpCvip,				0),
	MSG_MAP(PowerDownCvip,                  PPSMC_MSG_PowerDownCvip,			0),
	MSG_MAP(GetPptLimit,                        PPSMC_MSG_GetPptLimit,			0),
	MSG_MAP(GetThermalLimit,                    PPSMC_MSG_GetThermalLimit,		0),
	MSG_MAP(GetCurrentTemperature,              PPSMC_MSG_GetCurrentTemperature, 0),
	MSG_MAP(GetCurrentPower,                    PPSMC_MSG_GetCurrentPower,		 0),
	MSG_MAP(GetCurrentVoltage,                  PPSMC_MSG_GetCurrentVoltage,	 0),
	MSG_MAP(GetCurrentCurrent,                  PPSMC_MSG_GetCurrentCurrent,	 0),
	MSG_MAP(GetAverageCpuActivity,              PPSMC_MSG_GetAverageCpuActivity, 0),
	MSG_MAP(GetAverageGfxActivity,              PPSMC_MSG_GetAverageGfxActivity, 0),
	MSG_MAP(GetAveragePower,                    PPSMC_MSG_GetAveragePower,		 0),
	MSG_MAP(GetAverageTemperature,              PPSMC_MSG_GetAverageTemperature, 0),
	MSG_MAP(SetAveragePowerTimeConstant,        PPSMC_MSG_SetAveragePowerTimeConstant,			0),
	MSG_MAP(SetAverageActivityTimeConstant,     PPSMC_MSG_SetAverageActivityTimeConstant,		0),
	MSG_MAP(SetAverageTemperatureTimeConstant,  PPSMC_MSG_SetAverageTemperatureTimeConstant,	0),
	MSG_MAP(SetMitigationEndHysteresis,         PPSMC_MSG_SetMitigationEndHysteresis,			0),
	MSG_MAP(GetCurrentFreq,                     PPSMC_MSG_GetCurrentFreq,						0),
	MSG_MAP(SetReducedPptLimit,                 PPSMC_MSG_SetReducedPptLimit,					0),
	MSG_MAP(SetReducedThermalLimit,             PPSMC_MSG_SetReducedThermalLimit,				0),
	MSG_MAP(DramLogSetDramAddr,                 PPSMC_MSG_DramLogSetDramAddr,					0),
	MSG_MAP(StartDramLogging,                   PPSMC_MSG_StartDramLogging,						0),
	MSG_MAP(StopDramLogging,                    PPSMC_MSG_StopDramLogging,						0),
	MSG_MAP(SetSoftMinCclk,                     PPSMC_MSG_SetSoftMinCclk,						0),
	MSG_MAP(SetSoftMaxCclk,                     PPSMC_MSG_SetSoftMaxCclk,						0),
	MSG_MAP(RequestActiveWgp,                   PPSMC_MSG_RequestActiveWgp,                     0),
	MSG_MAP(SetFastPPTLimit,                    PPSMC_MSG_SetFastPPTLimit,						0),
	MSG_MAP(SetSlowPPTLimit,                    PPSMC_MSG_SetSlowPPTLimit,						0),
	MSG_MAP(GetFastPPTLimit,                    PPSMC_MSG_GetFastPPTLimit,						0),
	MSG_MAP(GetSlowPPTLimit,                    PPSMC_MSG_GetSlowPPTLimit,						0),
};
130 
/*
 * Driver-generic SMU feature bit -> Vangogh PMFW feature bit.
 * The REVERSE/HALF_REVERSE variants handle features whose generic and
 * ASIC-specific names differ in direction (see the FEA_MAP_* macros).
 */
static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(PPT),
	FEA_MAP(TDC),
	FEA_MAP(THERMAL),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_FCLK),
	FEA_MAP(DS_MP1CLK),
	FEA_MAP(DS_MP0CLK),
	FEA_MAP(ATHUB_PG),
	FEA_MAP(CCLK_DPM),
	FEA_MAP(FAN_CONTROLLER),
	FEA_MAP(ULV),
	FEA_MAP(VCN_DPM),
	FEA_MAP(LCLK_DPM),
	FEA_MAP(SHUBCLK_DPM),
	FEA_MAP(DCFCLK_DPM),
	FEA_MAP(DS_DCFCLK),
	FEA_MAP(S0I2),
	FEA_MAP(SMU_LOW_POWER),
	FEA_MAP(GFX_DEM),
	FEA_MAP(PSI),
	FEA_MAP(PROCHOT),
	FEA_MAP(CPUOFF),
	FEA_MAP(STAPM),
	FEA_MAP(S0I3),
	FEA_MAP(DF_CSTATES),
	FEA_MAP(PERF_LIMIT),
	FEA_MAP(CORE_DLDO),
	FEA_MAP(RSMU_LOW_POWER),
	FEA_MAP(SMN_LOW_POWER),
	FEA_MAP(THM_LOW_POWER),
	FEA_MAP(SMUIO_LOW_POWER),
	FEA_MAP(MP1_LOW_POWER),
	FEA_MAP(DS_VCN),
	FEA_MAP(CPPC),
	FEA_MAP(OS_CSTATES),
	FEA_MAP(ISP_DPM),
	FEA_MAP(A55_DPM),
	FEA_MAP(CVIP_DSP_DPM),
	FEA_MAP(MSMU_LOW_POWER),
	FEA_MAP_REVERSE(SOCCLK),
	FEA_MAP_REVERSE(FCLK),
	FEA_MAP_HALF_REVERSE(GFX),
};
177 
/* Driver-generic SMU table id -> Vangogh PMFW table id (valid entries only). */
static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP_VALID(WATERMARKS),
	TAB_MAP_VALID(SMU_METRICS),
	TAB_MAP_VALID(CUSTOM_DPM),
	TAB_MAP_VALID(DPMCLOCKS),
};
184 
/* Power-profile mode exposed to userspace -> PPLIB workload bit. */
static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,		WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,		WORKLOAD_PPLIB_VIDEO_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,			WORKLOAD_PPLIB_VR_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,		WORKLOAD_PPLIB_COMPUTE_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,		WORKLOAD_PPLIB_CUSTOM_BIT),
};
192 
/*
 * PMFW ThrottlerStatus bit position -> generic SMU_THROTTLER_* bit,
 * used to translate ASIC throttle reasons into the common mask.
 */
static const uint8_t vangogh_throttler_map[] = {
	[THROTTLER_STATUS_BIT_SPL]	= (SMU_THROTTLER_SPL_BIT),
	[THROTTLER_STATUS_BIT_FPPT]	= (SMU_THROTTLER_FPPT_BIT),
	[THROTTLER_STATUS_BIT_SPPT]	= (SMU_THROTTLER_SPPT_BIT),
	[THROTTLER_STATUS_BIT_SPPT_APU]	= (SMU_THROTTLER_SPPT_APU_BIT),
	[THROTTLER_STATUS_BIT_THM_CORE]	= (SMU_THROTTLER_TEMP_CORE_BIT),
	[THROTTLER_STATUS_BIT_THM_GFX]	= (SMU_THROTTLER_TEMP_GPU_BIT),
	[THROTTLER_STATUS_BIT_THM_SOC]	= (SMU_THROTTLER_TEMP_SOC_BIT),
	[THROTTLER_STATUS_BIT_TDC_VDD]	= (SMU_THROTTLER_TDC_VDD_BIT),
	[THROTTLER_STATUS_BIT_TDC_SOC]	= (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_STATUS_BIT_TDC_GFX]	= (SMU_THROTTLER_TDC_GFX_BIT),
	[THROTTLER_STATUS_BIT_TDC_CVIP]	= (SMU_THROTTLER_TDC_CVIP_BIT),
};
206 
207 static int vangogh_tables_init(struct smu_context *smu)
208 {
209 	struct smu_table_context *smu_table = &smu->smu_table;
210 	struct smu_table *tables = smu_table->tables;
211 	struct amdgpu_device *adev = smu->adev;
212 	uint32_t if_version;
213 	uint32_t ret = 0;
214 
215 	ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
216 	if (ret) {
217 		dev_err(adev->dev, "Failed to get smu if version!\n");
218 		goto err0_out;
219 	}
220 
221 	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
222 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
223 	SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
224 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
225 	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
226 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
227 	SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t),
228 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
229 
230 	if (if_version < 0x3) {
231 		SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t),
232 				PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
233 		smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL);
234 	} else {
235 		SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
236 				PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
237 		smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
238 	}
239 	if (!smu_table->metrics_table)
240 		goto err0_out;
241 	smu_table->metrics_time = 0;
242 
243 	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
244 	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
245 	if (!smu_table->gpu_metrics_table)
246 		goto err1_out;
247 
248 	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
249 	if (!smu_table->watermarks_table)
250 		goto err2_out;
251 
252 	smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
253 	if (!smu_table->clocks_table)
254 		goto err3_out;
255 
256 	return 0;
257 
258 err3_out:
259 	kfree(smu_table->watermarks_table);
260 err2_out:
261 	kfree(smu_table->gpu_metrics_table);
262 err1_out:
263 	kfree(smu_table->metrics_table);
264 err0_out:
265 	return -ENOMEM;
266 }
267 
/*
 * vangogh_get_legacy_smu_metrics_data - read one metric from the cached
 * legacy (PMFW if_version < 0x3) SmuMetrics_legacy_t table.
 *
 * Refreshes the driver-side metrics cache if stale, then translates the
 * requested MetricsMember_t into the matching table field.  Unknown members
 * store UINT_MAX.  Returns 0 on success or the table-refresh error.
 *
 * NOTE: for METRICS_AVERAGE_CPUCLK, @value is treated as an array and
 * smu->cpu_core_num * sizeof(uint16_t) bytes are copied into it — the
 * caller must provide a sufficiently large buffer.
 */
static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu,
				       MetricsMember_t member,
				       uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table;
	int ret = 0;

	/* NULL table + bypass=false: refresh the cached metrics_table only */
	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->GfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->SocclkFrequency;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->VclkFrequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->DclkFrequency;
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->MemclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->GfxActivity / 100;
		break;
	case METRICS_AVERAGE_VCNACTIVITY:
		*value = metrics->UvdActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* fixed-point scaling (<< 8, / 1000) — presumably converts the
		 * PMFW power reading to the driver's unit; TODO confirm against
		 * the Vangogh driver-if spec */
		*value = (metrics->CurrentSocketPower << 8) /
		1000 ;
		break;
	case METRICS_TEMPERATURE_EDGE:
		/* centi-degC -> driver temperature units (millidegrees) */
		*value = metrics->GfxTemperature / 100 *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->SocTemperature / 100 *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->ThrottlerStatus;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		/* Voltage[2]/Voltage[1] indices look like GFX/SOC rails —
		 * TODO confirm against smu11_driver_if_vangogh.h */
		*value = metrics->Voltage[2];
		break;
	case METRICS_VOLTAGE_VDDSOC:
		*value = metrics->Voltage[1];
		break;
	case METRICS_AVERAGE_CPUCLK:
		/* copies one uint16 per CPU core into the caller's buffer */
		memcpy(value, &metrics->CoreFrequency[0],
		       smu->cpu_core_num * sizeof(uint16_t));
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}
336 
/*
 * vangogh_get_smu_metrics_data - read one metric from the cached current
 * (PMFW if_version >= 0x3) SmuMetrics_t table.
 *
 * Same contract as the legacy variant, but fields live under the table's
 * "Current" sample.  Unknown members store UINT_MAX.
 *
 * NOTE: for METRICS_AVERAGE_CPUCLK, @value is treated as an array and
 * smu->cpu_core_num * sizeof(uint16_t) bytes are copied into it.
 */
static int vangogh_get_smu_metrics_data(struct smu_context *smu,
				       MetricsMember_t member,
				       uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
	int ret = 0;

	/* NULL table + bypass=false: refresh the cached metrics_table only */
	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->Current.GfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->Current.SocclkFrequency;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->Current.VclkFrequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->Current.DclkFrequency;
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->Current.MemclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->Current.GfxActivity;
		break;
	case METRICS_AVERAGE_VCNACTIVITY:
		*value = metrics->Current.UvdActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* fixed-point scaling (<< 8, / 1000) — presumably converts the
		 * PMFW power reading to the driver's unit; TODO confirm */
		*value = (metrics->Current.CurrentSocketPower << 8) /
		1000;
		break;
	case METRICS_TEMPERATURE_EDGE:
		/* centi-degC -> driver temperature units (millidegrees) */
		*value = metrics->Current.GfxTemperature / 100 *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->Current.SocTemperature / 100 *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->Current.ThrottlerStatus;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		/* Voltage[2]/Voltage[1] indices look like GFX/SOC rails —
		 * TODO confirm against smu11_driver_if_vangogh.h */
		*value = metrics->Current.Voltage[2];
		break;
	case METRICS_VOLTAGE_VDDSOC:
		*value = metrics->Current.Voltage[1];
		break;
	case METRICS_AVERAGE_CPUCLK:
		/* copies one uint16 per CPU core into the caller's buffer */
		memcpy(value, &metrics->Current.CoreFrequency[0],
		       smu->cpu_core_num * sizeof(uint16_t));
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}
405 
406 static int vangogh_common_get_smu_metrics_data(struct smu_context *smu,
407 				       MetricsMember_t member,
408 				       uint32_t *value)
409 {
410 	struct amdgpu_device *adev = smu->adev;
411 	uint32_t if_version;
412 	int ret = 0;
413 
414 	ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
415 	if (ret) {
416 		dev_err(adev->dev, "Failed to get smu if version!\n");
417 		return ret;
418 	}
419 
420 	if (if_version < 0x3)
421 		ret = vangogh_get_legacy_smu_metrics_data(smu, member, value);
422 	else
423 		ret = vangogh_get_smu_metrics_data(smu, member, value);
424 
425 	return ret;
426 }
427 
428 static int vangogh_allocate_dpm_context(struct smu_context *smu)
429 {
430 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
431 
432 	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
433 				       GFP_KERNEL);
434 	if (!smu_dpm->dpm_context)
435 		return -ENOMEM;
436 
437 	smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context);
438 
439 	return 0;
440 }
441 
/*
 * vangogh_init_smc_tables - allocate Vangogh-specific tables/DPM context,
 * record the CPU core count, then run the common smu_v11_0 table init.
 *
 * Returns 0 on success or the first failing step's error code.
 */
static int vangogh_init_smc_tables(struct smu_context *smu)
{
	int ret = 0;

	ret = vangogh_tables_init(smu);
	if (ret)
		return ret;

	ret = vangogh_allocate_dpm_context(smu);
	if (ret)
		return ret;

#ifdef CONFIG_X86
	/* AMD x86 APU only */
	smu->cpu_core_num = boot_cpu_data.x86_max_cores;
#else
	/* non-x86 build fallback — presumably matches Vangogh's 4 CPU cores;
	 * TODO confirm this path is ever exercised */
	smu->cpu_core_num = 4;
#endif

	return smu_v11_0_init_smc_tables(smu);
}
463 
464 static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
465 {
466 	int ret = 0;
467 
468 	if (enable) {
469 		/* vcn dpm on is a prerequisite for vcn power gate messages */
470 		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
471 		if (ret)
472 			return ret;
473 	} else {
474 		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
475 		if (ret)
476 			return ret;
477 	}
478 
479 	return ret;
480 }
481 
482 static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
483 {
484 	int ret = 0;
485 
486 	if (enable) {
487 		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
488 		if (ret)
489 			return ret;
490 	} else {
491 		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
492 		if (ret)
493 			return ret;
494 	}
495 
496 	return ret;
497 }
498 
499 static bool vangogh_is_dpm_running(struct smu_context *smu)
500 {
501 	struct amdgpu_device *adev = smu->adev;
502 	int ret = 0;
503 	uint32_t feature_mask[2];
504 	uint64_t feature_enabled;
505 
506 	/* we need to re-init after suspend so return false */
507 	if (adev->in_suspend)
508 		return false;
509 
510 	ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
511 
512 	if (ret)
513 		return false;
514 
515 	feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
516 				((uint64_t)feature_mask[1] << 32));
517 
518 	return !!(feature_enabled & SMC_DPM_FEATURE);
519 }
520 
521 static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
522 						uint32_t dpm_level, uint32_t *freq)
523 {
524 	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
525 
526 	if (!clk_table || clk_type >= SMU_CLK_COUNT)
527 		return -EINVAL;
528 
529 	switch (clk_type) {
530 	case SMU_SOCCLK:
531 		if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
532 			return -EINVAL;
533 		*freq = clk_table->SocClocks[dpm_level];
534 		break;
535 	case SMU_VCLK:
536 		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
537 			return -EINVAL;
538 		*freq = clk_table->VcnClocks[dpm_level].vclk;
539 		break;
540 	case SMU_DCLK:
541 		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
542 			return -EINVAL;
543 		*freq = clk_table->VcnClocks[dpm_level].dclk;
544 		break;
545 	case SMU_UCLK:
546 	case SMU_MCLK:
547 		if (dpm_level >= clk_table->NumDfPstatesEnabled)
548 			return -EINVAL;
549 		*freq = clk_table->DfPstateTable[dpm_level].memclk;
550 
551 		break;
552 	case SMU_FCLK:
553 		if (dpm_level >= clk_table->NumDfPstatesEnabled)
554 			return -EINVAL;
555 		*freq = clk_table->DfPstateTable[dpm_level].fclk;
556 		break;
557 	default:
558 		return -EINVAL;
559 	}
560 
561 	return 0;
562 }
563 
/*
 * vangogh_print_legacy_clk_levels - format clock levels into a sysfs buffer
 * using the legacy (if_version < 0x3) SmuMetrics_legacy_t layout.
 *
 * For OD_* types, emits the overdrive min/max (or ranges) only when manual
 * DPM level is selected.  For the DPM clock domains, emits every non-zero
 * level and marks the one matching the current frequency with '*'; if no
 * level matches, the current frequency is appended on its own line.
 *
 * Returns the number of bytes written, or a negative error code.
 */
static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
			enum smu_clk_type clk_type, char *buf)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
	SmuMetrics_legacy_t metrics;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int i, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	bool cur_value_match_level = false;

	memset(&metrics, 0, sizeof(metrics));

	/* fetch a fresh copy of the metrics into the local struct */
	ret = smu_cmn_get_metrics_table(smu, &metrics, false);
	if (ret)
		return ret;

	smu_cmn_get_sysfs_buf(&buf, &size);

	/* first pass: resolve the per-domain level count and current value */
	switch (clk_type) {
	case SMU_OD_SCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
		}
		break;
	case SMU_OD_CCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_OD_RANGE:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
				smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
			size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
				smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_SOCCLK:
		/* the level 3 ~ 6 of socclk use the same frequency for vangogh */
		count = clk_table->NumSocClkLevelsEnabled;
		cur_value = metrics.SocclkFrequency;
		break;
	case SMU_VCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.VclkFrequency;
		break;
	case SMU_DCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.DclkFrequency;
		break;
	case SMU_MCLK:
		count = clk_table->NumDfPstatesEnabled;
		cur_value = metrics.MemclkFrequency;
		break;
	case SMU_FCLK:
		count = clk_table->NumDfPstatesEnabled;
		/* FCLK is not in the legacy metrics table; ask the PMFW directly */
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
		if (ret)
			return ret;
		break;
	default:
		break;
	}

	/* second pass: emit the level list for the DPM clock domains */
	switch (clk_type) {
	case SMU_SOCCLK:
	case SMU_VCLK:
	case SMU_DCLK:
	case SMU_MCLK:
	case SMU_FCLK:
		for (i = 0; i < count; i++) {
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
			if (ret)
				return ret;
			if (!value)
				continue;
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
					cur_value == value ? "*" : "");
			if (cur_value == value)
				cur_value_match_level = true;
		}

		/* current frequency is not one of the listed levels */
		if (!cur_value_match_level)
			size += sysfs_emit_at(buf, size, "   %uMhz *\n", cur_value);
		break;
	default:
		break;
	}

	return size;
}
664 
/*
 * vangogh_print_clk_levels - format clock levels into a sysfs buffer using
 * the current (if_version >= 0x3) SmuMetrics_t layout.
 *
 * Same structure as the legacy variant, but reads metrics.Current.* and
 * additionally handles SMU_GFXCLK/SMU_SCLK: for those it emits a synthetic
 * three-level view (min / standard-or-current / max) and marks whichever
 * the current frequency matches with '*'.
 *
 * Returns the number of bytes written, or a negative error code.
 */
static int vangogh_print_clk_levels(struct smu_context *smu,
			enum smu_clk_type clk_type, char *buf)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
	SmuMetrics_t metrics;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int i, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	bool cur_value_match_level = false;
	uint32_t min, max;

	memset(&metrics, 0, sizeof(metrics));

	/* fetch a fresh copy of the metrics into the local struct */
	ret = smu_cmn_get_metrics_table(smu, &metrics, false);
	if (ret)
		return ret;

	smu_cmn_get_sysfs_buf(&buf, &size);

	/* first pass: resolve the per-domain level count and current value */
	switch (clk_type) {
	case SMU_OD_SCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
		}
		break;
	case SMU_OD_CCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_OD_RANGE:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
				smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
			size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
				smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_SOCCLK:
		/* the level 3 ~ 6 of socclk use the same frequency for vangogh */
		count = clk_table->NumSocClkLevelsEnabled;
		cur_value = metrics.Current.SocclkFrequency;
		break;
	case SMU_VCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.Current.VclkFrequency;
		break;
	case SMU_DCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.Current.DclkFrequency;
		break;
	case SMU_MCLK:
		count = clk_table->NumDfPstatesEnabled;
		cur_value = metrics.Current.MemclkFrequency;
		break;
	case SMU_FCLK:
		count = clk_table->NumDfPstatesEnabled;
		/* FCLK current value is queried from the PMFW directly */
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
		if (ret)
			return ret;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		/* GFXCLK has no level table; only the current value is needed */
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetGfxclkFrequency, 0, &cur_value);
		if (ret) {
			return ret;
		}
		break;
	default:
		break;
	}

	/* second pass: emit the level list for the DPM clock domains */
	switch (clk_type) {
	case SMU_SOCCLK:
	case SMU_VCLK:
	case SMU_DCLK:
	case SMU_MCLK:
	case SMU_FCLK:
		for (i = 0; i < count; i++) {
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
			if (ret)
				return ret;
			if (!value)
				continue;
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
					cur_value == value ? "*" : "");
			if (cur_value == value)
				cur_value_match_level = true;
		}

		/* current frequency is not one of the listed levels */
		if (!cur_value_match_level)
			size += sysfs_emit_at(buf, size, "   %uMhz *\n", cur_value);
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		/* synthesize min/standard/max levels; i selects the starred row */
		min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
		max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
		if (cur_value  == max)
			i = 2;
		else if (cur_value == min)
			i = 0;
		else
			i = 1;
		size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
				i == 0 ? "*" : "");
		size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
				i == 1 ? cur_value : VANGOGH_UMD_PSTATE_STANDARD_GFXCLK,
				i == 1 ? "*" : "");
		size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
				i == 2 ? "*" : "");
		break;
	default:
		break;
	}

	return size;
}
791 
792 static int vangogh_common_print_clk_levels(struct smu_context *smu,
793 			enum smu_clk_type clk_type, char *buf)
794 {
795 	struct amdgpu_device *adev = smu->adev;
796 	uint32_t if_version;
797 	int ret = 0;
798 
799 	ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
800 	if (ret) {
801 		dev_err(adev->dev, "Failed to get smu if version!\n");
802 		return ret;
803 	}
804 
805 	if (if_version < 0x3)
806 		ret = vangogh_print_legacy_clk_levels(smu, clk_type, buf);
807 	else
808 		ret = vangogh_print_clk_levels(smu, clk_type, buf);
809 
810 	return ret;
811 }
812 
813 static int vangogh_get_profiling_clk_mask(struct smu_context *smu,
814 					 enum amd_dpm_forced_level level,
815 					 uint32_t *vclk_mask,
816 					 uint32_t *dclk_mask,
817 					 uint32_t *mclk_mask,
818 					 uint32_t *fclk_mask,
819 					 uint32_t *soc_mask)
820 {
821 	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
822 
823 	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
824 		if (mclk_mask)
825 			*mclk_mask = clk_table->NumDfPstatesEnabled - 1;
826 
827 		if (fclk_mask)
828 			*fclk_mask = clk_table->NumDfPstatesEnabled - 1;
829 
830 		if (soc_mask)
831 			*soc_mask = 0;
832 	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
833 		if (mclk_mask)
834 			*mclk_mask = 0;
835 
836 		if (fclk_mask)
837 			*fclk_mask = 0;
838 
839 		if (soc_mask)
840 			*soc_mask = 1;
841 
842 		if (vclk_mask)
843 			*vclk_mask = 1;
844 
845 		if (dclk_mask)
846 			*dclk_mask = 1;
847 	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) {
848 		if (mclk_mask)
849 			*mclk_mask = 0;
850 
851 		if (fclk_mask)
852 			*fclk_mask = 0;
853 
854 		if (soc_mask)
855 			*soc_mask = 1;
856 
857 		if (vclk_mask)
858 			*vclk_mask = 1;
859 
860 		if (dclk_mask)
861 			*dclk_mask = 1;
862 	}
863 
864 	return 0;
865 }
866 
867 static bool vangogh_clk_dpm_is_enabled(struct smu_context *smu,
868 				enum smu_clk_type clk_type)
869 {
870 	enum smu_feature_mask feature_id = 0;
871 
872 	switch (clk_type) {
873 	case SMU_MCLK:
874 	case SMU_UCLK:
875 	case SMU_FCLK:
876 		feature_id = SMU_FEATURE_DPM_FCLK_BIT;
877 		break;
878 	case SMU_GFXCLK:
879 	case SMU_SCLK:
880 		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
881 		break;
882 	case SMU_SOCCLK:
883 		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
884 		break;
885 	case SMU_VCLK:
886 	case SMU_DCLK:
887 		feature_id = SMU_FEATURE_VCN_DPM_BIT;
888 		break;
889 	default:
890 		return true;
891 	}
892 
893 	if (!smu_cmn_feature_is_enabled(smu, feature_id))
894 		return false;
895 
896 	return true;
897 }
898 
899 static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu,
900 					enum smu_clk_type clk_type,
901 					uint32_t *min,
902 					uint32_t *max)
903 {
904 	int ret = 0;
905 	uint32_t soc_mask;
906 	uint32_t vclk_mask;
907 	uint32_t dclk_mask;
908 	uint32_t mclk_mask;
909 	uint32_t fclk_mask;
910 	uint32_t clock_limit;
911 
912 	if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) {
913 		switch (clk_type) {
914 		case SMU_MCLK:
915 		case SMU_UCLK:
916 			clock_limit = smu->smu_table.boot_values.uclk;
917 			break;
918 		case SMU_FCLK:
919 			clock_limit = smu->smu_table.boot_values.fclk;
920 			break;
921 		case SMU_GFXCLK:
922 		case SMU_SCLK:
923 			clock_limit = smu->smu_table.boot_values.gfxclk;
924 			break;
925 		case SMU_SOCCLK:
926 			clock_limit = smu->smu_table.boot_values.socclk;
927 			break;
928 		case SMU_VCLK:
929 			clock_limit = smu->smu_table.boot_values.vclk;
930 			break;
931 		case SMU_DCLK:
932 			clock_limit = smu->smu_table.boot_values.dclk;
933 			break;
934 		default:
935 			clock_limit = 0;
936 			break;
937 		}
938 
939 		/* clock in Mhz unit */
940 		if (min)
941 			*min = clock_limit / 100;
942 		if (max)
943 			*max = clock_limit / 100;
944 
945 		return 0;
946 	}
947 	if (max) {
948 		ret = vangogh_get_profiling_clk_mask(smu,
949 							AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
950 							&vclk_mask,
951 							&dclk_mask,
952 							&mclk_mask,
953 							&fclk_mask,
954 							&soc_mask);
955 		if (ret)
956 			goto failed;
957 
958 		switch (clk_type) {
959 		case SMU_UCLK:
960 		case SMU_MCLK:
961 			ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
962 			if (ret)
963 				goto failed;
964 			break;
965 		case SMU_SOCCLK:
966 			ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
967 			if (ret)
968 				goto failed;
969 			break;
970 		case SMU_FCLK:
971 			ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, max);
972 			if (ret)
973 				goto failed;
974 			break;
975 		case SMU_VCLK:
976 			ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, max);
977 			if (ret)
978 				goto failed;
979 			break;
980 		case SMU_DCLK:
981 			ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, max);
982 			if (ret)
983 				goto failed;
984 			break;
985 		default:
986 			ret = -EINVAL;
987 			goto failed;
988 		}
989 	}
990 	if (min) {
991 		switch (clk_type) {
992 		case SMU_UCLK:
993 		case SMU_MCLK:
994 			ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, min);
995 			if (ret)
996 				goto failed;
997 			break;
998 		case SMU_SOCCLK:
999 			ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, min);
1000 			if (ret)
1001 				goto failed;
1002 			break;
1003 		case SMU_FCLK:
1004 			ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, min);
1005 			if (ret)
1006 				goto failed;
1007 			break;
1008 		case SMU_VCLK:
1009 			ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, min);
1010 			if (ret)
1011 				goto failed;
1012 			break;
1013 		case SMU_DCLK:
1014 			ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, min);
1015 			if (ret)
1016 				goto failed;
1017 			break;
1018 		default:
1019 			ret = -EINVAL;
1020 			goto failed;
1021 		}
1022 	}
1023 failed:
1024 	return ret;
1025 }
1026 
1027 static int vangogh_get_power_profile_mode(struct smu_context *smu,
1028 					   char *buf)
1029 {
1030 	uint32_t i, size = 0;
1031 	int16_t workload_type = 0;
1032 
1033 	if (!buf)
1034 		return -EINVAL;
1035 
1036 	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
1037 		/*
1038 		 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
1039 		 * Not all profile modes are supported on vangogh.
1040 		 */
1041 		workload_type = smu_cmn_to_asic_specific_index(smu,
1042 							       CMN2ASIC_MAPPING_WORKLOAD,
1043 							       i);
1044 
1045 		if (workload_type < 0)
1046 			continue;
1047 
1048 		size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
1049 			i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
1050 	}
1051 
1052 	return size;
1053 }
1054 
1055 static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
1056 {
1057 	int workload_type, ret;
1058 	uint32_t profile_mode = input[size];
1059 
1060 	if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
1061 		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
1062 		return -EINVAL;
1063 	}
1064 
1065 	if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
1066 			profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
1067 		return 0;
1068 
1069 	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
1070 	workload_type = smu_cmn_to_asic_specific_index(smu,
1071 						       CMN2ASIC_MAPPING_WORKLOAD,
1072 						       profile_mode);
1073 	if (workload_type < 0) {
1074 		dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n",
1075 					profile_mode);
1076 		return -EINVAL;
1077 	}
1078 
1079 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
1080 				    1 << workload_type,
1081 				    NULL);
1082 	if (ret) {
1083 		dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
1084 					workload_type);
1085 		return ret;
1086 	}
1087 
1088 	smu->power_profile_mode = profile_mode;
1089 
1090 	return 0;
1091 }
1092 
1093 static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
1094 					  enum smu_clk_type clk_type,
1095 					  uint32_t min,
1096 					  uint32_t max)
1097 {
1098 	int ret = 0;
1099 
1100 	if (!vangogh_clk_dpm_is_enabled(smu, clk_type))
1101 		return 0;
1102 
1103 	switch (clk_type) {
1104 	case SMU_GFXCLK:
1105 	case SMU_SCLK:
1106 		ret = smu_cmn_send_smc_msg_with_param(smu,
1107 							SMU_MSG_SetHardMinGfxClk,
1108 							min, NULL);
1109 		if (ret)
1110 			return ret;
1111 
1112 		ret = smu_cmn_send_smc_msg_with_param(smu,
1113 							SMU_MSG_SetSoftMaxGfxClk,
1114 							max, NULL);
1115 		if (ret)
1116 			return ret;
1117 		break;
1118 	case SMU_FCLK:
1119 		ret = smu_cmn_send_smc_msg_with_param(smu,
1120 							SMU_MSG_SetHardMinFclkByFreq,
1121 							min, NULL);
1122 		if (ret)
1123 			return ret;
1124 
1125 		ret = smu_cmn_send_smc_msg_with_param(smu,
1126 							SMU_MSG_SetSoftMaxFclkByFreq,
1127 							max, NULL);
1128 		if (ret)
1129 			return ret;
1130 		break;
1131 	case SMU_SOCCLK:
1132 		ret = smu_cmn_send_smc_msg_with_param(smu,
1133 							SMU_MSG_SetHardMinSocclkByFreq,
1134 							min, NULL);
1135 		if (ret)
1136 			return ret;
1137 
1138 		ret = smu_cmn_send_smc_msg_with_param(smu,
1139 							SMU_MSG_SetSoftMaxSocclkByFreq,
1140 							max, NULL);
1141 		if (ret)
1142 			return ret;
1143 		break;
1144 	case SMU_VCLK:
1145 		ret = smu_cmn_send_smc_msg_with_param(smu,
1146 							SMU_MSG_SetHardMinVcn,
1147 							min << 16, NULL);
1148 		if (ret)
1149 			return ret;
1150 		ret = smu_cmn_send_smc_msg_with_param(smu,
1151 							SMU_MSG_SetSoftMaxVcn,
1152 							max << 16, NULL);
1153 		if (ret)
1154 			return ret;
1155 		break;
1156 	case SMU_DCLK:
1157 		ret = smu_cmn_send_smc_msg_with_param(smu,
1158 							SMU_MSG_SetHardMinVcn,
1159 							min, NULL);
1160 		if (ret)
1161 			return ret;
1162 		ret = smu_cmn_send_smc_msg_with_param(smu,
1163 							SMU_MSG_SetSoftMaxVcn,
1164 							max, NULL);
1165 		if (ret)
1166 			return ret;
1167 		break;
1168 	default:
1169 		return -EINVAL;
1170 	}
1171 
1172 	return ret;
1173 }
1174 
1175 static int vangogh_force_clk_levels(struct smu_context *smu,
1176 				   enum smu_clk_type clk_type, uint32_t mask)
1177 {
1178 	uint32_t soft_min_level = 0, soft_max_level = 0;
1179 	uint32_t min_freq = 0, max_freq = 0;
1180 	int ret = 0 ;
1181 
1182 	soft_min_level = mask ? (ffs(mask) - 1) : 0;
1183 	soft_max_level = mask ? (fls(mask) - 1) : 0;
1184 
1185 	switch (clk_type) {
1186 	case SMU_SOCCLK:
1187 		ret = vangogh_get_dpm_clk_limited(smu, clk_type,
1188 						soft_min_level, &min_freq);
1189 		if (ret)
1190 			return ret;
1191 		ret = vangogh_get_dpm_clk_limited(smu, clk_type,
1192 						soft_max_level, &max_freq);
1193 		if (ret)
1194 			return ret;
1195 		ret = smu_cmn_send_smc_msg_with_param(smu,
1196 								SMU_MSG_SetSoftMaxSocclkByFreq,
1197 								max_freq, NULL);
1198 		if (ret)
1199 			return ret;
1200 		ret = smu_cmn_send_smc_msg_with_param(smu,
1201 								SMU_MSG_SetHardMinSocclkByFreq,
1202 								min_freq, NULL);
1203 		if (ret)
1204 			return ret;
1205 		break;
1206 	case SMU_FCLK:
1207 		ret = vangogh_get_dpm_clk_limited(smu,
1208 							clk_type, soft_min_level, &min_freq);
1209 		if (ret)
1210 			return ret;
1211 		ret = vangogh_get_dpm_clk_limited(smu,
1212 							clk_type, soft_max_level, &max_freq);
1213 		if (ret)
1214 			return ret;
1215 		ret = smu_cmn_send_smc_msg_with_param(smu,
1216 								SMU_MSG_SetSoftMaxFclkByFreq,
1217 								max_freq, NULL);
1218 		if (ret)
1219 			return ret;
1220 		ret = smu_cmn_send_smc_msg_with_param(smu,
1221 								SMU_MSG_SetHardMinFclkByFreq,
1222 								min_freq, NULL);
1223 		if (ret)
1224 			return ret;
1225 		break;
1226 	case SMU_VCLK:
1227 		ret = vangogh_get_dpm_clk_limited(smu,
1228 							clk_type, soft_min_level, &min_freq);
1229 		if (ret)
1230 			return ret;
1231 
1232 		ret = vangogh_get_dpm_clk_limited(smu,
1233 							clk_type, soft_max_level, &max_freq);
1234 		if (ret)
1235 			return ret;
1236 
1237 
1238 		ret = smu_cmn_send_smc_msg_with_param(smu,
1239 								SMU_MSG_SetHardMinVcn,
1240 								min_freq << 16, NULL);
1241 		if (ret)
1242 			return ret;
1243 
1244 		ret = smu_cmn_send_smc_msg_with_param(smu,
1245 								SMU_MSG_SetSoftMaxVcn,
1246 								max_freq << 16, NULL);
1247 		if (ret)
1248 			return ret;
1249 
1250 		break;
1251 	case SMU_DCLK:
1252 		ret = vangogh_get_dpm_clk_limited(smu,
1253 							clk_type, soft_min_level, &min_freq);
1254 		if (ret)
1255 			return ret;
1256 
1257 		ret = vangogh_get_dpm_clk_limited(smu,
1258 							clk_type, soft_max_level, &max_freq);
1259 		if (ret)
1260 			return ret;
1261 
1262 		ret = smu_cmn_send_smc_msg_with_param(smu,
1263 							SMU_MSG_SetHardMinVcn,
1264 							min_freq, NULL);
1265 		if (ret)
1266 			return ret;
1267 
1268 		ret = smu_cmn_send_smc_msg_with_param(smu,
1269 							SMU_MSG_SetSoftMaxVcn,
1270 							max_freq, NULL);
1271 		if (ret)
1272 			return ret;
1273 
1274 		break;
1275 	default:
1276 		break;
1277 	}
1278 
1279 	return ret;
1280 }
1281 
1282 static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest)
1283 {
1284 	int ret = 0, i = 0;
1285 	uint32_t min_freq, max_freq, force_freq;
1286 	enum smu_clk_type clk_type;
1287 
1288 	enum smu_clk_type clks[] = {
1289 		SMU_SOCCLK,
1290 		SMU_VCLK,
1291 		SMU_DCLK,
1292 		SMU_FCLK,
1293 	};
1294 
1295 	for (i = 0; i < ARRAY_SIZE(clks); i++) {
1296 		clk_type = clks[i];
1297 		ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
1298 		if (ret)
1299 			return ret;
1300 
1301 		force_freq = highest ? max_freq : min_freq;
1302 		ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
1303 		if (ret)
1304 			return ret;
1305 	}
1306 
1307 	return ret;
1308 }
1309 
1310 static int vangogh_unforce_dpm_levels(struct smu_context *smu)
1311 {
1312 	int ret = 0, i = 0;
1313 	uint32_t min_freq, max_freq;
1314 	enum smu_clk_type clk_type;
1315 
1316 	struct clk_feature_map {
1317 		enum smu_clk_type clk_type;
1318 		uint32_t	feature;
1319 	} clk_feature_map[] = {
1320 		{SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT},
1321 		{SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
1322 		{SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT},
1323 		{SMU_DCLK, SMU_FEATURE_VCN_DPM_BIT},
1324 	};
1325 
1326 	for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
1327 
1328 		if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature))
1329 		    continue;
1330 
1331 		clk_type = clk_feature_map[i].clk_type;
1332 
1333 		ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
1334 
1335 		if (ret)
1336 			return ret;
1337 
1338 		ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
1339 
1340 		if (ret)
1341 			return ret;
1342 	}
1343 
1344 	return ret;
1345 }
1346 
1347 static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
1348 {
1349 	int ret = 0;
1350 	uint32_t socclk_freq = 0, fclk_freq = 0;
1351 	uint32_t vclk_freq = 0, dclk_freq = 0;
1352 
1353 	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_freq);
1354 	if (ret)
1355 		return ret;
1356 
1357 	ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq);
1358 	if (ret)
1359 		return ret;
1360 
1361 	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_freq);
1362 	if (ret)
1363 		return ret;
1364 
1365 	ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq);
1366 	if (ret)
1367 		return ret;
1368 
1369 	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_freq);
1370 	if (ret)
1371 		return ret;
1372 
1373 	ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq);
1374 	if (ret)
1375 		return ret;
1376 
1377 	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_freq);
1378 	if (ret)
1379 		return ret;
1380 
1381 	ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq);
1382 	if (ret)
1383 		return ret;
1384 
1385 	return ret;
1386 }
1387 
/*
 * vangogh_set_performance_level - apply a forced DPM performance level.
 *
 * Resets the CPU soft limits to their defaults, records the per-level
 * GFX hard-min/soft-max targets, programs the non-GFX clock domains as
 * the level requires, then commits the GFX limits to the SMU.
 * Returns 0 on success or a negative error code.
 */
static int vangogh_set_performance_level(struct smu_context *smu,
					enum amd_dpm_forced_level level)
{
	int ret = 0;
	uint32_t soc_mask, mclk_mask, fclk_mask;
	uint32_t vclk_mask = 0, dclk_mask = 0;

	/* Every level change starts from the default CPU soft limits. */
	smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
	smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		/* Pin GFX at its default maximum (hard min == soft max). */
		smu->gfx_actual_hard_min_freq = smu->gfx_default_soft_max_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;


		ret = vangogh_force_dpm_limit_value(smu, true);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		/* Pin GFX at its default minimum. */
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq;

		ret = vangogh_force_dpm_limit_value(smu, false);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		/* Full default range; unforce all DPM domains. */
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;

		ret = vangogh_unforce_dpm_levels(smu);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		/* Fixed "standard" GFX clock; profiling masks pick the levels. */
		smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK;
		smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK;

		ret = vangogh_get_profiling_clk_mask(smu, level,
							&vclk_mask,
							&dclk_mask,
							&mclk_mask,
							&fclk_mask,
							&soc_mask);
		if (ret)
			return ret;

		/* NOTE(review): return values intentionally(?) ignored here. */
		vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
		vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
		vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask);
		vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		/* GFX at its minimum; other domains untouched. */
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;

		/* Only the MCLK/FCLK masks are needed for this level. */
		ret = vangogh_get_profiling_clk_mask(smu, level,
							NULL,
							NULL,
							&mclk_mask,
							&fclk_mask,
							NULL);
		if (ret)
			return ret;

		vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK;
		smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK;

		ret = vangogh_set_peak_clock_by_device(smu);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		/* Nothing to program for manual/exit levels. */
		return 0;
	}

	/* Commit the chosen GFX hard-min and soft-max to the SMU. */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
					      smu->gfx_actual_hard_min_freq, NULL);
	if (ret)
		return ret;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
					      smu->gfx_actual_soft_max_freq, NULL);
	if (ret)
		return ret;

	return ret;
}
1487 
1488 static int vangogh_read_sensor(struct smu_context *smu,
1489 				 enum amd_pp_sensors sensor,
1490 				 void *data, uint32_t *size)
1491 {
1492 	int ret = 0;
1493 
1494 	if (!data || !size)
1495 		return -EINVAL;
1496 
1497 	switch (sensor) {
1498 	case AMDGPU_PP_SENSOR_GPU_LOAD:
1499 		ret = vangogh_common_get_smu_metrics_data(smu,
1500 						   METRICS_AVERAGE_GFXACTIVITY,
1501 						   (uint32_t *)data);
1502 		*size = 4;
1503 		break;
1504 	case AMDGPU_PP_SENSOR_GPU_POWER:
1505 		ret = vangogh_common_get_smu_metrics_data(smu,
1506 						   METRICS_AVERAGE_SOCKETPOWER,
1507 						   (uint32_t *)data);
1508 		*size = 4;
1509 		break;
1510 	case AMDGPU_PP_SENSOR_EDGE_TEMP:
1511 		ret = vangogh_common_get_smu_metrics_data(smu,
1512 						   METRICS_TEMPERATURE_EDGE,
1513 						   (uint32_t *)data);
1514 		*size = 4;
1515 		break;
1516 	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
1517 		ret = vangogh_common_get_smu_metrics_data(smu,
1518 						   METRICS_TEMPERATURE_HOTSPOT,
1519 						   (uint32_t *)data);
1520 		*size = 4;
1521 		break;
1522 	case AMDGPU_PP_SENSOR_GFX_MCLK:
1523 		ret = vangogh_common_get_smu_metrics_data(smu,
1524 						   METRICS_CURR_UCLK,
1525 						   (uint32_t *)data);
1526 		*(uint32_t *)data *= 100;
1527 		*size = 4;
1528 		break;
1529 	case AMDGPU_PP_SENSOR_GFX_SCLK:
1530 		ret = vangogh_common_get_smu_metrics_data(smu,
1531 						   METRICS_CURR_GFXCLK,
1532 						   (uint32_t *)data);
1533 		*(uint32_t *)data *= 100;
1534 		*size = 4;
1535 		break;
1536 	case AMDGPU_PP_SENSOR_VDDGFX:
1537 		ret = vangogh_common_get_smu_metrics_data(smu,
1538 						   METRICS_VOLTAGE_VDDGFX,
1539 						   (uint32_t *)data);
1540 		*size = 4;
1541 		break;
1542 	case AMDGPU_PP_SENSOR_VDDNB:
1543 		ret = vangogh_common_get_smu_metrics_data(smu,
1544 						   METRICS_VOLTAGE_VDDSOC,
1545 						   (uint32_t *)data);
1546 		*size = 4;
1547 		break;
1548 	case AMDGPU_PP_SENSOR_CPU_CLK:
1549 		ret = vangogh_common_get_smu_metrics_data(smu,
1550 						   METRICS_AVERAGE_CPUCLK,
1551 						   (uint32_t *)data);
1552 		*size = smu->cpu_core_num * sizeof(uint16_t);
1553 		break;
1554 	default:
1555 		ret = -EOPNOTSUPP;
1556 		break;
1557 	}
1558 
1559 	return ret;
1560 }
1561 
1562 static int vangogh_set_watermarks_table(struct smu_context *smu,
1563 				       struct pp_smu_wm_range_sets *clock_ranges)
1564 {
1565 	int i;
1566 	int ret = 0;
1567 	Watermarks_t *table = smu->smu_table.watermarks_table;
1568 
1569 	if (!table || !clock_ranges)
1570 		return -EINVAL;
1571 
1572 	if (clock_ranges) {
1573 		if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
1574 			clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
1575 			return -EINVAL;
1576 
1577 		for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
1578 			table->WatermarkRow[WM_DCFCLK][i].MinClock =
1579 				clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
1580 			table->WatermarkRow[WM_DCFCLK][i].MaxClock =
1581 				clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
1582 			table->WatermarkRow[WM_DCFCLK][i].MinMclk =
1583 				clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
1584 			table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
1585 				clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
1586 
1587 			table->WatermarkRow[WM_DCFCLK][i].WmSetting =
1588 				clock_ranges->reader_wm_sets[i].wm_inst;
1589 		}
1590 
1591 		for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
1592 			table->WatermarkRow[WM_SOCCLK][i].MinClock =
1593 				clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
1594 			table->WatermarkRow[WM_SOCCLK][i].MaxClock =
1595 				clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
1596 			table->WatermarkRow[WM_SOCCLK][i].MinMclk =
1597 				clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
1598 			table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
1599 				clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
1600 
1601 			table->WatermarkRow[WM_SOCCLK][i].WmSetting =
1602 				clock_ranges->writer_wm_sets[i].wm_inst;
1603 		}
1604 
1605 		smu->watermarks_bitmap |= WATERMARKS_EXIST;
1606 	}
1607 
1608 	/* pass data to smu controller */
1609 	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1610 	     !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1611 		ret = smu_cmn_write_watermarks_table(smu);
1612 		if (ret) {
1613 			dev_err(smu->adev->dev, "Failed to update WMTABLE!");
1614 			return ret;
1615 		}
1616 		smu->watermarks_bitmap |= WATERMARKS_LOADED;
1617 	}
1618 
1619 	return 0;
1620 }
1621 
/*
 * vangogh_get_legacy_gpu_metrics - export GPU metrics from the legacy
 * SmuMetrics_legacy_t layout (used when the SMU interface version is
 * below 3; see vangogh_common_get_gpu_metrics()).
 *
 * Fills the cached gpu_metrics_v2_2 table from a fresh metrics read and
 * returns its size in bytes, or a negative error code.
 */
static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
				      void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_2 *gpu_metrics =
		(struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
	SmuMetrics_legacy_t metrics;
	int ret = 0;

	/* NOTE(review): third arg 'true' presumably forces a fresh read
	 * rather than using a cached table -- confirm against
	 * smu_cmn_get_metrics_table(). */
	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);

	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.SocTemperature;
	/* Only the first four core temperatures are carried over. */
	memcpy(&gpu_metrics->temperature_core[0],
		&metrics.CoreTemperature[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.UvdActivity;

	gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
	/* Power[] index order (CPU, SoC, GFX) inferred from the target
	 * fields; verify against the PMFW driver-interface header. */
	gpu_metrics->average_cpu_power = metrics.Power[0];
	gpu_metrics->average_soc_power = metrics.Power[1];
	gpu_metrics->average_gfx_power = metrics.Power[2];
	memcpy(&gpu_metrics->average_core_power[0],
		&metrics.CorePower[0],
		sizeof(uint16_t) * 4);

	gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
	/* uclk and fclk both report MemclkFrequency; the legacy layout
	 * apparently carries no separate fclk reading. */
	gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
		&metrics.CoreFrequency[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
							   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_2);
}
1678 
/*
 * vangogh_get_gpu_metrics - export GPU metrics from the current
 * SmuMetrics_t layout, which splits readings into .Current and
 * .Average sub-structures (SMU interface version >= 3).
 *
 * Fills the cached gpu_metrics_v2_2 table from a fresh metrics read and
 * returns its size in bytes, or a negative error code.
 */
static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
				      void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_2 *gpu_metrics =
		(struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int ret = 0;

	/* NOTE(review): 'true' presumably forces a fresh read from the
	 * SMU -- confirm against smu_cmn_get_metrics_table(). */
	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);

	/* Instantaneous readings come from .Current ... */
	gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
	/* Only the first four core temperatures are carried over. */
	memcpy(&gpu_metrics->temperature_core[0],
		&metrics.Current.CoreTemperature[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.Current.UvdActivity;

	gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
	/* Power[] index order (CPU, SoC, GFX) inferred from the target
	 * fields; verify against the PMFW driver-interface header. */
	gpu_metrics->average_cpu_power = metrics.Current.Power[0];
	gpu_metrics->average_soc_power = metrics.Current.Power[1];
	gpu_metrics->average_gfx_power = metrics.Current.Power[2];
	memcpy(&gpu_metrics->average_core_power[0],
		&metrics.Average.CorePower[0],
		sizeof(uint16_t) * 4);

	/* ... averaged clock readings from .Average. */
	gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
	/* uclk and fclk both report MemclkFrequency; there is apparently
	 * no separate fclk reading in this layout. */
	gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;

	gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
	gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
	gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
	gpu_metrics->current_dclk = metrics.Current.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
		&metrics.Current.CoreFrequency[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];

	gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus,
							   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_2);
}
1742 
1743 static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu,
1744 				      void **table)
1745 {
1746 	struct amdgpu_device *adev = smu->adev;
1747 	uint32_t if_version;
1748 	int ret = 0;
1749 
1750 	ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
1751 	if (ret) {
1752 		dev_err(adev->dev, "Failed to get smu if version!\n");
1753 		return ret;
1754 	}
1755 
1756 	if (if_version < 0x3)
1757 		ret = vangogh_get_legacy_gpu_metrics(smu, table);
1758 	else
1759 		ret = vangogh_get_gpu_metrics(smu, table);
1760 
1761 	return ret;
1762 }
1763 
1764 static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
1765 					long input[], uint32_t size)
1766 {
1767 	int ret = 0;
1768 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1769 
1770 	if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) {
1771 		dev_warn(smu->adev->dev,
1772 			"pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n");
1773 		return -EINVAL;
1774 	}
1775 
1776 	switch (type) {
1777 	case PP_OD_EDIT_CCLK_VDDC_TABLE:
1778 		if (size != 3) {
1779 			dev_err(smu->adev->dev, "Input parameter number not correct (should be 4 for processor)\n");
1780 			return -EINVAL;
1781 		}
1782 		if (input[0] >= smu->cpu_core_num) {
1783 			dev_err(smu->adev->dev, "core index is overflow, should be less than %d\n",
1784 				smu->cpu_core_num);
1785 		}
1786 		smu->cpu_core_id_select = input[0];
1787 		if (input[1] == 0) {
1788 			if (input[2] < smu->cpu_default_soft_min_freq) {
1789 				dev_warn(smu->adev->dev, "Fine grain setting minimum cclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
1790 					input[2], smu->cpu_default_soft_min_freq);
1791 				return -EINVAL;
1792 			}
1793 			smu->cpu_actual_soft_min_freq = input[2];
1794 		} else if (input[1] == 1) {
1795 			if (input[2] > smu->cpu_default_soft_max_freq) {
1796 				dev_warn(smu->adev->dev, "Fine grain setting maximum cclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
1797 					input[2], smu->cpu_default_soft_max_freq);
1798 				return -EINVAL;
1799 			}
1800 			smu->cpu_actual_soft_max_freq = input[2];
1801 		} else {
1802 			return -EINVAL;
1803 		}
1804 		break;
1805 	case PP_OD_EDIT_SCLK_VDDC_TABLE:
1806 		if (size != 2) {
1807 			dev_err(smu->adev->dev, "Input parameter number not correct\n");
1808 			return -EINVAL;
1809 		}
1810 
1811 		if (input[0] == 0) {
1812 			if (input[1] < smu->gfx_default_hard_min_freq) {
1813 				dev_warn(smu->adev->dev,
1814 					"Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
1815 					input[1], smu->gfx_default_hard_min_freq);
1816 				return -EINVAL;
1817 			}
1818 			smu->gfx_actual_hard_min_freq = input[1];
1819 		} else if (input[0] == 1) {
1820 			if (input[1] > smu->gfx_default_soft_max_freq) {
1821 				dev_warn(smu->adev->dev,
1822 					"Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
1823 					input[1], smu->gfx_default_soft_max_freq);
1824 				return -EINVAL;
1825 			}
1826 			smu->gfx_actual_soft_max_freq = input[1];
1827 		} else {
1828 			return -EINVAL;
1829 		}
1830 		break;
1831 	case PP_OD_RESTORE_DEFAULT_TABLE:
1832 		if (size != 0) {
1833 			dev_err(smu->adev->dev, "Input parameter number not correct\n");
1834 			return -EINVAL;
1835 		} else {
1836 			smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
1837 			smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
1838 			smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
1839 			smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
1840 		}
1841 		break;
1842 	case PP_OD_COMMIT_DPM_TABLE:
1843 		if (size != 0) {
1844 			dev_err(smu->adev->dev, "Input parameter number not correct\n");
1845 			return -EINVAL;
1846 		} else {
1847 			if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
1848 				dev_err(smu->adev->dev,
1849 					"The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
1850 					smu->gfx_actual_hard_min_freq,
1851 					smu->gfx_actual_soft_max_freq);
1852 				return -EINVAL;
1853 			}
1854 
1855 			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
1856 									smu->gfx_actual_hard_min_freq, NULL);
1857 			if (ret) {
1858 				dev_err(smu->adev->dev, "Set hard min sclk failed!");
1859 				return ret;
1860 			}
1861 
1862 			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
1863 									smu->gfx_actual_soft_max_freq, NULL);
1864 			if (ret) {
1865 				dev_err(smu->adev->dev, "Set soft max sclk failed!");
1866 				return ret;
1867 			}
1868 
1869 			if (smu->adev->pm.fw_version < 0x43f1b00) {
1870 				dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n");
1871 				break;
1872 			}
1873 
1874 			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
1875 							      ((smu->cpu_core_id_select << 20)
1876 							       | smu->cpu_actual_soft_min_freq),
1877 							      NULL);
1878 			if (ret) {
1879 				dev_err(smu->adev->dev, "Set hard min cclk failed!");
1880 				return ret;
1881 			}
1882 
1883 			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
1884 							      ((smu->cpu_core_id_select << 20)
1885 							       | smu->cpu_actual_soft_max_freq),
1886 							      NULL);
1887 			if (ret) {
1888 				dev_err(smu->adev->dev, "Set soft max cclk failed!");
1889 				return ret;
1890 			}
1891 		}
1892 		break;
1893 	default:
1894 		return -ENOSYS;
1895 	}
1896 
1897 	return ret;
1898 }
1899 
/*
 * vangogh_set_default_dpm_tables - populate smu_table->clocks_table via
 * smu_cmn_update_table() for SMU_TABLE_DPMCLOCKS.
 * NOTE(review): the final 'false' presumably selects the read (SMU ->
 * driver) direction -- confirm against smu_cmn_update_table().
 */
static int vangogh_set_default_dpm_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
}
1906 
/*
 * vangogh_set_fine_grain_gfx_freq_parameters - seed the fine-grain
 * GFX/CPU frequency bookkeeping used by the OD (pp_od_clk_voltage)
 * interface. Always returns 0.
 */
static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	/* GFX default limits come from the SMU DPM clock table. */
	smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
	smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
	smu->gfx_actual_hard_min_freq = 0;
	smu->gfx_actual_soft_max_freq = 0;

	/* CPU soft limits are hard-coded (presumably MHz -- confirm). */
	smu->cpu_default_soft_min_freq = 1400;
	smu->cpu_default_soft_max_freq = 3500;
	smu->cpu_actual_soft_min_freq = 0;
	smu->cpu_actual_soft_max_freq = 0;

	return 0;
}
1923 
1924 static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table)
1925 {
1926 	DpmClocks_t *table = smu->smu_table.clocks_table;
1927 	int i;
1928 
1929 	if (!clock_table || !table)
1930 		return -EINVAL;
1931 
1932 	for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
1933 		clock_table->SocClocks[i].Freq = table->SocClocks[i];
1934 		clock_table->SocClocks[i].Vol = table->SocVoltage[i];
1935 	}
1936 
1937 	for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
1938 		clock_table->FClocks[i].Freq = table->DfPstateTable[i].fclk;
1939 		clock_table->FClocks[i].Vol = table->DfPstateTable[i].voltage;
1940 	}
1941 
1942 	for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
1943 		clock_table->MemClocks[i].Freq = table->DfPstateTable[i].memclk;
1944 		clock_table->MemClocks[i].Vol = table->DfPstateTable[i].voltage;
1945 	}
1946 
1947 	return 0;
1948 }
1949 
1950 
/*
 * vangogh_system_features_control - notify SMU on feature enable/disable
 * and refresh the driver's cached feature bitmaps.
 *
 * @en: true when features are being enabled, false on teardown.
 *
 * On disable with a sufficiently new PMFW, the RLC is told to power off
 * first. The cached enabled/supported bitmaps are always cleared; on
 * enable they are repopulated from the mask reported by the SMU.
 */
static int vangogh_system_features_control(struct smu_context *smu, bool en)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_mask[2];
	int ret = 0;

	/* Older PMFW does not understand RlcPowerNotify; skip it there. */
	if (adev->pm.fw_version >= 0x43f1700 && !en)
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
						      RLC_STATUS_OFF, NULL);

	/* Invalidate the cached masks before (re)querying or on teardown. */
	bitmap_zero(feature->enabled, feature->feature_num);
	bitmap_zero(feature->supported, feature->feature_num);

	if (!en)
		return ret;

	ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
	if (ret)
		return ret;

	/* Mirror the 64-bit firmware mask into both driver-side bitmaps. */
	bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
		    feature->feature_num);
	bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
		    feature->feature_num);

	return 0;
}
1979 
/*
 * vangogh_post_smu_init - post-init programming: enable GFXOFF and
 * request WGP power-gating based on the always-on WGP mask.
 *
 * Returns 0 on success or a negative error code from SMU messaging.
 */
static int vangogh_post_smu_init(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t tmp;
	int ret = 0;
	uint8_t aon_bits = 0;
	/* Two CUs in one WGP */
	uint32_t req_active_wgps = adev->gfx.cu_info.number/2;
	uint32_t total_cu = adev->gfx.config.max_cu_per_sh *
		adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;

	/* allow message will be sent after enable message on Vangogh*/
	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
			(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
		if (ret) {
			dev_err(adev->dev, "Failed to Enable GfxOff!\n");
			return ret;
		}
	} else {
		/* GFXOFF requires both GFXCLK DPM and GFX power gating. */
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		dev_info(adev->dev, "If GFX DPM or power gate disabled, disable GFXOFF\n");
	}

	/* if all CUs are active, no need to power off any WGPs */
	if (total_cu == adev->gfx.cu_info.number)
		return 0;

	/*
	 * Calculate the total bits number of always on WGPs for all SA/SEs in
	 * RLC_PG_ALWAYS_ON_WGP_MASK.
	 */
	tmp = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_ALWAYS_ON_WGP_MASK));
	tmp &= RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK_MASK;

	/* Per-SA mask scaled by the number of shader arrays in the chip. */
	aon_bits = hweight32(tmp) * adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;

	/* Do not request any WGPs less than set in the AON_WGP_MASK */
	if (aon_bits > req_active_wgps) {
		dev_info(adev->dev, "Number of always on WGPs greater than active WGPs: WGP power save not requested.\n");
		return 0;
	} else {
		return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestActiveWgp, req_active_wgps, NULL);
	}
}
2025 
2026 static int vangogh_mode_reset(struct smu_context *smu, int type)
2027 {
2028 	int ret = 0, index = 0;
2029 
2030 	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
2031 					       SMU_MSG_GfxDeviceDriverReset);
2032 	if (index < 0)
2033 		return index == -EACCES ? 0 : index;
2034 
2035 	mutex_lock(&smu->message_lock);
2036 
2037 	ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type);
2038 
2039 	mutex_unlock(&smu->message_lock);
2040 
2041 	mdelay(10);
2042 
2043 	return ret;
2044 }
2045 
/* Convenience wrapper: trigger a mode-2 (soft) GPU reset via the SMU. */
static int vangogh_mode2_reset(struct smu_context *smu)
{
	return vangogh_mode_reset(smu, SMU_RESET_MODE_2);
}
2050 
2051 static int vangogh_get_power_limit(struct smu_context *smu,
2052 				   uint32_t *current_power_limit,
2053 				   uint32_t *default_power_limit,
2054 				   uint32_t *max_power_limit)
2055 {
2056 	struct smu_11_5_power_context *power_context =
2057 								smu->smu_power.power_context;
2058 	uint32_t ppt_limit;
2059 	int ret = 0;
2060 
2061 	if (smu->adev->pm.fw_version < 0x43f1e00)
2062 		return ret;
2063 
2064 	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSlowPPTLimit, &ppt_limit);
2065 	if (ret) {
2066 		dev_err(smu->adev->dev, "Get slow PPT limit failed!\n");
2067 		return ret;
2068 	}
2069 	/* convert from milliwatt to watt */
2070 	if (current_power_limit)
2071 		*current_power_limit = ppt_limit / 1000;
2072 	if (default_power_limit)
2073 		*default_power_limit = ppt_limit / 1000;
2074 	if (max_power_limit)
2075 		*max_power_limit = 29;
2076 
2077 	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit);
2078 	if (ret) {
2079 		dev_err(smu->adev->dev, "Get fast PPT limit failed!\n");
2080 		return ret;
2081 	}
2082 	/* convert from milliwatt to watt */
2083 	power_context->current_fast_ppt_limit =
2084 			power_context->default_fast_ppt_limit = ppt_limit / 1000;
2085 	power_context->max_fast_ppt_limit = 30;
2086 
2087 	return ret;
2088 }
2089 
2090 static int vangogh_get_ppt_limit(struct smu_context *smu,
2091 								uint32_t *ppt_limit,
2092 								enum smu_ppt_limit_type type,
2093 								enum smu_ppt_limit_level level)
2094 {
2095 	struct smu_11_5_power_context *power_context =
2096 							smu->smu_power.power_context;
2097 
2098 	if (!power_context)
2099 		return -EOPNOTSUPP;
2100 
2101 	if (type == SMU_FAST_PPT_LIMIT) {
2102 		switch (level) {
2103 		case SMU_PPT_LIMIT_MAX:
2104 			*ppt_limit = power_context->max_fast_ppt_limit;
2105 			break;
2106 		case SMU_PPT_LIMIT_CURRENT:
2107 			*ppt_limit = power_context->current_fast_ppt_limit;
2108 			break;
2109 		case SMU_PPT_LIMIT_DEFAULT:
2110 			*ppt_limit = power_context->default_fast_ppt_limit;
2111 			break;
2112 		default:
2113 			break;
2114 		}
2115 	}
2116 
2117 	return 0;
2118 }
2119 
2120 static int vangogh_set_power_limit(struct smu_context *smu,
2121 				   enum smu_ppt_limit_type limit_type,
2122 				   uint32_t ppt_limit)
2123 {
2124 	struct smu_11_5_power_context *power_context =
2125 			smu->smu_power.power_context;
2126 	int ret = 0;
2127 
2128 	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
2129 		dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
2130 		return -EOPNOTSUPP;
2131 	}
2132 
2133 	switch (limit_type) {
2134 	case SMU_DEFAULT_PPT_LIMIT:
2135 		ret = smu_cmn_send_smc_msg_with_param(smu,
2136 				SMU_MSG_SetSlowPPTLimit,
2137 				ppt_limit * 1000, /* convert from watt to milliwatt */
2138 				NULL);
2139 		if (ret)
2140 			return ret;
2141 
2142 		smu->current_power_limit = ppt_limit;
2143 		break;
2144 	case SMU_FAST_PPT_LIMIT:
2145 		ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24);
2146 		if (ppt_limit > power_context->max_fast_ppt_limit) {
2147 			dev_err(smu->adev->dev,
2148 				"New power limit (%d) is over the max allowed %d\n",
2149 				ppt_limit, power_context->max_fast_ppt_limit);
2150 			return ret;
2151 		}
2152 
2153 		ret = smu_cmn_send_smc_msg_with_param(smu,
2154 				SMU_MSG_SetFastPPTLimit,
2155 				ppt_limit * 1000, /* convert from watt to milliwatt */
2156 				NULL);
2157 		if (ret)
2158 			return ret;
2159 
2160 		power_context->current_fast_ppt_limit = ppt_limit;
2161 		break;
2162 	default:
2163 		return -EINVAL;
2164 	}
2165 
2166 	return ret;
2167 }
2168 
/* ASIC-specific pptable callbacks for Vangogh (APU, SMU v11.5). */
static const struct pptable_funcs vangogh_ppt_funcs = {

	/* Firmware/table bring-up (shared SMU v11 implementations). */
	.check_fw_status = smu_v11_0_check_fw_status,
	.check_fw_version = smu_v11_0_check_fw_version,
	.init_smc_tables = vangogh_init_smc_tables,
	.fini_smc_tables = smu_v11_0_fini_smc_tables,
	.init_power = smu_v11_0_init_power,
	.fini_power = smu_v11_0_fini_power,
	.register_irq_handler = smu_v11_0_register_irq_handler,
	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
	/* SMU messaging primitives. */
	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
	.send_smc_msg = smu_cmn_send_smc_msg,
	/* Multimedia block power gating. */
	.dpm_set_vcn_enable = vangogh_dpm_set_vcn_enable,
	.dpm_set_jpeg_enable = vangogh_dpm_set_jpeg_enable,
	.is_dpm_running = vangogh_is_dpm_running,
	.read_sensor = vangogh_read_sensor,
	.get_enabled_mask = smu_cmn_get_enabled_32_bits_mask,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_watermarks_table = vangogh_set_watermarks_table,
	.set_driver_table_location = smu_v11_0_set_driver_table_location,
	.interrupt_work = smu_v11_0_interrupt_work,
	.get_gpu_metrics = vangogh_common_get_gpu_metrics,
	/* Clock/DPM control and sysfs clock-level interfaces. */
	.od_edit_dpm_table = vangogh_od_edit_dpm_table,
	.print_clk_levels = vangogh_common_print_clk_levels,
	.set_default_dpm_table = vangogh_set_default_dpm_tables,
	.set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
	.system_features_control = vangogh_system_features_control,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.set_power_profile_mode = vangogh_set_power_profile_mode,
	.get_power_profile_mode = vangogh_get_power_profile_mode,
	.get_dpm_clock_table = vangogh_get_dpm_clock_table,
	.force_clk_levels = vangogh_force_clk_levels,
	.set_performance_level = vangogh_set_performance_level,
	/* Reset, GFXOFF and power-limit handling. */
	.post_init = vangogh_post_smu_init,
	.mode2_reset = vangogh_mode2_reset,
	.gfx_off_control = smu_v11_0_gfx_off_control,
	.get_ppt_limit = vangogh_get_ppt_limit,
	.get_power_limit = vangogh_get_power_limit,
	.set_power_limit = vangogh_set_power_limit,
	.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
};
2210 
/*
 * vangogh_set_ppt_funcs - install the Vangogh pptable callbacks and
 * message/feature/table/workload mapping tables on @smu.
 */
void vangogh_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &vangogh_ppt_funcs;
	smu->message_map = vangogh_message_map;
	smu->feature_map = vangogh_feature_mask_map;
	smu->table_map = vangogh_table_map;
	smu->workload_map = vangogh_workload_map;
	smu->is_apu = true; /* Vangogh is an APU: enables APU-specific paths */
}
2220