/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "smu_types.h"
#define SWSMU_CODE_LAYER_L2

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_v13_0.h"
#include "smu13_driver_if_v13_0_4.h"
#include "smu_v13_0_4_ppt.h"
#include "smu_v13_0_4_ppsmc.h"
#include "smu_v13_0_4_pmfw.h"
#include "smu_cmn.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

#define mmMP1_SMN_C2PMSG_66			0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX		1

#define mmMP1_SMN_C2PMSG_82			0x0292
#define mmMP1_SMN_C2PMSG_82_BASE_IDX		1

#define mmMP1_SMN_C2PMSG_90			0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX		1

#define FEATURE_MASK(feature) (1ULL << feature)

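/*
 * Any of these DPM features being enabled is treated as "DPM running"
 * by smu_v13_0_4_is_dpm_running() below.
 */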
#define SMC_DPM_FEATURE ( \
	FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \
	FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_ISP_DPM_BIT) | \
	FEATURE_MASK(FEATURE_IPU_DPM_BIT) | \
	FEATURE_MASK(FEATURE_GFX_DPM_BIT))

static struct cmn2asic_msg_mapping smu_v13_0_4_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage,			1),
	MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetPmfwVersion,		1),
	MSG_MAP(GetDriverIfVersion,             PPSMC_MSG_GetDriverIfVersion,		1),
	MSG_MAP(EnableGfxOff,                   PPSMC_MSG_EnableGfxOff,			1),
	MSG_MAP(AllowGfxOff,                    PPSMC_MSG_AllowGfxOff,			1),
	MSG_MAP(DisallowGfxOff,                 PPSMC_MSG_DisallowGfxOff,		1),
	MSG_MAP(PowerDownVcn,                   PPSMC_MSG_PowerDownVcn,			1),
	MSG_MAP(PowerUpVcn,                     PPSMC_MSG_PowerUpVcn,			1),
	MSG_MAP(SetHardMinVcn,                  PPSMC_MSG_SetHardMinVcn,		1),
	MSG_MAP(PrepareMp1ForUnload,            PPSMC_MSG_PrepareMp1ForUnload,		1),
	MSG_MAP(SetDriverDramAddrHigh,          PPSMC_MSG_SetDriverDramAddrHigh,	1),
	MSG_MAP(SetDriverDramAddrLow,           PPSMC_MSG_SetDriverDramAddrLow,		1),
	MSG_MAP(TransferTableSmu2Dram,          PPSMC_MSG_TransferTableSmu2Dram,	1),
	MSG_MAP(TransferTableDram2Smu,          PPSMC_MSG_TransferTableDram2Smu,	1),
	MSG_MAP(GfxDeviceDriverReset,           PPSMC_MSG_GfxDeviceDriverReset,		1),
	MSG_MAP(GetEnabledSmuFeatures,          PPSMC_MSG_GetEnabledSmuFeatures,	1),
	MSG_MAP(SetHardMinSocclkByFreq,         PPSMC_MSG_SetHardMinSocclkByFreq,	1),
	MSG_MAP(SetSoftMinVcn,                  PPSMC_MSG_SetSoftMinVcn,		1),
	MSG_MAP(GetGfxclkFrequency,             PPSMC_MSG_GetGfxclkFrequency,		1),
	MSG_MAP(GetFclkFrequency,               PPSMC_MSG_GetFclkFrequency,		1),
	MSG_MAP(SetSoftMaxGfxClk,               PPSMC_MSG_SetSoftMaxGfxClk,		1),
	MSG_MAP(SetHardMinGfxClk,               PPSMC_MSG_SetHardMinGfxClk,		1),
	MSG_MAP(SetSoftMaxSocclkByFreq,         PPSMC_MSG_SetSoftMaxSocclkByFreq,	1),
	MSG_MAP(SetSoftMaxFclkByFreq,           PPSMC_MSG_SetSoftMaxFclkByFreq,		1),
	MSG_MAP(SetSoftMaxVcn,                  PPSMC_MSG_SetSoftMaxVcn,		1),
	MSG_MAP(SetPowerLimitPercentage,        PPSMC_MSG_SetPowerLimitPercentage,	1),
	MSG_MAP(PowerDownJpeg,                  PPSMC_MSG_PowerDownJpeg,		1),
	MSG_MAP(PowerUpJpeg,                    PPSMC_MSG_PowerUpJpeg,			1),
	MSG_MAP(SetHardMinFclkByFreq,           PPSMC_MSG_SetHardMinFclkByFreq,		1),
	MSG_MAP(SetSoftMinSocclkByFreq,         PPSMC_MSG_SetSoftMinSocclkByFreq,	1),
	MSG_MAP(EnableGfxImu,                   PPSMC_MSG_EnableGfxImu,			1),
	MSG_MAP(PowerUpIspByTile,               PPSMC_MSG_PowerUpIspByTile,		1),
	MSG_MAP(PowerDownIspByTile,             PPSMC_MSG_PowerDownIspByTile,		1),
};

static struct cmn2asic_mapping smu_v13_0_4_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(CCLK_DPM),
	FEA_MAP(FAN_CONTROLLER),
	FEA_MAP(PPT),
	FEA_MAP(TDC),
	FEA_MAP(THERMAL),
	FEA_MAP(VCN_DPM),
	FEA_MAP_REVERSE(FCLK),
	FEA_MAP_REVERSE(SOCCLK),
	FEA_MAP(LCLK_DPM),
	FEA_MAP(SHUBCLK_DPM),
	FEA_MAP(DCFCLK_DPM),
	FEA_MAP_HALF_REVERSE(GFX),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_DCFCLK),
	FEA_MAP(DS_FCLK),
	FEA_MAP(DS_MP1CLK),
	FEA_MAP(DS_MP0CLK),
	FEA_MAP(GFX_DEM),
	FEA_MAP(PSI),
	FEA_MAP(PROCHOT),
	FEA_MAP(CPUOFF),
	FEA_MAP(STAPM),
	FEA_MAP(S0I3),
	FEA_MAP(PERF_LIMIT),
	FEA_MAP(CORE_DLDO),
	FEA_MAP(DS_VCN),
	FEA_MAP(CPPC),
	FEA_MAP(DF_CSTATES),
	FEA_MAP(ATHUB_PG),
};

static struct cmn2asic_mapping smu_v13_0_4_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP_VALID(WATERMARKS),
	TAB_MAP_VALID(SMU_METRICS),
	TAB_MAP_VALID(CUSTOM_DPM),
	TAB_MAP_VALID(DPMCLOCKS),
};

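/*
 * Allocate the driver-side shadows of the SMU tables (watermarks, DPM
 * clocks, metrics) plus the exported gpu_metrics buffer. All earlier
 * allocations are unwound on failure.
 */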
static int smu_v13_0_4_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
	if (!smu_table->clocks_table)
		goto err0_out;

	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
	if (!smu_table->metrics_table)
		goto err1_out;
	smu_table->metrics_time = 0;

	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
	if (!smu_table->watermarks_table)
		goto err2_out;

	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table)
		goto err3_out;

	return 0;

err3_out:
	kfree(smu_table->watermarks_table);
err2_out:
	kfree(smu_table->metrics_table);
err1_out:
	kfree(smu_table->clocks_table);
err0_out:
	return -ENOMEM;
}

static int smu_v13_0_4_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	kfree(smu_table->clocks_table);
	smu_table->clocks_table = NULL;

	kfree(smu_table->metrics_table);
	smu_table->metrics_table = NULL;

	kfree(smu_table->watermarks_table);
	smu_table->watermarks_table = NULL;

	return 0;
}

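/* DPM is considered running if any feature in SMC_DPM_FEATURE is enabled. */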
static bool smu_v13_0_4_is_dpm_running(struct smu_context *smu)
{
	int ret = 0;
	uint64_t feature_enabled;

	ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
	if (ret)
		return false;

	return !!(feature_enabled & SMC_DPM_FEATURE);
}

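/*
 * On disable, tell MP1 to prepare for driver unload. The notification
 * is skipped when entering S0ix.
 */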
static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (!en && !adev->in_s0ix)
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);

	return ret;
}

static int smu_v13_0_4_post_smu_init(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	/* The GfxOff "allow" message is sent separately, after this "enable" message */
	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
	if (ret)
		dev_err(adev->dev, "Failed to Enable GfxOff!\n");
	return ret;
}

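/*
 * Snapshot the firmware metrics table (bypassing the driver cache) and
 * translate it into the common gpu_metrics v2.1 layout exposed to
 * user space.
 */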
static ssize_t smu_v13_0_4_get_gpu_metrics(struct smu_context *smu,
					   void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_1 *gpu_metrics =
		(struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);

	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.SocTemperature;
	memcpy(&gpu_metrics->temperature_core[0],
	       &metrics.CoreTemperature[0],
	       sizeof(uint16_t) * 8);
	gpu_metrics->temperature_l3[0] = metrics.L3Temperature;

	gpu_metrics->average_gfx_activity = metrics.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.UvdActivity;

	gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
	gpu_metrics->average_gfx_power = metrics.Power[0];
	gpu_metrics->average_soc_power = metrics.Power[1];
	memcpy(&gpu_metrics->average_core_power[0],
	       &metrics.CorePower[0],
	       sizeof(uint16_t) * 8);

	gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
	       &metrics.CoreFrequency[0],
	       sizeof(uint16_t) * 8);
	gpu_metrics->current_l3clk[0] = metrics.L3Frequency;

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_1);
}

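/*
 * Fetch a single value from the cached metrics table. Passing NULL to
 * smu_cmn_get_metrics_table() refreshes smu_table->metrics_table in
 * place (subject to its internal caching interval) without copying it
 * out.
 */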
static int smu_v13_0_4_get_smu_metrics_data(struct smu_context *smu,
					    MetricsMember_t member,
					    uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu, NULL, false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_AVERAGE_GFXCLK:
		*value = metrics->GfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->SocclkFrequency;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->VclkFrequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->DclkFrequency;
		break;
	case METRICS_AVERAGE_UCLK:
		*value = metrics->MemclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->GfxActivity / 100;
		break;
	case METRICS_AVERAGE_VCNACTIVITY:
		*value = metrics->UvdActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
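		/*
		 * CurrentSocketPower is assumed to be reported in mW;
		 * mW * 256 / 1000 yields watts in Q8.8 fixed point.
		 */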
		*value = (metrics->CurrentSocketPower << 8) / 1000;
		break;
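	/*
	 * Firmware temperatures appear to be in centidegrees Celsius:
	 * divide by 100 for degrees C, then scale to the power-play
	 * temperature unit.
	 */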
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->GfxTemperature / 100 *
			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->SocTemperature / 100 *
			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->ThrottlerStatus;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		*value = metrics->Voltage[0];
		break;
	case METRICS_VOLTAGE_VDDSOC:
		*value = metrics->Voltage[1];
		break;
	case METRICS_SS_APU_SHARE:
		/*
		 * Return APU power as a percentage of the APU's power
		 * limit. This is a percentage, not a boost value:
		 * SmartShift power boost/shift happens only when the
		 * percentage exceeds 100.
		 */
		if (metrics->StapmOpnLimit > 0)
			*value = (metrics->ApuPower * 100) / metrics->StapmOpnLimit;
		else
			*value = 0;
		break;
	case METRICS_SS_DGPU_SHARE:
		/*
		 * Return dGPU power as a percentage of the dGPU's power
		 * limit. This is a percentage, not a boost value:
		 * SmartShift power boost/shift happens only when the
		 * percentage exceeds 100.
		 */
		if ((metrics->dGpuPower > 0) &&
		    (metrics->StapmCurrentLimit > metrics->StapmOpnLimit))
			*value = (metrics->dGpuPower * 100) /
				 (metrics->StapmCurrentLimit - metrics->StapmOpnLimit);
		else
			*value = 0;
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}

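/*
 * Read the current frequency of a clock domain. GFXCLK and FCLK are
 * queried with dedicated messages; the other domains come from the
 * metrics table.
 */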
static int smu_v13_0_4_get_current_clk_freq(struct smu_context *smu,
					    enum smu_clk_type clk_type,
					    uint32_t *value)
{
	MetricsMember_t member_type;

	switch (clk_type) {
	case SMU_SOCCLK:
		member_type = METRICS_AVERAGE_SOCCLK;
		break;
	case SMU_VCLK:
		member_type = METRICS_AVERAGE_VCLK;
		break;
	case SMU_DCLK:
		member_type = METRICS_AVERAGE_DCLK;
		break;
	case SMU_MCLK:
		member_type = METRICS_AVERAGE_UCLK;
		break;
	case SMU_FCLK:
		return smu_cmn_send_smc_msg_with_param(smu,
						       SMU_MSG_GetFclkFrequency,
						       0, value);
	case SMU_GFXCLK:
	case SMU_SCLK:
		return smu_cmn_send_smc_msg_with_param(smu,
						       SMU_MSG_GetGfxclkFrequency,
						       0, value);
	default:
		return -EINVAL;
	}

	return smu_v13_0_4_get_smu_metrics_data(smu, member_type, value);
}

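/*
 * Look up the frequency of a DPM level in the cached DpmClocks_t
 * table. UCLK/MCLK and FCLK share the DF P-state table; VCLK and DCLK
 * share the VCN level count.
 */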
static int smu_v13_0_4_get_dpm_freq_by_index(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     uint32_t dpm_level,
					     uint32_t *freq)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	if (!clk_table || clk_type >= SMU_CLK_COUNT)
		return -EINVAL;

	switch (clk_type) {
	case SMU_SOCCLK:
		if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->SocClocks[dpm_level];
		break;
	case SMU_VCLK:
		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->VClocks[dpm_level];
		break;
	case SMU_DCLK:
		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->DClocks[dpm_level];
		break;
	case SMU_UCLK:
	case SMU_MCLK:
		if (dpm_level >= clk_table->NumDfPstatesEnabled)
			return -EINVAL;
		*freq = clk_table->DfPstateTable[dpm_level].MemClk;
		break;
	case SMU_FCLK:
		if (dpm_level >= clk_table->NumDfPstatesEnabled)
			return -EINVAL;
		*freq = clk_table->DfPstateTable[dpm_level].FClk;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu,
					   enum smu_clk_type clk_type,
					   uint32_t *count)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	switch (clk_type) {
	case SMU_SOCCLK:
		*count = clk_table->NumSocClkLevelsEnabled;
		break;
	case SMU_VCLK:
	case SMU_DCLK:
		*count = clk_table->VcnClkLevelsEnabled;
		break;
	case SMU_MCLK:
	case SMU_FCLK:
		*count = clk_table->NumDfPstatesEnabled;
		break;
	default:
		break;
	}

	return 0;
}

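/*
 * Emit the sysfs view of a clock domain. For GFXCLK/SCLK only a
 * min/current/max triplet is printed, with 1100 MHz (the UMD P-state
 * gfxclk) shown as the intermediate level whenever the current clock
 * sits between min and max.
 */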
static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type, char *buf)
{
	int i, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	uint32_t min, max;

	smu_cmn_get_sysfs_buf(&buf, &size);

	switch (clk_type) {
	case SMU_OD_SCLK:
		size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
		size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
				      (smu->gfx_actual_hard_min_freq > 0) ?
				      smu->gfx_actual_hard_min_freq :
				      smu->gfx_default_hard_min_freq);
		size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
				      (smu->gfx_actual_soft_max_freq > 0) ?
				      smu->gfx_actual_soft_max_freq :
				      smu->gfx_default_soft_max_freq);
		break;
	case SMU_OD_RANGE:
		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
				      smu->gfx_default_hard_min_freq,
				      smu->gfx_default_soft_max_freq);
		break;
	case SMU_SOCCLK:
	case SMU_VCLK:
	case SMU_DCLK:
	case SMU_MCLK:
	case SMU_FCLK:
		ret = smu_v13_0_4_get_current_clk_freq(smu, clk_type, &cur_value);
		if (ret)
			break;

		ret = smu_v13_0_4_get_dpm_level_count(smu, clk_type, &count);
		if (ret)
			break;

		for (i = 0; i < count; i++) {
			ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, i, &value);
			if (ret)
				break;

			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
					      cur_value == value ? "*" : "");
		}
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		ret = smu_v13_0_4_get_current_clk_freq(smu, clk_type, &cur_value);
		if (ret)
			break;
		min = (smu->gfx_actual_hard_min_freq > 0) ?
		      smu->gfx_actual_hard_min_freq :
		      smu->gfx_default_hard_min_freq;
		max = (smu->gfx_actual_soft_max_freq > 0) ?
		      smu->gfx_actual_soft_max_freq :
		      smu->gfx_default_soft_max_freq;
		if (cur_value == max)
			i = 2;
		else if (cur_value == min)
			i = 0;
		else
			i = 1;
		size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
				      i == 0 ? "*" : "");
		size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
				      i == 1 ? cur_value : 1100, /* UMD PSTATE GFXCLK 1100 */
				      i == 1 ? "*" : "");
		size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
				      i == 2 ? "*" : "");
		break;
	default:
		break;
	}

	return size;
}

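/*
 * amd_pp sensor backend. The clock sensors are scaled from MHz to what
 * appears to be 10 kHz units (MHz * 100); every result is 4 bytes.
 */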
static int smu_v13_0_4_read_sensor(struct smu_context *smu,
				   enum amd_pp_sensors sensor,
				   void *data, uint32_t *size)
{
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = smu_v13_0_4_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		ret = smu_v13_0_4_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_SOCKETPOWER,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		ret = smu_v13_0_4_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_EDGE,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		ret = smu_v13_0_4_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_HOTSPOT,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_v13_0_4_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_UCLK,
						       (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_v13_0_4_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXCLK,
						       (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v13_0_4_get_smu_metrics_data(smu,
						       METRICS_VOLTAGE_VDDGFX,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDNB:
		ret = smu_v13_0_4_get_smu_metrics_data(smu,
						       METRICS_VOLTAGE_VDDSOC,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_SS_APU_SHARE:
		ret = smu_v13_0_4_get_smu_metrics_data(smu,
						       METRICS_SS_APU_SHARE,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_SS_DGPU_SHARE:
		ret = smu_v13_0_4_get_smu_metrics_data(smu,
						       METRICS_SS_DGPU_SHARE,
						       (uint32_t *)data);
		*size = 4;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

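/*
 * Fill the Watermarks_t shadow from the ranges provided by the display
 * code and push it to the SMU once: reader sets map to the WM_DCFCLK
 * rows, writer sets to the WM_SOCCLK rows.
 */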
static int smu_v13_0_4_set_watermarks_table(struct smu_context *smu,
					    struct pp_smu_wm_range_sets *clock_ranges)
{
	int i;
	int ret = 0;
	Watermarks_t *table = smu->smu_table.watermarks_table;

	if (!table || !clock_ranges)
		return -EINVAL;

	if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
	    clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
		return -EINVAL;

	for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
		table->WatermarkRow[WM_DCFCLK][i].MinClock =
			clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
		table->WatermarkRow[WM_DCFCLK][i].MaxClock =
			clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
		table->WatermarkRow[WM_DCFCLK][i].MinMclk =
			clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
		table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
			clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;

		table->WatermarkRow[WM_DCFCLK][i].WmSetting =
			clock_ranges->reader_wm_sets[i].wm_inst;
	}

	for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
		table->WatermarkRow[WM_SOCCLK][i].MinClock =
			clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
		table->WatermarkRow[WM_SOCCLK][i].MaxClock =
			clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
		table->WatermarkRow[WM_SOCCLK][i].MinMclk =
			clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
		table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
			clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;

		table->WatermarkRow[WM_SOCCLK][i].WmSetting =
			clock_ranges->writer_wm_sets[i].wm_inst;
	}

	smu->watermarks_bitmap |= WATERMARKS_EXIST;

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	     !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_cmn_write_watermarks_table(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to update WMTABLE!\n");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	return 0;
}

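/*
 * Map a clock domain to its DPM feature bit and query whether that
 * feature is enabled. Unknown domains are treated as always enabled.
 */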
static bool smu_v13_0_4_clk_dpm_is_enabled(struct smu_context *smu,
					   enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
	case SMU_FCLK:
		feature_id = SMU_FEATURE_DPM_FCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	case SMU_VCLK:
	case SMU_DCLK:
		feature_id = SMU_FEATURE_VCN_DPM_BIT;
		break;
	default:
		return true;
	}

	return smu_cmn_feature_is_enabled(smu, feature_id);
}

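/*
 * Report the min/max attainable frequency of a clock domain. With DPM
 * disabled the bootup clock is returned for both bounds. Note that DF
 * P-state 0 is the fastest, so MCLK/FCLK use level 0 for max and the
 * last enabled level for min.
 */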
static int smu_v13_0_4_get_dpm_ultimate_freq(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     uint32_t *min,
					     uint32_t *max)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
	uint32_t clock_limit;
	uint32_t max_dpm_level, min_dpm_level;
	int ret = 0;

	if (!smu_v13_0_4_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_FCLK:
			clock_limit = smu->smu_table.boot_values.fclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		case SMU_VCLK:
			clock_limit = smu->smu_table.boot_values.vclk;
			break;
		case SMU_DCLK:
			clock_limit = smu->smu_table.boot_values.dclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* bootup clocks are in 10 kHz units; convert to MHz */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}

	if (max) {
		switch (clk_type) {
		case SMU_GFXCLK:
		case SMU_SCLK:
			*max = clk_table->MaxGfxClk;
			break;
		case SMU_MCLK:
		case SMU_UCLK:
		case SMU_FCLK:
			max_dpm_level = 0;
			break;
		case SMU_SOCCLK:
			max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1;
			break;
		case SMU_VCLK:
		case SMU_DCLK:
			max_dpm_level = clk_table->VcnClkLevelsEnabled - 1;
			break;
		default:
			return -EINVAL;
		}

		if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
			ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type,
								max_dpm_level,
								max);
			if (ret)
				return ret;
		}
	}

	if (min) {
		switch (clk_type) {
		case SMU_GFXCLK:
		case SMU_SCLK:
			*min = clk_table->MinGfxClk;
			break;
		case SMU_MCLK:
		case SMU_UCLK:
		case SMU_FCLK:
			min_dpm_level = clk_table->NumDfPstatesEnabled - 1;
			break;
		case SMU_SOCCLK:
		case SMU_VCLK:
		case SMU_DCLK:
			min_dpm_level = 0;
			break;
		default:
			return -EINVAL;
		}

		if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
			ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type,
								min_dpm_level,
								min);
		}
	}

	return ret;
}

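/*
 * Clamp a clock domain to [min, max] via its per-domain hard-min and
 * soft-max messages.
 */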
static int smu_v13_0_4_set_soft_freq_limited_range(struct smu_context *smu,
						   enum smu_clk_type clk_type,
						   uint32_t min,
						   uint32_t max)
{
	enum smu_message_type msg_set_min, msg_set_max;
	int ret = 0;

	if (!smu_v13_0_4_clk_dpm_is_enabled(smu, clk_type))
		return -EINVAL;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		msg_set_min = SMU_MSG_SetHardMinGfxClk;
		msg_set_max = SMU_MSG_SetSoftMaxGfxClk;
		break;
	case SMU_FCLK:
		msg_set_min = SMU_MSG_SetHardMinFclkByFreq;
		msg_set_max = SMU_MSG_SetSoftMaxFclkByFreq;
		break;
	case SMU_SOCCLK:
		msg_set_min = SMU_MSG_SetHardMinSocclkByFreq;
		msg_set_max = SMU_MSG_SetSoftMaxSocclkByFreq;
		break;
	case SMU_VCLK:
	case SMU_DCLK:
		msg_set_min = SMU_MSG_SetHardMinVcn;
		msg_set_max = SMU_MSG_SetSoftMaxVcn;
		break;
	default:
		return -EINVAL;
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL);
	if (ret)
		return ret;

	return smu_cmn_send_smc_msg_with_param(smu, msg_set_max,
					       max, NULL);
}

static int smu_v13_0_4_force_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					uint32_t mask)
{
	uint32_t soft_min_level = 0, soft_max_level = 0;
	uint32_t min_freq = 0, max_freq = 0;
	int ret = 0;

	soft_min_level = mask ? (ffs(mask) - 1) : 0;
	soft_max_level = mask ? (fls(mask) - 1) : 0;

	switch (clk_type) {
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_DCLK:
		ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
		if (ret)
			break;

		ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
		if (ret)
			break;

		ret = smu_v13_0_4_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

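/*
 * Apply a forced performance level by pinning SCLK, FCLK and SOCCLK to
 * their lowest, highest or full DPM range as appropriate.
 */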
static int smu_v13_0_4_set_performance_level(struct smu_context *smu,
					     enum amd_dpm_forced_level level)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t sclk_min = 0, sclk_max = 0;
	uint32_t fclk_min = 0, fclk_max = 0;
	uint32_t socclk_min = 0, socclk_max = 0;
	int ret = 0;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
		sclk_min = sclk_max;
		fclk_min = fclk_max;
		socclk_min = socclk_max;
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
		sclk_max = sclk_min;
		fclk_max = fclk_min;
		socclk_max = socclk_min;
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		/* Temporarily do nothing since the optimal clocks haven't been provided yet */
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
		return 0;
	default:
		dev_err(adev->dev, "Invalid performance level %d\n", level);
		return -EINVAL;
	}

	if (sclk_min && sclk_max) {
		ret = smu_v13_0_4_set_soft_freq_limited_range(smu,
							      SMU_SCLK,
							      sclk_min,
							      sclk_max);
		if (ret)
			return ret;

		smu->gfx_actual_hard_min_freq = sclk_min;
		smu->gfx_actual_soft_max_freq = sclk_max;
	}

	if (fclk_min && fclk_max) {
		ret = smu_v13_0_4_set_soft_freq_limited_range(smu,
							      SMU_FCLK,
							      fclk_min,
							      fclk_max);
		if (ret)
			return ret;
	}

	if (socclk_min && socclk_max) {
		ret = smu_v13_0_4_set_soft_freq_limited_range(smu,
							      SMU_SOCCLK,
							      socclk_min,
							      socclk_max);
		if (ret)
			return ret;
	}

	return ret;
}

static int smu_v13_0_4_mode2_reset(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
					       SMU_RESET_MODE_2, NULL);
}

static int smu_v13_0_4_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
	smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
	smu->gfx_actual_hard_min_freq = 0;
	smu->gfx_actual_soft_max_freq = 0;

	return 0;
}

static const struct pptable_funcs smu_v13_0_4_ppt_funcs = {
	.check_fw_status = smu_v13_0_check_fw_status,
	.check_fw_version = smu_v13_0_check_fw_version,
	.init_smc_tables = smu_v13_0_4_init_smc_tables,
	.fini_smc_tables = smu_v13_0_4_fini_smc_tables,
	.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
	.system_features_control = smu_v13_0_4_system_features_control,
	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
	.send_smc_msg = smu_cmn_send_smc_msg,
	.dpm_set_vcn_enable = smu_v13_0_set_vcn_enable,
	.dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
	.set_default_dpm_table = smu_v13_0_set_default_dpm_tables,
	.read_sensor = smu_v13_0_4_read_sensor,
	.is_dpm_running = smu_v13_0_4_is_dpm_running,
	.set_watermarks_table = smu_v13_0_4_set_watermarks_table,
	.get_gpu_metrics = smu_v13_0_4_get_gpu_metrics,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_driver_table_location = smu_v13_0_set_driver_table_location,
	.gfx_off_control = smu_v13_0_gfx_off_control,
	.post_init = smu_v13_0_4_post_smu_init,
	.mode2_reset = smu_v13_0_4_mode2_reset,
	.get_dpm_ultimate_freq = smu_v13_0_4_get_dpm_ultimate_freq,
	.od_edit_dpm_table = smu_v13_0_od_edit_dpm_table,
	.print_clk_levels = smu_v13_0_4_print_clk_levels,
	.force_clk_levels = smu_v13_0_4_force_clk_levels,
	.set_performance_level = smu_v13_0_4_set_performance_level,
	.set_fine_grain_gfx_freq_parameters = smu_v13_0_4_set_fine_grain_gfx_freq_parameters,
	.set_gfx_power_up_by_imu = smu_v13_0_set_gfx_power_up_by_imu,
};

void smu_v13_0_4_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &smu_v13_0_4_ppt_funcs;
	smu->message_map = smu_v13_0_4_message_map;
	smu->feature_map = smu_v13_0_4_feature_mask_map;
	smu->table_map = smu_v13_0_4_table_map;
	smu->is_apu = true;
	smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
	smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
	smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
}