1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #define SWSMU_CODE_LAYER_L2
25 
26 #include <linux/firmware.h>
27 #include "amdgpu.h"
28 #include "amdgpu_smu.h"
29 #include "atomfirmware.h"
30 #include "amdgpu_atomfirmware.h"
31 #include "amdgpu_atombios.h"
32 #include "smu_v13_0.h"
33 #include "smu13_driver_if_aldebaran.h"
34 #include "soc15_common.h"
35 #include "atom.h"
36 #include "power_state.h"
37 #include "aldebaran_ppt.h"
38 #include "smu_v13_0_pptable.h"
39 #include "aldebaran_ppsmc.h"
40 #include "nbio/nbio_7_4_offset.h"
41 #include "nbio/nbio_7_4_sh_mask.h"
42 #include "thm/thm_11_0_2_offset.h"
43 #include "thm/thm_11_0_2_sh_mask.h"
44 #include "amdgpu_xgmi.h"
45 #include <linux/pci.h>
46 #include "amdgpu_ras.h"
47 #include "smu_cmn.h"
48 #include "mp/mp_13_0_2_offset.h"
49 
50 /*
51  * DO NOT use these for err/warn/info/debug messages.
52  * Use dev_err, dev_warn, dev_info and dev_dbg instead.
53  * They are more MGPU friendly.
54  */
55 #undef pr_err
56 #undef pr_warn
57 #undef pr_info
58 #undef pr_debug
59 
/* Resolve the owning amdgpu_device from an embedded pm.smu_i2c member. */
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))

/* Entry builder for aldebaran_feature_mask_map: marks the generic SMU
 * feature as valid (1) and records the matching Aldebaran feature bit.
 */
#define ALDEBARAN_FEA_MAP(smu_feature, aldebaran_feature) \
	[smu_feature] = {1, (aldebaran_feature)}

/* 64-bit mask for a single feature bit position. */
#define FEATURE_MASK(feature) (1ULL << feature)
/* Union of all DPM (clock-control) related firmware feature bits. */
#define SMC_DPM_FEATURE ( \
			  FEATURE_MASK(FEATURE_DATA_CALCULATIONS) | \
			  FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)	| \
			  FEATURE_MASK(FEATURE_DPM_UCLK_BIT)	| \
			  FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)	| \
			  FEATURE_MASK(FEATURE_DPM_FCLK_BIT)	| \
			  FEATURE_MASK(FEATURE_DPM_LCLK_BIT)	| \
			  FEATURE_MASK(FEATURE_DPM_XGMI_BIT)	| \
			  FEATURE_MASK(FEATURE_DPM_VCN_BIT))

/* possible frequency drift (1Mhz) */
#define EPSILON				1

/* SMN address of the PCIE ESM control register -- presumably used for link
 * speed queries; confirm against the register spec before reuse.
 */
#define smnPCIE_ESM_CTRL			0x111003D0
80 
/*
 * Generic SMU message -> Aldebaran PPSMC message index mapping.
 * The trailing MSG_MAP() flag appears to mark messages that remain valid
 * under virtualization (VF) -- confirm against MSG_MAP in smu_cmn.h.
 * Note GfxDeviceDriverReset intentionally maps to PPSMC_MSG_GfxDriverReset.
 */
static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,			     PPSMC_MSG_TestMessage,			0),
	MSG_MAP(GetSmuVersion,			     PPSMC_MSG_GetSmuVersion,			1),
	MSG_MAP(GetDriverIfVersion,		     PPSMC_MSG_GetDriverIfVersion,		1),
	MSG_MAP(EnableAllSmuFeatures,		     PPSMC_MSG_EnableAllSmuFeatures,		0),
	MSG_MAP(DisableAllSmuFeatures,		     PPSMC_MSG_DisableAllSmuFeatures,		0),
	MSG_MAP(GetEnabledSmuFeaturesLow,	     PPSMC_MSG_GetEnabledSmuFeaturesLow,	0),
	MSG_MAP(GetEnabledSmuFeaturesHigh,	     PPSMC_MSG_GetEnabledSmuFeaturesHigh,	0),
	MSG_MAP(SetDriverDramAddrHigh,		     PPSMC_MSG_SetDriverDramAddrHigh,		1),
	MSG_MAP(SetDriverDramAddrLow,		     PPSMC_MSG_SetDriverDramAddrLow,		1),
	MSG_MAP(SetToolsDramAddrHigh,		     PPSMC_MSG_SetToolsDramAddrHigh,		0),
	MSG_MAP(SetToolsDramAddrLow,		     PPSMC_MSG_SetToolsDramAddrLow,		0),
	MSG_MAP(TransferTableSmu2Dram,		     PPSMC_MSG_TransferTableSmu2Dram,		1),
	MSG_MAP(TransferTableDram2Smu,		     PPSMC_MSG_TransferTableDram2Smu,		0),
	MSG_MAP(UseDefaultPPTable,		     PPSMC_MSG_UseDefaultPPTable,		0),
	MSG_MAP(SetSystemVirtualDramAddrHigh,	     PPSMC_MSG_SetSystemVirtualDramAddrHigh,	0),
	MSG_MAP(SetSystemVirtualDramAddrLow,	     PPSMC_MSG_SetSystemVirtualDramAddrLow,	0),
	MSG_MAP(SetSoftMinByFreq,		     PPSMC_MSG_SetSoftMinByFreq,		0),
	MSG_MAP(SetSoftMaxByFreq,		     PPSMC_MSG_SetSoftMaxByFreq,		0),
	MSG_MAP(SetHardMinByFreq,		     PPSMC_MSG_SetHardMinByFreq,		0),
	MSG_MAP(SetHardMaxByFreq,		     PPSMC_MSG_SetHardMaxByFreq,		0),
	MSG_MAP(GetMinDpmFreq,			     PPSMC_MSG_GetMinDpmFreq,			0),
	MSG_MAP(GetMaxDpmFreq,			     PPSMC_MSG_GetMaxDpmFreq,			0),
	MSG_MAP(GetDpmFreqByIndex,		     PPSMC_MSG_GetDpmFreqByIndex,		1),
	MSG_MAP(SetWorkloadMask,		     PPSMC_MSG_SetWorkloadMask,			1),
	MSG_MAP(GetVoltageByDpm,		     PPSMC_MSG_GetVoltageByDpm,			0),
	MSG_MAP(GetVoltageByDpmOverdrive,	     PPSMC_MSG_GetVoltageByDpmOverdrive,	0),
	MSG_MAP(SetPptLimit,			     PPSMC_MSG_SetPptLimit,			0),
	MSG_MAP(GetPptLimit,			     PPSMC_MSG_GetPptLimit,			1),
	MSG_MAP(PrepareMp1ForUnload,		     PPSMC_MSG_PrepareMp1ForUnload,		0),
	MSG_MAP(GfxDeviceDriverReset,		     PPSMC_MSG_GfxDriverReset,			0),
	MSG_MAP(RunDcBtc,			     PPSMC_MSG_RunDcBtc,			0),
	MSG_MAP(DramLogSetDramAddrHigh,		     PPSMC_MSG_DramLogSetDramAddrHigh,		0),
	MSG_MAP(DramLogSetDramAddrLow,		     PPSMC_MSG_DramLogSetDramAddrLow,		0),
	MSG_MAP(DramLogSetDramSize,		     PPSMC_MSG_DramLogSetDramSize,		0),
	MSG_MAP(GetDebugData,			     PPSMC_MSG_GetDebugData,			0),
	MSG_MAP(WaflTest,			     PPSMC_MSG_WaflTest,			0),
	MSG_MAP(SetMemoryChannelEnable,		     PPSMC_MSG_SetMemoryChannelEnable,		0),
	MSG_MAP(SetNumBadHbmPagesRetired,	     PPSMC_MSG_SetNumBadHbmPagesRetired,	0),
	MSG_MAP(DFCstateControl,		     PPSMC_MSG_DFCstateControl,			0),
	MSG_MAP(GetGmiPwrDnHyst,		     PPSMC_MSG_GetGmiPwrDnHyst,			0),
	MSG_MAP(SetGmiPwrDnHyst,		     PPSMC_MSG_SetGmiPwrDnHyst,			0),
	MSG_MAP(GmiPwrDnControl,		     PPSMC_MSG_GmiPwrDnControl,			0),
	MSG_MAP(EnterGfxoff,			     PPSMC_MSG_EnterGfxoff,			0),
	MSG_MAP(ExitGfxoff,			     PPSMC_MSG_ExitGfxoff,			0),
	MSG_MAP(SetExecuteDMATest,		     PPSMC_MSG_SetExecuteDMATest,		0),
	MSG_MAP(EnableDeterminism,		     PPSMC_MSG_EnableDeterminism,		0),
	MSG_MAP(DisableDeterminism,		     PPSMC_MSG_DisableDeterminism,		0),
	MSG_MAP(SetUclkDpmMode,			     PPSMC_MSG_SetUclkDpmMode,			0),
	MSG_MAP(GfxDriverResetRecovery,		     PPSMC_MSG_GfxDriverResetRecovery,		0),
};
132 
/*
 * Generic SMU clock type -> Aldebaran PPCLK index mapping.
 * SCLK aliases GFXCLK and MCLK aliases UCLK (both names map to the same
 * hardware clock index).
 */
static const struct cmn2asic_mapping aldebaran_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(GFXCLK, PPCLK_GFXCLK),
	CLK_MAP(SCLK,	PPCLK_GFXCLK),
	CLK_MAP(SOCCLK, PPCLK_SOCCLK),
	CLK_MAP(FCLK, PPCLK_FCLK),
	CLK_MAP(UCLK, PPCLK_UCLK),
	CLK_MAP(MCLK, PPCLK_UCLK),
	CLK_MAP(DCLK, PPCLK_DCLK),
	CLK_MAP(VCLK, PPCLK_VCLK),
	CLK_MAP(LCLK, 	PPCLK_LCLK),
};
144 
/*
 * Generic SMU feature bit -> Aldebaran firmware feature bit mapping.
 * Note some generic names map to differently-named ASIC bits (e.g.
 * DPM_PREFETCHER -> DATA_CALCULATIONS, XGMI -> DPM_XGMI, VCN_PG -> DPM_VCN).
 */
static const struct cmn2asic_mapping aldebaran_feature_mask_map[SMU_FEATURE_COUNT] = {
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_PREFETCHER_BIT, 		FEATURE_DATA_CALCULATIONS),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, 			FEATURE_DPM_GFXCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_UCLK_BIT, 			FEATURE_DPM_UCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_SOCCLK_BIT, 			FEATURE_DPM_SOCCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, 			FEATURE_DPM_FCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_LCLK_BIT, 			FEATURE_DPM_LCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_XGMI_BIT, 				FEATURE_DPM_XGMI_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, 			FEATURE_DS_GFXCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, 			FEATURE_DS_SOCCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, 				FEATURE_DS_LCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, 				FEATURE_DS_FCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_UCLK_BIT,				FEATURE_DS_UCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_GFX_SS_BIT, 				FEATURE_GFX_SS_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_VCN_PG_BIT, 				FEATURE_DPM_VCN_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_RSMU_SMN_CG_BIT, 			FEATURE_RSMU_SMN_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_WAFL_CG_BIT, 				FEATURE_WAFL_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_PPT_BIT, 					FEATURE_PPT_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_TDC_BIT, 					FEATURE_TDC_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_APCC_PLUS_BIT, 			FEATURE_APCC_PLUS_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT, 			FEATURE_APCC_DFLL_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_FUSE_CG_BIT, 				FEATURE_FUSE_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, 				FEATURE_MP1_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_SMUIO_CG_BIT, 			FEATURE_SMUIO_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_THM_CG_BIT, 				FEATURE_THM_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_CLK_CG_BIT, 				FEATURE_CLK_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, 				FEATURE_FW_CTF_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_THERMAL_BIT, 				FEATURE_THERMAL_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_OUT_OF_BAND_MONITOR_BIT, 	FEATURE_OUT_OF_BAND_MONITOR_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT,FEATURE_XGMI_PER_LINK_PWR_DWN),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, 			FEATURE_DF_CSTATE),
};
177 
/* Generic SMU table id -> Aldebaran firmware table id mapping. */
static const struct cmn2asic_mapping aldebaran_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(PPTABLE),
	TAB_MAP(AVFS_PSM_DEBUG),
	TAB_MAP(AVFS_FUSE_OVERRIDE),
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(DRIVER_SMU_CONFIG),
	TAB_MAP(I2C_COMMANDS),
};
187 
188 static int aldebaran_tables_init(struct smu_context *smu)
189 {
190 	struct smu_table_context *smu_table = &smu->smu_table;
191 	struct smu_table *tables = smu_table->tables;
192 
193 	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
194 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
195 
196 	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
197 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
198 
199 	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
200 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
201 
202 	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
203 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
204 
205 	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
206 	if (!smu_table->metrics_table)
207 		return -ENOMEM;
208 	smu_table->metrics_time = 0;
209 
210 	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_2);
211 	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
212 	if (!smu_table->gpu_metrics_table) {
213 		kfree(smu_table->metrics_table);
214 		return -ENOMEM;
215 	}
216 
217 	return 0;
218 }
219 
220 static int aldebaran_allocate_dpm_context(struct smu_context *smu)
221 {
222 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
223 
224 	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
225 				       GFP_KERNEL);
226 	if (!smu_dpm->dpm_context)
227 		return -ENOMEM;
228 	smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);
229 
230 	smu_dpm->dpm_current_power_state = kzalloc(sizeof(struct smu_power_state),
231 						   GFP_KERNEL);
232 	if (!smu_dpm->dpm_current_power_state)
233 		return -ENOMEM;
234 
235 	smu_dpm->dpm_request_power_state = kzalloc(sizeof(struct smu_power_state),
236 						   GFP_KERNEL);
237 	if (!smu_dpm->dpm_request_power_state)
238 		return -ENOMEM;
239 
240 	return 0;
241 }
242 
/*
 * aldebaran_init_smc_tables - set up SMC table bookkeeping.
 *
 * Registers the Aldebaran tables, allocates the DPM context and then
 * delegates to the common smu_v13_0 table initialization.  The first
 * failing step aborts the sequence and its error code is returned.
 */
static int aldebaran_init_smc_tables(struct smu_context *smu)
{
	int err;

	err = aldebaran_tables_init(smu);
	if (!err)
		err = aldebaran_allocate_dpm_context(smu);
	if (!err)
		err = smu_v13_0_init_smc_tables(smu);

	return err;
}
257 
/*
 * aldebaran_get_allowed_feature_mask - report which SMU features the
 * driver allows to be enabled.
 *
 * All bits are set; the actual feature selection is handled by the
 * pptable.  @num is the number of 32-bit words in @feature_mask and may
 * be at most 2 (a 64-bit mask).
 *
 * Returns 0 on success, -EINVAL if @num is out of range.
 */
static int aldebaran_get_allowed_feature_mask(struct smu_context *smu,
					      uint32_t *feature_mask, uint32_t num)
{
	uint32_t i;

	if (num > 2)
		return -EINVAL;

	/* Allow everything; the pptable decides what actually gets enabled. */
	for (i = 0; i < num; i++)
		feature_mask[i] = 0xFFFFFFFF;

	return 0;
}
269 
270 static int aldebaran_set_default_dpm_table(struct smu_context *smu)
271 {
272 	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
273 	struct smu_13_0_dpm_table *dpm_table = NULL;
274 	PPTable_t *pptable = smu->smu_table.driver_pptable;
275 	int ret = 0;
276 
277 	/* socclk dpm table setup */
278 	dpm_table = &dpm_context->dpm_tables.soc_table;
279 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
280 		ret = smu_v13_0_set_single_dpm_table(smu,
281 						     SMU_SOCCLK,
282 						     dpm_table);
283 		if (ret)
284 			return ret;
285 	} else {
286 		dpm_table->count = 1;
287 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
288 		dpm_table->dpm_levels[0].enabled = true;
289 		dpm_table->min = dpm_table->dpm_levels[0].value;
290 		dpm_table->max = dpm_table->dpm_levels[0].value;
291 	}
292 
293 	/* gfxclk dpm table setup */
294 	dpm_table = &dpm_context->dpm_tables.gfx_table;
295 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
296 		/* in the case of gfxclk, only fine-grained dpm is honored */
297 		dpm_table->count = 2;
298 		dpm_table->dpm_levels[0].value = pptable->GfxclkFmin;
299 		dpm_table->dpm_levels[0].enabled = true;
300 		dpm_table->dpm_levels[1].value = pptable->GfxclkFmax;
301 		dpm_table->dpm_levels[1].enabled = true;
302 		dpm_table->min = dpm_table->dpm_levels[0].value;
303 		dpm_table->max = dpm_table->dpm_levels[1].value;
304 	} else {
305 		dpm_table->count = 1;
306 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
307 		dpm_table->dpm_levels[0].enabled = true;
308 		dpm_table->min = dpm_table->dpm_levels[0].value;
309 		dpm_table->max = dpm_table->dpm_levels[0].value;
310 	}
311 
312 	/* memclk dpm table setup */
313 	dpm_table = &dpm_context->dpm_tables.uclk_table;
314 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
315 		ret = smu_v13_0_set_single_dpm_table(smu,
316 						     SMU_UCLK,
317 						     dpm_table);
318 		if (ret)
319 			return ret;
320 	} else {
321 		dpm_table->count = 1;
322 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
323 		dpm_table->dpm_levels[0].enabled = true;
324 		dpm_table->min = dpm_table->dpm_levels[0].value;
325 		dpm_table->max = dpm_table->dpm_levels[0].value;
326 	}
327 
328 	/* fclk dpm table setup */
329 	dpm_table = &dpm_context->dpm_tables.fclk_table;
330 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
331 		ret = smu_v13_0_set_single_dpm_table(smu,
332 						     SMU_FCLK,
333 						     dpm_table);
334 		if (ret)
335 			return ret;
336 	} else {
337 		dpm_table->count = 1;
338 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
339 		dpm_table->dpm_levels[0].enabled = true;
340 		dpm_table->min = dpm_table->dpm_levels[0].value;
341 		dpm_table->max = dpm_table->dpm_levels[0].value;
342 	}
343 
344 	return 0;
345 }
346 
347 static int aldebaran_check_powerplay_table(struct smu_context *smu)
348 {
349 	struct smu_table_context *table_context = &smu->smu_table;
350 	struct smu_13_0_powerplay_table *powerplay_table =
351 		table_context->power_play_table;
352 	struct smu_baco_context *smu_baco = &smu->smu_baco;
353 
354 	mutex_lock(&smu_baco->mutex);
355 	if (powerplay_table->platform_caps & SMU_13_0_PP_PLATFORM_CAP_BACO ||
356 	    powerplay_table->platform_caps & SMU_13_0_PP_PLATFORM_CAP_MACO)
357 		smu_baco->platform_support = true;
358 	mutex_unlock(&smu_baco->mutex);
359 
360 	table_context->thermal_controller_type =
361 		powerplay_table->thermal_controller_type;
362 
363 	return 0;
364 }
365 
366 static int aldebaran_store_powerplay_table(struct smu_context *smu)
367 {
368 	struct smu_table_context *table_context = &smu->smu_table;
369 	struct smu_13_0_powerplay_table *powerplay_table =
370 		table_context->power_play_table;
371 	memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
372 	       sizeof(PPTable_t));
373 
374 	return 0;
375 }
376 
377 static int aldebaran_append_powerplay_table(struct smu_context *smu)
378 {
379 	struct smu_table_context *table_context = &smu->smu_table;
380 	PPTable_t *smc_pptable = table_context->driver_pptable;
381 	struct atom_smc_dpm_info_v4_10 *smc_dpm_table;
382 	int index, ret;
383 
384 	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
385 					   smc_dpm_info);
386 
387 	ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
388 				      (uint8_t **)&smc_dpm_table);
389 	if (ret)
390 		return ret;
391 
392 	dev_info(smu->adev->dev, "smc_dpm_info table revision(format.content): %d.%d\n",
393 			smc_dpm_table->table_header.format_revision,
394 			smc_dpm_table->table_header.content_revision);
395 
396 	if ((smc_dpm_table->table_header.format_revision == 4) &&
397 	    (smc_dpm_table->table_header.content_revision == 10))
398 		memcpy(&smc_pptable->GfxMaxCurrent,
399 		       &smc_dpm_table->GfxMaxCurrent,
400 		       sizeof(*smc_dpm_table) - offsetof(struct atom_smc_dpm_info_v4_10, GfxMaxCurrent));
401 	return 0;
402 }
403 
404 static int aldebaran_setup_pptable(struct smu_context *smu)
405 {
406 	int ret = 0;
407 
408 	/* VBIOS pptable is the first choice */
409 	smu->smu_table.boot_values.pp_table_id = 0;
410 
411 	ret = smu_v13_0_setup_pptable(smu);
412 	if (ret)
413 		return ret;
414 
415 	ret = aldebaran_store_powerplay_table(smu);
416 	if (ret)
417 		return ret;
418 
419 	ret = aldebaran_append_powerplay_table(smu);
420 	if (ret)
421 		return ret;
422 
423 	ret = aldebaran_check_powerplay_table(smu);
424 	if (ret)
425 		return ret;
426 
427 	return ret;
428 }
429 
430 static int aldebaran_run_btc(struct smu_context *smu)
431 {
432 	int ret;
433 
434 	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
435 	if (ret)
436 		dev_err(smu->adev->dev, "RunDcBtc failed!\n");
437 
438 	return ret;
439 }
440 
/*
 * aldebaran_populate_umd_state_clk - seed the UMD pstate table from the
 * gfx/uclk/soc dpm tables.
 *
 * min/peak and the current min/max window are taken straight from each
 * dpm table's range.  The "standard" level uses the fixed
 * ALDEBARAN_UMD_PSTATE_*_LEVEL index when every table has enough levels,
 * otherwise it falls back to each clock's minimum.
 *
 * Always returns 0.
 */
static int aldebaran_populate_umd_state_clk(struct smu_context *smu)
{
	struct smu_13_0_dpm_context *dpm_context =
		smu->smu_dpm.dpm_context;
	struct smu_13_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;
	struct smu_13_0_dpm_table *mem_table =
		&dpm_context->dpm_tables.uclk_table;
	struct smu_13_0_dpm_table *soc_table =
		&dpm_context->dpm_tables.soc_table;
	struct smu_umd_pstate_table *pstate_table =
		&smu->pstate_table;

	pstate_table->gfxclk_pstate.min = gfx_table->min;
	pstate_table->gfxclk_pstate.peak = gfx_table->max;
	pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
	pstate_table->gfxclk_pstate.curr.max = gfx_table->max;

	pstate_table->uclk_pstate.min = mem_table->min;
	pstate_table->uclk_pstate.peak = mem_table->max;
	pstate_table->uclk_pstate.curr.min = mem_table->min;
	pstate_table->uclk_pstate.curr.max = mem_table->max;

	pstate_table->socclk_pstate.min = soc_table->min;
	pstate_table->socclk_pstate.peak = soc_table->max;
	pstate_table->socclk_pstate.curr.min = soc_table->min;
	pstate_table->socclk_pstate.curr.max = soc_table->max;

	/* Use the fixed pstate level only if every table actually has it. */
	if (gfx_table->count > ALDEBARAN_UMD_PSTATE_GFXCLK_LEVEL &&
	    mem_table->count > ALDEBARAN_UMD_PSTATE_MCLK_LEVEL &&
	    soc_table->count > ALDEBARAN_UMD_PSTATE_SOCCLK_LEVEL) {
		pstate_table->gfxclk_pstate.standard =
			gfx_table->dpm_levels[ALDEBARAN_UMD_PSTATE_GFXCLK_LEVEL].value;
		pstate_table->uclk_pstate.standard =
			mem_table->dpm_levels[ALDEBARAN_UMD_PSTATE_MCLK_LEVEL].value;
		pstate_table->socclk_pstate.standard =
			soc_table->dpm_levels[ALDEBARAN_UMD_PSTATE_SOCCLK_LEVEL].value;
	} else {
		/* Too few dpm levels: fall back to each clock's minimum. */
		pstate_table->gfxclk_pstate.standard =
			pstate_table->gfxclk_pstate.min;
		pstate_table->uclk_pstate.standard =
			pstate_table->uclk_pstate.min;
		pstate_table->socclk_pstate.standard =
			pstate_table->socclk_pstate.min;
	}

	return 0;
}
489 
490 static int aldebaran_get_clk_table(struct smu_context *smu,
491 				   struct pp_clock_levels_with_latency *clocks,
492 				   struct smu_13_0_dpm_table *dpm_table)
493 {
494 	int i, count;
495 
496 	count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
497 	clocks->num_levels = count;
498 
499 	for (i = 0; i < count; i++) {
500 		clocks->data[i].clocks_in_khz =
501 			dpm_table->dpm_levels[i].value * 1000;
502 		clocks->data[i].latency_in_us = 0;
503 	}
504 
505 	return 0;
506 }
507 
508 static int aldebaran_freqs_in_same_level(int32_t frequency1,
509 					 int32_t frequency2)
510 {
511 	return (abs(frequency1 - frequency2) <= EPSILON);
512 }
513 
514 static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
515 					  MetricsMember_t member,
516 					  uint32_t *value)
517 {
518 	struct smu_table_context *smu_table= &smu->smu_table;
519 	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
520 	int ret = 0;
521 
522 	mutex_lock(&smu->metrics_lock);
523 
524 	ret = smu_cmn_get_metrics_table_locked(smu,
525 					       NULL,
526 					       false);
527 	if (ret) {
528 		mutex_unlock(&smu->metrics_lock);
529 		return ret;
530 	}
531 
532 	switch (member) {
533 	case METRICS_CURR_GFXCLK:
534 		*value = metrics->CurrClock[PPCLK_GFXCLK];
535 		break;
536 	case METRICS_CURR_SOCCLK:
537 		*value = metrics->CurrClock[PPCLK_SOCCLK];
538 		break;
539 	case METRICS_CURR_UCLK:
540 		*value = metrics->CurrClock[PPCLK_UCLK];
541 		break;
542 	case METRICS_CURR_VCLK:
543 		*value = metrics->CurrClock[PPCLK_VCLK];
544 		break;
545 	case METRICS_CURR_DCLK:
546 		*value = metrics->CurrClock[PPCLK_DCLK];
547 		break;
548 	case METRICS_CURR_FCLK:
549 		*value = metrics->CurrClock[PPCLK_FCLK];
550 		break;
551 	case METRICS_AVERAGE_GFXCLK:
552 		*value = metrics->AverageGfxclkFrequency;
553 		break;
554 	case METRICS_AVERAGE_SOCCLK:
555 		*value = metrics->AverageSocclkFrequency;
556 		break;
557 	case METRICS_AVERAGE_UCLK:
558 		*value = metrics->AverageUclkFrequency;
559 		break;
560 	case METRICS_AVERAGE_GFXACTIVITY:
561 		*value = metrics->AverageGfxActivity;
562 		break;
563 	case METRICS_AVERAGE_MEMACTIVITY:
564 		*value = metrics->AverageUclkActivity;
565 		break;
566 	case METRICS_AVERAGE_SOCKETPOWER:
567 		*value = metrics->AverageSocketPower << 8;
568 		break;
569 	case METRICS_TEMPERATURE_EDGE:
570 		*value = metrics->TemperatureEdge *
571 			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
572 		break;
573 	case METRICS_TEMPERATURE_HOTSPOT:
574 		*value = metrics->TemperatureHotspot *
575 			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
576 		break;
577 	case METRICS_TEMPERATURE_MEM:
578 		*value = metrics->TemperatureHBM *
579 			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
580 		break;
581 	case METRICS_TEMPERATURE_VRGFX:
582 		*value = metrics->TemperatureVrGfx *
583 			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
584 		break;
585 	case METRICS_TEMPERATURE_VRSOC:
586 		*value = metrics->TemperatureVrSoc *
587 			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
588 		break;
589 	case METRICS_TEMPERATURE_VRMEM:
590 		*value = metrics->TemperatureVrMem *
591 			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
592 		break;
593 	case METRICS_THROTTLER_STATUS:
594 		*value = metrics->ThrottlerStatus;
595 		break;
596 	default:
597 		*value = UINT_MAX;
598 		break;
599 	}
600 
601 	mutex_unlock(&smu->metrics_lock);
602 
603 	return ret;
604 }
605 
606 static int aldebaran_get_current_clk_freq_by_table(struct smu_context *smu,
607 						   enum smu_clk_type clk_type,
608 						   uint32_t *value)
609 {
610 	MetricsMember_t member_type;
611 	int clk_id = 0;
612 
613 	if (!value)
614 		return -EINVAL;
615 
616 	clk_id = smu_cmn_to_asic_specific_index(smu,
617 						CMN2ASIC_MAPPING_CLK,
618 						clk_type);
619 	if (clk_id < 0)
620 		return -EINVAL;
621 
622 	switch (clk_id) {
623 	case PPCLK_GFXCLK:
624 		/*
625 		 * CurrClock[clk_id] can provide accurate
626 		 *   output only when the dpm feature is enabled.
627 		 * We can use Average_* for dpm disabled case.
628 		 *   But this is available for gfxclk/uclk/socclk/vclk/dclk.
629 		 */
630 		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT))
631 			member_type = METRICS_CURR_GFXCLK;
632 		else
633 			member_type = METRICS_AVERAGE_GFXCLK;
634 		break;
635 	case PPCLK_UCLK:
636 		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
637 			member_type = METRICS_CURR_UCLK;
638 		else
639 			member_type = METRICS_AVERAGE_UCLK;
640 		break;
641 	case PPCLK_SOCCLK:
642 		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT))
643 			member_type = METRICS_CURR_SOCCLK;
644 		else
645 			member_type = METRICS_AVERAGE_SOCCLK;
646 		break;
647 	case PPCLK_VCLK:
648 		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
649 			member_type = METRICS_CURR_VCLK;
650 		else
651 			member_type = METRICS_AVERAGE_VCLK;
652 		break;
653 	case PPCLK_DCLK:
654 		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
655 			member_type = METRICS_CURR_DCLK;
656 		else
657 			member_type = METRICS_AVERAGE_DCLK;
658 		break;
659 	case PPCLK_FCLK:
660 		member_type = METRICS_CURR_FCLK;
661 		break;
662 	default:
663 		return -EINVAL;
664 	}
665 
666 	return aldebaran_get_smu_metrics_data(smu,
667 					      member_type,
668 					      value);
669 }
670 
671 static int aldebaran_print_clk_levels(struct smu_context *smu,
672 				      enum smu_clk_type type, char *buf)
673 {
674 	int i, now, size = 0;
675 	int ret = 0;
676 	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
677 	struct pp_clock_levels_with_latency clocks;
678 	struct smu_13_0_dpm_table *single_dpm_table;
679 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
680 	struct smu_13_0_dpm_context *dpm_context = NULL;
681 	uint32_t display_levels;
682 	uint32_t freq_values[3] = {0};
683 	uint32_t min_clk, max_clk;
684 
685 	if (amdgpu_ras_intr_triggered())
686 		return snprintf(buf, PAGE_SIZE, "unavailable\n");
687 
688 	dpm_context = smu_dpm->dpm_context;
689 
690 	switch (type) {
691 
692 	case SMU_OD_SCLK:
693 		size = sprintf(buf, "%s:\n", "GFXCLK");
694 		fallthrough;
695 	case SMU_SCLK:
696 		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now);
697 		if (ret) {
698 			dev_err(smu->adev->dev, "Attempt to get current gfx clk Failed!");
699 			return ret;
700 		}
701 
702 		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
703 		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
704 		if (ret) {
705 			dev_err(smu->adev->dev, "Attempt to get gfx clk levels Failed!");
706 			return ret;
707 		}
708 
709 		display_levels = clocks.num_levels;
710 
711 		min_clk = pstate_table->gfxclk_pstate.curr.min;
712 		max_clk = pstate_table->gfxclk_pstate.curr.max;
713 
714 		freq_values[0] = min_clk;
715 		freq_values[1] = max_clk;
716 
717 		/* fine-grained dpm has only 2 levels */
718 		if (now > min_clk && now < max_clk) {
719 			display_levels = clocks.num_levels + 1;
720 			freq_values[2] = max_clk;
721 			freq_values[1] = now;
722 		}
723 
724 		/*
725 		 * For DPM disabled case, there will be only one clock level.
726 		 * And it's safe to assume that is always the current clock.
727 		 */
728 		if (display_levels == clocks.num_levels) {
729 			for (i = 0; i < clocks.num_levels; i++)
730 				size += sprintf(
731 					buf + size, "%d: %uMhz %s\n", i,
732 					freq_values[i],
733 					(clocks.num_levels == 1) ?
734 						"*" :
735 						(aldebaran_freqs_in_same_level(
736 							 freq_values[i], now) ?
737 							 "*" :
738 							 ""));
739 		} else {
740 			for (i = 0; i < display_levels; i++)
741 				size += sprintf(buf + size, "%d: %uMhz %s\n", i,
742 						freq_values[i], i == 1 ? "*" : "");
743 		}
744 
745 		break;
746 
747 	case SMU_OD_MCLK:
748 		size = sprintf(buf, "%s:\n", "MCLK");
749 		fallthrough;
750 	case SMU_MCLK:
751 		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now);
752 		if (ret) {
753 			dev_err(smu->adev->dev, "Attempt to get current mclk Failed!");
754 			return ret;
755 		}
756 
757 		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
758 		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
759 		if (ret) {
760 			dev_err(smu->adev->dev, "Attempt to get memory clk levels Failed!");
761 			return ret;
762 		}
763 
764 		for (i = 0; i < clocks.num_levels; i++)
765 			size += sprintf(buf + size, "%d: %uMhz %s\n",
766 					i, clocks.data[i].clocks_in_khz / 1000,
767 					(clocks.num_levels == 1) ? "*" :
768 					(aldebaran_freqs_in_same_level(
769 								       clocks.data[i].clocks_in_khz / 1000,
770 								       now) ? "*" : ""));
771 		break;
772 
773 	case SMU_SOCCLK:
774 		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_SOCCLK, &now);
775 		if (ret) {
776 			dev_err(smu->adev->dev, "Attempt to get current socclk Failed!");
777 			return ret;
778 		}
779 
780 		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
781 		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
782 		if (ret) {
783 			dev_err(smu->adev->dev, "Attempt to get socclk levels Failed!");
784 			return ret;
785 		}
786 
787 		for (i = 0; i < clocks.num_levels; i++)
788 			size += sprintf(buf + size, "%d: %uMhz %s\n",
789 					i, clocks.data[i].clocks_in_khz / 1000,
790 					(clocks.num_levels == 1) ? "*" :
791 					(aldebaran_freqs_in_same_level(
792 								       clocks.data[i].clocks_in_khz / 1000,
793 								       now) ? "*" : ""));
794 		break;
795 
796 	case SMU_FCLK:
797 		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_FCLK, &now);
798 		if (ret) {
799 			dev_err(smu->adev->dev, "Attempt to get current fclk Failed!");
800 			return ret;
801 		}
802 
803 		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
804 		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
805 		if (ret) {
806 			dev_err(smu->adev->dev, "Attempt to get fclk levels Failed!");
807 			return ret;
808 		}
809 
810 		for (i = 0; i < single_dpm_table->count; i++)
811 			size += sprintf(buf + size, "%d: %uMhz %s\n",
812 					i, single_dpm_table->dpm_levels[i].value,
813 					(clocks.num_levels == 1) ? "*" :
814 					(aldebaran_freqs_in_same_level(
815 								       clocks.data[i].clocks_in_khz / 1000,
816 								       now) ? "*" : ""));
817 		break;
818 
819 	default:
820 		break;
821 	}
822 
823 	return size;
824 }
825 
826 static int aldebaran_upload_dpm_level(struct smu_context *smu,
827 				      bool max,
828 				      uint32_t feature_mask,
829 				      uint32_t level)
830 {
831 	struct smu_13_0_dpm_context *dpm_context =
832 		smu->smu_dpm.dpm_context;
833 	uint32_t freq;
834 	int ret = 0;
835 
836 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
837 	    (feature_mask & FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT))) {
838 		freq = dpm_context->dpm_tables.gfx_table.dpm_levels[level].value;
839 		ret = smu_cmn_send_smc_msg_with_param(smu,
840 						      (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
841 						      (PPCLK_GFXCLK << 16) | (freq & 0xffff),
842 						      NULL);
843 		if (ret) {
844 			dev_err(smu->adev->dev, "Failed to set soft %s gfxclk !\n",
845 				max ? "max" : "min");
846 			return ret;
847 		}
848 	}
849 
850 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
851 	    (feature_mask & FEATURE_MASK(FEATURE_DPM_UCLK_BIT))) {
852 		freq = dpm_context->dpm_tables.uclk_table.dpm_levels[level].value;
853 		ret = smu_cmn_send_smc_msg_with_param(smu,
854 						      (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
855 						      (PPCLK_UCLK << 16) | (freq & 0xffff),
856 						      NULL);
857 		if (ret) {
858 			dev_err(smu->adev->dev, "Failed to set soft %s memclk !\n",
859 				max ? "max" : "min");
860 			return ret;
861 		}
862 	}
863 
864 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
865 	    (feature_mask & FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT))) {
866 		freq = dpm_context->dpm_tables.soc_table.dpm_levels[level].value;
867 		ret = smu_cmn_send_smc_msg_with_param(smu,
868 						      (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
869 						      (PPCLK_SOCCLK << 16) | (freq & 0xffff),
870 						      NULL);
871 		if (ret) {
872 			dev_err(smu->adev->dev, "Failed to set soft %s socclk !\n",
873 				max ? "max" : "min");
874 			return ret;
875 		}
876 	}
877 
878 	return ret;
879 }
880 
881 static int aldebaran_force_clk_levels(struct smu_context *smu,
882 				      enum smu_clk_type type, uint32_t mask)
883 {
884 	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
885 	struct smu_13_0_dpm_table *single_dpm_table = NULL;
886 	uint32_t soft_min_level, soft_max_level;
887 	int ret = 0;
888 
889 	soft_min_level = mask ? (ffs(mask) - 1) : 0;
890 	soft_max_level = mask ? (fls(mask) - 1) : 0;
891 
892 	switch (type) {
893 	case SMU_SCLK:
894 		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
895 		if (soft_max_level >= single_dpm_table->count) {
896 			dev_err(smu->adev->dev, "Clock level specified %d is over max allowed %d\n",
897 				soft_max_level, single_dpm_table->count - 1);
898 			ret = -EINVAL;
899 			break;
900 		}
901 
902 		ret = aldebaran_upload_dpm_level(smu,
903 						 false,
904 						 FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT),
905 						 soft_min_level);
906 		if (ret) {
907 			dev_err(smu->adev->dev, "Failed to upload boot level to lowest!\n");
908 			break;
909 		}
910 
911 		ret = aldebaran_upload_dpm_level(smu,
912 						 true,
913 						 FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT),
914 						 soft_max_level);
915 		if (ret)
916 			dev_err(smu->adev->dev, "Failed to upload dpm max level to highest!\n");
917 
918 		break;
919 
920 	case SMU_MCLK:
921 	case SMU_SOCCLK:
922 	case SMU_FCLK:
923 		/*
924 		 * Should not arrive here since aldebaran does not
925 		 * support mclk/socclk/fclk softmin/softmax settings
926 		 */
927 		ret = -EINVAL;
928 		break;
929 
930 	default:
931 		break;
932 	}
933 
934 	return ret;
935 }
936 
937 static int aldebaran_get_thermal_temperature_range(struct smu_context *smu,
938 						   struct smu_temperature_range *range)
939 {
940 	struct smu_table_context *table_context = &smu->smu_table;
941 	struct smu_13_0_powerplay_table *powerplay_table =
942 		table_context->power_play_table;
943 	PPTable_t *pptable = smu->smu_table.driver_pptable;
944 
945 	if (!range)
946 		return -EINVAL;
947 
948 	memcpy(range, &smu13_thermal_policy[0], sizeof(struct smu_temperature_range));
949 
950 	range->hotspot_crit_max = pptable->ThotspotLimit *
951 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
952 	range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
953 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
954 	range->mem_crit_max = pptable->TmemLimit *
955 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
956 	range->mem_emergency_max = (pptable->TmemLimit + CTF_OFFSET_MEM)*
957 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
958 	range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
959 
960 	return 0;
961 }
962 
963 static int aldebaran_get_current_activity_percent(struct smu_context *smu,
964 						  enum amd_pp_sensors sensor,
965 						  uint32_t *value)
966 {
967 	int ret = 0;
968 
969 	if (!value)
970 		return -EINVAL;
971 
972 	switch (sensor) {
973 	case AMDGPU_PP_SENSOR_GPU_LOAD:
974 		ret = aldebaran_get_smu_metrics_data(smu,
975 						     METRICS_AVERAGE_GFXACTIVITY,
976 						     value);
977 		break;
978 	case AMDGPU_PP_SENSOR_MEM_LOAD:
979 		ret = aldebaran_get_smu_metrics_data(smu,
980 						     METRICS_AVERAGE_MEMACTIVITY,
981 						     value);
982 		break;
983 	default:
984 		dev_err(smu->adev->dev, "Invalid sensor for retrieving clock activity\n");
985 		return -EINVAL;
986 	}
987 
988 	return ret;
989 }
990 
/*
 * Report the average socket power (read from the SMU metrics table) in
 * @value.  Returns -EINVAL for a NULL output pointer, otherwise whatever
 * the metrics query returns.
 */
static int aldebaran_get_gpu_power(struct smu_context *smu, uint32_t *value)
{
	if (!value)
		return -EINVAL;

	return aldebaran_get_smu_metrics_data(smu,
					      METRICS_AVERAGE_SOCKETPOWER,
					      value);
}
1000 
1001 static int aldebaran_thermal_get_temperature(struct smu_context *smu,
1002 					     enum amd_pp_sensors sensor,
1003 					     uint32_t *value)
1004 {
1005 	int ret = 0;
1006 
1007 	if (!value)
1008 		return -EINVAL;
1009 
1010 	switch (sensor) {
1011 	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
1012 		ret = aldebaran_get_smu_metrics_data(smu,
1013 						     METRICS_TEMPERATURE_HOTSPOT,
1014 						     value);
1015 		break;
1016 	case AMDGPU_PP_SENSOR_EDGE_TEMP:
1017 		ret = aldebaran_get_smu_metrics_data(smu,
1018 						     METRICS_TEMPERATURE_EDGE,
1019 						     value);
1020 		break;
1021 	case AMDGPU_PP_SENSOR_MEM_TEMP:
1022 		ret = aldebaran_get_smu_metrics_data(smu,
1023 						     METRICS_TEMPERATURE_MEM,
1024 						     value);
1025 		break;
1026 	default:
1027 		dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n");
1028 		return -EINVAL;
1029 	}
1030 
1031 	return ret;
1032 }
1033 
/*
 * Entry point for the amdgpu sensor interface.
 *
 * @sensor: which sensor to read
 * @data:   output buffer; every supported sensor writes a 4-byte value
 * @size:   out parameter, set to 4 on the supported paths
 *
 * Returns 0 on success, -EINVAL for NULL arguments, -EOPNOTSUPP for an
 * unsupported sensor, or the error from the underlying query.  Serialized
 * by smu->sensor_lock.
 */
static int aldebaran_read_sensor(struct smu_context *smu,
				 enum amd_pp_sensors sensor,
				 void *data, uint32_t *size)
{
	int ret = 0;

	/*
	 * After a RAS fatal-error interrupt, skip all HW access and return
	 * success without touching *data/*size.
	 */
	if (amdgpu_ras_intr_triggered())
		return 0;

	if (!data || !size)
		return -EINVAL;

	mutex_lock(&smu->sensor_lock);
	switch (sensor) {
	case AMDGPU_PP_SENSOR_MEM_LOAD:
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = aldebaran_get_current_activity_percent(smu,
							     sensor,
							     (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		ret = aldebaran_get_gpu_power(smu, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = aldebaran_thermal_get_temperature(smu, sensor,
							(uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data);
		/* the output clock frequency in 10K unit */
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data);
		/* convert MHz reading to the 10 kHz unit callers expect */
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
		*size = 4;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&smu->sensor_lock);

	return ret;
}
1089 
1090 static int aldebaran_get_power_limit(struct smu_context *smu)
1091 {
1092 	PPTable_t *pptable = smu->smu_table.driver_pptable;
1093 	uint32_t power_limit = 0;
1094 	int ret;
1095 
1096 	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
1097 		return -EINVAL;
1098 
1099 	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit, &power_limit);
1100 
1101 	if (ret) {
1102 		/* the last hope to figure out the ppt limit */
1103 		if (!pptable) {
1104 			dev_err(smu->adev->dev, "Cannot get PPT limit due to pptable missing!");
1105 			return -EINVAL;
1106 		}
1107 		power_limit = pptable->PptLimit;
1108 	}
1109 
1110 	smu->current_power_limit = smu->default_power_limit = power_limit;
1111 	if (pptable)
1112 		smu->max_power_limit = pptable->PptLimit;
1113 
1114 	return 0;
1115 }
1116 
1117 static int aldebaran_system_features_control(struct  smu_context *smu, bool enable)
1118 {
1119 	int ret;
1120 
1121 	ret = smu_v13_0_system_features_control(smu, enable);
1122 	if (!ret && enable)
1123 		ret = aldebaran_run_btc(smu);
1124 
1125 	return ret;
1126 }
1127 
/*
 * Switch the DPM forced-performance level.
 *
 * Leaving AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM tells the PMFW to drop
 * determinism mode and restores the cached gfxclk soft max; entering it is
 * a no-op here (the actual clock bound is programmed later through
 * aldebaran_set_soft_freq_limited_range()).  All other levels fall through
 * to the common smu_v13_0 handler.
 */
static int aldebaran_set_performance_level(struct smu_context *smu,
					   enum amd_dpm_forced_level level)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_13_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;

	/* Disable determinism if switching to another mode */
	if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
	    (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) {
		smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
		/* back to the full hardware gfxclk range */
		pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
	}

	switch (level) {

	case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
		/* nothing more to do until a max frequency is requested */
		return 0;

	case AMD_DPM_FORCED_LEVEL_HIGH:
	case AMD_DPM_FORCED_LEVEL_LOW:
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
	default:
		break;
	}

	return smu_v13_0_set_performance_level(smu, level);
}
1161 
/*
 * Program a soft GFX clock range (MHz).
 *
 * Behavior depends on the current forced level:
 *  - MANUAL: program [min, max] directly and cache it on success.
 *  - PERF_DETERMINISM: restore the full hardware range, then ask the PMFW
 *    to enforce determinism at @max; @min is ignored on this path.
 * Any other level, or a clock type other than GFXCLK/SCLK, is rejected
 * with -EINVAL.
 */
static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
	struct amdgpu_device *adev = smu->adev;
	uint32_t min_clk;
	uint32_t max_clk;
	int ret = 0;

	/* Only the GFX clock supports soft limits on aldebaran */
	if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK)
		return -EINVAL;

	if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
			&& (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
		return -EINVAL;

	if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		if (min >= max) {
			dev_err(smu->adev->dev,
				"Minimum GFX clk should be less than the maximum allowed clock\n");
			return -EINVAL;
		}

		/* Already at the requested range - nothing to do */
		if ((min == pstate_table->gfxclk_pstate.curr.min) &&
		    (max == pstate_table->gfxclk_pstate.curr.max))
			return 0;

		ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK,
							    min, max);
		if (!ret) {
			/* cache only what was actually accepted by the PMFW */
			pstate_table->gfxclk_pstate.curr.min = min;
			pstate_table->gfxclk_pstate.curr.max = max;
		}

		return ret;
	}

	if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
		/* @max must lie within the hardware gfxclk range */
		if (!max || (max < dpm_context->dpm_tables.gfx_table.min) ||
			(max > dpm_context->dpm_tables.gfx_table.max)) {
			dev_warn(adev->dev,
					"Invalid max frequency %d MHz specified for determinism\n", max);
			return -EINVAL;
		}

		/* Restore default min/max clocks and enable determinism */
		min_clk = dpm_context->dpm_tables.gfx_table.min;
		max_clk = dpm_context->dpm_tables.gfx_table.max;
		ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
		if (!ret) {
			/*
			 * Delay before EnableDeterminism - presumably the PMFW
			 * needs time to settle after the range change; TODO
			 * confirm the required interval.
			 */
			usleep_range(500, 1000);
			ret = smu_cmn_send_smc_msg_with_param(smu,
					SMU_MSG_EnableDeterminism,
					max, NULL);
			if (ret) {
				dev_err(adev->dev,
						"Failed to enable determinism at GFX clock %d MHz\n", max);
			} else {
				pstate_table->gfxclk_pstate.curr.min = min_clk;
				pstate_table->gfxclk_pstate.curr.max = max;
			}
		}
	}

	return ret;
}
1232 
/*
 * Handle the pp_od_clk_voltage sysfs commands for the GFX clock.
 *
 * Supported commands:
 *  - PP_OD_EDIT_SCLK_VDDC_TABLE: input[0] selects min (0) or max (1),
 *    input[1] is the frequency in MHz; validated against the hardware
 *    range and stored in the "custom" pstate (not applied yet).
 *  - PP_OD_RESTORE_DEFAULT_TABLE: re-apply the full hardware range.
 *  - PP_OD_COMMIT_DPM_TABLE: apply the custom range, defaulting any unset
 *    bound to the current one.
 * Only allowed in manual or determinism mode; unknown commands return
 * -ENOSYS.
 */
static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
							long input[], uint32_t size)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
	uint32_t min_clk;
	uint32_t max_clk;
	int ret = 0;

	/* Only allowed in manual or determinism mode */
	if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
			&& (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
		return -EINVAL;

	switch (type) {
	case PP_OD_EDIT_SCLK_VDDC_TABLE:
		if (size != 2) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		}

		if (input[0] == 0) {
			/* edit the soft minimum */
			if (input[1] < dpm_context->dpm_tables.gfx_table.min) {
				dev_warn(smu->adev->dev, "Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n",
					input[1], dpm_context->dpm_tables.gfx_table.min);
				/* reset the custom value on rejection */
				pstate_table->gfxclk_pstate.custom.min =
					pstate_table->gfxclk_pstate.curr.min;
				return -EINVAL;
			}

			pstate_table->gfxclk_pstate.custom.min = input[1];
		} else if (input[0] == 1) {
			/* edit the soft maximum */
			if (input[1] > dpm_context->dpm_tables.gfx_table.max) {
				dev_warn(smu->adev->dev, "Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
					input[1], dpm_context->dpm_tables.gfx_table.max);
				/* reset the custom value on rejection */
				pstate_table->gfxclk_pstate.custom.max =
					pstate_table->gfxclk_pstate.curr.max;
				return -EINVAL;
			}

			pstate_table->gfxclk_pstate.custom.max = input[1];
		} else {
			return -EINVAL;
		}
		break;
	case PP_OD_RESTORE_DEFAULT_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		} else {
			/* Use the default frequencies for manual and determinism mode */
			min_clk = dpm_context->dpm_tables.gfx_table.min;
			max_clk = dpm_context->dpm_tables.gfx_table.max;

			return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
		}
		break;
	case PP_OD_COMMIT_DPM_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		} else {
			/* unset custom bounds fall back to the current range */
			if (!pstate_table->gfxclk_pstate.custom.min)
				pstate_table->gfxclk_pstate.custom.min =
					pstate_table->gfxclk_pstate.curr.min;

			if (!pstate_table->gfxclk_pstate.custom.max)
				pstate_table->gfxclk_pstate.custom.max =
					pstate_table->gfxclk_pstate.curr.max;

			min_clk = pstate_table->gfxclk_pstate.custom.min;
			max_clk = pstate_table->gfxclk_pstate.custom.max;

			return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
		}
		break;
	default:
		return -ENOSYS;
	}

	return ret;
}
1316 
1317 static bool aldebaran_is_dpm_running(struct smu_context *smu)
1318 {
1319 	int ret = 0;
1320 	uint32_t feature_mask[2];
1321 	unsigned long feature_enabled;
1322 	ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
1323 	feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
1324 					  ((uint64_t)feature_mask[1] << 32));
1325 	return !!(feature_enabled & SMC_DPM_FEATURE);
1326 }
1327 
/*
 * Build a software-I2C request for the SMU.
 *
 * @req:      request to fill; caller must have zeroed it
 * @write:    true for a write transaction, false for a read
 * @address:  7-bit I2C slave address
 * @numbytes: total number of command slots, including the leading 2-byte
 *            EEPROM offset
 * @data:     buffer of @numbytes bytes to mirror into the command slots
 *
 * The first two slots always carry the EEPROM byte offset and are marked
 * as writes even for a read transaction; the read direction takes over at
 * slot 2 via a RESTART.  Caller must ensure numbytes <= MAX_SW_I2C_COMMANDS.
 */
static void aldebaran_fill_i2c_req(SwI2cRequest_t  *req, bool write,
				  uint8_t address, uint32_t numbytes,
				  uint8_t *data)
{
	int i;

	req->I2CcontrollerPort = 0;
	req->I2CSpeed = 2;
	req->SlaveAddress = address;
	req->NumCmds = numbytes;

	for (i = 0; i < numbytes; i++) {
		SwI2cCmd_t *cmd =  &req->SwI2cCmds[i];

		/* First 2 bytes are always write for lower 2b EEPROM address */
		if (i < 2)
			cmd->CmdConfig = CMDCONFIG_READWRITE_MASK;
		else
			cmd->CmdConfig = write ? CMDCONFIG_READWRITE_MASK : 0;


		/* Add RESTART for read  after address filled */
		cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0;

		/* Add STOP in the end */
		cmd->CmdConfig |= (i == (numbytes - 1)) ? CMDCONFIG_STOP_MASK : 0;

		/* Fill with data regardless if read or write to simplify code */
		cmd->ReadWriteData = data[i];
	}
}
1359 
1360 static int aldebaran_i2c_read_data(struct i2c_adapter *control,
1361 					       uint8_t address,
1362 					       uint8_t *data,
1363 					       uint32_t numbytes)
1364 {
1365 	uint32_t  i, ret = 0;
1366 	SwI2cRequest_t req;
1367 	struct amdgpu_device *adev = to_amdgpu_device(control);
1368 	struct smu_table_context *smu_table = &adev->smu.smu_table;
1369 	struct smu_table *table = &smu_table->driver_table;
1370 
1371 	if (numbytes > MAX_SW_I2C_COMMANDS) {
1372 		dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
1373 			numbytes, MAX_SW_I2C_COMMANDS);
1374 		return -EINVAL;
1375 	}
1376 
1377 	memset(&req, 0, sizeof(req));
1378 	aldebaran_fill_i2c_req(&req, false, address, numbytes, data);
1379 
1380 	mutex_lock(&adev->smu.mutex);
1381 	/* Now read data starting with that address */
1382 	ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
1383 					true);
1384 	mutex_unlock(&adev->smu.mutex);
1385 
1386 	if (!ret) {
1387 		SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr;
1388 
1389 		/* Assume SMU  fills res.SwI2cCmds[i].Data with read bytes */
1390 		for (i = 0; i < numbytes; i++)
1391 			data[i] = res->SwI2cCmds[i].ReadWriteData;
1392 
1393 		dev_dbg(adev->dev, "aldebaran_i2c_read_data, address = %x, bytes = %d, data :",
1394 				  (uint16_t)address, numbytes);
1395 
1396 		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
1397 			       8, 1, data, numbytes, false);
1398 	} else
1399 		dev_err(adev->dev, "aldebaran_i2c_read_data - error occurred :%x", ret);
1400 
1401 	return ret;
1402 }
1403 
1404 static int aldebaran_i2c_write_data(struct i2c_adapter *control,
1405 						uint8_t address,
1406 						uint8_t *data,
1407 						uint32_t numbytes)
1408 {
1409 	uint32_t ret;
1410 	SwI2cRequest_t req;
1411 	struct amdgpu_device *adev = to_amdgpu_device(control);
1412 
1413 	if (numbytes > MAX_SW_I2C_COMMANDS) {
1414 		dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
1415 			numbytes, MAX_SW_I2C_COMMANDS);
1416 		return -EINVAL;
1417 	}
1418 
1419 	memset(&req, 0, sizeof(req));
1420 	aldebaran_fill_i2c_req(&req, true, address, numbytes, data);
1421 
1422 	mutex_lock(&adev->smu.mutex);
1423 	ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
1424 	mutex_unlock(&adev->smu.mutex);
1425 
1426 	if (!ret) {
1427 		dev_dbg(adev->dev, "aldebaran_i2c_write(), address = %x, bytes = %d , data: ",
1428 					 (uint16_t)address, numbytes);
1429 
1430 		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
1431 			       8, 1, data, numbytes, false);
1432 		/*
1433 		 * According to EEPROM spec there is a MAX of 10 ms required for
1434 		 * EEPROM to flush internal RX buffer after STOP was issued at the
1435 		 * end of write transaction. During this time the EEPROM will not be
1436 		 * responsive to any more commands - so wait a bit more.
1437 		 */
1438 		msleep(10);
1439 
1440 	} else
1441 		dev_err(adev->dev, "aldebaran_i2c_write- error occurred :%x", ret);
1442 
1443 	return ret;
1444 }
1445 
1446 static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
1447 			      struct i2c_msg *msgs, int num)
1448 {
1449 	uint32_t  i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0;
1450 	uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 };
1451 
1452 	for (i = 0; i < num; i++) {
1453 		/*
1454 		 * SMU interface allows at most MAX_SW_I2C_COMMANDS bytes of data at
1455 		 * once and hence the data needs to be spliced into chunks and sent each
1456 		 * chunk separately
1457 		 */
1458 		data_size = msgs[i].len - 2;
1459 		data_chunk_size = MAX_SW_I2C_COMMANDS - 2;
1460 		next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff);
1461 		data_ptr = msgs[i].buf + 2;
1462 
1463 		for (j = 0; j < data_size / data_chunk_size; j++) {
1464 			/* Insert the EEPROM dest addess, bits 0-15 */
1465 			data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
1466 			data_chunk[1] = (next_eeprom_addr & 0xff);
1467 
1468 			if (msgs[i].flags & I2C_M_RD) {
1469 				ret = aldebaran_i2c_read_data(i2c_adap,
1470 							     (uint8_t)msgs[i].addr,
1471 							     data_chunk, MAX_SW_I2C_COMMANDS);
1472 
1473 				memcpy(data_ptr, data_chunk + 2, data_chunk_size);
1474 			} else {
1475 
1476 				memcpy(data_chunk + 2, data_ptr, data_chunk_size);
1477 
1478 				ret = aldebaran_i2c_write_data(i2c_adap,
1479 							      (uint8_t)msgs[i].addr,
1480 							      data_chunk, MAX_SW_I2C_COMMANDS);
1481 			}
1482 
1483 			if (ret) {
1484 				num = -EIO;
1485 				goto fail;
1486 			}
1487 
1488 			next_eeprom_addr += data_chunk_size;
1489 			data_ptr += data_chunk_size;
1490 		}
1491 
1492 		if (data_size % data_chunk_size) {
1493 			data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
1494 			data_chunk[1] = (next_eeprom_addr & 0xff);
1495 
1496 			if (msgs[i].flags & I2C_M_RD) {
1497 				ret = aldebaran_i2c_read_data(i2c_adap,
1498 							     (uint8_t)msgs[i].addr,
1499 							     data_chunk, (data_size % data_chunk_size) + 2);
1500 
1501 				memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
1502 			} else {
1503 				memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);
1504 
1505 				ret = aldebaran_i2c_write_data(i2c_adap,
1506 							      (uint8_t)msgs[i].addr,
1507 							      data_chunk, (data_size % data_chunk_size) + 2);
1508 			}
1509 
1510 			if (ret) {
1511 				num = -EIO;
1512 				goto fail;
1513 			}
1514 		}
1515 	}
1516 
1517 fail:
1518 	return num;
1519 }
1520 
/* Advertise plain I2C transfers plus emulated SMBus to the I2C core. */
static u32 aldebaran_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
1525 
1526 
/* I2C algorithm backed by the SMU software-I2C engine. */
static const struct i2c_algorithm aldebaran_i2c_algo = {
	.master_xfer = aldebaran_i2c_xfer,
	.functionality = aldebaran_i2c_func,
};
1531 
/*
 * Register the SMU-backed I2C adapter with the I2C core.  Returns 0 on
 * success or the i2c_add_adapter() error.
 */
static int aldebaran_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
{
	struct amdgpu_device *adev = to_amdgpu_device(control);
	int res;

	/* describe the adapter before registering it */
	control->owner = THIS_MODULE;
	control->class = I2C_CLASS_SPD;
	control->dev.parent = &adev->pdev->dev;
	control->algo = &aldebaran_i2c_algo;
	snprintf(control->name, sizeof(control->name), "AMDGPU SMU");

	res = i2c_add_adapter(control);
	if (res)
		DRM_ERROR("Failed to register hw i2c, err: %d\n", res);

	return res;
}
1549 
/* Unregister the SMU-backed I2C adapter. */
static void aldebaran_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
{
	i2c_del_adapter(control);
}
1554 
1555 static void aldebaran_get_unique_id(struct smu_context *smu)
1556 {
1557 	struct amdgpu_device *adev = smu->adev;
1558 	SmuMetrics_t *metrics = smu->smu_table.metrics_table;
1559 	uint32_t upper32 = 0, lower32 = 0;
1560 	int ret;
1561 
1562 	mutex_lock(&smu->metrics_lock);
1563 	ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
1564 	if (ret)
1565 		goto out_unlock;
1566 
1567 	upper32 = metrics->PublicSerialNumUpper32;
1568 	lower32 = metrics->PublicSerialNumLower32;
1569 
1570 out_unlock:
1571 	mutex_unlock(&smu->metrics_lock);
1572 
1573 	adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
1574 	sprintf(adev->serial, "%016llx", adev->unique_id);
1575 }
1576 
static bool aldebaran_is_baco_supported(struct smu_context *smu)
{
	/* aldebaran does not support BACO (bus active, chip off) */

	return false;
}
1583 
/* Forward the requested data-fabric C-state to the PMFW. */
static int aldebaran_set_df_cstate(struct smu_context *smu,
				   enum pp_df_cstate state)
{
	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
}
1589 
1590 static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en)
1591 {
1592 	return smu_cmn_send_smc_msg_with_param(smu,
1593 					       SMU_MSG_GmiPwrDnControl,
1594 					       en ? 1 : 0,
1595 					       NULL);
1596 }
1597 
/*
 * Map PMFW throttler-status bits to human-readable labels for the
 * throttling warning emitted below.
 */
static const struct throttling_logging_label {
	uint32_t feature_mask;
	const char *label;
} logging_label[] = {
	{(1U << THROTTLER_TEMP_MEM_BIT), "HBM"},
	{(1U << THROTTLER_TEMP_VR_GFX_BIT), "VR of GFX rail"},
	{(1U << THROTTLER_TEMP_VR_MEM_BIT), "VR of HBM rail"},
	{(1U << THROTTLER_TEMP_VR_SOC_BIT), "VR of SOC rail"},
};
/*
 * On a thermal-throttling interrupt, read the throttler status from the
 * metrics table, log which known throttlers are active ("X and Y and ..."),
 * and forward the raw status to KFD for SMI event consumers.
 */
static void aldebaran_log_thermal_throttling_event(struct smu_context *smu)
{
	int ret;
	int throttler_idx, throtting_events = 0, buf_idx = 0;
	struct amdgpu_device *adev = smu->adev;
	uint32_t throttler_status;
	char log_buf[256];

	ret = aldebaran_get_smu_metrics_data(smu,
					     METRICS_THROTTLER_STATUS,
					     &throttler_status);
	if (ret)
		return;

	memset(log_buf, 0, sizeof(log_buf));
	for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label);
	     throttler_idx++) {
		if (throttler_status & logging_label[throttler_idx].feature_mask) {
			throtting_events++;
			/* join active labels with " and " separators */
			buf_idx += snprintf(log_buf + buf_idx,
					    sizeof(log_buf) - buf_idx,
					    "%s%s",
					    throtting_events > 1 ? " and " : "",
					    logging_label[throttler_idx].label);
			/* snprintf returns would-be length; >= size means truncation */
			if (buf_idx >= sizeof(log_buf)) {
				dev_err(adev->dev, "buffer overflow!\n");
				log_buf[sizeof(log_buf) - 1] = '\0';
				break;
			}
		}
	}

	dev_warn(adev->dev, "WARN: GPU thermal throttling temperature reached, expect performance decrease. %s.\n",
		 log_buf);
	kgd2kfd_smi_event_throttle(smu->adev->kfd.dev, throttler_status);
}
1643 
/*
 * Report the current PCIe link speed.  When the ESM control register
 * indicates ESM is active, the speed is derived from its rate field
 * (value + 128 - NOTE(review): field encoding assumed from this code,
 * confirm against register spec); otherwise fall back to the generic
 * smu_v13_0 query.
 */
static int aldebaran_get_current_pcie_link_speed(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t esm_ctrl;

	/* TODO: confirm this on real target */
	esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
	if ((esm_ctrl >> 15) & 0x1FFFF)
		return (((esm_ctrl >> 8) & 0x3F) + 128);

	return smu_v13_0_get_current_pcie_link_speed(smu);
}
1656 
/*
 * Fill the cached gpu_metrics_v1_2 table from a fresh (bypass-cache)
 * metrics read and hand back a pointer to it via @table.
 *
 * Returns the table size in bytes on success, or the negative error from
 * the metrics fetch.
 */
static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
					 void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_2 *gpu_metrics =
		(struct gpu_metrics_v1_2 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int i, ret = 0;

	/* bypass the cached copy - callers want fresh data */
	ret = smu_cmn_get_metrics_table(smu,
					&metrics,
					true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 2);

	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
	gpu_metrics->temperature_mem = metrics.TemperatureHBM;
	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
	gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem;

	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
	/* no multimedia activity counter on this ASIC */
	gpu_metrics->average_mm_activity = 0;

	gpu_metrics->average_socket_power = metrics.AverageSocketPower;
	gpu_metrics->energy_accumulator =
			(uint64_t)metrics.EnergyAcc64bitHigh << 32 |
			metrics.EnergyAcc64bitLow;

	gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
	gpu_metrics->average_vclk0_frequency = 0;
	gpu_metrics->average_dclk0_frequency = 0;

	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;

	/* no controllable fan on this board */
	gpu_metrics->current_fan_speed = 0;

	gpu_metrics->pcie_link_width =
		smu_v13_0_get_current_pcie_link_width(smu);
	gpu_metrics->pcie_link_speed =
		aldebaran_get_current_pcie_link_speed(smu);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	gpu_metrics->gfx_activity_acc = metrics.GfxBusyAcc;
	gpu_metrics->mem_activity_acc = metrics.DramBusyAcc;

	/* per-stack HBM temperatures */
	for (i = 0; i < NUM_HBM_INSTANCES; i++)
		gpu_metrics->temperature_hbm[i] = metrics.TemperatureAllHBM[i];

	gpu_metrics->firmware_timestamp = ((uint64_t)metrics.TimeStampHigh << 32) |
					metrics.TimeStampLow;

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_2);
}
1726 
/*
 * Perform a mode-2 (engine-level) GPU reset through the PMFW.
 *
 * The reset message is sent without waiting, the driver sleeps through the
 * FLR-like reset window, restores the saved PCI config space, and then
 * polls (up to 10 retries) for the PMFW acknowledgement.  Returns 0 on
 * success or a negative error code.
 */
static int aldebaran_mode2_reset(struct smu_context *smu)
{
	u32 smu_version;
	int ret = 0, index;
	struct amdgpu_device *adev = smu->adev;
	int timeout = 10;

	smu_cmn_get_smc_version(smu, NULL, &smu_version);

	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
						SMU_MSG_GfxDeviceDriverReset);

	mutex_lock(&smu->message_lock);
	/* mode-2 reset requires PMFW >= 0x00441400 */
	if (smu_version >= 0x00441400) {
		ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2);
		/* This is similar to FLR, wait till max FLR timeout */
		msleep(100);
		dev_dbg(smu->adev->dev, "restore config space...\n");
		/* Restore the config space saved during init */
		amdgpu_device_load_pci_state(adev->pdev);

		dev_dbg(smu->adev->dev, "wait for reset ack\n");
		/* keep polling while the response register still times out */
		while (ret == -ETIME && timeout)  {
			ret = smu_cmn_wait_for_response(smu);
			/* Wait a bit more time for getting ACK */
			if (ret == -ETIME) {
				--timeout;
				usleep_range(500, 1000);
				continue;
			}

			/* 1 is the PMFW "OK" response; anything else is fatal */
			if (ret != 1) {
				dev_err(adev->dev, "failed to send mode2 message \tparam: 0x%08x response %#x\n",
						SMU_RESET_MODE_2, ret);
				goto out;
			}
		}

	} else {
		dev_err(adev->dev, "smu fw 0x%x does not support MSG_GfxDeviceDriverReset MSG\n",
				smu_version);
	}

	/* normalize the "OK" response to a 0 return code */
	if (ret == 1)
		ret = 0;
out:
	mutex_unlock(&smu->message_lock);

	return ret;
}
1777 
/*
 * Report whether mode-1 (whole-GPU) reset is available.  The PMFW-version
 * and PSP-liveness checks below are currently disabled (#if 0) and the
 * function unconditionally reports support.
 */
static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
{
#if 0
	struct amdgpu_device *adev = smu->adev;
	u32 smu_version;
	uint32_t val;
	/**
	 * PM FW version support mode1 reset from 68.07
	 */
	smu_cmn_get_smc_version(smu, NULL, &smu_version);
	if ((smu_version < 0x00440700))
		return false;
	/**
	 * mode1 reset relies on PSP, so we should check if
	 * PSP is alive.
	 */
	val = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);

	return val != 0x0;
#endif
	return true;
}
1800 
/* Mode-2 (engine-level) reset is always available on aldebaran. */
static bool aldebaran_is_mode2_reset_supported(struct smu_context *smu)
{
	return true;
}
1805 
1806 static int aldebaran_set_mp1_state(struct smu_context *smu,
1807 				   enum pp_mp1_state mp1_state)
1808 {
1809 	switch (mp1_state) {
1810 	case PP_MP1_STATE_UNLOAD:
1811 		return smu_cmn_set_mp1_state(smu, mp1_state);
1812 	default:
1813 		return -EINVAL;
1814 	}
1815 
1816 	return 0;
1817 }
1818 
1819 static const struct pptable_funcs aldebaran_ppt_funcs = {
1820 	/* init dpm */
1821 	.get_allowed_feature_mask = aldebaran_get_allowed_feature_mask,
1822 	/* dpm/clk tables */
1823 	.set_default_dpm_table = aldebaran_set_default_dpm_table,
1824 	.populate_umd_state_clk = aldebaran_populate_umd_state_clk,
1825 	.get_thermal_temperature_range = aldebaran_get_thermal_temperature_range,
1826 	.print_clk_levels = aldebaran_print_clk_levels,
1827 	.force_clk_levels = aldebaran_force_clk_levels,
1828 	.read_sensor = aldebaran_read_sensor,
1829 	.set_performance_level = aldebaran_set_performance_level,
1830 	.get_power_limit = aldebaran_get_power_limit,
1831 	.is_dpm_running = aldebaran_is_dpm_running,
1832 	.get_unique_id = aldebaran_get_unique_id,
1833 	.init_microcode = smu_v13_0_init_microcode,
1834 	.load_microcode = smu_v13_0_load_microcode,
1835 	.fini_microcode = smu_v13_0_fini_microcode,
1836 	.init_smc_tables = aldebaran_init_smc_tables,
1837 	.fini_smc_tables = smu_v13_0_fini_smc_tables,
1838 	.init_power = smu_v13_0_init_power,
1839 	.fini_power = smu_v13_0_fini_power,
1840 	.check_fw_status = smu_v13_0_check_fw_status,
1841 	/* pptable related */
1842 	.setup_pptable = aldebaran_setup_pptable,
1843 	.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
1844 	.check_fw_version = smu_v13_0_check_fw_version,
1845 	.write_pptable = smu_cmn_write_pptable,
1846 	.set_driver_table_location = smu_v13_0_set_driver_table_location,
1847 	.set_tool_table_location = smu_v13_0_set_tool_table_location,
1848 	.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
1849 	.system_features_control = aldebaran_system_features_control,
1850 	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
1851 	.send_smc_msg = smu_cmn_send_smc_msg,
1852 	.get_enabled_mask = smu_cmn_get_enabled_mask,
1853 	.feature_is_enabled = smu_cmn_feature_is_enabled,
1854 	.disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
1855 	.set_power_limit = smu_v13_0_set_power_limit,
1856 	.init_max_sustainable_clocks = smu_v13_0_init_max_sustainable_clocks,
1857 	.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
1858 	.disable_thermal_alert = smu_v13_0_disable_thermal_alert,
1859 	.set_xgmi_pstate = smu_v13_0_set_xgmi_pstate,
1860 	.register_irq_handler = smu_v13_0_register_irq_handler,
1861 	.set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme,
1862 	.get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc,
1863 	.baco_is_support= aldebaran_is_baco_supported,
1864 	.get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
1865 	.set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
1866 	.od_edit_dpm_table = aldebaran_usr_edit_dpm_table,
1867 	.set_df_cstate = aldebaran_set_df_cstate,
1868 	.allow_xgmi_power_down = aldebaran_allow_xgmi_power_down,
1869 	.log_thermal_throttling_event = aldebaran_log_thermal_throttling_event,
1870 	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
1871 	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
1872 	.get_gpu_metrics = aldebaran_get_gpu_metrics,
1873 	.mode1_reset_is_support = aldebaran_is_mode1_reset_supported,
1874 	.mode2_reset_is_support = aldebaran_is_mode2_reset_supported,
1875 	.mode1_reset = smu_v13_0_mode1_reset,
1876 	.set_mp1_state = aldebaran_set_mp1_state,
1877 	.mode2_reset = aldebaran_mode2_reset,
1878 	.wait_for_event = smu_v13_0_wait_for_event,
1879 	.i2c_init = aldebaran_i2c_control_init,
1880 	.i2c_fini = aldebaran_i2c_control_fini,
1881 };
1882 
1883 void aldebaran_set_ppt_funcs(struct smu_context *smu)
1884 {
1885 	smu->ppt_funcs = &aldebaran_ppt_funcs;
1886 	smu->message_map = aldebaran_message_map;
1887 	smu->clock_map = aldebaran_clk_map;
1888 	smu->feature_map = aldebaran_feature_mask_map;
1889 	smu->table_map = aldebaran_table_map;
1890 }
1891