1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #define SWSMU_CODE_LAYER_L2
25 
26 #include <linux/firmware.h>
27 #include "amdgpu.h"
28 #include "amdgpu_smu.h"
29 #include "atomfirmware.h"
30 #include "amdgpu_atomfirmware.h"
31 #include "amdgpu_atombios.h"
32 #include "smu_v13_0.h"
33 #include "smu13_driver_if_aldebaran.h"
34 #include "soc15_common.h"
35 #include "atom.h"
36 #include "power_state.h"
37 #include "aldebaran_ppt.h"
38 #include "smu_v13_0_pptable.h"
39 #include "aldebaran_ppsmc.h"
40 #include "nbio/nbio_7_4_offset.h"
41 #include "nbio/nbio_7_4_sh_mask.h"
42 #include "thm/thm_11_0_2_offset.h"
43 #include "thm/thm_11_0_2_sh_mask.h"
44 #include "amdgpu_xgmi.h"
45 #include <linux/pci.h>
46 #include "amdgpu_ras.h"
47 #include "smu_cmn.h"
48 #include "mp/mp_13_0_2_offset.h"
49 
50 /*
51  * DO NOT use these for err/warn/info/debug messages.
52  * Use dev_err, dev_warn, dev_info and dev_dbg instead.
53  * They are more MGPU friendly.
54  */
55 #undef pr_err
56 #undef pr_warn
57 #undef pr_info
58 #undef pr_debug
59 
60 #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
61 
62 #define ALDEBARAN_FEA_MAP(smu_feature, aldebaran_feature) \
63 	[smu_feature] = {1, (aldebaran_feature)}
64 
65 #define FEATURE_MASK(feature) (1ULL << feature)
66 #define SMC_DPM_FEATURE ( \
67 			  FEATURE_MASK(FEATURE_DATA_CALCULATIONS) | \
68 			  FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)	| \
69 			  FEATURE_MASK(FEATURE_DPM_UCLK_BIT)	| \
70 			  FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)	| \
71 			  FEATURE_MASK(FEATURE_DPM_FCLK_BIT)	| \
72 			  FEATURE_MASK(FEATURE_DPM_LCLK_BIT)	| \
73 			  FEATURE_MASK(FEATURE_DPM_XGMI_BIT)	| \
74 			  FEATURE_MASK(FEATURE_DPM_VCN_BIT))
75 
/* possible frequency drift (1 MHz) */
77 #define EPSILON				1
78 
79 #define smnPCIE_ESM_CTRL			0x111003D0
80 
/*
 * Default thermal policy ranges, in millidegrees Celsius.
 * NOTE(review): entry 0 looks like the default operating range
 * (-273.15C .. 99C) and entry 1 the critical/emergency limits (120C)
 * for each sensor tracked by struct smu_temperature_range -- confirm
 * field order against the struct definition.
 */
static const struct smu_temperature_range smu13_thermal_policy[] =
{
	{-273150,  99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
	{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};
86 
/*
 * Driver SMU message -> aldebaran PPSMC message mapping.
 * NOTE(review): the third MSG_MAP field appears to be the valid-in-VF
 * (SRIOV) flag per the common swSMU MSG_MAP convention -- confirm against
 * the MSG_MAP macro definition.
 */
static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,			     PPSMC_MSG_TestMessage,			0),
	MSG_MAP(GetSmuVersion,			     PPSMC_MSG_GetSmuVersion,			1),
	MSG_MAP(GetDriverIfVersion,		     PPSMC_MSG_GetDriverIfVersion,		1),
	MSG_MAP(EnableAllSmuFeatures,		     PPSMC_MSG_EnableAllSmuFeatures,		0),
	MSG_MAP(DisableAllSmuFeatures,		     PPSMC_MSG_DisableAllSmuFeatures,		0),
	MSG_MAP(GetEnabledSmuFeaturesLow,	     PPSMC_MSG_GetEnabledSmuFeaturesLow,	0),
	MSG_MAP(GetEnabledSmuFeaturesHigh,	     PPSMC_MSG_GetEnabledSmuFeaturesHigh,	0),
	MSG_MAP(SetDriverDramAddrHigh,		     PPSMC_MSG_SetDriverDramAddrHigh,		1),
	MSG_MAP(SetDriverDramAddrLow,		     PPSMC_MSG_SetDriverDramAddrLow,		1),
	MSG_MAP(SetToolsDramAddrHigh,		     PPSMC_MSG_SetToolsDramAddrHigh,		0),
	MSG_MAP(SetToolsDramAddrLow,		     PPSMC_MSG_SetToolsDramAddrLow,		0),
	MSG_MAP(TransferTableSmu2Dram,		     PPSMC_MSG_TransferTableSmu2Dram,		1),
	MSG_MAP(TransferTableDram2Smu,		     PPSMC_MSG_TransferTableDram2Smu,		0),
	MSG_MAP(UseDefaultPPTable,		     PPSMC_MSG_UseDefaultPPTable,		0),
	MSG_MAP(SetSystemVirtualDramAddrHigh,	     PPSMC_MSG_SetSystemVirtualDramAddrHigh,	0),
	MSG_MAP(SetSystemVirtualDramAddrLow,	     PPSMC_MSG_SetSystemVirtualDramAddrLow,	0),
	MSG_MAP(SetSoftMinByFreq,		     PPSMC_MSG_SetSoftMinByFreq,		0),
	MSG_MAP(SetSoftMaxByFreq,		     PPSMC_MSG_SetSoftMaxByFreq,		0),
	MSG_MAP(SetHardMinByFreq,		     PPSMC_MSG_SetHardMinByFreq,		0),
	MSG_MAP(SetHardMaxByFreq,		     PPSMC_MSG_SetHardMaxByFreq,		0),
	MSG_MAP(GetMinDpmFreq,			     PPSMC_MSG_GetMinDpmFreq,			0),
	MSG_MAP(GetMaxDpmFreq,			     PPSMC_MSG_GetMaxDpmFreq,			0),
	MSG_MAP(GetDpmFreqByIndex,		     PPSMC_MSG_GetDpmFreqByIndex,		1),
	MSG_MAP(SetWorkloadMask,		     PPSMC_MSG_SetWorkloadMask,			1),
	MSG_MAP(GetVoltageByDpm,		     PPSMC_MSG_GetVoltageByDpm,			0),
	MSG_MAP(GetVoltageByDpmOverdrive,	     PPSMC_MSG_GetVoltageByDpmOverdrive,	0),
	MSG_MAP(SetPptLimit,			     PPSMC_MSG_SetPptLimit,			0),
	MSG_MAP(GetPptLimit,			     PPSMC_MSG_GetPptLimit,			1),
	MSG_MAP(PrepareMp1ForUnload,		     PPSMC_MSG_PrepareMp1ForUnload,		0),
	/* Note the driver-side name differs from the PPSMC name here */
	MSG_MAP(GfxDeviceDriverReset,		     PPSMC_MSG_GfxDriverReset,			0),
	MSG_MAP(RunDcBtc,			     PPSMC_MSG_RunDcBtc,			0),
	MSG_MAP(DramLogSetDramAddrHigh,		     PPSMC_MSG_DramLogSetDramAddrHigh,		0),
	MSG_MAP(DramLogSetDramAddrLow,		     PPSMC_MSG_DramLogSetDramAddrLow,		0),
	MSG_MAP(DramLogSetDramSize,		     PPSMC_MSG_DramLogSetDramSize,		0),
	MSG_MAP(GetDebugData,			     PPSMC_MSG_GetDebugData,			0),
	MSG_MAP(WaflTest,			     PPSMC_MSG_WaflTest,			0),
	MSG_MAP(SetMemoryChannelEnable,		     PPSMC_MSG_SetMemoryChannelEnable,		0),
	MSG_MAP(SetNumBadHbmPagesRetired,	     PPSMC_MSG_SetNumBadHbmPagesRetired,	0),
	MSG_MAP(DFCstateControl,		     PPSMC_MSG_DFCstateControl,			0),
	MSG_MAP(GetGmiPwrDnHyst,		     PPSMC_MSG_GetGmiPwrDnHyst,			0),
	MSG_MAP(SetGmiPwrDnHyst,		     PPSMC_MSG_SetGmiPwrDnHyst,			0),
	MSG_MAP(GmiPwrDnControl,		     PPSMC_MSG_GmiPwrDnControl,			0),
	MSG_MAP(EnterGfxoff,			     PPSMC_MSG_EnterGfxoff,			0),
	MSG_MAP(ExitGfxoff,			     PPSMC_MSG_ExitGfxoff,			0),
	MSG_MAP(SetExecuteDMATest,		     PPSMC_MSG_SetExecuteDMATest,		0),
	MSG_MAP(EnableDeterminism,		     PPSMC_MSG_EnableDeterminism,		0),
	MSG_MAP(DisableDeterminism,		     PPSMC_MSG_DisableDeterminism,		0),
	MSG_MAP(SetUclkDpmMode,			     PPSMC_MSG_SetUclkDpmMode,			0),
	MSG_MAP(GfxDriverResetRecovery,		     PPSMC_MSG_GfxDriverResetRecovery,		0),
};
138 
139 static const struct cmn2asic_mapping aldebaran_clk_map[SMU_CLK_COUNT] = {
140 	CLK_MAP(GFXCLK, PPCLK_GFXCLK),
141 	CLK_MAP(SCLK,	PPCLK_GFXCLK),
142 	CLK_MAP(SOCCLK, PPCLK_SOCCLK),
143 	CLK_MAP(FCLK, PPCLK_FCLK),
144 	CLK_MAP(UCLK, PPCLK_UCLK),
145 	CLK_MAP(MCLK, PPCLK_UCLK),
146 	CLK_MAP(DCLK, PPCLK_DCLK),
147 	CLK_MAP(VCLK, PPCLK_VCLK),
148 	CLK_MAP(LCLK, 	PPCLK_LCLK),
149 };
150 
/*
 * Common SMU feature bit -> aldebaran firmware feature bit mapping.
 * Only features listed here are considered valid for this ASIC
 * (ALDEBARAN_FEA_MAP sets the entry's valid flag to 1).
 */
static const struct cmn2asic_mapping aldebaran_feature_mask_map[SMU_FEATURE_COUNT] = {
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_PREFETCHER_BIT, 		FEATURE_DATA_CALCULATIONS),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, 			FEATURE_DPM_GFXCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_UCLK_BIT, 			FEATURE_DPM_UCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_SOCCLK_BIT, 			FEATURE_DPM_SOCCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, 			FEATURE_DPM_FCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_LCLK_BIT, 			FEATURE_DPM_LCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_XGMI_BIT, 				FEATURE_DPM_XGMI_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, 			FEATURE_DS_GFXCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, 			FEATURE_DS_SOCCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, 				FEATURE_DS_LCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, 				FEATURE_DS_FCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_UCLK_BIT,				FEATURE_DS_UCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_GFX_SS_BIT, 				FEATURE_GFX_SS_BIT),
	/* VCN powergating is driven by the VCN DPM feature on this ASIC */
	ALDEBARAN_FEA_MAP(SMU_FEATURE_VCN_PG_BIT, 				FEATURE_DPM_VCN_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_RSMU_SMN_CG_BIT, 			FEATURE_RSMU_SMN_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_WAFL_CG_BIT, 				FEATURE_WAFL_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_PPT_BIT, 					FEATURE_PPT_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_TDC_BIT, 					FEATURE_TDC_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_APCC_PLUS_BIT, 			FEATURE_APCC_PLUS_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT, 			FEATURE_APCC_DFLL_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_FUSE_CG_BIT, 				FEATURE_FUSE_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, 				FEATURE_MP1_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_SMUIO_CG_BIT, 			FEATURE_SMUIO_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_THM_CG_BIT, 				FEATURE_THM_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_CLK_CG_BIT, 				FEATURE_CLK_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, 				FEATURE_FW_CTF_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_THERMAL_BIT, 				FEATURE_THERMAL_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_OUT_OF_BAND_MONITOR_BIT, 	FEATURE_OUT_OF_BAND_MONITOR_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT,FEATURE_XGMI_PER_LINK_PWR_DWN),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, 			FEATURE_DF_CSTATE),
};
183 
/* Common SMU table id -> aldebaran firmware table id mapping. */
static const struct cmn2asic_mapping aldebaran_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(PPTABLE),
	TAB_MAP(AVFS_PSM_DEBUG),
	TAB_MAP(AVFS_FUSE_OVERRIDE),
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(DRIVER_SMU_CONFIG),
	TAB_MAP(I2C_COMMANDS),
};
193 
194 static int aldebaran_tables_init(struct smu_context *smu)
195 {
196 	struct smu_table_context *smu_table = &smu->smu_table;
197 	struct smu_table *tables = smu_table->tables;
198 
199 	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
200 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
201 
202 	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
203 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
204 
205 	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
206 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
207 
208 	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
209 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
210 
211 	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
212 	if (!smu_table->metrics_table)
213 		return -ENOMEM;
214 	smu_table->metrics_time = 0;
215 
216 	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_2);
217 	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
218 	if (!smu_table->gpu_metrics_table) {
219 		kfree(smu_table->metrics_table);
220 		return -ENOMEM;
221 	}
222 
223 	return 0;
224 }
225 
226 static int aldebaran_allocate_dpm_context(struct smu_context *smu)
227 {
228 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
229 
230 	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
231 				       GFP_KERNEL);
232 	if (!smu_dpm->dpm_context)
233 		return -ENOMEM;
234 	smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);
235 
236 	smu_dpm->dpm_current_power_state = kzalloc(sizeof(struct smu_power_state),
237 						   GFP_KERNEL);
238 	if (!smu_dpm->dpm_current_power_state)
239 		return -ENOMEM;
240 
241 	smu_dpm->dpm_request_power_state = kzalloc(sizeof(struct smu_power_state),
242 						   GFP_KERNEL);
243 	if (!smu_dpm->dpm_request_power_state)
244 		return -ENOMEM;
245 
246 	return 0;
247 }
248 
/*
 * aldebaran_init_smc_tables - set up ASIC tables, the dpm context and the
 * common smu v13 SMC tables, in that order.  Returns 0 or the first error.
 */
static int aldebaran_init_smc_tables(struct smu_context *smu)
{
	int ret = aldebaran_tables_init(smu);

	if (!ret)
		ret = aldebaran_allocate_dpm_context(smu);
	if (!ret)
		ret = smu_v13_0_init_smc_tables(smu);

	return ret;
}
263 
264 static int aldebaran_get_allowed_feature_mask(struct smu_context *smu,
265 					      uint32_t *feature_mask, uint32_t num)
266 {
267 	if (num > 2)
268 		return -EINVAL;
269 
270 	/* pptable will handle the features to enable */
271 	memset(feature_mask, 0xFF, sizeof(uint32_t) * num);
272 
273 	return 0;
274 }
275 
276 static int aldebaran_set_default_dpm_table(struct smu_context *smu)
277 {
278 	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
279 	struct smu_13_0_dpm_table *dpm_table = NULL;
280 	PPTable_t *pptable = smu->smu_table.driver_pptable;
281 	int ret = 0;
282 
283 	/* socclk dpm table setup */
284 	dpm_table = &dpm_context->dpm_tables.soc_table;
285 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
286 		ret = smu_v13_0_set_single_dpm_table(smu,
287 						     SMU_SOCCLK,
288 						     dpm_table);
289 		if (ret)
290 			return ret;
291 	} else {
292 		dpm_table->count = 1;
293 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
294 		dpm_table->dpm_levels[0].enabled = true;
295 		dpm_table->min = dpm_table->dpm_levels[0].value;
296 		dpm_table->max = dpm_table->dpm_levels[0].value;
297 	}
298 
299 	/* gfxclk dpm table setup */
300 	dpm_table = &dpm_context->dpm_tables.gfx_table;
301 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
302 		/* in the case of gfxclk, only fine-grained dpm is honored */
303 		dpm_table->count = 2;
304 		dpm_table->dpm_levels[0].value = pptable->GfxclkFmin;
305 		dpm_table->dpm_levels[0].enabled = true;
306 		dpm_table->dpm_levels[1].value = pptable->GfxclkFmax;
307 		dpm_table->dpm_levels[1].enabled = true;
308 		dpm_table->min = dpm_table->dpm_levels[0].value;
309 		dpm_table->max = dpm_table->dpm_levels[1].value;
310 	} else {
311 		dpm_table->count = 1;
312 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
313 		dpm_table->dpm_levels[0].enabled = true;
314 		dpm_table->min = dpm_table->dpm_levels[0].value;
315 		dpm_table->max = dpm_table->dpm_levels[0].value;
316 	}
317 
318 	/* memclk dpm table setup */
319 	dpm_table = &dpm_context->dpm_tables.uclk_table;
320 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
321 		ret = smu_v13_0_set_single_dpm_table(smu,
322 						     SMU_UCLK,
323 						     dpm_table);
324 		if (ret)
325 			return ret;
326 	} else {
327 		dpm_table->count = 1;
328 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
329 		dpm_table->dpm_levels[0].enabled = true;
330 		dpm_table->min = dpm_table->dpm_levels[0].value;
331 		dpm_table->max = dpm_table->dpm_levels[0].value;
332 	}
333 
334 	/* fclk dpm table setup */
335 	dpm_table = &dpm_context->dpm_tables.fclk_table;
336 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
337 		ret = smu_v13_0_set_single_dpm_table(smu,
338 						     SMU_FCLK,
339 						     dpm_table);
340 		if (ret)
341 			return ret;
342 	} else {
343 		dpm_table->count = 1;
344 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
345 		dpm_table->dpm_levels[0].enabled = true;
346 		dpm_table->min = dpm_table->dpm_levels[0].value;
347 		dpm_table->max = dpm_table->dpm_levels[0].value;
348 	}
349 
350 	return 0;
351 }
352 
353 static int aldebaran_check_powerplay_table(struct smu_context *smu)
354 {
355 	struct smu_table_context *table_context = &smu->smu_table;
356 	struct smu_13_0_powerplay_table *powerplay_table =
357 		table_context->power_play_table;
358 
359 	table_context->thermal_controller_type =
360 		powerplay_table->thermal_controller_type;
361 
362 	return 0;
363 }
364 
365 static int aldebaran_store_powerplay_table(struct smu_context *smu)
366 {
367 	struct smu_table_context *table_context = &smu->smu_table;
368 	struct smu_13_0_powerplay_table *powerplay_table =
369 		table_context->power_play_table;
370 	memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
371 	       sizeof(PPTable_t));
372 
373 	return 0;
374 }
375 
/*
 * aldebaran_append_powerplay_table - overlay board-specific parameters
 * from the VBIOS smc_dpm_info table (v4.10) onto the driver pptable.
 *
 * The memcpy copies everything from GfxMaxCurrent to the end of the atom
 * table; this relies on PPTable_t and atom_smc_dpm_info_v4_10 having the
 * same member layout from GfxMaxCurrent onward.
 *
 * Returns 0 on success (including when the table revision does not match
 * and nothing is copied), or the error from the atombios lookup.
 */
static int aldebaran_append_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	struct atom_smc_dpm_info_v4_10 *smc_dpm_table;
	int index, ret;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					   smc_dpm_info);

	/* smc_dpm_table points into the cached VBIOS image; do not free it */
	ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
				      (uint8_t **)&smc_dpm_table);
	if (ret)
		return ret;

	dev_info(smu->adev->dev, "smc_dpm_info table revision(format.content): %d.%d\n",
			smc_dpm_table->table_header.format_revision,
			smc_dpm_table->table_header.content_revision);

	/* only the v4.10 layout is known to match the driver pptable */
	if ((smc_dpm_table->table_header.format_revision == 4) &&
	    (smc_dpm_table->table_header.content_revision == 10))
		memcpy(&smc_pptable->GfxMaxCurrent,
		       &smc_dpm_table->GfxMaxCurrent,
		       sizeof(*smc_dpm_table) - offsetof(struct atom_smc_dpm_info_v4_10, GfxMaxCurrent));
	return 0;
}
402 
403 static int aldebaran_setup_pptable(struct smu_context *smu)
404 {
405 	int ret = 0;
406 
407 	/* VBIOS pptable is the first choice */
408 	smu->smu_table.boot_values.pp_table_id = 0;
409 
410 	ret = smu_v13_0_setup_pptable(smu);
411 	if (ret)
412 		return ret;
413 
414 	ret = aldebaran_store_powerplay_table(smu);
415 	if (ret)
416 		return ret;
417 
418 	ret = aldebaran_append_powerplay_table(smu);
419 	if (ret)
420 		return ret;
421 
422 	ret = aldebaran_check_powerplay_table(smu);
423 	if (ret)
424 		return ret;
425 
426 	return ret;
427 }
428 
429 static int aldebaran_run_btc(struct smu_context *smu)
430 {
431 	int ret;
432 
433 	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
434 	if (ret)
435 		dev_err(smu->adev->dev, "RunDcBtc failed!\n");
436 
437 	return ret;
438 }
439 
440 static int aldebaran_populate_umd_state_clk(struct smu_context *smu)
441 {
442 	struct smu_13_0_dpm_context *dpm_context =
443 		smu->smu_dpm.dpm_context;
444 	struct smu_13_0_dpm_table *gfx_table =
445 		&dpm_context->dpm_tables.gfx_table;
446 	struct smu_13_0_dpm_table *mem_table =
447 		&dpm_context->dpm_tables.uclk_table;
448 	struct smu_13_0_dpm_table *soc_table =
449 		&dpm_context->dpm_tables.soc_table;
450 	struct smu_umd_pstate_table *pstate_table =
451 		&smu->pstate_table;
452 
453 	pstate_table->gfxclk_pstate.min = gfx_table->min;
454 	pstate_table->gfxclk_pstate.peak = gfx_table->max;
455 	pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
456 	pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
457 
458 	pstate_table->uclk_pstate.min = mem_table->min;
459 	pstate_table->uclk_pstate.peak = mem_table->max;
460 	pstate_table->uclk_pstate.curr.min = mem_table->min;
461 	pstate_table->uclk_pstate.curr.max = mem_table->max;
462 
463 	pstate_table->socclk_pstate.min = soc_table->min;
464 	pstate_table->socclk_pstate.peak = soc_table->max;
465 	pstate_table->socclk_pstate.curr.min = soc_table->min;
466 	pstate_table->socclk_pstate.curr.max = soc_table->max;
467 
468 	if (gfx_table->count > ALDEBARAN_UMD_PSTATE_GFXCLK_LEVEL &&
469 	    mem_table->count > ALDEBARAN_UMD_PSTATE_MCLK_LEVEL &&
470 	    soc_table->count > ALDEBARAN_UMD_PSTATE_SOCCLK_LEVEL) {
471 		pstate_table->gfxclk_pstate.standard =
472 			gfx_table->dpm_levels[ALDEBARAN_UMD_PSTATE_GFXCLK_LEVEL].value;
473 		pstate_table->uclk_pstate.standard =
474 			mem_table->dpm_levels[ALDEBARAN_UMD_PSTATE_MCLK_LEVEL].value;
475 		pstate_table->socclk_pstate.standard =
476 			soc_table->dpm_levels[ALDEBARAN_UMD_PSTATE_SOCCLK_LEVEL].value;
477 	} else {
478 		pstate_table->gfxclk_pstate.standard =
479 			pstate_table->gfxclk_pstate.min;
480 		pstate_table->uclk_pstate.standard =
481 			pstate_table->uclk_pstate.min;
482 		pstate_table->socclk_pstate.standard =
483 			pstate_table->socclk_pstate.min;
484 	}
485 
486 	return 0;
487 }
488 
489 static int aldebaran_get_clk_table(struct smu_context *smu,
490 				   struct pp_clock_levels_with_latency *clocks,
491 				   struct smu_13_0_dpm_table *dpm_table)
492 {
493 	int i, count;
494 
495 	count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
496 	clocks->num_levels = count;
497 
498 	for (i = 0; i < count; i++) {
499 		clocks->data[i].clocks_in_khz =
500 			dpm_table->dpm_levels[i].value * 1000;
501 		clocks->data[i].latency_in_us = 0;
502 	}
503 
504 	return 0;
505 }
506 
507 static int aldebaran_freqs_in_same_level(int32_t frequency1,
508 					 int32_t frequency2)
509 {
510 	return (abs(frequency1 - frequency2) <= EPSILON);
511 }
512 
513 static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
514 					  MetricsMember_t member,
515 					  uint32_t *value)
516 {
517 	struct smu_table_context *smu_table= &smu->smu_table;
518 	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
519 	int ret = 0;
520 
521 	mutex_lock(&smu->metrics_lock);
522 
523 	ret = smu_cmn_get_metrics_table_locked(smu,
524 					       NULL,
525 					       false);
526 	if (ret) {
527 		mutex_unlock(&smu->metrics_lock);
528 		return ret;
529 	}
530 
531 	switch (member) {
532 	case METRICS_CURR_GFXCLK:
533 		*value = metrics->CurrClock[PPCLK_GFXCLK];
534 		break;
535 	case METRICS_CURR_SOCCLK:
536 		*value = metrics->CurrClock[PPCLK_SOCCLK];
537 		break;
538 	case METRICS_CURR_UCLK:
539 		*value = metrics->CurrClock[PPCLK_UCLK];
540 		break;
541 	case METRICS_CURR_VCLK:
542 		*value = metrics->CurrClock[PPCLK_VCLK];
543 		break;
544 	case METRICS_CURR_DCLK:
545 		*value = metrics->CurrClock[PPCLK_DCLK];
546 		break;
547 	case METRICS_CURR_FCLK:
548 		*value = metrics->CurrClock[PPCLK_FCLK];
549 		break;
550 	case METRICS_AVERAGE_GFXCLK:
551 		*value = metrics->AverageGfxclkFrequency;
552 		break;
553 	case METRICS_AVERAGE_SOCCLK:
554 		*value = metrics->AverageSocclkFrequency;
555 		break;
556 	case METRICS_AVERAGE_UCLK:
557 		*value = metrics->AverageUclkFrequency;
558 		break;
559 	case METRICS_AVERAGE_GFXACTIVITY:
560 		*value = metrics->AverageGfxActivity;
561 		break;
562 	case METRICS_AVERAGE_MEMACTIVITY:
563 		*value = metrics->AverageUclkActivity;
564 		break;
565 	case METRICS_AVERAGE_SOCKETPOWER:
566 		*value = metrics->AverageSocketPower << 8;
567 		break;
568 	case METRICS_TEMPERATURE_EDGE:
569 		*value = metrics->TemperatureEdge *
570 			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
571 		break;
572 	case METRICS_TEMPERATURE_HOTSPOT:
573 		*value = metrics->TemperatureHotspot *
574 			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
575 		break;
576 	case METRICS_TEMPERATURE_MEM:
577 		*value = metrics->TemperatureHBM *
578 			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
579 		break;
580 	case METRICS_TEMPERATURE_VRGFX:
581 		*value = metrics->TemperatureVrGfx *
582 			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
583 		break;
584 	case METRICS_TEMPERATURE_VRSOC:
585 		*value = metrics->TemperatureVrSoc *
586 			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
587 		break;
588 	case METRICS_TEMPERATURE_VRMEM:
589 		*value = metrics->TemperatureVrMem *
590 			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
591 		break;
592 	case METRICS_THROTTLER_STATUS:
593 		*value = metrics->ThrottlerStatus;
594 		break;
595 	default:
596 		*value = UINT_MAX;
597 		break;
598 	}
599 
600 	mutex_unlock(&smu->metrics_lock);
601 
602 	return ret;
603 }
604 
/*
 * aldebaran_get_current_clk_freq_by_table - read the current frequency of
 * a clock domain from the SMU metrics table.
 *
 * Picks METRICS_CURR_* when the clock's DPM feature is enabled and falls
 * back to METRICS_AVERAGE_* otherwise (see the comment in the GFXCLK case).
 * Returns -EINVAL for a NULL output pointer or an unmapped/unsupported
 * clock type, else the result of the metrics read.
 */
static int aldebaran_get_current_clk_freq_by_table(struct smu_context *smu,
						   enum smu_clk_type clk_type,
						   uint32_t *value)
{
	MetricsMember_t member_type;
	int clk_id = 0;

	if (!value)
		return -EINVAL;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return -EINVAL;

	switch (clk_id) {
	case PPCLK_GFXCLK:
		/*
		 * CurrClock[clk_id] can provide accurate
		 *   output only when the dpm feature is enabled.
		 * We can use Average_* for dpm disabled case.
		 *   But this is available for gfxclk/uclk/socclk/vclk/dclk.
		 */
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT))
			member_type = METRICS_CURR_GFXCLK;
		else
			member_type = METRICS_AVERAGE_GFXCLK;
		break;
	case PPCLK_UCLK:
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
			member_type = METRICS_CURR_UCLK;
		else
			member_type = METRICS_AVERAGE_UCLK;
		break;
	case PPCLK_SOCCLK:
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT))
			member_type = METRICS_CURR_SOCCLK;
		else
			member_type = METRICS_AVERAGE_SOCCLK;
		break;
	case PPCLK_VCLK:
		/* vclk/dclk gating follows VCN powergating on this ASIC */
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
			member_type = METRICS_CURR_VCLK;
		else
			member_type = METRICS_AVERAGE_VCLK;
		break;
	case PPCLK_DCLK:
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
			member_type = METRICS_CURR_DCLK;
		else
			member_type = METRICS_AVERAGE_DCLK;
		break;
	case PPCLK_FCLK:
		/* no AVERAGE metric exists for fclk; always use CURR */
		member_type = METRICS_CURR_FCLK;
		break;
	default:
		return -EINVAL;
	}

	return aldebaran_get_smu_metrics_data(smu,
					      member_type,
					      value);
}
669 
670 static int aldebaran_print_clk_levels(struct smu_context *smu,
671 				      enum smu_clk_type type, char *buf)
672 {
673 	int i, now, size = 0;
674 	int ret = 0;
675 	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
676 	struct pp_clock_levels_with_latency clocks;
677 	struct smu_13_0_dpm_table *single_dpm_table;
678 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
679 	struct smu_13_0_dpm_context *dpm_context = NULL;
680 	uint32_t display_levels;
681 	uint32_t freq_values[3] = {0};
682 	uint32_t min_clk, max_clk;
683 
684 	if (amdgpu_ras_intr_triggered())
685 		return snprintf(buf, PAGE_SIZE, "unavailable\n");
686 
687 	dpm_context = smu_dpm->dpm_context;
688 
689 	switch (type) {
690 
691 	case SMU_OD_SCLK:
692 		size = sprintf(buf, "%s:\n", "GFXCLK");
693 		fallthrough;
694 	case SMU_SCLK:
695 		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now);
696 		if (ret) {
697 			dev_err(smu->adev->dev, "Attempt to get current gfx clk Failed!");
698 			return ret;
699 		}
700 
701 		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
702 		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
703 		if (ret) {
704 			dev_err(smu->adev->dev, "Attempt to get gfx clk levels Failed!");
705 			return ret;
706 		}
707 
708 		display_levels = clocks.num_levels;
709 
710 		min_clk = pstate_table->gfxclk_pstate.curr.min;
711 		max_clk = pstate_table->gfxclk_pstate.curr.max;
712 
713 		freq_values[0] = min_clk;
714 		freq_values[1] = max_clk;
715 
716 		/* fine-grained dpm has only 2 levels */
717 		if (now > min_clk && now < max_clk) {
718 			display_levels = clocks.num_levels + 1;
719 			freq_values[2] = max_clk;
720 			freq_values[1] = now;
721 		}
722 
723 		/*
724 		 * For DPM disabled case, there will be only one clock level.
725 		 * And it's safe to assume that is always the current clock.
726 		 */
727 		if (display_levels == clocks.num_levels) {
728 			for (i = 0; i < clocks.num_levels; i++)
729 				size += sprintf(
730 					buf + size, "%d: %uMhz %s\n", i,
731 					freq_values[i],
732 					(clocks.num_levels == 1) ?
733 						"*" :
734 						(aldebaran_freqs_in_same_level(
735 							 freq_values[i], now) ?
736 							 "*" :
737 							 ""));
738 		} else {
739 			for (i = 0; i < display_levels; i++)
740 				size += sprintf(buf + size, "%d: %uMhz %s\n", i,
741 						freq_values[i], i == 1 ? "*" : "");
742 		}
743 
744 		break;
745 
746 	case SMU_OD_MCLK:
747 		size = sprintf(buf, "%s:\n", "MCLK");
748 		fallthrough;
749 	case SMU_MCLK:
750 		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now);
751 		if (ret) {
752 			dev_err(smu->adev->dev, "Attempt to get current mclk Failed!");
753 			return ret;
754 		}
755 
756 		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
757 		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
758 		if (ret) {
759 			dev_err(smu->adev->dev, "Attempt to get memory clk levels Failed!");
760 			return ret;
761 		}
762 
763 		for (i = 0; i < clocks.num_levels; i++)
764 			size += sprintf(buf + size, "%d: %uMhz %s\n",
765 					i, clocks.data[i].clocks_in_khz / 1000,
766 					(clocks.num_levels == 1) ? "*" :
767 					(aldebaran_freqs_in_same_level(
768 								       clocks.data[i].clocks_in_khz / 1000,
769 								       now) ? "*" : ""));
770 		break;
771 
772 	case SMU_SOCCLK:
773 		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_SOCCLK, &now);
774 		if (ret) {
775 			dev_err(smu->adev->dev, "Attempt to get current socclk Failed!");
776 			return ret;
777 		}
778 
779 		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
780 		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
781 		if (ret) {
782 			dev_err(smu->adev->dev, "Attempt to get socclk levels Failed!");
783 			return ret;
784 		}
785 
786 		for (i = 0; i < clocks.num_levels; i++)
787 			size += sprintf(buf + size, "%d: %uMhz %s\n",
788 					i, clocks.data[i].clocks_in_khz / 1000,
789 					(clocks.num_levels == 1) ? "*" :
790 					(aldebaran_freqs_in_same_level(
791 								       clocks.data[i].clocks_in_khz / 1000,
792 								       now) ? "*" : ""));
793 		break;
794 
795 	case SMU_FCLK:
796 		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_FCLK, &now);
797 		if (ret) {
798 			dev_err(smu->adev->dev, "Attempt to get current fclk Failed!");
799 			return ret;
800 		}
801 
802 		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
803 		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
804 		if (ret) {
805 			dev_err(smu->adev->dev, "Attempt to get fclk levels Failed!");
806 			return ret;
807 		}
808 
809 		for (i = 0; i < single_dpm_table->count; i++)
810 			size += sprintf(buf + size, "%d: %uMhz %s\n",
811 					i, single_dpm_table->dpm_levels[i].value,
812 					(clocks.num_levels == 1) ? "*" :
813 					(aldebaran_freqs_in_same_level(
814 								       clocks.data[i].clocks_in_khz / 1000,
815 								       now) ? "*" : ""));
816 		break;
817 
818 	case SMU_VCLK:
819 		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
820 		if (ret) {
821 			dev_err(smu->adev->dev, "Attempt to get current vclk Failed!");
822 			return ret;
823 		}
824 
825 		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
826 		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
827 		if (ret) {
828 			dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!");
829 			return ret;
830 		}
831 
832 		for (i = 0; i < single_dpm_table->count; i++)
833 			size += sprintf(buf + size, "%d: %uMhz %s\n",
834 					i, single_dpm_table->dpm_levels[i].value,
835 					(clocks.num_levels == 1) ? "*" :
836 					(aldebaran_freqs_in_same_level(
837 								       clocks.data[i].clocks_in_khz / 1000,
838 								       now) ? "*" : ""));
839 		break;
840 
841 	case SMU_DCLK:
842 		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
843 		if (ret) {
844 			dev_err(smu->adev->dev, "Attempt to get current dclk Failed!");
845 			return ret;
846 		}
847 
848 		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
849 		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
850 		if (ret) {
851 			dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!");
852 			return ret;
853 		}
854 
855 		for (i = 0; i < single_dpm_table->count; i++)
856 			size += sprintf(buf + size, "%d: %uMhz %s\n",
857 					i, single_dpm_table->dpm_levels[i].value,
858 					(clocks.num_levels == 1) ? "*" :
859 					(aldebaran_freqs_in_same_level(
860 								       clocks.data[i].clocks_in_khz / 1000,
861 								       now) ? "*" : ""));
862 		break;
863 
864 	default:
865 		break;
866 	}
867 
868 	return size;
869 }
870 
871 static int aldebaran_upload_dpm_level(struct smu_context *smu,
872 				      bool max,
873 				      uint32_t feature_mask,
874 				      uint32_t level)
875 {
876 	struct smu_13_0_dpm_context *dpm_context =
877 		smu->smu_dpm.dpm_context;
878 	uint32_t freq;
879 	int ret = 0;
880 
881 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
882 	    (feature_mask & FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT))) {
883 		freq = dpm_context->dpm_tables.gfx_table.dpm_levels[level].value;
884 		ret = smu_cmn_send_smc_msg_with_param(smu,
885 						      (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
886 						      (PPCLK_GFXCLK << 16) | (freq & 0xffff),
887 						      NULL);
888 		if (ret) {
889 			dev_err(smu->adev->dev, "Failed to set soft %s gfxclk !\n",
890 				max ? "max" : "min");
891 			return ret;
892 		}
893 	}
894 
895 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
896 	    (feature_mask & FEATURE_MASK(FEATURE_DPM_UCLK_BIT))) {
897 		freq = dpm_context->dpm_tables.uclk_table.dpm_levels[level].value;
898 		ret = smu_cmn_send_smc_msg_with_param(smu,
899 						      (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
900 						      (PPCLK_UCLK << 16) | (freq & 0xffff),
901 						      NULL);
902 		if (ret) {
903 			dev_err(smu->adev->dev, "Failed to set soft %s memclk !\n",
904 				max ? "max" : "min");
905 			return ret;
906 		}
907 	}
908 
909 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
910 	    (feature_mask & FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT))) {
911 		freq = dpm_context->dpm_tables.soc_table.dpm_levels[level].value;
912 		ret = smu_cmn_send_smc_msg_with_param(smu,
913 						      (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
914 						      (PPCLK_SOCCLK << 16) | (freq & 0xffff),
915 						      NULL);
916 		if (ret) {
917 			dev_err(smu->adev->dev, "Failed to set soft %s socclk !\n",
918 				max ? "max" : "min");
919 			return ret;
920 		}
921 	}
922 
923 	return ret;
924 }
925 
926 static int aldebaran_force_clk_levels(struct smu_context *smu,
927 				      enum smu_clk_type type, uint32_t mask)
928 {
929 	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
930 	struct smu_13_0_dpm_table *single_dpm_table = NULL;
931 	uint32_t soft_min_level, soft_max_level;
932 	int ret = 0;
933 
934 	soft_min_level = mask ? (ffs(mask) - 1) : 0;
935 	soft_max_level = mask ? (fls(mask) - 1) : 0;
936 
937 	switch (type) {
938 	case SMU_SCLK:
939 		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
940 		if (soft_max_level >= single_dpm_table->count) {
941 			dev_err(smu->adev->dev, "Clock level specified %d is over max allowed %d\n",
942 				soft_max_level, single_dpm_table->count - 1);
943 			ret = -EINVAL;
944 			break;
945 		}
946 
947 		ret = aldebaran_upload_dpm_level(smu,
948 						 false,
949 						 FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT),
950 						 soft_min_level);
951 		if (ret) {
952 			dev_err(smu->adev->dev, "Failed to upload boot level to lowest!\n");
953 			break;
954 		}
955 
956 		ret = aldebaran_upload_dpm_level(smu,
957 						 true,
958 						 FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT),
959 						 soft_max_level);
960 		if (ret)
961 			dev_err(smu->adev->dev, "Failed to upload dpm max level to highest!\n");
962 
963 		break;
964 
965 	case SMU_MCLK:
966 	case SMU_SOCCLK:
967 	case SMU_FCLK:
968 		/*
969 		 * Should not arrive here since aldebaran does not
970 		 * support mclk/socclk/fclk softmin/softmax settings
971 		 */
972 		ret = -EINVAL;
973 		break;
974 
975 	default:
976 		break;
977 	}
978 
979 	return ret;
980 }
981 
982 static int aldebaran_get_thermal_temperature_range(struct smu_context *smu,
983 						   struct smu_temperature_range *range)
984 {
985 	struct smu_table_context *table_context = &smu->smu_table;
986 	struct smu_13_0_powerplay_table *powerplay_table =
987 		table_context->power_play_table;
988 	PPTable_t *pptable = smu->smu_table.driver_pptable;
989 
990 	if (!range)
991 		return -EINVAL;
992 
993 	memcpy(range, &smu13_thermal_policy[0], sizeof(struct smu_temperature_range));
994 
995 	range->hotspot_crit_max = pptable->ThotspotLimit *
996 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
997 	range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
998 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
999 	range->mem_crit_max = pptable->TmemLimit *
1000 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1001 	range->mem_emergency_max = (pptable->TmemLimit + CTF_OFFSET_MEM)*
1002 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1003 	range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
1004 
1005 	return 0;
1006 }
1007 
1008 static int aldebaran_get_current_activity_percent(struct smu_context *smu,
1009 						  enum amd_pp_sensors sensor,
1010 						  uint32_t *value)
1011 {
1012 	int ret = 0;
1013 
1014 	if (!value)
1015 		return -EINVAL;
1016 
1017 	switch (sensor) {
1018 	case AMDGPU_PP_SENSOR_GPU_LOAD:
1019 		ret = aldebaran_get_smu_metrics_data(smu,
1020 						     METRICS_AVERAGE_GFXACTIVITY,
1021 						     value);
1022 		break;
1023 	case AMDGPU_PP_SENSOR_MEM_LOAD:
1024 		ret = aldebaran_get_smu_metrics_data(smu,
1025 						     METRICS_AVERAGE_MEMACTIVITY,
1026 						     value);
1027 		break;
1028 	default:
1029 		dev_err(smu->adev->dev, "Invalid sensor for retrieving clock activity\n");
1030 		return -EINVAL;
1031 	}
1032 
1033 	return ret;
1034 }
1035 
1036 static int aldebaran_get_gpu_power(struct smu_context *smu, uint32_t *value)
1037 {
1038 	if (!value)
1039 		return -EINVAL;
1040 
1041 	return aldebaran_get_smu_metrics_data(smu,
1042 					      METRICS_AVERAGE_SOCKETPOWER,
1043 					      value);
1044 }
1045 
1046 static int aldebaran_thermal_get_temperature(struct smu_context *smu,
1047 					     enum amd_pp_sensors sensor,
1048 					     uint32_t *value)
1049 {
1050 	int ret = 0;
1051 
1052 	if (!value)
1053 		return -EINVAL;
1054 
1055 	switch (sensor) {
1056 	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
1057 		ret = aldebaran_get_smu_metrics_data(smu,
1058 						     METRICS_TEMPERATURE_HOTSPOT,
1059 						     value);
1060 		break;
1061 	case AMDGPU_PP_SENSOR_EDGE_TEMP:
1062 		ret = aldebaran_get_smu_metrics_data(smu,
1063 						     METRICS_TEMPERATURE_EDGE,
1064 						     value);
1065 		break;
1066 	case AMDGPU_PP_SENSOR_MEM_TEMP:
1067 		ret = aldebaran_get_smu_metrics_data(smu,
1068 						     METRICS_TEMPERATURE_MEM,
1069 						     value);
1070 		break;
1071 	default:
1072 		dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n");
1073 		return -EINVAL;
1074 	}
1075 
1076 	return ret;
1077 }
1078 
/*
 * Top-level sensor read entry point (pptable_funcs.read_sensor).
 *
 * Dispatches @sensor to the metrics-table helpers above, writes the
 * result into @data and sets *@size to the number of bytes written
 * (always 4 for the supported sensors).  Unsupported sensors return
 * -EOPNOTSUPP.  All reads are serialized by smu->sensor_lock.
 */
static int aldebaran_read_sensor(struct smu_context *smu,
				 enum amd_pp_sensors sensor,
				 void *data, uint32_t *size)
{
	int ret = 0;

	/* Skip hardware access entirely once a RAS interrupt has fired. */
	if (amdgpu_ras_intr_triggered())
		return 0;

	if (!data || !size)
		return -EINVAL;

	mutex_lock(&smu->sensor_lock);
	switch (sensor) {
	case AMDGPU_PP_SENSOR_MEM_LOAD:
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = aldebaran_get_current_activity_percent(smu,
							     sensor,
							     (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		ret = aldebaran_get_gpu_power(smu, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = aldebaran_thermal_get_temperature(smu, sensor,
							(uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data);
		/* the output clock frequency in 10K unit */
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		/* Same 10 kHz unit conversion as the MCLK case above. */
		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
		*size = 4;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&smu->sensor_lock);

	return ret;
}
1134 
1135 static int aldebaran_get_power_limit(struct smu_context *smu)
1136 {
1137 	PPTable_t *pptable = smu->smu_table.driver_pptable;
1138 	uint32_t power_limit = 0;
1139 	int ret;
1140 
1141 	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
1142 		return -EINVAL;
1143 
1144 	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit, &power_limit);
1145 
1146 	if (ret) {
1147 		/* the last hope to figure out the ppt limit */
1148 		if (!pptable) {
1149 			dev_err(smu->adev->dev, "Cannot get PPT limit due to pptable missing!");
1150 			return -EINVAL;
1151 		}
1152 		power_limit = pptable->PptLimit;
1153 	}
1154 
1155 	smu->current_power_limit = smu->default_power_limit = power_limit;
1156 	if (pptable)
1157 		smu->max_power_limit = pptable->PptLimit;
1158 
1159 	return 0;
1160 }
1161 
1162 static int aldebaran_system_features_control(struct  smu_context *smu, bool enable)
1163 {
1164 	int ret;
1165 
1166 	ret = smu_v13_0_system_features_control(smu, enable);
1167 	if (!ret && enable)
1168 		ret = aldebaran_run_btc(smu);
1169 
1170 	return ret;
1171 }
1172 
1173 static int aldebaran_set_performance_level(struct smu_context *smu,
1174 					   enum amd_dpm_forced_level level)
1175 {
1176 	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
1177 	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
1178 	struct smu_13_0_dpm_table *gfx_table =
1179 		&dpm_context->dpm_tables.gfx_table;
1180 	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
1181 
1182 	/* Disable determinism if switching to another mode */
1183 	if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
1184 	    (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) {
1185 		smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
1186 		pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
1187 	}
1188 
1189 	switch (level) {
1190 
1191 	case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
1192 		return 0;
1193 
1194 	case AMD_DPM_FORCED_LEVEL_HIGH:
1195 	case AMD_DPM_FORCED_LEVEL_LOW:
1196 	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1197 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1198 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1199 	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1200 	default:
1201 		break;
1202 	}
1203 
1204 	return smu_v13_0_set_performance_level(smu, level);
1205 }
1206 
/*
 * Set the soft GFX clock limits.
 *
 * Only SMU_GFXCLK/SMU_SCLK are accepted, and only in MANUAL or
 * PERF_DETERMINISM mode.  In MANUAL mode the [min, max] pair is
 * applied directly (min must be strictly below max).  In
 * PERF_DETERMINISM mode only @max is honoured: the full default range
 * is restored first, then determinism is enabled at the requested
 * ceiling.
 *
 * Returns 0 on success or a negative errno.
 */
static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
	struct amdgpu_device *adev = smu->adev;
	uint32_t min_clk;
	uint32_t max_clk;
	int ret = 0;

	if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK)
		return -EINVAL;

	if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
			&& (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
		return -EINVAL;

	if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		if (min >= max) {
			dev_err(smu->adev->dev,
				"Minimum GFX clk should be less than the maximum allowed clock\n");
			return -EINVAL;
		}

		/* Skip the SMU round-trip when the range is already current. */
		if ((min == pstate_table->gfxclk_pstate.curr.min) &&
		    (max == pstate_table->gfxclk_pstate.curr.max))
			return 0;

		ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK,
							    min, max);
		if (!ret) {
			pstate_table->gfxclk_pstate.curr.min = min;
			pstate_table->gfxclk_pstate.curr.max = max;
		}

		return ret;
	}

	if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
		/* The determinism ceiling must lie within the hardware range. */
		if (!max || (max < dpm_context->dpm_tables.gfx_table.min) ||
			(max > dpm_context->dpm_tables.gfx_table.max)) {
			dev_warn(adev->dev,
					"Invalid max frequency %d MHz specified for determinism\n", max);
			return -EINVAL;
		}

		/* Restore default min/max clocks and enable determinism */
		min_clk = dpm_context->dpm_tables.gfx_table.min;
		max_clk = dpm_context->dpm_tables.gfx_table.max;
		ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
		if (!ret) {
			/* Short settle delay before enabling determinism —
			 * presumably required by the firmware; TODO confirm.
			 */
			usleep_range(500, 1000);
			ret = smu_cmn_send_smc_msg_with_param(smu,
					SMU_MSG_EnableDeterminism,
					max, NULL);
			if (ret) {
				dev_err(adev->dev,
						"Failed to enable determinism at GFX clock %d MHz\n", max);
			} else {
				pstate_table->gfxclk_pstate.curr.min = min_clk;
				pstate_table->gfxclk_pstate.curr.max = max;
			}
		}
	}

	return ret;
}
1277 
/*
 * Handle sysfs overdrive-table edits for the GFX clock.
 *
 * PP_OD_EDIT_SCLK_VDDC_TABLE stages a custom min (input[0] == 0) or
 * max (input[0] == 1) frequency after range-checking it against the
 * hardware DPM table; the staged values take effect only on
 * PP_OD_COMMIT_DPM_TABLE.  PP_OD_RESTORE_DEFAULT_TABLE reapplies the
 * default hardware range immediately.  Only valid in MANUAL or
 * PERF_DETERMINISM mode.
 */
static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
							long input[], uint32_t size)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
	uint32_t min_clk;
	uint32_t max_clk;
	int ret = 0;

	/* Only allowed in manual or determinism mode */
	if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
			&& (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
		return -EINVAL;

	switch (type) {
	case PP_OD_EDIT_SCLK_VDDC_TABLE:
		if (size != 2) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		}

		if (input[0] == 0) {
			if (input[1] < dpm_context->dpm_tables.gfx_table.min) {
				/* Reject and reset the staged min to the active one. */
				dev_warn(smu->adev->dev, "Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n",
					input[1], dpm_context->dpm_tables.gfx_table.min);
				pstate_table->gfxclk_pstate.custom.min =
					pstate_table->gfxclk_pstate.curr.min;
				return -EINVAL;
			}

			pstate_table->gfxclk_pstate.custom.min = input[1];
		} else if (input[0] == 1) {
			if (input[1] > dpm_context->dpm_tables.gfx_table.max) {
				/* Reject and reset the staged max to the active one. */
				dev_warn(smu->adev->dev, "Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
					input[1], dpm_context->dpm_tables.gfx_table.max);
				pstate_table->gfxclk_pstate.custom.max =
					pstate_table->gfxclk_pstate.curr.max;
				return -EINVAL;
			}

			pstate_table->gfxclk_pstate.custom.max = input[1];
		} else {
			return -EINVAL;
		}
		break;
	case PP_OD_RESTORE_DEFAULT_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		} else {
			/* Use the default frequencies for manual and determinism mode */
			min_clk = dpm_context->dpm_tables.gfx_table.min;
			max_clk = dpm_context->dpm_tables.gfx_table.max;

			return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
		}
		break;
	case PP_OD_COMMIT_DPM_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		} else {
			/* Fall back to the active limits for any endpoint the
			 * user never staged (custom value still 0).
			 */
			if (!pstate_table->gfxclk_pstate.custom.min)
				pstate_table->gfxclk_pstate.custom.min =
					pstate_table->gfxclk_pstate.curr.min;

			if (!pstate_table->gfxclk_pstate.custom.max)
				pstate_table->gfxclk_pstate.custom.max =
					pstate_table->gfxclk_pstate.curr.max;

			min_clk = pstate_table->gfxclk_pstate.custom.min;
			max_clk = pstate_table->gfxclk_pstate.custom.max;

			return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
		}
		break;
	default:
		return -ENOSYS;
	}

	return ret;
}
1361 
1362 static bool aldebaran_is_dpm_running(struct smu_context *smu)
1363 {
1364 	int ret;
1365 	uint32_t feature_mask[2];
1366 	unsigned long feature_enabled;
1367 
1368 	ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
1369 	if (ret)
1370 		return false;
1371 	feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
1372 					  ((uint64_t)feature_mask[1] << 32));
1373 	return !!(feature_enabled & SMC_DPM_FEATURE);
1374 }
1375 
/*
 * Build a SwI2cRequest_t for the SMU's software-I2C engine.
 *
 * @write:    true for a write transaction, false for a read
 * @address:  7-bit slave address
 * @numbytes: total command count, including the two leading EEPROM
 *            sub-address bytes already present in @data
 * @data:     buffer whose first two bytes are the EEPROM address;
 *            remaining bytes are the payload (writes) or scratch (reads)
 */
static void aldebaran_fill_i2c_req(SwI2cRequest_t  *req, bool write,
				  uint8_t address, uint32_t numbytes,
				  uint8_t *data)
{
	int i;

	req->I2CcontrollerPort = 0;
	req->I2CSpeed = 2;
	req->SlaveAddress = address;
	req->NumCmds = numbytes;

	for (i = 0; i < numbytes; i++) {
		SwI2cCmd_t *cmd =  &req->SwI2cCmds[i];

		/* First 2 bytes are always write for lower 2b EEPROM address */
		if (i < 2)
			cmd->CmdConfig = CMDCONFIG_READWRITE_MASK;
		else
			cmd->CmdConfig = write ? CMDCONFIG_READWRITE_MASK : 0;


		/* Add RESTART for read  after address filled */
		cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0;

		/* Add STOP in the end */
		cmd->CmdConfig |= (i == (numbytes - 1)) ? CMDCONFIG_STOP_MASK : 0;

		/* Fill with data regardless if read or write to simplify code */
		cmd->ReadWriteData = data[i];
	}
}
1407 
1408 static int aldebaran_i2c_read_data(struct i2c_adapter *control,
1409 					       uint8_t address,
1410 					       uint8_t *data,
1411 					       uint32_t numbytes)
1412 {
1413 	uint32_t  i, ret = 0;
1414 	SwI2cRequest_t req;
1415 	struct amdgpu_device *adev = to_amdgpu_device(control);
1416 	struct smu_table_context *smu_table = &adev->smu.smu_table;
1417 	struct smu_table *table = &smu_table->driver_table;
1418 
1419 	if (numbytes > MAX_SW_I2C_COMMANDS) {
1420 		dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
1421 			numbytes, MAX_SW_I2C_COMMANDS);
1422 		return -EINVAL;
1423 	}
1424 
1425 	memset(&req, 0, sizeof(req));
1426 	aldebaran_fill_i2c_req(&req, false, address, numbytes, data);
1427 
1428 	mutex_lock(&adev->smu.mutex);
1429 	/* Now read data starting with that address */
1430 	ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
1431 					true);
1432 	mutex_unlock(&adev->smu.mutex);
1433 
1434 	if (!ret) {
1435 		SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr;
1436 
1437 		/* Assume SMU  fills res.SwI2cCmds[i].Data with read bytes */
1438 		for (i = 0; i < numbytes; i++)
1439 			data[i] = res->SwI2cCmds[i].ReadWriteData;
1440 
1441 		dev_dbg(adev->dev, "aldebaran_i2c_read_data, address = %x, bytes = %d, data :",
1442 				  (uint16_t)address, numbytes);
1443 
1444 		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
1445 			       8, 1, data, numbytes, false);
1446 	} else
1447 		dev_err(adev->dev, "aldebaran_i2c_read_data - error occurred :%x", ret);
1448 
1449 	return ret;
1450 }
1451 
1452 static int aldebaran_i2c_write_data(struct i2c_adapter *control,
1453 						uint8_t address,
1454 						uint8_t *data,
1455 						uint32_t numbytes)
1456 {
1457 	uint32_t ret;
1458 	SwI2cRequest_t req;
1459 	struct amdgpu_device *adev = to_amdgpu_device(control);
1460 
1461 	if (numbytes > MAX_SW_I2C_COMMANDS) {
1462 		dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
1463 			numbytes, MAX_SW_I2C_COMMANDS);
1464 		return -EINVAL;
1465 	}
1466 
1467 	memset(&req, 0, sizeof(req));
1468 	aldebaran_fill_i2c_req(&req, true, address, numbytes, data);
1469 
1470 	mutex_lock(&adev->smu.mutex);
1471 	ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
1472 	mutex_unlock(&adev->smu.mutex);
1473 
1474 	if (!ret) {
1475 		dev_dbg(adev->dev, "aldebaran_i2c_write(), address = %x, bytes = %d , data: ",
1476 					 (uint16_t)address, numbytes);
1477 
1478 		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
1479 			       8, 1, data, numbytes, false);
1480 		/*
1481 		 * According to EEPROM spec there is a MAX of 10 ms required for
1482 		 * EEPROM to flush internal RX buffer after STOP was issued at the
1483 		 * end of write transaction. During this time the EEPROM will not be
1484 		 * responsive to any more commands - so wait a bit more.
1485 		 */
1486 		msleep(10);
1487 
1488 	} else
1489 		dev_err(adev->dev, "aldebaran_i2c_write- error occurred :%x", ret);
1490 
1491 	return ret;
1492 }
1493 
/*
 * i2c_algorithm.master_xfer implementation.
 *
 * Each message is assumed to start with a 2-byte big-endian EEPROM
 * sub-address (msgs[i].buf[0..1]) followed by the payload — TODO
 * confirm every caller follows this layout; msgs[i].len < 2 would
 * underflow data_size.  Because the SMU accepts at most
 * MAX_SW_I2C_COMMANDS bytes per request, the payload is split into
 * chunks, each prefixed with the advancing sub-address.
 *
 * Returns the number of messages processed, or -EIO on any chunk
 * failure.
 */
static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	uint32_t  i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0;
	uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 };

	for (i = 0; i < num; i++) {
		/*
		 * SMU interface allows at most MAX_SW_I2C_COMMANDS bytes of data at
		 * once and hence the data needs to be spliced into chunks and sent each
		 * chunk separately
		 */
		data_size = msgs[i].len - 2;
		data_chunk_size = MAX_SW_I2C_COMMANDS - 2;
		next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff);
		data_ptr = msgs[i].buf + 2;

		/* Full-size chunks first. */
		for (j = 0; j < data_size / data_chunk_size; j++) {
			/* Insert the EEPROM dest addess, bits 0-15 */
			data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
			data_chunk[1] = (next_eeprom_addr & 0xff);

			if (msgs[i].flags & I2C_M_RD) {
				ret = aldebaran_i2c_read_data(i2c_adap,
							     (uint8_t)msgs[i].addr,
							     data_chunk, MAX_SW_I2C_COMMANDS);

				memcpy(data_ptr, data_chunk + 2, data_chunk_size);
			} else {

				memcpy(data_chunk + 2, data_ptr, data_chunk_size);

				ret = aldebaran_i2c_write_data(i2c_adap,
							      (uint8_t)msgs[i].addr,
							      data_chunk, MAX_SW_I2C_COMMANDS);
			}

			if (ret) {
				num = -EIO;
				goto fail;
			}

			next_eeprom_addr += data_chunk_size;
			data_ptr += data_chunk_size;
		}

		/* Trailing partial chunk, if any. */
		if (data_size % data_chunk_size) {
			data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
			data_chunk[1] = (next_eeprom_addr & 0xff);

			if (msgs[i].flags & I2C_M_RD) {
				ret = aldebaran_i2c_read_data(i2c_adap,
							     (uint8_t)msgs[i].addr,
							     data_chunk, (data_size % data_chunk_size) + 2);

				memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
			} else {
				memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);

				ret = aldebaran_i2c_write_data(i2c_adap,
							      (uint8_t)msgs[i].addr,
							      data_chunk, (data_size % data_chunk_size) + 2);
			}

			if (ret) {
				num = -EIO;
				goto fail;
			}
		}
	}

fail:
	return num;
}
1568 
1569 static u32 aldebaran_i2c_func(struct i2c_adapter *adap)
1570 {
1571 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1572 }
1573 
1574 
/* I2C algorithm hooks, backed by the SMU SwI2C request helpers above. */
static const struct i2c_algorithm aldebaran_i2c_algo = {
	.master_xfer = aldebaran_i2c_xfer,
	.functionality = aldebaran_i2c_func,
};
1579 
1580 static int aldebaran_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
1581 {
1582 	struct amdgpu_device *adev = to_amdgpu_device(control);
1583 	int res;
1584 
1585 	control->owner = THIS_MODULE;
1586 	control->class = I2C_CLASS_SPD;
1587 	control->dev.parent = &adev->pdev->dev;
1588 	control->algo = &aldebaran_i2c_algo;
1589 	snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
1590 
1591 	res = i2c_add_adapter(control);
1592 	if (res)
1593 		DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
1594 
1595 	return res;
1596 }
1597 
/* Unregister the SMU-backed I2C adapter set up in _control_init(). */
static void aldebaran_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
{
	i2c_del_adapter(control);
}
1602 
/*
 * Read the 64-bit public serial number from the SMU metrics table and
 * publish it as adev->unique_id plus the printable adev->serial
 * string.  If the metrics refresh fails, the id falls back to 0.
 */
static void aldebaran_get_unique_id(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	SmuMetrics_t *metrics = smu->smu_table.metrics_table;
	uint32_t upper32 = 0, lower32 = 0;
	int ret;

	mutex_lock(&smu->metrics_lock);
	/* Refresh the cached metrics table (bypass staleness check). */
	ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
	if (ret)
		goto out_unlock;

	upper32 = metrics->PublicSerialNumUpper32;
	lower32 = metrics->PublicSerialNumLower32;

out_unlock:
	mutex_unlock(&smu->metrics_lock);

	adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
	sprintf(adev->serial, "%016llx", adev->unique_id);
}
1624 
/* BACO is not supported on aldebaran. */
static bool aldebaran_is_baco_supported(struct smu_context *smu)
{
	return false;
}
1631 
1632 static int aldebaran_set_df_cstate(struct smu_context *smu,
1633 				   enum pp_df_cstate state)
1634 {
1635 	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
1636 }
1637 
1638 static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en)
1639 {
1640 	return smu_cmn_send_smc_msg_with_param(smu,
1641 					       SMU_MSG_GmiPwrDnControl,
1642 					       en ? 1 : 0,
1643 					       NULL);
1644 }
1645 
/* Maps throttler-status bits to human-readable rail/sensor names for
 * the thermal throttling log message below.
 */
static const struct throttling_logging_label {
	uint32_t feature_mask;
	const char *label;
} logging_label[] = {
	{(1U << THROTTLER_TEMP_MEM_BIT), "HBM"},
	{(1U << THROTTLER_TEMP_VR_GFX_BIT), "VR of GFX rail"},
	{(1U << THROTTLER_TEMP_VR_MEM_BIT), "VR of HBM rail"},
	{(1U << THROTTLER_TEMP_VR_SOC_BIT), "VR of SOC rail"},
};
/*
 * Log a kernel warning naming every throttler currently asserted in
 * the SMU throttler-status word, and forward the raw status to KFD's
 * SMI event interface.
 */
static void aldebaran_log_thermal_throttling_event(struct smu_context *smu)
{
	int ret;
	int throttler_idx, throtting_events = 0, buf_idx = 0;
	struct amdgpu_device *adev = smu->adev;
	uint32_t throttler_status;
	char log_buf[256];

	ret = aldebaran_get_smu_metrics_data(smu,
					     METRICS_THROTTLER_STATUS,
					     &throttler_status);
	if (ret)
		return;

	/* Build "A and B and C" from the asserted logging_label entries. */
	memset(log_buf, 0, sizeof(log_buf));
	for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label);
	     throttler_idx++) {
		if (throttler_status & logging_label[throttler_idx].feature_mask) {
			throtting_events++;
			buf_idx += snprintf(log_buf + buf_idx,
					    sizeof(log_buf) - buf_idx,
					    "%s%s",
					    throtting_events > 1 ? " and " : "",
					    logging_label[throttler_idx].label);
			/* snprintf returns the would-be length, so buf_idx can
			 * pass the buffer end on truncation; stop appending.
			 */
			if (buf_idx >= sizeof(log_buf)) {
				dev_err(adev->dev, "buffer overflow!\n");
				log_buf[sizeof(log_buf) - 1] = '\0';
				break;
			}
		}
	}

	dev_warn(adev->dev, "WARN: GPU thermal throttling temperature reached, expect performance decrease. %s.\n",
		 log_buf);
	kgd2kfd_smi_event_throttle(smu->adev->kfd.dev, throttler_status);
}
1691 
/*
 * Report the current PCIe link speed.  When the ESM control register
 * indicates an ESM rate is active, derive the speed from its rate
 * field (bit layout per the register spec — TODO confirm on real
 * hardware, see comment below); otherwise fall back to the common
 * smu_v13_0 query.
 */
static int aldebaran_get_current_pcie_link_speed(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t esm_ctrl;

	/* TODO: confirm this on real target */
	esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
	if ((esm_ctrl >> 15) & 0x1FFFF)
		return (((esm_ctrl >> 8) & 0x3F) + 128);

	return smu_v13_0_get_current_pcie_link_speed(smu);
}
1704 
/*
 * Populate the cached gpu_metrics_v1_2 table from a fresh SMU metrics
 * snapshot and hand it back via @table.
 *
 * Returns sizeof(struct gpu_metrics_v1_2) on success, or a negative
 * errno if the metrics table could not be refreshed.
 */
static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
					 void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_2 *gpu_metrics =
		(struct gpu_metrics_v1_2 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int i, ret = 0;

	/* Force a refresh (bypass the staleness check) into a local copy. */
	ret = smu_cmn_get_metrics_table(smu,
					&metrics,
					true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 2);

	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
	gpu_metrics->temperature_mem = metrics.TemperatureHBM;
	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
	gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem;

	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
	/* No multimedia activity metric exposed by this firmware. */
	gpu_metrics->average_mm_activity = 0;

	gpu_metrics->average_socket_power = metrics.AverageSocketPower;
	/* Energy accumulator is split across two 32-bit registers. */
	gpu_metrics->energy_accumulator =
			(uint64_t)metrics.EnergyAcc64bitHigh << 32 |
			metrics.EnergyAcc64bitLow;

	gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
	gpu_metrics->average_vclk0_frequency = 0;
	gpu_metrics->average_dclk0_frequency = 0;

	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;

	/* Aldebaran has no controllable fan. */
	gpu_metrics->current_fan_speed = 0;

	gpu_metrics->pcie_link_width =
		smu_v13_0_get_current_pcie_link_width(smu);
	gpu_metrics->pcie_link_speed =
		aldebaran_get_current_pcie_link_speed(smu);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	gpu_metrics->gfx_activity_acc = metrics.GfxBusyAcc;
	gpu_metrics->mem_activity_acc = metrics.DramBusyAcc;

	for (i = 0; i < NUM_HBM_INSTANCES; i++)
		gpu_metrics->temperature_hbm[i] = metrics.TemperatureAllHBM[i];

	/* Firmware timestamp is split across two 32-bit registers. */
	gpu_metrics->firmware_timestamp = ((uint64_t)metrics.TimeStampHigh << 32) |
					metrics.TimeStampLow;

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_2);
}
1774 
/*
 * Perform an SMU-driven mode-2 (soft) GPU reset.
 *
 * Requires PM firmware >= 0x00441400.  The reset message is sent
 * without waiting, the driver sleeps for the FLR-like settle time,
 * restores the saved PCI config space, then polls for the firmware's
 * acknowledgement.  Returns 0 on success or a negative errno.
 */
static int aldebaran_mode2_reset(struct smu_context *smu)
{
	u32 smu_version;
	int ret = 0, index;
	struct amdgpu_device *adev = smu->adev;
	int timeout = 10;

	smu_cmn_get_smc_version(smu, NULL, &smu_version);

	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
						SMU_MSG_GfxDeviceDriverReset);

	mutex_lock(&smu->message_lock);
	if (smu_version >= 0x00441400) {
		ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2);
		/* This is similar to FLR, wait till max FLR timeout */
		msleep(100);
		dev_dbg(smu->adev->dev, "restore config space...\n");
		/* Restore the config space saved during init */
		amdgpu_device_load_pci_state(adev->pdev);

		dev_dbg(smu->adev->dev, "wait for reset ack\n");
		/* NOTE(review): this loop is entered only when the send itself
		 * returned -ETIME; if the send returns 0 the ack is never
		 * polled and the function returns success — confirm intent.
		 */
		while (ret == -ETIME && timeout)  {
			ret = smu_cmn_wait_for_response(smu);
			/* Wait a bit more time for getting ACK */
			if (ret == -ETIME) {
				--timeout;
				usleep_range(500, 1000);
				continue;
			}

			if (ret != 1) {
				dev_err(adev->dev, "failed to send mode2 message \tparam: 0x%08x response %#x\n",
						SMU_RESET_MODE_2, ret);
				goto out;
			}
		}

	} else {
		dev_err(adev->dev, "smu fw 0x%x does not support MSG_GfxDeviceDriverReset MSG\n",
				smu_version);
	}

	/* A response of 1 from the firmware means success. */
	if (ret == 1)
		ret = 0;
out:
	mutex_unlock(&smu->message_lock);

	return ret;
}
1825 
/*
 * Report whether mode-1 reset is available.  The firmware-version and
 * PSP-liveness probe below is currently compiled out (#if 0), so this
 * unconditionally reports support.
 */
static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
{
#if 0
	struct amdgpu_device *adev = smu->adev;
	u32 smu_version;
	uint32_t val;
	/**
	 * PM FW version support mode1 reset from 68.07
	 */
	smu_cmn_get_smc_version(smu, NULL, &smu_version);
	if ((smu_version < 0x00440700))
		return false;
	/**
	 * mode1 reset relies on PSP, so we should check if
	 * PSP is alive.
	 */
	val = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);

	return val != 0x0;
#endif
	return true;
}
1848 
1849 static bool aldebaran_is_mode2_reset_supported(struct smu_context *smu)
1850 {
1851 	return true;
1852 }
1853 
1854 static int aldebaran_set_mp1_state(struct smu_context *smu,
1855 				   enum pp_mp1_state mp1_state)
1856 {
1857 	switch (mp1_state) {
1858 	case PP_MP1_STATE_UNLOAD:
1859 		return smu_cmn_set_mp1_state(smu, mp1_state);
1860 	default:
1861 		return 0;
1862 	}
1863 }
1864 
1865 static const struct pptable_funcs aldebaran_ppt_funcs = {
1866 	/* init dpm */
1867 	.get_allowed_feature_mask = aldebaran_get_allowed_feature_mask,
1868 	/* dpm/clk tables */
1869 	.set_default_dpm_table = aldebaran_set_default_dpm_table,
1870 	.populate_umd_state_clk = aldebaran_populate_umd_state_clk,
1871 	.get_thermal_temperature_range = aldebaran_get_thermal_temperature_range,
1872 	.print_clk_levels = aldebaran_print_clk_levels,
1873 	.force_clk_levels = aldebaran_force_clk_levels,
1874 	.read_sensor = aldebaran_read_sensor,
1875 	.set_performance_level = aldebaran_set_performance_level,
1876 	.get_power_limit = aldebaran_get_power_limit,
1877 	.is_dpm_running = aldebaran_is_dpm_running,
1878 	.get_unique_id = aldebaran_get_unique_id,
1879 	.init_microcode = smu_v13_0_init_microcode,
1880 	.load_microcode = smu_v13_0_load_microcode,
1881 	.fini_microcode = smu_v13_0_fini_microcode,
1882 	.init_smc_tables = aldebaran_init_smc_tables,
1883 	.fini_smc_tables = smu_v13_0_fini_smc_tables,
1884 	.init_power = smu_v13_0_init_power,
1885 	.fini_power = smu_v13_0_fini_power,
1886 	.check_fw_status = smu_v13_0_check_fw_status,
1887 	/* pptable related */
1888 	.setup_pptable = aldebaran_setup_pptable,
1889 	.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
1890 	.check_fw_version = smu_v13_0_check_fw_version,
1891 	.write_pptable = smu_cmn_write_pptable,
1892 	.set_driver_table_location = smu_v13_0_set_driver_table_location,
1893 	.set_tool_table_location = smu_v13_0_set_tool_table_location,
1894 	.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
1895 	.system_features_control = aldebaran_system_features_control,
1896 	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
1897 	.send_smc_msg = smu_cmn_send_smc_msg,
1898 	.get_enabled_mask = smu_cmn_get_enabled_mask,
1899 	.feature_is_enabled = smu_cmn_feature_is_enabled,
1900 	.disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
1901 	.set_power_limit = smu_v13_0_set_power_limit,
1902 	.init_max_sustainable_clocks = smu_v13_0_init_max_sustainable_clocks,
1903 	.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
1904 	.disable_thermal_alert = smu_v13_0_disable_thermal_alert,
1905 	.set_xgmi_pstate = smu_v13_0_set_xgmi_pstate,
1906 	.register_irq_handler = smu_v13_0_register_irq_handler,
1907 	.set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme,
1908 	.get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc,
1909 	.baco_is_support= aldebaran_is_baco_supported,
1910 	.get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
1911 	.set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
1912 	.od_edit_dpm_table = aldebaran_usr_edit_dpm_table,
1913 	.set_df_cstate = aldebaran_set_df_cstate,
1914 	.allow_xgmi_power_down = aldebaran_allow_xgmi_power_down,
1915 	.log_thermal_throttling_event = aldebaran_log_thermal_throttling_event,
1916 	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
1917 	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
1918 	.get_gpu_metrics = aldebaran_get_gpu_metrics,
1919 	.mode1_reset_is_support = aldebaran_is_mode1_reset_supported,
1920 	.mode2_reset_is_support = aldebaran_is_mode2_reset_supported,
1921 	.mode1_reset = smu_v13_0_mode1_reset,
1922 	.set_mp1_state = aldebaran_set_mp1_state,
1923 	.mode2_reset = aldebaran_mode2_reset,
1924 	.wait_for_event = smu_v13_0_wait_for_event,
1925 	.i2c_init = aldebaran_i2c_control_init,
1926 	.i2c_fini = aldebaran_i2c_control_fini,
1927 };
1928 
1929 void aldebaran_set_ppt_funcs(struct smu_context *smu)
1930 {
1931 	smu->ppt_funcs = &aldebaran_ppt_funcs;
1932 	smu->message_map = aldebaran_message_map;
1933 	smu->clock_map = aldebaran_clk_map;
1934 	smu->feature_map = aldebaran_feature_mask_map;
1935 	smu->table_map = aldebaran_table_map;
1936 }
1937