/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L3

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v12_0.h"
#include "soc15_common.h"
#include "atom.h"
#include "smu_cmn.h"

#include "asic_reg/mp/mp_12_0_0_offset.h"
#include "asic_reg/mp/mp_12_0_0_sh_mask.h"
#include "asic_reg/smuio/smuio_12_0_0_offset.h"
#include "asic_reg/smuio/smuio_12_0_0_sh_mask.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

/*
 * Because some SMU12 based ASICs use older ip offset tables,
 * undefine this register from the smuio12 header to prevent
 * confusion down the road.
 */
#undef mmPWR_MISC_CNTL_STATUS

#define smnMP1_FIRMWARE_FLAGS                                0x3010024

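/**
 * smu_v12_0_check_fw_status - check whether the SMU firmware is running
 * @smu: smu context pointer
 *
 * Read the MP1 firmware flags register and test the interrupts-enabled bit.
 *
 * Return: 0 if the firmware is up, -EIO otherwise.
 */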
int smu_v12_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
		(smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
		MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

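/**
 * smu_v12_0_check_fw_version - check driver/firmware interface compatibility
 * @smu: smu context pointer
 *
 * Query the firmware and interface versions from the SMC and warn, without
 * failing, when the firmware interface does not match the one the driver
 * was built against.
 *
 * Return: 0 on success, a negative error code if the version query fails.
 */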
int smu_v12_0_check_fw_version(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint16_t smu_major;
	uint8_t smu_minor, smu_debug;
	int ret = 0;

	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	smu_major = (smu_version >> 16) & 0xffff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;
	if (smu->is_apu)
		adev->pm.fw_version = smu_version;

	/*
	 * 1. if_version mismatch is not critical as our fw is designed
	 * to be backward compatible.
	 * 2. New fw usually brings some optimizations. But those are
	 * visible only with the paired driver.
	 * Considering the above, we just leave the user a warning message
	 * instead of halting driver loading.
	 */
	if (if_version != smu->smc_driver_if_version) {
		dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			"smu fw version = 0x%08x (%d.%d.%d)\n",
			smu->smc_driver_if_version, if_version,
			smu_version, smu_major, smu_minor, smu_debug);
		dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
	}

	return ret;
}

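/**
 * smu_v12_0_powergate_sdma - power up or power down the SDMA block
 * @smu: smu context pointer
 * @gate: true to power down SDMA, false to power it up
 *
 * Only meaningful on APUs; a no-op otherwise.
 *
 * Return: 0 on success, a negative error code on failure.
 */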
int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
{
	if (!smu->is_apu)
		return 0;

	if (gate)
		return smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownSdma, NULL);
	else
		return smu_cmn_send_smc_msg(smu, SMU_MSG_PowerUpSdma, NULL);
}

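/**
 * smu_v12_0_set_gfx_cgpg - enable or disable GFX clock/power gating
 * @smu: smu context pointer
 * @enable: true to enable GFX CGPG, false to disable it
 *
 * A no-op when the ASIC does not advertise AMD_PG_SUPPORT_GFX_PG.
 *
 * Return: 0 on success, a negative error code on failure.
 */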
int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
{
	if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
		return 0;

	return smu_cmn_send_smc_msg_with_param(smu,
		SMU_MSG_SetGfxCGPG,
		enable ? 1 : 0,
		NULL);
}

/**
 * smu_v12_0_get_gfxoff_status - get gfxoff status
 *
 * @smu: smu context pointer
 *
 * Query the current GFXOFF state from the SMUIO_GFX_MISC_CNTL register.
 *
 * Return:
 * 0 = GFXOFF (default),
 * 1 = transition out of GFX state,
 * 2 = not in GFXOFF,
 * 3 = transition into GFXOFF.
 */
uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
{
	uint32_t reg;
	uint32_t gfxoff_status = 0;
	struct amdgpu_device *adev = smu->adev;

	reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL);
	gfxoff_status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
		>> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;

	return gfxoff_status;
}

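/**
 * smu_v12_0_gfx_off_control - allow or disallow GFXOFF
 * @smu: smu context pointer
 * @enable: true to allow GFXOFF, false to disallow it
 *
 * When disallowing, poll for up to 0.5 second until GFX is confirmed to be
 * back in the "on" state.
 *
 * Return: 0 on success, a negative error code on failure.
 */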
int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0, timeout = 500;

	if (enable) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
	} else {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);

		/* confirm gfx is back to "on" state, timeout is 0.5 second */
		while (smu_v12_0_get_gfxoff_status(smu) != 2) {
			msleep(1);
			timeout--;
			if (timeout == 0) {
				dev_err(smu->adev->dev, "timed out waiting for gfxoff to be disabled!\n");
				break;
			}
		}
	}

	return ret;
}

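/**
 * smu_v12_0_fini_smc_tables - free the driver-side copies of the SMC tables
 * @smu: smu context pointer
 *
 * Return: 0 always.
 */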
int smu_v12_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	kfree(smu_table->clocks_table);
	smu_table->clocks_table = NULL;

	kfree(smu_table->metrics_table);
	smu_table->metrics_table = NULL;

	kfree(smu_table->watermarks_table);
	smu_table->watermarks_table = NULL;

	return 0;
}

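/**
 * smu_v12_0_set_default_dpm_tables - fetch the default DPM clock table
 * @smu: smu context pointer
 *
 * Pull the DPMCLOCKS table from the SMU into the driver's clocks_table copy.
 *
 * Return: 0 on success, a negative error code on failure.
 */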
int smu_v12_0_set_default_dpm_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
}

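/**
 * smu_v12_0_mode2_reset - ask the SMU to perform a mode-2 GFX reset
 * @smu: smu context pointer
 *
 * Return: 0 on success, a negative error code on failure.
 */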
int smu_v12_0_mode2_reset(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
}

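/**
 * smu_v12_0_set_soft_freq_limited_range - set the min/max frequency range of a clock
 * @smu: smu context pointer
 * @clk_type: the clock domain to limit (GFXCLK/SCLK, FCLK/MCLK/UCLK, SOCCLK or VCLK)
 * @min: minimum frequency
 * @max: maximum frequency
 *
 * Silently succeeds when DPM is not enabled for the requested clock.
 *
 * Return: 0 on success, -EINVAL for an unsupported clock, or another
 * negative error code on failure.
 */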
int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
{
	int ret = 0;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max, NULL);
		if (ret)
			return ret;
		break;
	case SMU_FCLK:
	case SMU_MCLK:
	case SMU_UCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max, NULL);
		if (ret)
			return ret;
		break;
	case SMU_SOCCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max, NULL);
		if (ret)
			return ret;
		break;
	case SMU_VCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max, NULL);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

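/**
 * smu_v12_0_set_driver_table_location - pass the driver table address to the SMU
 * @smu: smu context pointer
 *
 * Hand the upper and lower 32 bits of the driver table's DRAM (MC) address
 * to the firmware, if the table has been allocated.
 *
 * Return: 0 on success, a negative error code on failure.
 */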
int smu_v12_0_set_driver_table_location(struct smu_context *smu)
{
	struct smu_table *driver_table = &smu->smu_table.driver_table;
	int ret = 0;

	if (driver_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
				SMU_MSG_SetDriverDramAddrHigh,
				upper_32_bits(driver_table->mc_address),
				NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
				SMU_MSG_SetDriverDramAddrLow,
				lower_32_bits(driver_table->mc_address),
				NULL);
	}

	return ret;
}

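/*
 * Query a bootup clock frequency (in 10 kHz units) from the VBIOS via the
 * getsmuclockinfo atomfirmware command table.
 */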
static int smu_v12_0_atom_get_smu_clockinfo(struct amdgpu_device *adev,
					    uint8_t clk_id,
					    uint8_t syspll_id,
					    uint32_t *clk_freq)
{
	struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
	struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
	int ret, index;

	input.clk_id = clk_id;
	input.syspll_id = syspll_id;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	*clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	return 0;
}

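/**
 * smu_v12_0_get_vbios_bootup_values - parse bootup values from the VBIOS
 * @smu: smu context pointer
 *
 * Read the firmwareinfo data table and record the bootup clocks, voltages
 * and firmware capabilities, then query the remaining bootup clock
 * frequencies through the getsmuclockinfo command table.
 *
 * Return: 0 on success, a negative error code on failure.
 */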
int smu_v12_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_1 *v_3_1;
	struct atom_firmware_info_v3_3 *v_3_3;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
				      (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		dev_err(smu->adev->dev, "unknown atom_firmware_info format revision for smu12!\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability;
		break;
	case 3:
	case 4:
	default:
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
		smu->smu_table.boot_values.firmware_caps = v_3_3->firmware_capability;
	}

	smu->smu_table.boot_values.format_revision = header->format_revision;
	smu->smu_table.boot_values.content_revision = header->content_revision;

	smu_v12_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU12_SYSPLL0_SOCCLK_ID,
					 (uint8_t)SMU12_SYSPLL0_ID,
					 &smu->smu_table.boot_values.socclk);

	smu_v12_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU12_SYSPLL1_DCFCLK_ID,
					 (uint8_t)SMU12_SYSPLL1_ID,
					 &smu->smu_table.boot_values.dcefclk);

	smu_v12_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU12_SYSPLL0_VCLK_ID,
					 (uint8_t)SMU12_SYSPLL0_ID,
					 &smu->smu_table.boot_values.vclk);

	smu_v12_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU12_SYSPLL0_DCLK_ID,
					 (uint8_t)SMU12_SYSPLL0_ID,
					 &smu->smu_table.boot_values.dclk);

	if ((smu->smu_table.boot_values.format_revision == 3) &&
	    (smu->smu_table.boot_values.content_revision >= 2))
		smu_v12_0_atom_get_smu_clockinfo(smu->adev,
						 (uint8_t)SMU12_SYSPLL3_0_FCLK_ID,
						 (uint8_t)SMU12_SYSPLL3_0_ID,
						 &smu->smu_table.boot_values.fclk);

	smu_v12_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU12_SYSPLL0_LCLK_ID,
					 (uint8_t)SMU12_SYSPLL0_ID,
					 &smu->smu_table.boot_values.lclk);

	return 0;
}