/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L3

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v12_0.h"
#include "soc15_common.h"
#include "atom.h"
#include "smu_cmn.h"

#include "asic_reg/mp/mp_12_0_0_offset.h"
#include "asic_reg/mp/mp_12_0_0_sh_mask.h"
#include "asic_reg/smuio/smuio_12_0_0_offset.h"
#include "asic_reg/smuio/smuio_12_0_0_sh_mask.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

/*
 * Because some SMU12-based ASICs use older IP offset tables, undefine this
 * register from the smuio12 header to prevent confusion down the road.
 */
#undef mmPWR_MISC_CNTL_STATUS

#define smnMP1_FIRMWARE_FLAGS                                0x3010024

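/* Check that the MP1 (SMU) firmware is up by testing its interrupts-enabled flag. */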
int smu_v12_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
		(smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
		MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

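/*
 * Compare the driver interface version against the one reported by the SMU
 * firmware and warn (without failing) on a mismatch.
 */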
int smu_v12_0_check_fw_version(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint8_t smu_program, smu_major, smu_minor, smu_debug;
	int ret = 0;

	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	smu_program = (smu_version >> 24) & 0xff;
	smu_major = (smu_version >> 16) & 0xff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;
	if (smu->is_apu)
		adev->pm.fw_version = smu_version;

	/*
	 * 1. An if_version mismatch is not critical as our firmware is
	 * designed to be backward compatible.
	 * 2. New firmware usually brings some optimizations, but those are
	 * visible only with the paired driver.
	 * Considering the above, we just leave the user a warning message
	 * instead of halting driver loading.
	 */
	if (if_version != smu->smc_driver_if_version) {
		dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			"smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
			smu->smc_driver_if_version, if_version,
			smu_program, smu_version, smu_major, smu_minor, smu_debug);
		dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
	}

	return ret;
}

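/* Power the SDMA block up or down via SMU messages (APUs only). */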
int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
{
	if (!smu->is_apu)
		return 0;

	if (gate)
		return smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownSdma, NULL);
	else
		return smu_cmn_send_smc_msg(smu, SMU_MSG_PowerUpSdma, NULL);
}

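/*
 * Enable or disable GFX CGPG via the SMU; skipped when GFX power gating is
 * unsupported or while in s0ix.
 */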
int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
{
	/* SMU12 is only implemented for the Renoir series so far, so no APU check is needed here. */
	if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) || smu->adev->in_s0ix)
		return 0;

	return smu_cmn_send_smc_msg_with_param(smu,
		SMU_MSG_SetGfxCGPG,
		enable ? 1 : 0,
		NULL);
}

/**
 * smu_v12_0_get_gfxoff_status - get gfxoff status
 *
 * @smu: smu_context pointer
 *
 * This function queries the current gfxoff status.
 *
 * Returns 0=GFXOFF(default).
 * Returns 1=Transition out of GFX State.
 * Returns 2=Not in GFXOFF.
 * Returns 3=Transition into GFXOFF.
 */
uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
{
	uint32_t reg;
	uint32_t gfxOff_Status = 0;
	struct amdgpu_device *adev = smu->adev;

	reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL);
	gfxOff_Status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
		>> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;

	return gfxOff_Status;
}

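/*
 * Allow or disallow gfxoff. When disallowing, poll until the GFX block
 * reports it is back on (status 2), with a 0.5 second timeout.
 */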
int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0, timeout = 500;

	if (enable) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);

	} else {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);

		/* confirm gfx is back to the "on" state, timeout is 0.5 second */
		while (smu_v12_0_get_gfxoff_status(smu) != 2) {
			msleep(1);
			timeout--;
			if (timeout == 0) {
				DRM_ERROR("timed out waiting for gfxoff to be disabled!\n");
				break;
			}
		}
	}

	return ret;
}

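/* Free the driver-side SMU table allocations (clocks, metrics, watermarks, gpu metrics). */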
int smu_v12_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	kfree(smu_table->clocks_table);
	smu_table->clocks_table = NULL;

	kfree(smu_table->metrics_table);
	smu_table->metrics_table = NULL;

	kfree(smu_table->watermarks_table);
	smu_table->watermarks_table = NULL;

	kfree(smu_table->gpu_metrics_table);
	smu_table->gpu_metrics_table = NULL;

	return 0;
}

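/* Populate the default DPM clock table by reading SMU_TABLE_DPMCLOCKS back from the SMU. */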
int smu_v12_0_set_default_dpm_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
}

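/* Ask the SMU to perform a mode 2 (GFX device driver) reset. */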
int smu_v12_0_mode2_reset(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
}

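/*
 * Clamp the given clock domain to [min, max] by sending the matching
 * hard-min and soft-max messages for that clock type.
 */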
int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
{
	int ret = 0;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max, NULL);
		if (ret)
			return ret;
		break;
	case SMU_FCLK:
	case SMU_MCLK:
	case SMU_UCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max, NULL);
		if (ret)
			return ret;
		break;
	case SMU_SOCCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max, NULL);
		if (ret)
			return ret;
		break;
	case SMU_VCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max, NULL);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

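/* Program the high/low 32 bits of the driver table DRAM address into the SMU. */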
int smu_v12_0_set_driver_table_location(struct smu_context *smu)
{
	struct smu_table *driver_table = &smu->smu_table.driver_table;
	int ret = 0;

	if (driver_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
				SMU_MSG_SetDriverDramAddrHigh,
				upper_32_bits(driver_table->mc_address),
				NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
				SMU_MSG_SetDriverDramAddrLow,
				lower_32_bits(driver_table->mc_address),
				NULL);
	}

	return ret;
}

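/*
 * Query a bootup clock frequency (in 10 kHz units) from the VBIOS via the
 * getsmuclockinfo atomfirmware command table.
 */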
static int smu_v12_0_atom_get_smu_clockinfo(struct amdgpu_device *adev,
					    uint8_t clk_id,
					    uint8_t syspll_id,
					    uint32_t *clk_freq)
{
	struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
	struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
	int ret, index;

	input.clk_id = clk_id;
	input.syspll_id = syspll_id;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	*clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	return 0;
}

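/*
 * Read the bootup clocks, voltages and firmware capabilities from the VBIOS
 * firmwareinfo table and cache them in smu->smu_table.boot_values.
 */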
int smu_v12_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_1 *v_3_1;
	struct atom_firmware_info_v3_3 *v_3_3;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
				      (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		dev_err(smu->adev->dev, "unknown atom_firmware_info version for smu12!\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability;
		break;
	case 3:
	case 4:
	default:
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
		smu->smu_table.boot_values.firmware_caps = v_3_3->firmware_capability;
	}

	smu->smu_table.boot_values.format_revision = header->format_revision;
	smu->smu_table.boot_values.content_revision = header->content_revision;

	smu_v12_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU12_SYSPLL0_SOCCLK_ID,
					 (uint8_t)SMU12_SYSPLL0_ID,
					 &smu->smu_table.boot_values.socclk);

	smu_v12_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU12_SYSPLL1_DCFCLK_ID,
					 (uint8_t)SMU12_SYSPLL1_ID,
					 &smu->smu_table.boot_values.dcefclk);

	smu_v12_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU12_SYSPLL0_VCLK_ID,
					 (uint8_t)SMU12_SYSPLL0_ID,
					 &smu->smu_table.boot_values.vclk);

	smu_v12_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU12_SYSPLL0_DCLK_ID,
					 (uint8_t)SMU12_SYSPLL0_ID,
					 &smu->smu_table.boot_values.dclk);

	if ((smu->smu_table.boot_values.format_revision == 3) &&
	    (smu->smu_table.boot_values.content_revision >= 2))
		smu_v12_0_atom_get_smu_clockinfo(smu->adev,
						 (uint8_t)SMU12_SYSPLL3_0_FCLK_ID,
						 (uint8_t)SMU12_SYSPLL3_0_ID,
						 &smu->smu_table.boot_values.fclk);

	smu_v12_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU12_SYSPLL0_LCLK_ID,
					 (uint8_t)SMU12_SYSPLL0_ID,
					 &smu->smu_table.boot_values.lclk);

	return 0;
}