/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "amdgpu_atombios.h"
#include "atom.h"
#include "amd_pcie.h"
#include "legacy_dpm.h"
#include "amdgpu_dpm_internal.h"
#include "amdgpu_display.h"

#define amdgpu_dpm_pre_set_power_state(adev) \
		((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle))

#define amdgpu_dpm_post_set_power_state(adev) \
		((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle))

#define amdgpu_dpm_display_configuration_changed(adev) \
		((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle))

#define amdgpu_dpm_print_power_state(adev, ps) \
		((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps)))

#define amdgpu_dpm_vblank_too_short(adev) \
		((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle))

#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
		((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))

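/**
 * amdgpu_dpm_print_class_info - dump a power state's classification flags
 * @class: ATOM_PPLIB classification flags (UI class plus internal bits)
 * @class2: second set of ATOM_PPLIB classification flags
 *
 * Decodes the UI class and each internal classification bit and prints
 * them to the kernel log for debugging.
 */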
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	const char *s;

	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		s = "none";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		s = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		s = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		s = "performance";
		break;
	}
	printk("\tui class: %s\n", s);
	printk("\tinternal class:");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		pr_cont(" none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			pr_cont(" boot");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			pr_cont(" thermal");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			pr_cont(" limited_pwr");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			pr_cont(" rest");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			pr_cont(" forced");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			pr_cont(" 3d_perf");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			pr_cont(" ovrdrv");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			pr_cont(" uvd");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			pr_cont(" 3d_low");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			pr_cont(" acpi");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			pr_cont(" uvd_hd2");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			pr_cont(" uvd_hd");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			pr_cont(" uvd_sd");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			pr_cont(" limited_pwr2");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			pr_cont(" ulv");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			pr_cont(" uvd_mvc");
	}
	pr_cont("\n");
}

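/**
 * amdgpu_dpm_print_cap_info - dump a power state's capability flags
 * @caps: ATOM_PPLIB capability flags of the power state
 *
 * Prints the single-display, video-playback, and disallow-on-DC
 * capability bits to the kernel log for debugging.
 */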
void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps:");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		pr_cont(" single_disp");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		pr_cont(" video");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		pr_cont(" no_dc");
	pr_cont("\n");
}

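/**
 * amdgpu_dpm_print_ps_status - dump a power state's current role
 * @adev: amdgpu device pointer
 * @rps: power state to inspect
 *
 * Marks the state as (c)urrent, (r)equested, and/or (b)oot in the kernel
 * log, depending on which dpm slots it currently occupies.
 */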
void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus:");
	if (rps == adev->pm.dpm.current_ps)
		pr_cont(" c");
	if (rps == adev->pm.dpm.requested_ps)
		pr_cont(" r");
	if (rps == adev->pm.dpm.boot_ps)
		pr_cont(" b");
	pr_cont("\n");
}

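/**
 * amdgpu_pm_print_power_states - dump all parsed power states
 * @adev: amdgpu device pointer
 *
 * Walks the power state array and prints each state via the
 * ASIC-specific print_power_state callback, if one is provided.
 */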
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (adev->powerplay.pp_funcs->print_power_state == NULL)
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

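/**
 * amdgpu_get_platform_caps - fetch platform power caps from the vbios
 * @adev: amdgpu device pointer
 *
 * Locates the PowerPlayInfo data table in the ATOM BIOS and caches the
 * platform capability flags and the backbias/voltage response times.
 *
 * Returns 0 on success, -EINVAL if the table header cannot be parsed.
 */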
int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

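/**
 * amdgpu_parse_clk_voltage_dep_table - copy a clock/voltage dependency table
 * @amdgpu_table: driver-side table to fill in
 * @atom_table: packed table as stored in the vbios
 *
 * Allocates @amdgpu_table->entries and converts each little-endian vbios
 * record (split 16+8 bit clock, 16 bit voltage) into the driver format.
 *
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */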
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

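/**
 * amdgpu_parse_extended_power_table - parse the optional powerplay tables
 * @adev: amdgpu device pointer
 *
 * Parses the fan table, the clock/voltage dependency and phase shedding
 * tables, the CAC leakage data, and the extended-header tables (VCE, UVD,
 * SAMU, PPM, ACP, PowerTune, vddgfx) from the vbios PowerPlayInfo table,
 * depending on the table revision advertised by its size fields.
 *
 * Returns 0 on success, -EINVAL if the table header cannot be parsed,
 * -ENOMEM if an allocation fails.
 */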
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kcalloc(psl->ucNumEntries,
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
				return -ENOMEM;

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);

			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries)
				return -ENOMEM;
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
			ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);

			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
				return -ENOMEM;
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			adev->pm.dpm.num_of_vce_states =
					states->numEntries > AMD_MAX_VCE_LEVELS ?
					AMD_MAX_VCE_LEVELS : states->numEntries;
			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
			ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);

			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
				return -ENOMEM;
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
			ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);

			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
				return -ENOMEM;
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table)
				return -ENOMEM;
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
			ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);

			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
				return -ENOMEM;
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
			ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;

			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table)
				return -ENOMEM;
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
				ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret)
				return ret;
		}
	}

	return 0;
}

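/**
 * amdgpu_free_extended_power_table - free tables parsed from the vbios
 * @adev: amdgpu device pointer
 *
 * Releases all dynamic state allocated by amdgpu_parse_extended_power_table().
 */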
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

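/**
 * amdgpu_add_thermal_controller - register the board's thermal controller
 * @adev: amdgpu device pointer
 *
 * Reads the thermal controller entry of the vbios powerplay table, records
 * the fan properties and internal thermal controller type, and, for
 * supported external controllers, registers an i2c device on the bus the
 * controller is attached to.
 */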
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];

				info.addr = controller->ucI2cAddress >> 1;
				strscpy(info.type, name, sizeof(info.type));
				i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

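/**
 * amdgpu_get_vce_clock_state - look up a VCE clock state
 * @handle: amdgpu device pointer
 * @idx: index into the VCE state array
 *
 * Returns the requested VCE clock state, or NULL if @idx is out of range.
 */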
struct amd_vce_state *amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (idx < adev->pm.dpm.num_of_vce_states)
		return &adev->pm.dpm.vce_states[idx];

	return NULL;
}

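/**
 * amdgpu_dpm_pick_power_state - select the best matching power state
 * @adev: amdgpu device pointer
 * @dpm_state: requested power state type
 *
 * Scans the parsed power states for one whose classification matches the
 * request, honoring single-display-only states and falling back through
 * progressively more generic state types when no exact match exists.
 *
 * Returns the chosen state, or NULL if nothing matches.
 */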
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = adev->pm.dpm.new_active_crtc_count < 2;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older ASICs have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

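/**
 * amdgpu_dpm_change_power_state_locked - switch to the requested power state
 * @adev: amdgpu device pointer
 *
 * Picks a power state for the current conditions and, unless it is equal
 * to the current one, programs it through the ASIC callbacks, then
 * reapplies the forced performance level (forcing low while thermally
 * throttled).
 *
 * Returns 0 on success or if dpm is disabled, negative error code on failure.
 */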
static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal = false;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return 0;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return -EINVAL;

	if (amdgpu_dpm == 1 && pp_funcs->print_power_state) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;
	if (pp_funcs->display_configuration_changed)
		amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return ret;

	if (pp_funcs->check_state_equal) {
		if (amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps,
						 adev->pm.dpm.requested_ps, &equal))
			equal = false;
	}

	if (equal)
		return 0;

	if (pp_funcs->set_power_state)
		pp_funcs->set_power_state(adev->powerplay.pp_handle);

	amdgpu_dpm_post_set_power_state(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (pp_funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;

			/* force low perf level for thermal */
			pp_funcs->force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			pp_funcs->force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}

	return 0;
}

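/**
 * amdgpu_legacy_dpm_compute_clocks - re-evaluate clocks for display changes
 * @handle: amdgpu device pointer
 *
 * Refreshes the active display information and re-runs power state
 * selection so clocks match the current display configuration.
 */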
void amdgpu_legacy_dpm_compute_clocks(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dpm_get_active_displays(adev);

	amdgpu_dpm_change_power_state_locked(adev);
}

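/**
 * amdgpu_dpm_thermal_work_handler - thermal interrupt worker
 * @work: work struct embedded in the amdgpu device
 *
 * Reads the current GPU temperature and switches into (or out of) the
 * internal thermal power state, then recomputes clocks accordingly.
 */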
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
	int temp, size = sizeof(temp);

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->read_sensor(adev->powerplay.pp_handle,
				   AMDGPU_PP_SENSOR_GPU_TEMP,
				   (void *)&temp,
				   &size)) {
		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}

	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;

	adev->pm.dpm.state = dpm_state;

	amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle);
}