xref: /openbmc/linux/drivers/gpu/drm/amd/pm/amdgpu_dpm.c (revision 79c65f3f)
1e098bc96SEvan Quan /*
2e098bc96SEvan Quan  * Copyright 2011 Advanced Micro Devices, Inc.
3e098bc96SEvan Quan  *
4e098bc96SEvan Quan  * Permission is hereby granted, free of charge, to any person obtaining a
5e098bc96SEvan Quan  * copy of this software and associated documentation files (the "Software"),
6e098bc96SEvan Quan  * to deal in the Software without restriction, including without limitation
7e098bc96SEvan Quan  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8e098bc96SEvan Quan  * and/or sell copies of the Software, and to permit persons to whom the
9e098bc96SEvan Quan  * Software is furnished to do so, subject to the following conditions:
10e098bc96SEvan Quan  *
11e098bc96SEvan Quan  * The above copyright notice and this permission notice shall be included in
12e098bc96SEvan Quan  * all copies or substantial portions of the Software.
13e098bc96SEvan Quan  *
14e098bc96SEvan Quan  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15e098bc96SEvan Quan  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16e098bc96SEvan Quan  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17e098bc96SEvan Quan  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18e098bc96SEvan Quan  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19e098bc96SEvan Quan  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20e098bc96SEvan Quan  * OTHER DEALINGS IN THE SOFTWARE.
21e098bc96SEvan Quan  *
22e098bc96SEvan Quan  * Authors: Alex Deucher
23e098bc96SEvan Quan  */
24e098bc96SEvan Quan 
25e098bc96SEvan Quan #include "amdgpu.h"
26e098bc96SEvan Quan #include "amdgpu_atombios.h"
27e098bc96SEvan Quan #include "amdgpu_i2c.h"
28e098bc96SEvan Quan #include "amdgpu_dpm.h"
29e098bc96SEvan Quan #include "atom.h"
30e098bc96SEvan Quan #include "amd_pcie.h"
31e098bc96SEvan Quan #include "amdgpu_display.h"
32e098bc96SEvan Quan #include "hwmgr.h"
33e098bc96SEvan Quan #include <linux/power_supply.h>
34e098bc96SEvan Quan 
35e098bc96SEvan Quan #define WIDTH_4K 3840
36e098bc96SEvan Quan 
37e098bc96SEvan Quan void amdgpu_dpm_print_class_info(u32 class, u32 class2)
38e098bc96SEvan Quan {
39e098bc96SEvan Quan 	const char *s;
40e098bc96SEvan Quan 
41e098bc96SEvan Quan 	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
42e098bc96SEvan Quan 	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
43e098bc96SEvan Quan 	default:
44e098bc96SEvan Quan 		s = "none";
45e098bc96SEvan Quan 		break;
46e098bc96SEvan Quan 	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
47e098bc96SEvan Quan 		s = "battery";
48e098bc96SEvan Quan 		break;
49e098bc96SEvan Quan 	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
50e098bc96SEvan Quan 		s = "balanced";
51e098bc96SEvan Quan 		break;
52e098bc96SEvan Quan 	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
53e098bc96SEvan Quan 		s = "performance";
54e098bc96SEvan Quan 		break;
55e098bc96SEvan Quan 	}
56e098bc96SEvan Quan 	printk("\tui class: %s\n", s);
57e098bc96SEvan Quan 	printk("\tinternal class:");
58e098bc96SEvan Quan 	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
59e098bc96SEvan Quan 	    (class2 == 0))
60e098bc96SEvan Quan 		pr_cont(" none");
61e098bc96SEvan Quan 	else {
62e098bc96SEvan Quan 		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
63e098bc96SEvan Quan 			pr_cont(" boot");
64e098bc96SEvan Quan 		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
65e098bc96SEvan Quan 			pr_cont(" thermal");
66e098bc96SEvan Quan 		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
67e098bc96SEvan Quan 			pr_cont(" limited_pwr");
68e098bc96SEvan Quan 		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
69e098bc96SEvan Quan 			pr_cont(" rest");
70e098bc96SEvan Quan 		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
71e098bc96SEvan Quan 			pr_cont(" forced");
72e098bc96SEvan Quan 		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
73e098bc96SEvan Quan 			pr_cont(" 3d_perf");
74e098bc96SEvan Quan 		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
75e098bc96SEvan Quan 			pr_cont(" ovrdrv");
76e098bc96SEvan Quan 		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
77e098bc96SEvan Quan 			pr_cont(" uvd");
78e098bc96SEvan Quan 		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
79e098bc96SEvan Quan 			pr_cont(" 3d_low");
80e098bc96SEvan Quan 		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
81e098bc96SEvan Quan 			pr_cont(" acpi");
82e098bc96SEvan Quan 		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
83e098bc96SEvan Quan 			pr_cont(" uvd_hd2");
84e098bc96SEvan Quan 		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
85e098bc96SEvan Quan 			pr_cont(" uvd_hd");
86e098bc96SEvan Quan 		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
87e098bc96SEvan Quan 			pr_cont(" uvd_sd");
88e098bc96SEvan Quan 		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
89e098bc96SEvan Quan 			pr_cont(" limited_pwr2");
90e098bc96SEvan Quan 		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
91e098bc96SEvan Quan 			pr_cont(" ulv");
92e098bc96SEvan Quan 		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
93e098bc96SEvan Quan 			pr_cont(" uvd_mvc");
94e098bc96SEvan Quan 	}
95e098bc96SEvan Quan 	pr_cont("\n");
96e098bc96SEvan Quan }
97e098bc96SEvan Quan 
98e098bc96SEvan Quan void amdgpu_dpm_print_cap_info(u32 caps)
99e098bc96SEvan Quan {
100e098bc96SEvan Quan 	printk("\tcaps:");
101e098bc96SEvan Quan 	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
102e098bc96SEvan Quan 		pr_cont(" single_disp");
103e098bc96SEvan Quan 	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
104e098bc96SEvan Quan 		pr_cont(" video");
105e098bc96SEvan Quan 	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
106e098bc96SEvan Quan 		pr_cont(" no_dc");
107e098bc96SEvan Quan 	pr_cont("\n");
108e098bc96SEvan Quan }
109e098bc96SEvan Quan 
110e098bc96SEvan Quan void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
111e098bc96SEvan Quan 				struct amdgpu_ps *rps)
112e098bc96SEvan Quan {
113e098bc96SEvan Quan 	printk("\tstatus:");
114e098bc96SEvan Quan 	if (rps == adev->pm.dpm.current_ps)
115e098bc96SEvan Quan 		pr_cont(" c");
116e098bc96SEvan Quan 	if (rps == adev->pm.dpm.requested_ps)
117e098bc96SEvan Quan 		pr_cont(" r");
118e098bc96SEvan Quan 	if (rps == adev->pm.dpm.boot_ps)
119e098bc96SEvan Quan 		pr_cont(" b");
120e098bc96SEvan Quan 	pr_cont("\n");
121e098bc96SEvan Quan }
122e098bc96SEvan Quan 
123e098bc96SEvan Quan void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
124e098bc96SEvan Quan {
1254a580877SLuben Tuikov 	struct drm_device *ddev = adev_to_drm(adev);
126e098bc96SEvan Quan 	struct drm_crtc *crtc;
127e098bc96SEvan Quan 	struct amdgpu_crtc *amdgpu_crtc;
128e098bc96SEvan Quan 
129e098bc96SEvan Quan 	adev->pm.dpm.new_active_crtcs = 0;
130e098bc96SEvan Quan 	adev->pm.dpm.new_active_crtc_count = 0;
131e098bc96SEvan Quan 	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
132e098bc96SEvan Quan 		list_for_each_entry(crtc,
133e098bc96SEvan Quan 				    &ddev->mode_config.crtc_list, head) {
134e098bc96SEvan Quan 			amdgpu_crtc = to_amdgpu_crtc(crtc);
135e098bc96SEvan Quan 			if (amdgpu_crtc->enabled) {
136e098bc96SEvan Quan 				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
137e098bc96SEvan Quan 				adev->pm.dpm.new_active_crtc_count++;
138e098bc96SEvan Quan 			}
139e098bc96SEvan Quan 		}
140e098bc96SEvan Quan 	}
141e098bc96SEvan Quan }
142e098bc96SEvan Quan 
143e098bc96SEvan Quan 
144e098bc96SEvan Quan u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
145e098bc96SEvan Quan {
1464a580877SLuben Tuikov 	struct drm_device *dev = adev_to_drm(adev);
147e098bc96SEvan Quan 	struct drm_crtc *crtc;
148e098bc96SEvan Quan 	struct amdgpu_crtc *amdgpu_crtc;
149e098bc96SEvan Quan 	u32 vblank_in_pixels;
150e098bc96SEvan Quan 	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
151e098bc96SEvan Quan 
152e098bc96SEvan Quan 	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
153e098bc96SEvan Quan 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
154e098bc96SEvan Quan 			amdgpu_crtc = to_amdgpu_crtc(crtc);
155e098bc96SEvan Quan 			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
156e098bc96SEvan Quan 				vblank_in_pixels =
157e098bc96SEvan Quan 					amdgpu_crtc->hw_mode.crtc_htotal *
158e098bc96SEvan Quan 					(amdgpu_crtc->hw_mode.crtc_vblank_end -
159e098bc96SEvan Quan 					amdgpu_crtc->hw_mode.crtc_vdisplay +
160e098bc96SEvan Quan 					(amdgpu_crtc->v_border * 2));
161e098bc96SEvan Quan 
162e098bc96SEvan Quan 				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
163e098bc96SEvan Quan 				break;
164e098bc96SEvan Quan 			}
165e098bc96SEvan Quan 		}
166e098bc96SEvan Quan 	}
167e098bc96SEvan Quan 
168e098bc96SEvan Quan 	return vblank_time_us;
169e098bc96SEvan Quan }
170e098bc96SEvan Quan 
171e098bc96SEvan Quan u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
172e098bc96SEvan Quan {
1734a580877SLuben Tuikov 	struct drm_device *dev = adev_to_drm(adev);
174e098bc96SEvan Quan 	struct drm_crtc *crtc;
175e098bc96SEvan Quan 	struct amdgpu_crtc *amdgpu_crtc;
176e098bc96SEvan Quan 	u32 vrefresh = 0;
177e098bc96SEvan Quan 
178e098bc96SEvan Quan 	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
179e098bc96SEvan Quan 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
180e098bc96SEvan Quan 			amdgpu_crtc = to_amdgpu_crtc(crtc);
181e098bc96SEvan Quan 			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
182e098bc96SEvan Quan 				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
183e098bc96SEvan Quan 				break;
184e098bc96SEvan Quan 			}
185e098bc96SEvan Quan 		}
186e098bc96SEvan Quan 	}
187e098bc96SEvan Quan 
188e098bc96SEvan Quan 	return vrefresh;
189e098bc96SEvan Quan }
190e098bc96SEvan Quan 
191e098bc96SEvan Quan bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
192e098bc96SEvan Quan {
193e098bc96SEvan Quan 	switch (sensor) {
194e098bc96SEvan Quan 	case THERMAL_TYPE_RV6XX:
195e098bc96SEvan Quan 	case THERMAL_TYPE_RV770:
196e098bc96SEvan Quan 	case THERMAL_TYPE_EVERGREEN:
197e098bc96SEvan Quan 	case THERMAL_TYPE_SUMO:
198e098bc96SEvan Quan 	case THERMAL_TYPE_NI:
199e098bc96SEvan Quan 	case THERMAL_TYPE_SI:
200e098bc96SEvan Quan 	case THERMAL_TYPE_CI:
201e098bc96SEvan Quan 	case THERMAL_TYPE_KV:
202e098bc96SEvan Quan 		return true;
203e098bc96SEvan Quan 	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
204e098bc96SEvan Quan 	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
205e098bc96SEvan Quan 		return false; /* need special handling */
206e098bc96SEvan Quan 	case THERMAL_TYPE_NONE:
207e098bc96SEvan Quan 	case THERMAL_TYPE_EXTERNAL:
208e098bc96SEvan Quan 	case THERMAL_TYPE_EXTERNAL_GPIO:
209e098bc96SEvan Quan 	default:
210e098bc96SEvan Quan 		return false;
211e098bc96SEvan Quan 	}
212e098bc96SEvan Quan }
213e098bc96SEvan Quan 
/* Overlay of every ATOM PowerPlay table revision; which member is valid is
 * determined at runtime from the table size/revision parsed out of the VBIOS. */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};
224e098bc96SEvan Quan 
/* Overlay of the ATOM fan table formats; the valid member is chosen by the
 * ucFanTableFormat field of the common header. */
union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};
230e098bc96SEvan Quan 
231e098bc96SEvan Quan static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
232e098bc96SEvan Quan 					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
233e098bc96SEvan Quan {
234e098bc96SEvan Quan 	u32 size = atom_table->ucNumEntries *
235e098bc96SEvan Quan 		sizeof(struct amdgpu_clock_voltage_dependency_entry);
236e098bc96SEvan Quan 	int i;
237e098bc96SEvan Quan 	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
238e098bc96SEvan Quan 
239e098bc96SEvan Quan 	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
240e098bc96SEvan Quan 	if (!amdgpu_table->entries)
241e098bc96SEvan Quan 		return -ENOMEM;
242e098bc96SEvan Quan 
243e098bc96SEvan Quan 	entry = &atom_table->entries[0];
244e098bc96SEvan Quan 	for (i = 0; i < atom_table->ucNumEntries; i++) {
245e098bc96SEvan Quan 		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
246e098bc96SEvan Quan 			(entry->ucClockHigh << 16);
247e098bc96SEvan Quan 		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
248e098bc96SEvan Quan 		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
249e098bc96SEvan Quan 			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
250e098bc96SEvan Quan 	}
251e098bc96SEvan Quan 	amdgpu_table->count = atom_table->ucNumEntries;
252e098bc96SEvan Quan 
253e098bc96SEvan Quan 	return 0;
254e098bc96SEvan Quan }
255e098bc96SEvan Quan 
256e098bc96SEvan Quan int amdgpu_get_platform_caps(struct amdgpu_device *adev)
257e098bc96SEvan Quan {
258e098bc96SEvan Quan 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
259e098bc96SEvan Quan 	union power_info *power_info;
260e098bc96SEvan Quan 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
261e098bc96SEvan Quan 	u16 data_offset;
262e098bc96SEvan Quan 	u8 frev, crev;
263e098bc96SEvan Quan 
264e098bc96SEvan Quan 	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
265e098bc96SEvan Quan 				   &frev, &crev, &data_offset))
266e098bc96SEvan Quan 		return -EINVAL;
267e098bc96SEvan Quan 	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
268e098bc96SEvan Quan 
269e098bc96SEvan Quan 	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
270e098bc96SEvan Quan 	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
271e098bc96SEvan Quan 	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
272e098bc96SEvan Quan 
273e098bc96SEvan Quan 	return 0;
274e098bc96SEvan Quan }
275e098bc96SEvan Quan 
276e098bc96SEvan Quan /* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
277e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
278e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
279e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
280e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
281e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
282e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
283e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
284e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
285e098bc96SEvan Quan 
286e098bc96SEvan Quan int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
287e098bc96SEvan Quan {
288e098bc96SEvan Quan 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
289e098bc96SEvan Quan 	union power_info *power_info;
290e098bc96SEvan Quan 	union fan_info *fan_info;
291e098bc96SEvan Quan 	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
292e098bc96SEvan Quan 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
293e098bc96SEvan Quan 	u16 data_offset;
294e098bc96SEvan Quan 	u8 frev, crev;
295e098bc96SEvan Quan 	int ret, i;
296e098bc96SEvan Quan 
297e098bc96SEvan Quan 	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
298e098bc96SEvan Quan 				   &frev, &crev, &data_offset))
299e098bc96SEvan Quan 		return -EINVAL;
300e098bc96SEvan Quan 	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
301e098bc96SEvan Quan 
302e098bc96SEvan Quan 	/* fan table */
303e098bc96SEvan Quan 	if (le16_to_cpu(power_info->pplib.usTableSize) >=
304e098bc96SEvan Quan 	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
305e098bc96SEvan Quan 		if (power_info->pplib3.usFanTableOffset) {
306e098bc96SEvan Quan 			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
307e098bc96SEvan Quan 						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
308e098bc96SEvan Quan 			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
309e098bc96SEvan Quan 			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
310e098bc96SEvan Quan 			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
311e098bc96SEvan Quan 			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
312e098bc96SEvan Quan 			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
313e098bc96SEvan Quan 			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
314e098bc96SEvan Quan 			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
315e098bc96SEvan Quan 			if (fan_info->fan.ucFanTableFormat >= 2)
316e098bc96SEvan Quan 				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
317e098bc96SEvan Quan 			else
318e098bc96SEvan Quan 				adev->pm.dpm.fan.t_max = 10900;
319e098bc96SEvan Quan 			adev->pm.dpm.fan.cycle_delay = 100000;
320e098bc96SEvan Quan 			if (fan_info->fan.ucFanTableFormat >= 3) {
321e098bc96SEvan Quan 				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
322e098bc96SEvan Quan 				adev->pm.dpm.fan.default_max_fan_pwm =
323e098bc96SEvan Quan 					le16_to_cpu(fan_info->fan3.usFanPWMMax);
324e098bc96SEvan Quan 				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
325e098bc96SEvan Quan 				adev->pm.dpm.fan.fan_output_sensitivity =
326e098bc96SEvan Quan 					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
327e098bc96SEvan Quan 			}
328e098bc96SEvan Quan 			adev->pm.dpm.fan.ucode_fan_control = true;
329e098bc96SEvan Quan 		}
330e098bc96SEvan Quan 	}
331e098bc96SEvan Quan 
332e098bc96SEvan Quan 	/* clock dependancy tables, shedding tables */
333e098bc96SEvan Quan 	if (le16_to_cpu(power_info->pplib.usTableSize) >=
334e098bc96SEvan Quan 	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
335e098bc96SEvan Quan 		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
336e098bc96SEvan Quan 			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
337e098bc96SEvan Quan 				(mode_info->atom_context->bios + data_offset +
338e098bc96SEvan Quan 				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
339e098bc96SEvan Quan 			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
340e098bc96SEvan Quan 								 dep_table);
341e098bc96SEvan Quan 			if (ret) {
342e098bc96SEvan Quan 				amdgpu_free_extended_power_table(adev);
343e098bc96SEvan Quan 				return ret;
344e098bc96SEvan Quan 			}
345e098bc96SEvan Quan 		}
346e098bc96SEvan Quan 		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
347e098bc96SEvan Quan 			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
348e098bc96SEvan Quan 				(mode_info->atom_context->bios + data_offset +
349e098bc96SEvan Quan 				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
350e098bc96SEvan Quan 			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
351e098bc96SEvan Quan 								 dep_table);
352e098bc96SEvan Quan 			if (ret) {
353e098bc96SEvan Quan 				amdgpu_free_extended_power_table(adev);
354e098bc96SEvan Quan 				return ret;
355e098bc96SEvan Quan 			}
356e098bc96SEvan Quan 		}
357e098bc96SEvan Quan 		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
358e098bc96SEvan Quan 			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
359e098bc96SEvan Quan 				(mode_info->atom_context->bios + data_offset +
360e098bc96SEvan Quan 				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
361e098bc96SEvan Quan 			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
362e098bc96SEvan Quan 								 dep_table);
363e098bc96SEvan Quan 			if (ret) {
364e098bc96SEvan Quan 				amdgpu_free_extended_power_table(adev);
365e098bc96SEvan Quan 				return ret;
366e098bc96SEvan Quan 			}
367e098bc96SEvan Quan 		}
368e098bc96SEvan Quan 		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
369e098bc96SEvan Quan 			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
370e098bc96SEvan Quan 				(mode_info->atom_context->bios + data_offset +
371e098bc96SEvan Quan 				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
372e098bc96SEvan Quan 			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
373e098bc96SEvan Quan 								 dep_table);
374e098bc96SEvan Quan 			if (ret) {
375e098bc96SEvan Quan 				amdgpu_free_extended_power_table(adev);
376e098bc96SEvan Quan 				return ret;
377e098bc96SEvan Quan 			}
378e098bc96SEvan Quan 		}
379e098bc96SEvan Quan 		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
380e098bc96SEvan Quan 			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
381e098bc96SEvan Quan 				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
382e098bc96SEvan Quan 				(mode_info->atom_context->bios + data_offset +
383e098bc96SEvan Quan 				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
384e098bc96SEvan Quan 			if (clk_v->ucNumEntries) {
385e098bc96SEvan Quan 				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
386e098bc96SEvan Quan 					le16_to_cpu(clk_v->entries[0].usSclkLow) |
387e098bc96SEvan Quan 					(clk_v->entries[0].ucSclkHigh << 16);
388e098bc96SEvan Quan 				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
389e098bc96SEvan Quan 					le16_to_cpu(clk_v->entries[0].usMclkLow) |
390e098bc96SEvan Quan 					(clk_v->entries[0].ucMclkHigh << 16);
391e098bc96SEvan Quan 				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
392e098bc96SEvan Quan 					le16_to_cpu(clk_v->entries[0].usVddc);
393e098bc96SEvan Quan 				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
394e098bc96SEvan Quan 					le16_to_cpu(clk_v->entries[0].usVddci);
395e098bc96SEvan Quan 			}
396e098bc96SEvan Quan 		}
397e098bc96SEvan Quan 		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
398e098bc96SEvan Quan 			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
399e098bc96SEvan Quan 				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
400e098bc96SEvan Quan 				(mode_info->atom_context->bios + data_offset +
401e098bc96SEvan Quan 				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
402e098bc96SEvan Quan 			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
403e098bc96SEvan Quan 
404e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
405e098bc96SEvan Quan 				kcalloc(psl->ucNumEntries,
406e098bc96SEvan Quan 					sizeof(struct amdgpu_phase_shedding_limits_entry),
407e098bc96SEvan Quan 					GFP_KERNEL);
408e098bc96SEvan Quan 			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
409e098bc96SEvan Quan 				amdgpu_free_extended_power_table(adev);
410e098bc96SEvan Quan 				return -ENOMEM;
411e098bc96SEvan Quan 			}
412e098bc96SEvan Quan 
413e098bc96SEvan Quan 			entry = &psl->entries[0];
414e098bc96SEvan Quan 			for (i = 0; i < psl->ucNumEntries; i++) {
415e098bc96SEvan Quan 				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
416e098bc96SEvan Quan 					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
417e098bc96SEvan Quan 				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
418e098bc96SEvan Quan 					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
419e098bc96SEvan Quan 				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
420e098bc96SEvan Quan 					le16_to_cpu(entry->usVoltage);
421e098bc96SEvan Quan 				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
422e098bc96SEvan Quan 					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
423e098bc96SEvan Quan 			}
424e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
425e098bc96SEvan Quan 				psl->ucNumEntries;
426e098bc96SEvan Quan 		}
427e098bc96SEvan Quan 	}
428e098bc96SEvan Quan 
429e098bc96SEvan Quan 	/* cac data */
430e098bc96SEvan Quan 	if (le16_to_cpu(power_info->pplib.usTableSize) >=
431e098bc96SEvan Quan 	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
432e098bc96SEvan Quan 		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
433e098bc96SEvan Quan 		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
434e098bc96SEvan Quan 		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
435e098bc96SEvan Quan 		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
436e098bc96SEvan Quan 		if (adev->pm.dpm.tdp_od_limit)
437e098bc96SEvan Quan 			adev->pm.dpm.power_control = true;
438e098bc96SEvan Quan 		else
439e098bc96SEvan Quan 			adev->pm.dpm.power_control = false;
440e098bc96SEvan Quan 		adev->pm.dpm.tdp_adjustment = 0;
441e098bc96SEvan Quan 		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
442e098bc96SEvan Quan 		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
443e098bc96SEvan Quan 		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
444e098bc96SEvan Quan 		if (power_info->pplib5.usCACLeakageTableOffset) {
445e098bc96SEvan Quan 			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
446e098bc96SEvan Quan 				(ATOM_PPLIB_CAC_Leakage_Table *)
447e098bc96SEvan Quan 				(mode_info->atom_context->bios + data_offset +
448e098bc96SEvan Quan 				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
449e098bc96SEvan Quan 			ATOM_PPLIB_CAC_Leakage_Record *entry;
450e098bc96SEvan Quan 			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
451e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
452e098bc96SEvan Quan 			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
453e098bc96SEvan Quan 				amdgpu_free_extended_power_table(adev);
454e098bc96SEvan Quan 				return -ENOMEM;
455e098bc96SEvan Quan 			}
456e098bc96SEvan Quan 			entry = &cac_table->entries[0];
457e098bc96SEvan Quan 			for (i = 0; i < cac_table->ucNumEntries; i++) {
458e098bc96SEvan Quan 				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
459e098bc96SEvan Quan 					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
460e098bc96SEvan Quan 						le16_to_cpu(entry->usVddc1);
461e098bc96SEvan Quan 					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
462e098bc96SEvan Quan 						le16_to_cpu(entry->usVddc2);
463e098bc96SEvan Quan 					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
464e098bc96SEvan Quan 						le16_to_cpu(entry->usVddc3);
465e098bc96SEvan Quan 				} else {
466e098bc96SEvan Quan 					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
467e098bc96SEvan Quan 						le16_to_cpu(entry->usVddc);
468e098bc96SEvan Quan 					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
469e098bc96SEvan Quan 						le32_to_cpu(entry->ulLeakageValue);
470e098bc96SEvan Quan 				}
471e098bc96SEvan Quan 				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
472e098bc96SEvan Quan 					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
473e098bc96SEvan Quan 			}
474e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
475e098bc96SEvan Quan 		}
476e098bc96SEvan Quan 	}
477e098bc96SEvan Quan 
478e098bc96SEvan Quan 	/* ext tables */
479e098bc96SEvan Quan 	if (le16_to_cpu(power_info->pplib.usTableSize) >=
480e098bc96SEvan Quan 	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
481e098bc96SEvan Quan 		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
482e098bc96SEvan Quan 			(mode_info->atom_context->bios + data_offset +
483e098bc96SEvan Quan 			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
484e098bc96SEvan Quan 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
485e098bc96SEvan Quan 			ext_hdr->usVCETableOffset) {
486e098bc96SEvan Quan 			VCEClockInfoArray *array = (VCEClockInfoArray *)
487e098bc96SEvan Quan 				(mode_info->atom_context->bios + data_offset +
488e098bc96SEvan Quan 				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
489e098bc96SEvan Quan 			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
490e098bc96SEvan Quan 				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
491e098bc96SEvan Quan 				(mode_info->atom_context->bios + data_offset +
492e098bc96SEvan Quan 				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
493e098bc96SEvan Quan 				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
494e098bc96SEvan Quan 			ATOM_PPLIB_VCE_State_Table *states =
495e098bc96SEvan Quan 				(ATOM_PPLIB_VCE_State_Table *)
496e098bc96SEvan Quan 				(mode_info->atom_context->bios + data_offset +
497e098bc96SEvan Quan 				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
498e098bc96SEvan Quan 				 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
499e098bc96SEvan Quan 				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
500e098bc96SEvan Quan 			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
501e098bc96SEvan Quan 			ATOM_PPLIB_VCE_State_Record *state_entry;
502e098bc96SEvan Quan 			VCEClockInfo *vce_clk;
503e098bc96SEvan Quan 			u32 size = limits->numEntries *
504e098bc96SEvan Quan 				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
505e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
506e098bc96SEvan Quan 				kzalloc(size, GFP_KERNEL);
507e098bc96SEvan Quan 			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
508e098bc96SEvan Quan 				amdgpu_free_extended_power_table(adev);
509e098bc96SEvan Quan 				return -ENOMEM;
510e098bc96SEvan Quan 			}
511e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
512e098bc96SEvan Quan 				limits->numEntries;
513e098bc96SEvan Quan 			entry = &limits->entries[0];
514e098bc96SEvan Quan 			state_entry = &states->entries[0];
515e098bc96SEvan Quan 			for (i = 0; i < limits->numEntries; i++) {
516e098bc96SEvan Quan 				vce_clk = (VCEClockInfo *)
517e098bc96SEvan Quan 					((u8 *)&array->entries[0] +
518e098bc96SEvan Quan 					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
519e098bc96SEvan Quan 				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
520e098bc96SEvan Quan 					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
521e098bc96SEvan Quan 				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
522e098bc96SEvan Quan 					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
523e098bc96SEvan Quan 				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
524e098bc96SEvan Quan 					le16_to_cpu(entry->usVoltage);
525e098bc96SEvan Quan 				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
526e098bc96SEvan Quan 					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
527e098bc96SEvan Quan 			}
528e098bc96SEvan Quan 			adev->pm.dpm.num_of_vce_states =
529e098bc96SEvan Quan 					states->numEntries > AMD_MAX_VCE_LEVELS ?
530e098bc96SEvan Quan 					AMD_MAX_VCE_LEVELS : states->numEntries;
531e098bc96SEvan Quan 			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
532e098bc96SEvan Quan 				vce_clk = (VCEClockInfo *)
533e098bc96SEvan Quan 					((u8 *)&array->entries[0] +
534e098bc96SEvan Quan 					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
535e098bc96SEvan Quan 				adev->pm.dpm.vce_states[i].evclk =
536e098bc96SEvan Quan 					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
537e098bc96SEvan Quan 				adev->pm.dpm.vce_states[i].ecclk =
538e098bc96SEvan Quan 					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
539e098bc96SEvan Quan 				adev->pm.dpm.vce_states[i].clk_idx =
540e098bc96SEvan Quan 					state_entry->ucClockInfoIndex & 0x3f;
541e098bc96SEvan Quan 				adev->pm.dpm.vce_states[i].pstate =
542e098bc96SEvan Quan 					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
543e098bc96SEvan Quan 				state_entry = (ATOM_PPLIB_VCE_State_Record *)
544e098bc96SEvan Quan 					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
545e098bc96SEvan Quan 			}
546e098bc96SEvan Quan 		}
547e098bc96SEvan Quan 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
548e098bc96SEvan Quan 			ext_hdr->usUVDTableOffset) {
549e098bc96SEvan Quan 			UVDClockInfoArray *array = (UVDClockInfoArray *)
550e098bc96SEvan Quan 				(mode_info->atom_context->bios + data_offset +
551e098bc96SEvan Quan 				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
552e098bc96SEvan Quan 			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
553e098bc96SEvan Quan 				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
554e098bc96SEvan Quan 				(mode_info->atom_context->bios + data_offset +
555e098bc96SEvan Quan 				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
556e098bc96SEvan Quan 				 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
557e098bc96SEvan Quan 			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
558e098bc96SEvan Quan 			u32 size = limits->numEntries *
559e098bc96SEvan Quan 				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
560e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
561e098bc96SEvan Quan 				kzalloc(size, GFP_KERNEL);
562e098bc96SEvan Quan 			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
563e098bc96SEvan Quan 				amdgpu_free_extended_power_table(adev);
564e098bc96SEvan Quan 				return -ENOMEM;
565e098bc96SEvan Quan 			}
566e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
567e098bc96SEvan Quan 				limits->numEntries;
568e098bc96SEvan Quan 			entry = &limits->entries[0];
569e098bc96SEvan Quan 			for (i = 0; i < limits->numEntries; i++) {
570e098bc96SEvan Quan 				UVDClockInfo *uvd_clk = (UVDClockInfo *)
571e098bc96SEvan Quan 					((u8 *)&array->entries[0] +
572e098bc96SEvan Quan 					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
573e098bc96SEvan Quan 				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
574e098bc96SEvan Quan 					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
575e098bc96SEvan Quan 				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
576e098bc96SEvan Quan 					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
577e098bc96SEvan Quan 				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
578e098bc96SEvan Quan 					le16_to_cpu(entry->usVoltage);
579e098bc96SEvan Quan 				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
580e098bc96SEvan Quan 					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
581e098bc96SEvan Quan 			}
582e098bc96SEvan Quan 		}
583e098bc96SEvan Quan 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
584e098bc96SEvan Quan 			ext_hdr->usSAMUTableOffset) {
585e098bc96SEvan Quan 			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
586e098bc96SEvan Quan 				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
587e098bc96SEvan Quan 				(mode_info->atom_context->bios + data_offset +
588e098bc96SEvan Quan 				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
589e098bc96SEvan Quan 			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
590e098bc96SEvan Quan 			u32 size = limits->numEntries *
591e098bc96SEvan Quan 				sizeof(struct amdgpu_clock_voltage_dependency_entry);
592e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
593e098bc96SEvan Quan 				kzalloc(size, GFP_KERNEL);
594e098bc96SEvan Quan 			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
595e098bc96SEvan Quan 				amdgpu_free_extended_power_table(adev);
596e098bc96SEvan Quan 				return -ENOMEM;
597e098bc96SEvan Quan 			}
598e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
599e098bc96SEvan Quan 				limits->numEntries;
600e098bc96SEvan Quan 			entry = &limits->entries[0];
601e098bc96SEvan Quan 			for (i = 0; i < limits->numEntries; i++) {
602e098bc96SEvan Quan 				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
603e098bc96SEvan Quan 					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
604e098bc96SEvan Quan 				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
605e098bc96SEvan Quan 					le16_to_cpu(entry->usVoltage);
606e098bc96SEvan Quan 				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
607e098bc96SEvan Quan 					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
608e098bc96SEvan Quan 			}
609e098bc96SEvan Quan 		}
610e098bc96SEvan Quan 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
611e098bc96SEvan Quan 		    ext_hdr->usPPMTableOffset) {
612e098bc96SEvan Quan 			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
613e098bc96SEvan Quan 				(mode_info->atom_context->bios + data_offset +
614e098bc96SEvan Quan 				 le16_to_cpu(ext_hdr->usPPMTableOffset));
615e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.ppm_table =
616e098bc96SEvan Quan 				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
617e098bc96SEvan Quan 			if (!adev->pm.dpm.dyn_state.ppm_table) {
618e098bc96SEvan Quan 				amdgpu_free_extended_power_table(adev);
619e098bc96SEvan Quan 				return -ENOMEM;
620e098bc96SEvan Quan 			}
621e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
622e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
623e098bc96SEvan Quan 				le16_to_cpu(ppm->usCpuCoreNumber);
624e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
625e098bc96SEvan Quan 				le32_to_cpu(ppm->ulPlatformTDP);
626e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
627e098bc96SEvan Quan 				le32_to_cpu(ppm->ulSmallACPlatformTDP);
628e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
629e098bc96SEvan Quan 				le32_to_cpu(ppm->ulPlatformTDC);
630e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
631e098bc96SEvan Quan 				le32_to_cpu(ppm->ulSmallACPlatformTDC);
632e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
633e098bc96SEvan Quan 				le32_to_cpu(ppm->ulApuTDP);
634e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
635e098bc96SEvan Quan 				le32_to_cpu(ppm->ulDGpuTDP);
636e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
637e098bc96SEvan Quan 				le32_to_cpu(ppm->ulDGpuUlvPower);
638e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.ppm_table->tj_max =
639e098bc96SEvan Quan 				le32_to_cpu(ppm->ulTjmax);
640e098bc96SEvan Quan 		}
641e098bc96SEvan Quan 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
642e098bc96SEvan Quan 			ext_hdr->usACPTableOffset) {
643e098bc96SEvan Quan 			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
644e098bc96SEvan Quan 				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
645e098bc96SEvan Quan 				(mode_info->atom_context->bios + data_offset +
646e098bc96SEvan Quan 				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
647e098bc96SEvan Quan 			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
648e098bc96SEvan Quan 			u32 size = limits->numEntries *
649e098bc96SEvan Quan 				sizeof(struct amdgpu_clock_voltage_dependency_entry);
650e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
651e098bc96SEvan Quan 				kzalloc(size, GFP_KERNEL);
652e098bc96SEvan Quan 			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
653e098bc96SEvan Quan 				amdgpu_free_extended_power_table(adev);
654e098bc96SEvan Quan 				return -ENOMEM;
655e098bc96SEvan Quan 			}
656e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
657e098bc96SEvan Quan 				limits->numEntries;
658e098bc96SEvan Quan 			entry = &limits->entries[0];
659e098bc96SEvan Quan 			for (i = 0; i < limits->numEntries; i++) {
660e098bc96SEvan Quan 				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
661e098bc96SEvan Quan 					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
662e098bc96SEvan Quan 				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
663e098bc96SEvan Quan 					le16_to_cpu(entry->usVoltage);
664e098bc96SEvan Quan 				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
665e098bc96SEvan Quan 					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
666e098bc96SEvan Quan 			}
667e098bc96SEvan Quan 		}
668e098bc96SEvan Quan 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
669e098bc96SEvan Quan 			ext_hdr->usPowerTuneTableOffset) {
670e098bc96SEvan Quan 			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
671e098bc96SEvan Quan 					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
672e098bc96SEvan Quan 			ATOM_PowerTune_Table *pt;
673e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.cac_tdp_table =
674e098bc96SEvan Quan 				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
675e098bc96SEvan Quan 			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
676e098bc96SEvan Quan 				amdgpu_free_extended_power_table(adev);
677e098bc96SEvan Quan 				return -ENOMEM;
678e098bc96SEvan Quan 			}
679e098bc96SEvan Quan 			if (rev > 0) {
680e098bc96SEvan Quan 				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
681e098bc96SEvan Quan 					(mode_info->atom_context->bios + data_offset +
682e098bc96SEvan Quan 					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
683e098bc96SEvan Quan 				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
684e098bc96SEvan Quan 					ppt->usMaximumPowerDeliveryLimit;
685e098bc96SEvan Quan 				pt = &ppt->power_tune_table;
686e098bc96SEvan Quan 			} else {
687e098bc96SEvan Quan 				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
688e098bc96SEvan Quan 					(mode_info->atom_context->bios + data_offset +
689e098bc96SEvan Quan 					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
690e098bc96SEvan Quan 				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
691e098bc96SEvan Quan 				pt = &ppt->power_tune_table;
692e098bc96SEvan Quan 			}
693e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
694e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
695e098bc96SEvan Quan 				le16_to_cpu(pt->usConfigurableTDP);
696e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
697e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
698e098bc96SEvan Quan 				le16_to_cpu(pt->usBatteryPowerLimit);
699e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
700e098bc96SEvan Quan 				le16_to_cpu(pt->usSmallPowerLimit);
701e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
702e098bc96SEvan Quan 				le16_to_cpu(pt->usLowCACLeakage);
703e098bc96SEvan Quan 			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
704e098bc96SEvan Quan 				le16_to_cpu(pt->usHighCACLeakage);
705e098bc96SEvan Quan 		}
706e098bc96SEvan Quan 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
707e098bc96SEvan Quan 				ext_hdr->usSclkVddgfxTableOffset) {
708e098bc96SEvan Quan 			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
709e098bc96SEvan Quan 				(mode_info->atom_context->bios + data_offset +
710e098bc96SEvan Quan 				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
711e098bc96SEvan Quan 			ret = amdgpu_parse_clk_voltage_dep_table(
712e098bc96SEvan Quan 					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
713e098bc96SEvan Quan 					dep_table);
714e098bc96SEvan Quan 			if (ret) {
715e098bc96SEvan Quan 				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
716e098bc96SEvan Quan 				return ret;
717e098bc96SEvan Quan 			}
718e098bc96SEvan Quan 		}
719e098bc96SEvan Quan 	}
720e098bc96SEvan Quan 
721e098bc96SEvan Quan 	return 0;
722e098bc96SEvan Quan }
723e098bc96SEvan Quan 
724e098bc96SEvan Quan void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
725e098bc96SEvan Quan {
726e098bc96SEvan Quan 	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
727e098bc96SEvan Quan 
728e098bc96SEvan Quan 	kfree(dyn_state->vddc_dependency_on_sclk.entries);
729e098bc96SEvan Quan 	kfree(dyn_state->vddci_dependency_on_mclk.entries);
730e098bc96SEvan Quan 	kfree(dyn_state->vddc_dependency_on_mclk.entries);
731e098bc96SEvan Quan 	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
732e098bc96SEvan Quan 	kfree(dyn_state->cac_leakage_table.entries);
733e098bc96SEvan Quan 	kfree(dyn_state->phase_shedding_limits_table.entries);
734e098bc96SEvan Quan 	kfree(dyn_state->ppm_table);
735e098bc96SEvan Quan 	kfree(dyn_state->cac_tdp_table);
736e098bc96SEvan Quan 	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
737e098bc96SEvan Quan 	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
738e098bc96SEvan Quan 	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
739e098bc96SEvan Quan 	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
740e098bc96SEvan Quan 	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
741e098bc96SEvan Quan }
742e098bc96SEvan Quan 
743e098bc96SEvan Quan static const char *pp_lib_thermal_controller_names[] = {
744e098bc96SEvan Quan 	"NONE",
745e098bc96SEvan Quan 	"lm63",
746e098bc96SEvan Quan 	"adm1032",
747e098bc96SEvan Quan 	"adm1030",
748e098bc96SEvan Quan 	"max6649",
749e098bc96SEvan Quan 	"lm64",
750e098bc96SEvan Quan 	"f75375",
751e098bc96SEvan Quan 	"RV6xx",
752e098bc96SEvan Quan 	"RV770",
753e098bc96SEvan Quan 	"adt7473",
754e098bc96SEvan Quan 	"NONE",
755e098bc96SEvan Quan 	"External GPIO",
756e098bc96SEvan Quan 	"Evergreen",
757e098bc96SEvan Quan 	"emc2103",
758e098bc96SEvan Quan 	"Sumo",
759e098bc96SEvan Quan 	"Northern Islands",
760e098bc96SEvan Quan 	"Southern Islands",
761e098bc96SEvan Quan 	"lm96163",
762e098bc96SEvan Quan 	"Sea Islands",
763e098bc96SEvan Quan 	"Kaveri/Kabini",
764e098bc96SEvan Quan };
765e098bc96SEvan Quan 
766e098bc96SEvan Quan void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
767e098bc96SEvan Quan {
768e098bc96SEvan Quan 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
769e098bc96SEvan Quan 	ATOM_PPLIB_POWERPLAYTABLE *power_table;
770e098bc96SEvan Quan 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
771e098bc96SEvan Quan 	ATOM_PPLIB_THERMALCONTROLLER *controller;
772e098bc96SEvan Quan 	struct amdgpu_i2c_bus_rec i2c_bus;
773e098bc96SEvan Quan 	u16 data_offset;
774e098bc96SEvan Quan 	u8 frev, crev;
775e098bc96SEvan Quan 
776e098bc96SEvan Quan 	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
777e098bc96SEvan Quan 				   &frev, &crev, &data_offset))
778e098bc96SEvan Quan 		return;
779e098bc96SEvan Quan 	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
780e098bc96SEvan Quan 		(mode_info->atom_context->bios + data_offset);
781e098bc96SEvan Quan 	controller = &power_table->sThermalController;
782e098bc96SEvan Quan 
783e098bc96SEvan Quan 	/* add the i2c bus for thermal/fan chip */
784e098bc96SEvan Quan 	if (controller->ucType > 0) {
785e098bc96SEvan Quan 		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
786e098bc96SEvan Quan 			adev->pm.no_fan = true;
787e098bc96SEvan Quan 		adev->pm.fan_pulses_per_revolution =
788e098bc96SEvan Quan 			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
789e098bc96SEvan Quan 		if (adev->pm.fan_pulses_per_revolution) {
790e098bc96SEvan Quan 			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
791e098bc96SEvan Quan 			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
792e098bc96SEvan Quan 		}
793e098bc96SEvan Quan 		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
794e098bc96SEvan Quan 			DRM_INFO("Internal thermal controller %s fan control\n",
795e098bc96SEvan Quan 				 (controller->ucFanParameters &
796e098bc96SEvan Quan 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
797e098bc96SEvan Quan 			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
798e098bc96SEvan Quan 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
799e098bc96SEvan Quan 			DRM_INFO("Internal thermal controller %s fan control\n",
800e098bc96SEvan Quan 				 (controller->ucFanParameters &
801e098bc96SEvan Quan 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
802e098bc96SEvan Quan 			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
803e098bc96SEvan Quan 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
804e098bc96SEvan Quan 			DRM_INFO("Internal thermal controller %s fan control\n",
805e098bc96SEvan Quan 				 (controller->ucFanParameters &
806e098bc96SEvan Quan 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
807e098bc96SEvan Quan 			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
808e098bc96SEvan Quan 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
809e098bc96SEvan Quan 			DRM_INFO("Internal thermal controller %s fan control\n",
810e098bc96SEvan Quan 				 (controller->ucFanParameters &
811e098bc96SEvan Quan 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
812e098bc96SEvan Quan 			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
813e098bc96SEvan Quan 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
814e098bc96SEvan Quan 			DRM_INFO("Internal thermal controller %s fan control\n",
815e098bc96SEvan Quan 				 (controller->ucFanParameters &
816e098bc96SEvan Quan 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
817e098bc96SEvan Quan 			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
818e098bc96SEvan Quan 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
819e098bc96SEvan Quan 			DRM_INFO("Internal thermal controller %s fan control\n",
820e098bc96SEvan Quan 				 (controller->ucFanParameters &
821e098bc96SEvan Quan 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
822e098bc96SEvan Quan 			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
823e098bc96SEvan Quan 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
824e098bc96SEvan Quan 			DRM_INFO("Internal thermal controller %s fan control\n",
825e098bc96SEvan Quan 				 (controller->ucFanParameters &
826e098bc96SEvan Quan 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
827e098bc96SEvan Quan 			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
828e098bc96SEvan Quan 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
829e098bc96SEvan Quan 			DRM_INFO("Internal thermal controller %s fan control\n",
830e098bc96SEvan Quan 				 (controller->ucFanParameters &
831e098bc96SEvan Quan 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
832e098bc96SEvan Quan 			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
833e098bc96SEvan Quan 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
834e098bc96SEvan Quan 			DRM_INFO("External GPIO thermal controller %s fan control\n",
835e098bc96SEvan Quan 				 (controller->ucFanParameters &
836e098bc96SEvan Quan 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
837e098bc96SEvan Quan 			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
838e098bc96SEvan Quan 		} else if (controller->ucType ==
839e098bc96SEvan Quan 			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
840e098bc96SEvan Quan 			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
841e098bc96SEvan Quan 				 (controller->ucFanParameters &
842e098bc96SEvan Quan 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
843e098bc96SEvan Quan 			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
844e098bc96SEvan Quan 		} else if (controller->ucType ==
845e098bc96SEvan Quan 			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
846e098bc96SEvan Quan 			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
847e098bc96SEvan Quan 				 (controller->ucFanParameters &
848e098bc96SEvan Quan 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
849e098bc96SEvan Quan 			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
850e098bc96SEvan Quan 		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
851e098bc96SEvan Quan 			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
852e098bc96SEvan Quan 				 pp_lib_thermal_controller_names[controller->ucType],
853e098bc96SEvan Quan 				 controller->ucI2cAddress >> 1,
854e098bc96SEvan Quan 				 (controller->ucFanParameters &
855e098bc96SEvan Quan 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
856e098bc96SEvan Quan 			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
857e098bc96SEvan Quan 			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
858e098bc96SEvan Quan 			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
859e098bc96SEvan Quan 			if (adev->pm.i2c_bus) {
860e098bc96SEvan Quan 				struct i2c_board_info info = { };
861e098bc96SEvan Quan 				const char *name = pp_lib_thermal_controller_names[controller->ucType];
862e098bc96SEvan Quan 				info.addr = controller->ucI2cAddress >> 1;
863e098bc96SEvan Quan 				strlcpy(info.type, name, sizeof(info.type));
864e098bc96SEvan Quan 				i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
865e098bc96SEvan Quan 			}
866e098bc96SEvan Quan 		} else {
867e098bc96SEvan Quan 			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
868e098bc96SEvan Quan 				 controller->ucType,
869e098bc96SEvan Quan 				 controller->ucI2cAddress >> 1,
870e098bc96SEvan Quan 				 (controller->ucFanParameters &
871e098bc96SEvan Quan 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
872e098bc96SEvan Quan 		}
873e098bc96SEvan Quan 	}
874e098bc96SEvan Quan }
875e098bc96SEvan Quan 
876e098bc96SEvan Quan enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
877e098bc96SEvan Quan 						 u32 sys_mask,
878e098bc96SEvan Quan 						 enum amdgpu_pcie_gen asic_gen,
879e098bc96SEvan Quan 						 enum amdgpu_pcie_gen default_gen)
880e098bc96SEvan Quan {
881e098bc96SEvan Quan 	switch (asic_gen) {
882e098bc96SEvan Quan 	case AMDGPU_PCIE_GEN1:
883e098bc96SEvan Quan 		return AMDGPU_PCIE_GEN1;
884e098bc96SEvan Quan 	case AMDGPU_PCIE_GEN2:
885e098bc96SEvan Quan 		return AMDGPU_PCIE_GEN2;
886e098bc96SEvan Quan 	case AMDGPU_PCIE_GEN3:
887e098bc96SEvan Quan 		return AMDGPU_PCIE_GEN3;
888e098bc96SEvan Quan 	default:
889e098bc96SEvan Quan 		if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
890e098bc96SEvan Quan 		    (default_gen == AMDGPU_PCIE_GEN3))
891e098bc96SEvan Quan 			return AMDGPU_PCIE_GEN3;
892e098bc96SEvan Quan 		else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
893e098bc96SEvan Quan 			 (default_gen == AMDGPU_PCIE_GEN2))
894e098bc96SEvan Quan 			return AMDGPU_PCIE_GEN2;
895e098bc96SEvan Quan 		else
896e098bc96SEvan Quan 			return AMDGPU_PCIE_GEN1;
897e098bc96SEvan Quan 	}
898e098bc96SEvan Quan 	return AMDGPU_PCIE_GEN1;
899e098bc96SEvan Quan }
900e098bc96SEvan Quan 
901e098bc96SEvan Quan struct amd_vce_state*
902e098bc96SEvan Quan amdgpu_get_vce_clock_state(void *handle, u32 idx)
903e098bc96SEvan Quan {
904e098bc96SEvan Quan 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
905e098bc96SEvan Quan 
906e098bc96SEvan Quan 	if (idx < adev->pm.dpm.num_of_vce_states)
907e098bc96SEvan Quan 		return &adev->pm.dpm.vce_states[idx];
908e098bc96SEvan Quan 
909e098bc96SEvan Quan 	return NULL;
910e098bc96SEvan Quan }
911e098bc96SEvan Quan 
912e098bc96SEvan Quan int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
913e098bc96SEvan Quan {
914bc7d6c12SDarren Powell 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
915e098bc96SEvan Quan 
916bc7d6c12SDarren Powell 	return pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
917e098bc96SEvan Quan }
918e098bc96SEvan Quan 
919e098bc96SEvan Quan int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
920e098bc96SEvan Quan {
921bc7d6c12SDarren Powell 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
922e098bc96SEvan Quan 
923bc7d6c12SDarren Powell 	return pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
924e098bc96SEvan Quan }
925e098bc96SEvan Quan 
926e098bc96SEvan Quan int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
927e098bc96SEvan Quan {
928e098bc96SEvan Quan 	int ret = 0;
929bc7d6c12SDarren Powell 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
9306ee27ee2SEvan Quan 	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
9316ee27ee2SEvan Quan 
9326ee27ee2SEvan Quan 	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
9336ee27ee2SEvan Quan 		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
9346ee27ee2SEvan Quan 				block_type, gate ? "gate" : "ungate");
9356ee27ee2SEvan Quan 		return 0;
9366ee27ee2SEvan Quan 	}
937e098bc96SEvan Quan 
938e098bc96SEvan Quan 	switch (block_type) {
939e098bc96SEvan Quan 	case AMD_IP_BLOCK_TYPE_UVD:
940e098bc96SEvan Quan 	case AMD_IP_BLOCK_TYPE_VCE:
941bc7d6c12SDarren Powell 		if (pp_funcs && pp_funcs->set_powergating_by_smu) {
942e098bc96SEvan Quan 			/*
943e098bc96SEvan Quan 			 * TODO: need a better lock mechanism
944e098bc96SEvan Quan 			 *
945e098bc96SEvan Quan 			 * Here adev->pm.mutex lock protection is enforced on
946e098bc96SEvan Quan 			 * UVD and VCE cases only. Since for other cases, there
947e098bc96SEvan Quan 			 * may be already lock protection in amdgpu_pm.c.
948e098bc96SEvan Quan 			 * This is a quick fix for the deadlock issue below.
949e098bc96SEvan Quan 			 *     NFO: task ocltst:2028 blocked for more than 120 seconds.
950e098bc96SEvan Quan 			 *     Tainted: G           OE     5.0.0-37-generic #40~18.04.1-Ubuntu
951e098bc96SEvan Quan 			 *     echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
952e098bc96SEvan Quan 			 *     cltst          D    0  2028   2026 0x00000000
953e098bc96SEvan Quan 			 *     all Trace:
954e098bc96SEvan Quan 			 *     __schedule+0x2c0/0x870
955e098bc96SEvan Quan 			 *     schedule+0x2c/0x70
956e098bc96SEvan Quan 			 *     schedule_preempt_disabled+0xe/0x10
957e098bc96SEvan Quan 			 *     __mutex_lock.isra.9+0x26d/0x4e0
958e098bc96SEvan Quan 			 *     __mutex_lock_slowpath+0x13/0x20
959e098bc96SEvan Quan 			 *     ? __mutex_lock_slowpath+0x13/0x20
960e098bc96SEvan Quan 			 *     mutex_lock+0x2f/0x40
961e098bc96SEvan Quan 			 *     amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
962e098bc96SEvan Quan 			 *     gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
963e098bc96SEvan Quan 			 *     gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
964e098bc96SEvan Quan 			 *     amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
965e098bc96SEvan Quan 			 *     pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
966e098bc96SEvan Quan 			 *     amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
967e098bc96SEvan Quan 			 */
968e098bc96SEvan Quan 			mutex_lock(&adev->pm.mutex);
969bc7d6c12SDarren Powell 			ret = (pp_funcs->set_powergating_by_smu(
970e098bc96SEvan Quan 				(adev)->powerplay.pp_handle, block_type, gate));
971e098bc96SEvan Quan 			mutex_unlock(&adev->pm.mutex);
972e098bc96SEvan Quan 		}
973e098bc96SEvan Quan 		break;
974e098bc96SEvan Quan 	case AMD_IP_BLOCK_TYPE_GFX:
975e098bc96SEvan Quan 	case AMD_IP_BLOCK_TYPE_VCN:
976e098bc96SEvan Quan 	case AMD_IP_BLOCK_TYPE_SDMA:
977e098bc96SEvan Quan 	case AMD_IP_BLOCK_TYPE_JPEG:
978e098bc96SEvan Quan 	case AMD_IP_BLOCK_TYPE_GMC:
979e098bc96SEvan Quan 	case AMD_IP_BLOCK_TYPE_ACP:
980bc7d6c12SDarren Powell 		if (pp_funcs && pp_funcs->set_powergating_by_smu) {
981bc7d6c12SDarren Powell 			ret = (pp_funcs->set_powergating_by_smu(
982e098bc96SEvan Quan 				(adev)->powerplay.pp_handle, block_type, gate));
983bc7d6c12SDarren Powell 		}
984e098bc96SEvan Quan 		break;
985e098bc96SEvan Quan 	default:
986e098bc96SEvan Quan 		break;
987e098bc96SEvan Quan 	}
988e098bc96SEvan Quan 
9896ee27ee2SEvan Quan 	if (!ret)
9906ee27ee2SEvan Quan 		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);
9916ee27ee2SEvan Quan 
992e098bc96SEvan Quan 	return ret;
993e098bc96SEvan Quan }
994e098bc96SEvan Quan 
995e098bc96SEvan Quan int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
996e098bc96SEvan Quan {
997e098bc96SEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
998e098bc96SEvan Quan 	void *pp_handle = adev->powerplay.pp_handle;
999e098bc96SEvan Quan 	int ret = 0;
1000e098bc96SEvan Quan 
1001e098bc96SEvan Quan 	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1002e098bc96SEvan Quan 		return -ENOENT;
1003e098bc96SEvan Quan 
1004e098bc96SEvan Quan 	/* enter BACO state */
1005e098bc96SEvan Quan 	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
1006e098bc96SEvan Quan 
1007e098bc96SEvan Quan 	return ret;
1008e098bc96SEvan Quan }
1009e098bc96SEvan Quan 
1010e098bc96SEvan Quan int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
1011e098bc96SEvan Quan {
1012e098bc96SEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1013e098bc96SEvan Quan 	void *pp_handle = adev->powerplay.pp_handle;
1014e098bc96SEvan Quan 	int ret = 0;
1015e098bc96SEvan Quan 
1016e098bc96SEvan Quan 	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1017e098bc96SEvan Quan 		return -ENOENT;
1018e098bc96SEvan Quan 
1019e098bc96SEvan Quan 	/* exit BACO state */
1020e098bc96SEvan Quan 	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1021e098bc96SEvan Quan 
1022e098bc96SEvan Quan 	return ret;
1023e098bc96SEvan Quan }
1024e098bc96SEvan Quan 
1025e098bc96SEvan Quan int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
1026e098bc96SEvan Quan 			     enum pp_mp1_state mp1_state)
1027e098bc96SEvan Quan {
1028e098bc96SEvan Quan 	int ret = 0;
1029bab0f602SDarren Powell 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1030e098bc96SEvan Quan 
1031bab0f602SDarren Powell 	if (pp_funcs && pp_funcs->set_mp1_state) {
1032bab0f602SDarren Powell 		ret = pp_funcs->set_mp1_state(
1033e098bc96SEvan Quan 				adev->powerplay.pp_handle,
1034e098bc96SEvan Quan 				mp1_state);
1035e098bc96SEvan Quan 	}
1036e098bc96SEvan Quan 
1037e098bc96SEvan Quan 	return ret;
1038e098bc96SEvan Quan }
1039e098bc96SEvan Quan 
1040e098bc96SEvan Quan bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
1041e098bc96SEvan Quan {
1042e098bc96SEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1043e098bc96SEvan Quan 	void *pp_handle = adev->powerplay.pp_handle;
1044e098bc96SEvan Quan 	bool baco_cap;
1045e098bc96SEvan Quan 
1046e098bc96SEvan Quan 	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
1047e098bc96SEvan Quan 		return false;
1048e098bc96SEvan Quan 
1049e098bc96SEvan Quan 	if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
1050e098bc96SEvan Quan 		return false;
1051e098bc96SEvan Quan 
10529ab5001aSDarren Powell 	return baco_cap;
1053e098bc96SEvan Quan }
1054e098bc96SEvan Quan 
1055e098bc96SEvan Quan int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
1056e098bc96SEvan Quan {
1057e098bc96SEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1058e098bc96SEvan Quan 	void *pp_handle = adev->powerplay.pp_handle;
1059e098bc96SEvan Quan 
1060e098bc96SEvan Quan 	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
1061e098bc96SEvan Quan 		return -ENOENT;
1062e098bc96SEvan Quan 
1063e098bc96SEvan Quan 	return pp_funcs->asic_reset_mode_2(pp_handle);
1064e098bc96SEvan Quan }
1065e098bc96SEvan Quan 
1066e098bc96SEvan Quan int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
1067e098bc96SEvan Quan {
1068e098bc96SEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1069e098bc96SEvan Quan 	void *pp_handle = adev->powerplay.pp_handle;
1070e098bc96SEvan Quan 	int ret = 0;
1071e098bc96SEvan Quan 
10729ab5001aSDarren Powell 	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1073e098bc96SEvan Quan 		return -ENOENT;
1074e098bc96SEvan Quan 
1075e098bc96SEvan Quan 	/* enter BACO state */
1076e098bc96SEvan Quan 	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
1077e098bc96SEvan Quan 	if (ret)
1078e098bc96SEvan Quan 		return ret;
1079e098bc96SEvan Quan 
1080e098bc96SEvan Quan 	/* exit BACO state */
1081e098bc96SEvan Quan 	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1082e098bc96SEvan Quan 	if (ret)
1083e098bc96SEvan Quan 		return ret;
1084e098bc96SEvan Quan 
1085e098bc96SEvan Quan 	return 0;
1086e098bc96SEvan Quan }
1087e098bc96SEvan Quan 
1088e098bc96SEvan Quan bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
1089e098bc96SEvan Quan {
1090e098bc96SEvan Quan 	struct smu_context *smu = &adev->smu;
1091e098bc96SEvan Quan 
1092e098bc96SEvan Quan 	if (is_support_sw_smu(adev))
1093e098bc96SEvan Quan 		return smu_mode1_reset_is_support(smu);
1094e098bc96SEvan Quan 
1095e098bc96SEvan Quan 	return false;
1096e098bc96SEvan Quan }
1097e098bc96SEvan Quan 
1098e098bc96SEvan Quan int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
1099e098bc96SEvan Quan {
1100e098bc96SEvan Quan 	struct smu_context *smu = &adev->smu;
1101e098bc96SEvan Quan 
1102e098bc96SEvan Quan 	if (is_support_sw_smu(adev))
1103e098bc96SEvan Quan 		return smu_mode1_reset(smu);
1104e098bc96SEvan Quan 
1105e098bc96SEvan Quan 	return -EOPNOTSUPP;
1106e098bc96SEvan Quan }
1107e098bc96SEvan Quan 
1108e098bc96SEvan Quan int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
1109e098bc96SEvan Quan 				    enum PP_SMC_POWER_PROFILE type,
1110e098bc96SEvan Quan 				    bool en)
1111e098bc96SEvan Quan {
1112bab0f602SDarren Powell 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1113e098bc96SEvan Quan 	int ret = 0;
1114e098bc96SEvan Quan 
11157cf7a392SJingwen Chen 	if (amdgpu_sriov_vf(adev))
11167cf7a392SJingwen Chen 		return 0;
11177cf7a392SJingwen Chen 
1118bab0f602SDarren Powell 	if (pp_funcs && pp_funcs->switch_power_profile)
1119bab0f602SDarren Powell 		ret = pp_funcs->switch_power_profile(
1120e098bc96SEvan Quan 			adev->powerplay.pp_handle, type, en);
1121e098bc96SEvan Quan 
1122e098bc96SEvan Quan 	return ret;
1123e098bc96SEvan Quan }
1124e098bc96SEvan Quan 
1125e098bc96SEvan Quan int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
1126e098bc96SEvan Quan 			       uint32_t pstate)
1127e098bc96SEvan Quan {
1128bab0f602SDarren Powell 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1129e098bc96SEvan Quan 	int ret = 0;
1130e098bc96SEvan Quan 
1131bab0f602SDarren Powell 	if (pp_funcs && pp_funcs->set_xgmi_pstate)
1132bab0f602SDarren Powell 		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
1133e098bc96SEvan Quan 								pstate);
1134e098bc96SEvan Quan 
1135e098bc96SEvan Quan 	return ret;
1136e098bc96SEvan Quan }
1137e098bc96SEvan Quan 
1138e098bc96SEvan Quan int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
1139e098bc96SEvan Quan 			     uint32_t cstate)
1140e098bc96SEvan Quan {
1141e098bc96SEvan Quan 	int ret = 0;
1142e098bc96SEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1143e098bc96SEvan Quan 	void *pp_handle = adev->powerplay.pp_handle;
1144e098bc96SEvan Quan 
1145bab0f602SDarren Powell 	if (pp_funcs && pp_funcs->set_df_cstate)
1146e098bc96SEvan Quan 		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
1147e098bc96SEvan Quan 
1148e098bc96SEvan Quan 	return ret;
1149e098bc96SEvan Quan }
1150e098bc96SEvan Quan 
1151e098bc96SEvan Quan int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
1152e098bc96SEvan Quan {
1153e098bc96SEvan Quan 	struct smu_context *smu = &adev->smu;
1154e098bc96SEvan Quan 
1155e098bc96SEvan Quan 	if (is_support_sw_smu(adev))
1156e098bc96SEvan Quan 		return smu_allow_xgmi_power_down(smu, en);
1157e098bc96SEvan Quan 
1158e098bc96SEvan Quan 	return 0;
1159e098bc96SEvan Quan }
1160e098bc96SEvan Quan 
1161e098bc96SEvan Quan int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
1162e098bc96SEvan Quan {
1163e098bc96SEvan Quan 	void *pp_handle = adev->powerplay.pp_handle;
1164e098bc96SEvan Quan 	const struct amd_pm_funcs *pp_funcs =
1165e098bc96SEvan Quan 			adev->powerplay.pp_funcs;
1166e098bc96SEvan Quan 	int ret = 0;
1167e098bc96SEvan Quan 
1168bab0f602SDarren Powell 	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
1169e098bc96SEvan Quan 		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
1170e098bc96SEvan Quan 
1171e098bc96SEvan Quan 	return ret;
1172e098bc96SEvan Quan }
1173e098bc96SEvan Quan 
1174e098bc96SEvan Quan int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
1175e098bc96SEvan Quan 				      uint32_t msg_id)
1176e098bc96SEvan Quan {
1177e098bc96SEvan Quan 	void *pp_handle = adev->powerplay.pp_handle;
1178e098bc96SEvan Quan 	const struct amd_pm_funcs *pp_funcs =
1179e098bc96SEvan Quan 			adev->powerplay.pp_funcs;
1180e098bc96SEvan Quan 	int ret = 0;
1181e098bc96SEvan Quan 
1182e098bc96SEvan Quan 	if (pp_funcs && pp_funcs->set_clockgating_by_smu)
1183e098bc96SEvan Quan 		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
1184e098bc96SEvan Quan 						       msg_id);
1185e098bc96SEvan Quan 
1186e098bc96SEvan Quan 	return ret;
1187e098bc96SEvan Quan }
1188e098bc96SEvan Quan 
1189e098bc96SEvan Quan int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
1190e098bc96SEvan Quan 				  bool acquire)
1191e098bc96SEvan Quan {
1192e098bc96SEvan Quan 	void *pp_handle = adev->powerplay.pp_handle;
1193e098bc96SEvan Quan 	const struct amd_pm_funcs *pp_funcs =
1194e098bc96SEvan Quan 			adev->powerplay.pp_funcs;
1195e098bc96SEvan Quan 	int ret = -EOPNOTSUPP;
1196e098bc96SEvan Quan 
1197e098bc96SEvan Quan 	if (pp_funcs && pp_funcs->smu_i2c_bus_access)
1198e098bc96SEvan Quan 		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
1199e098bc96SEvan Quan 						   acquire);
1200e098bc96SEvan Quan 
1201e098bc96SEvan Quan 	return ret;
1202e098bc96SEvan Quan }
1203e098bc96SEvan Quan 
1204e098bc96SEvan Quan void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
1205e098bc96SEvan Quan {
1206e098bc96SEvan Quan 	if (adev->pm.dpm_enabled) {
1207e098bc96SEvan Quan 		mutex_lock(&adev->pm.mutex);
1208e098bc96SEvan Quan 		if (power_supply_is_system_supplied() > 0)
1209e098bc96SEvan Quan 			adev->pm.ac_power = true;
1210e098bc96SEvan Quan 		else
1211e098bc96SEvan Quan 			adev->pm.ac_power = false;
1212e098bc96SEvan Quan 		if (adev->powerplay.pp_funcs &&
1213e098bc96SEvan Quan 		    adev->powerplay.pp_funcs->enable_bapm)
1214e098bc96SEvan Quan 			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
1215e098bc96SEvan Quan 		mutex_unlock(&adev->pm.mutex);
1216e098bc96SEvan Quan 
1217e098bc96SEvan Quan 		if (is_support_sw_smu(adev))
1218e098bc96SEvan Quan 			smu_set_ac_dc(&adev->smu);
1219e098bc96SEvan Quan 	}
1220e098bc96SEvan Quan }
1221e098bc96SEvan Quan 
1222e098bc96SEvan Quan int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
1223e098bc96SEvan Quan 			   void *data, uint32_t *size)
1224e098bc96SEvan Quan {
12259ab5001aSDarren Powell 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1226e098bc96SEvan Quan 	int ret = 0;
1227e098bc96SEvan Quan 
1228e098bc96SEvan Quan 	if (!data || !size)
1229e098bc96SEvan Quan 		return -EINVAL;
1230e098bc96SEvan Quan 
12319ab5001aSDarren Powell 	if (pp_funcs && pp_funcs->read_sensor)
12329ab5001aSDarren Powell 		ret = pp_funcs->read_sensor((adev)->powerplay.pp_handle,
1233e098bc96SEvan Quan 								    sensor, data, size);
1234e098bc96SEvan Quan 	else
1235e098bc96SEvan Quan 		ret = -EINVAL;
1236e098bc96SEvan Quan 
1237e098bc96SEvan Quan 	return ret;
1238e098bc96SEvan Quan }
1239e098bc96SEvan Quan 
1240e098bc96SEvan Quan void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
1241e098bc96SEvan Quan {
1242e098bc96SEvan Quan 	struct amdgpu_device *adev =
1243e098bc96SEvan Quan 		container_of(work, struct amdgpu_device,
1244e098bc96SEvan Quan 			     pm.dpm.thermal.work);
1245e098bc96SEvan Quan 	/* switch to the thermal state */
1246e098bc96SEvan Quan 	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
1247e098bc96SEvan Quan 	int temp, size = sizeof(temp);
1248e098bc96SEvan Quan 
1249e098bc96SEvan Quan 	if (!adev->pm.dpm_enabled)
1250e098bc96SEvan Quan 		return;
1251e098bc96SEvan Quan 
1252e098bc96SEvan Quan 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
1253e098bc96SEvan Quan 				    (void *)&temp, &size)) {
1254e098bc96SEvan Quan 		if (temp < adev->pm.dpm.thermal.min_temp)
1255e098bc96SEvan Quan 			/* switch back the user state */
1256e098bc96SEvan Quan 			dpm_state = adev->pm.dpm.user_state;
1257e098bc96SEvan Quan 	} else {
1258e098bc96SEvan Quan 		if (adev->pm.dpm.thermal.high_to_low)
1259e098bc96SEvan Quan 			/* switch back the user state */
1260e098bc96SEvan Quan 			dpm_state = adev->pm.dpm.user_state;
1261e098bc96SEvan Quan 	}
1262e098bc96SEvan Quan 	mutex_lock(&adev->pm.mutex);
1263e098bc96SEvan Quan 	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
1264e098bc96SEvan Quan 		adev->pm.dpm.thermal_active = true;
1265e098bc96SEvan Quan 	else
1266e098bc96SEvan Quan 		adev->pm.dpm.thermal_active = false;
1267e098bc96SEvan Quan 	adev->pm.dpm.state = dpm_state;
1268e098bc96SEvan Quan 	mutex_unlock(&adev->pm.mutex);
1269e098bc96SEvan Quan 
1270e098bc96SEvan Quan 	amdgpu_pm_compute_clocks(adev);
1271e098bc96SEvan Quan }
1272e098bc96SEvan Quan 
/*
 * amdgpu_dpm_pick_power_state - select the best matching legacy power state
 * @adev: amdgpu_device pointer
 * @dpm_state: requested power state class (user or internal)
 *
 * Scans adev->pm.dpm.ps[] for a state whose ATOM classification matches
 * @dpm_state, honoring states that are restricted to single-display
 * configurations.  When no state of the requested class exists, the
 * request is downgraded to a more generic class and the scan restarts
 * (see the fallback switch at the bottom).  May return NULL if nothing
 * matches even after all fallbacks.
 */
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	/* fewer than two active CRTCs counts as a single-display setup */
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separare 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				/* single-display-only states need a matching config */
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
1406e098bc96SEvan Quan 
/*
 * amdgpu_dpm_change_power_state_locked - switch to the requested power state
 * @adev: amdgpu_device pointer
 *
 * Legacy dpm path: resolves the effective state (honoring thermal/UVD
 * overrides), picks a matching power state, and drives the backend's
 * pre/set/post power-state callback sequence.  Caller holds
 * adev->pm.mutex (hence "_locked").
 */
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal = false;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* amdgpu_dpm == 1: verbose power-state debugging requested */
	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;
	if (adev->powerplay.pp_funcs->display_configuration_changed)
		amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	/* skip the switch entirely if current and requested states match */
	if (adev->powerplay.pp_funcs->check_state_equal) {
		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
			equal = false;
	}

	if (equal)
		return;

	if (adev->powerplay.pp_funcs->set_power_state)
		adev->powerplay.pp_funcs->set_power_state(adev->powerplay.pp_handle);

	amdgpu_dpm_post_set_power_state(adev);

	/* the new CRTC configuration is now the current one */
	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->powerplay.pp_funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}
1477e098bc96SEvan Quan 
/*
 * amdgpu_pm_compute_clocks - re-evaluate clocks after a display/state change
 * @adev: amdgpu_device pointer
 *
 * Updates display bandwidth, waits for all rings to drain, then either
 * dispatches a display-config-change task to the powerplay backend or,
 * on the legacy dpm path, reselects the power state under the pm mutex.
 */
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	int i = 0;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	/* let in-flight work finish before touching clocks */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	if (adev->powerplay.pp_funcs->dispatch_tasks) {
		/* powerplay path: hand the display config to the backend */
		if (!amdgpu_device_has_dc_support(adev)) {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_get_active_displays(adev);
			adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
			adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
			adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
			/* we have issues with mclk switching with
			 * refresh rates over 120 hz on the non-DC code.
			 */
			if (adev->pm.pm_display_cfg.vrefresh > 120)
				adev->pm.pm_display_cfg.min_vblank_time = 0;
			if (adev->powerplay.pp_funcs->display_configuration_change)
				adev->powerplay.pp_funcs->display_configuration_change(
							adev->powerplay.pp_handle,
							&adev->pm.pm_display_cfg);
			mutex_unlock(&adev->pm.mutex);
		}
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
	} else {
		/* legacy dpm path: reselect the power state ourselves */
		mutex_lock(&adev->pm.mutex);
		amdgpu_dpm_get_active_displays(adev);
		amdgpu_dpm_change_power_state_locked(adev);
		mutex_unlock(&adev->pm.mutex);
	}
}
1520e098bc96SEvan Quan 
1521e098bc96SEvan Quan void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
1522e098bc96SEvan Quan {
1523e098bc96SEvan Quan 	int ret = 0;
1524e098bc96SEvan Quan 
1525e098bc96SEvan Quan 	if (adev->family == AMDGPU_FAMILY_SI) {
1526e098bc96SEvan Quan 		mutex_lock(&adev->pm.mutex);
1527e098bc96SEvan Quan 		if (enable) {
1528e098bc96SEvan Quan 			adev->pm.dpm.uvd_active = true;
1529e098bc96SEvan Quan 			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
1530e098bc96SEvan Quan 		} else {
1531e098bc96SEvan Quan 			adev->pm.dpm.uvd_active = false;
1532e098bc96SEvan Quan 		}
1533e098bc96SEvan Quan 		mutex_unlock(&adev->pm.mutex);
1534e098bc96SEvan Quan 
1535e098bc96SEvan Quan 		amdgpu_pm_compute_clocks(adev);
1536e098bc96SEvan Quan 	} else {
1537e098bc96SEvan Quan 		ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
1538e098bc96SEvan Quan 		if (ret)
1539e098bc96SEvan Quan 			DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
1540e098bc96SEvan Quan 				  enable ? "enable" : "disable", ret);
1541e098bc96SEvan Quan 
1542e098bc96SEvan Quan 		/* enable/disable Low Memory PState for UVD (4k videos) */
1543e098bc96SEvan Quan 		if (adev->asic_type == CHIP_STONEY &&
1544e098bc96SEvan Quan 			adev->uvd.decode_image_width >= WIDTH_4K) {
1545e098bc96SEvan Quan 			struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
1546e098bc96SEvan Quan 
1547e098bc96SEvan Quan 			if (hwmgr && hwmgr->hwmgr_func &&
1548e098bc96SEvan Quan 			    hwmgr->hwmgr_func->update_nbdpm_pstate)
1549e098bc96SEvan Quan 				hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
1550e098bc96SEvan Quan 								       !enable,
1551e098bc96SEvan Quan 								       true);
1552e098bc96SEvan Quan 		}
1553e098bc96SEvan Quan 	}
1554e098bc96SEvan Quan }
1555e098bc96SEvan Quan 
1556e098bc96SEvan Quan void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
1557e098bc96SEvan Quan {
1558e098bc96SEvan Quan 	int ret = 0;
1559e098bc96SEvan Quan 
1560e098bc96SEvan Quan 	if (adev->family == AMDGPU_FAMILY_SI) {
1561e098bc96SEvan Quan 		mutex_lock(&adev->pm.mutex);
1562e098bc96SEvan Quan 		if (enable) {
1563e098bc96SEvan Quan 			adev->pm.dpm.vce_active = true;
1564e098bc96SEvan Quan 			/* XXX select vce level based on ring/task */
1565e098bc96SEvan Quan 			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
1566e098bc96SEvan Quan 		} else {
1567e098bc96SEvan Quan 			adev->pm.dpm.vce_active = false;
1568e098bc96SEvan Quan 		}
1569e098bc96SEvan Quan 		mutex_unlock(&adev->pm.mutex);
1570e098bc96SEvan Quan 
1571e098bc96SEvan Quan 		amdgpu_pm_compute_clocks(adev);
1572e098bc96SEvan Quan 	} else {
1573e098bc96SEvan Quan 		ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
1574e098bc96SEvan Quan 		if (ret)
1575e098bc96SEvan Quan 			DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
1576e098bc96SEvan Quan 				  enable ? "enable" : "disable", ret);
1577e098bc96SEvan Quan 	}
1578e098bc96SEvan Quan }
1579e098bc96SEvan Quan 
1580e098bc96SEvan Quan void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
1581e098bc96SEvan Quan {
1582e098bc96SEvan Quan 	int i;
1583e098bc96SEvan Quan 
1584e098bc96SEvan Quan 	if (adev->powerplay.pp_funcs->print_power_state == NULL)
1585e098bc96SEvan Quan 		return;
1586e098bc96SEvan Quan 
1587e098bc96SEvan Quan 	for (i = 0; i < adev->pm.dpm.num_ps; i++)
1588e098bc96SEvan Quan 		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
1589e098bc96SEvan Quan 
1590e098bc96SEvan Quan }
1591e098bc96SEvan Quan 
1592e098bc96SEvan Quan void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
1593e098bc96SEvan Quan {
1594e098bc96SEvan Quan 	int ret = 0;
1595e098bc96SEvan Quan 
1596e098bc96SEvan Quan 	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
1597e098bc96SEvan Quan 	if (ret)
1598e098bc96SEvan Quan 		DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
1599e098bc96SEvan Quan 			  enable ? "enable" : "disable", ret);
1600e098bc96SEvan Quan }
1601e098bc96SEvan Quan 
1602e098bc96SEvan Quan int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
1603e098bc96SEvan Quan {
1604e098bc96SEvan Quan 	int r;
1605e098bc96SEvan Quan 
1606e098bc96SEvan Quan 	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
1607e098bc96SEvan Quan 		r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
1608e098bc96SEvan Quan 		if (r) {
1609e098bc96SEvan Quan 			pr_err("smu firmware loading failed\n");
1610e098bc96SEvan Quan 			return r;
1611e098bc96SEvan Quan 		}
16122e4b2f7bSEvan Quan 
16132e4b2f7bSEvan Quan 		if (smu_version)
1614e098bc96SEvan Quan 			*smu_version = adev->pm.fw_version;
1615e098bc96SEvan Quan 	}
16162e4b2f7bSEvan Quan 
1617e098bc96SEvan Quan 	return 0;
1618e098bc96SEvan Quan }
1619bc143d8bSEvan Quan 
/*
 * amdgpu_dpm_handle_passthrough_sbr - forward the passthrough SBR
 * handling request to the SMU layer
 * @adev: amdgpu_device pointer
 * @enable: enable/disable flag passed straight through
 *
 * Returns whatever smu_handle_passthrough_sbr() returns.
 */
int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	return smu_handle_passthrough_sbr(&adev->smu, enable);
}
1624bc143d8bSEvan Quan 
/*
 * amdgpu_dpm_send_hbm_bad_pages_num - forward the HBM bad-pages count
 * to the SMU layer
 * @adev: amdgpu_device pointer
 * @size: bad-pages count passed straight through
 *
 * Returns whatever smu_send_hbm_bad_pages_num() returns.
 */
int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	return smu_send_hbm_bad_pages_num(&adev->smu, size);
}
1629bc143d8bSEvan Quan 
1630bc143d8bSEvan Quan int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
1631bc143d8bSEvan Quan 				  enum pp_clock_type type,
1632bc143d8bSEvan Quan 				  uint32_t *min,
1633bc143d8bSEvan Quan 				  uint32_t *max)
1634bc143d8bSEvan Quan {
1635bc143d8bSEvan Quan 	if (!is_support_sw_smu(adev))
1636bc143d8bSEvan Quan 		return -EOPNOTSUPP;
1637bc143d8bSEvan Quan 
1638bc143d8bSEvan Quan 	switch (type) {
1639bc143d8bSEvan Quan 	case PP_SCLK:
1640bc143d8bSEvan Quan 		return smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, min, max);
1641bc143d8bSEvan Quan 	default:
1642bc143d8bSEvan Quan 		return -EINVAL;
1643bc143d8bSEvan Quan 	}
1644bc143d8bSEvan Quan }
1645bc143d8bSEvan Quan 
1646bc143d8bSEvan Quan int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
1647bc143d8bSEvan Quan 				   enum pp_clock_type type,
1648bc143d8bSEvan Quan 				   uint32_t min,
1649bc143d8bSEvan Quan 				   uint32_t max)
1650bc143d8bSEvan Quan {
1651bc143d8bSEvan Quan 	if (!is_support_sw_smu(adev))
1652bc143d8bSEvan Quan 		return -EOPNOTSUPP;
1653bc143d8bSEvan Quan 
1654bc143d8bSEvan Quan 	switch (type) {
1655bc143d8bSEvan Quan 	case PP_SCLK:
1656bc143d8bSEvan Quan 		return smu_set_soft_freq_range(&adev->smu, SMU_SCLK, min, max);
1657bc143d8bSEvan Quan 	default:
1658bc143d8bSEvan Quan 		return -EINVAL;
1659bc143d8bSEvan Quan 	}
1660bc143d8bSEvan Quan }
1661bc143d8bSEvan Quan 
1662bc143d8bSEvan Quan int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
1663bc143d8bSEvan Quan 			      enum smu_event_type event,
1664bc143d8bSEvan Quan 			      uint64_t event_arg)
1665bc143d8bSEvan Quan {
1666bc143d8bSEvan Quan 	if (!is_support_sw_smu(adev))
1667bc143d8bSEvan Quan 		return -EOPNOTSUPP;
1668bc143d8bSEvan Quan 
1669bc143d8bSEvan Quan 	return smu_wait_for_event(&adev->smu, event, event_arg);
1670bc143d8bSEvan Quan }
1671bc143d8bSEvan Quan 
1672bc143d8bSEvan Quan int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
1673bc143d8bSEvan Quan {
1674bc143d8bSEvan Quan 	if (!is_support_sw_smu(adev))
1675bc143d8bSEvan Quan 		return -EOPNOTSUPP;
1676bc143d8bSEvan Quan 
1677bc143d8bSEvan Quan 	return smu_get_status_gfxoff(&adev->smu, value);
1678bc143d8bSEvan Quan }
1679bc143d8bSEvan Quan 
/*
 * amdgpu_dpm_get_thermal_throttling_counter - read the SMU's accumulated
 * thermal throttle interrupt counter
 * @adev: amdgpu_device pointer
 */
uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	return atomic64_read(&adev->smu.throttle_int_counter);
}
1684bc143d8bSEvan Quan 
/**
 * amdgpu_dpm_gfx_state_change - Handle gfx power state change set
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */
1690bc143d8bSEvan Quan void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
1691bc143d8bSEvan Quan 				 enum gfx_change_state state)
1692bc143d8bSEvan Quan {
1693bc143d8bSEvan Quan 	mutex_lock(&adev->pm.mutex);
1694bc143d8bSEvan Quan 	if (adev->powerplay.pp_funcs &&
1695bc143d8bSEvan Quan 	    adev->powerplay.pp_funcs->gfx_state_change_set)
1696bc143d8bSEvan Quan 		((adev)->powerplay.pp_funcs->gfx_state_change_set(
1697bc143d8bSEvan Quan 			(adev)->powerplay.pp_handle, state));
1698bc143d8bSEvan Quan 	mutex_unlock(&adev->pm.mutex);
1699bc143d8bSEvan Quan }
1700bc143d8bSEvan Quan 
1701bc143d8bSEvan Quan int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
1702bc143d8bSEvan Quan 			    void *umc_ecc)
1703bc143d8bSEvan Quan {
1704bc143d8bSEvan Quan 	if (!is_support_sw_smu(adev))
1705bc143d8bSEvan Quan 		return -EOPNOTSUPP;
1706bc143d8bSEvan Quan 
1707bc143d8bSEvan Quan 	return smu_get_ecc_info(&adev->smu, umc_ecc);
1708bc143d8bSEvan Quan }
1709*79c65f3fSEvan Quan 
1710*79c65f3fSEvan Quan struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
1711*79c65f3fSEvan Quan 						     uint32_t idx)
1712*79c65f3fSEvan Quan {
1713*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1714*79c65f3fSEvan Quan 
1715*79c65f3fSEvan Quan 	if (!pp_funcs->get_vce_clock_state)
1716*79c65f3fSEvan Quan 		return NULL;
1717*79c65f3fSEvan Quan 
1718*79c65f3fSEvan Quan 	return pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
1719*79c65f3fSEvan Quan 					     idx);
1720*79c65f3fSEvan Quan }
1721*79c65f3fSEvan Quan 
1722*79c65f3fSEvan Quan void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
1723*79c65f3fSEvan Quan 					enum amd_pm_state_type *state)
1724*79c65f3fSEvan Quan {
1725*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1726*79c65f3fSEvan Quan 
1727*79c65f3fSEvan Quan 	if (!pp_funcs->get_current_power_state) {
1728*79c65f3fSEvan Quan 		*state = adev->pm.dpm.user_state;
1729*79c65f3fSEvan Quan 		return;
1730*79c65f3fSEvan Quan 	}
1731*79c65f3fSEvan Quan 
1732*79c65f3fSEvan Quan 	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
1733*79c65f3fSEvan Quan 	if (*state < POWER_STATE_TYPE_DEFAULT ||
1734*79c65f3fSEvan Quan 	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
1735*79c65f3fSEvan Quan 		*state = adev->pm.dpm.user_state;
1736*79c65f3fSEvan Quan }
1737*79c65f3fSEvan Quan 
1738*79c65f3fSEvan Quan void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
1739*79c65f3fSEvan Quan 				enum amd_pm_state_type state)
1740*79c65f3fSEvan Quan {
1741*79c65f3fSEvan Quan 	adev->pm.dpm.user_state = state;
1742*79c65f3fSEvan Quan 
1743*79c65f3fSEvan Quan 	if (is_support_sw_smu(adev))
1744*79c65f3fSEvan Quan 		return;
1745*79c65f3fSEvan Quan 
1746*79c65f3fSEvan Quan 	if (amdgpu_dpm_dispatch_task(adev,
1747*79c65f3fSEvan Quan 				     AMD_PP_TASK_ENABLE_USER_STATE,
1748*79c65f3fSEvan Quan 				     &state) == -EOPNOTSUPP)
1749*79c65f3fSEvan Quan 		amdgpu_pm_compute_clocks(adev);
1750*79c65f3fSEvan Quan }
1751*79c65f3fSEvan Quan 
1752*79c65f3fSEvan Quan enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
1753*79c65f3fSEvan Quan {
1754*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1755*79c65f3fSEvan Quan 	enum amd_dpm_forced_level level;
1756*79c65f3fSEvan Quan 
1757*79c65f3fSEvan Quan 	if (pp_funcs->get_performance_level)
1758*79c65f3fSEvan Quan 		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
1759*79c65f3fSEvan Quan 	else
1760*79c65f3fSEvan Quan 		level = adev->pm.dpm.forced_level;
1761*79c65f3fSEvan Quan 
1762*79c65f3fSEvan Quan 	return level;
1763*79c65f3fSEvan Quan }
1764*79c65f3fSEvan Quan 
1765*79c65f3fSEvan Quan int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
1766*79c65f3fSEvan Quan 				       enum amd_dpm_forced_level level)
1767*79c65f3fSEvan Quan {
1768*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1769*79c65f3fSEvan Quan 
1770*79c65f3fSEvan Quan 	if (pp_funcs->force_performance_level) {
1771*79c65f3fSEvan Quan 		if (adev->pm.dpm.thermal_active)
1772*79c65f3fSEvan Quan 			return -EINVAL;
1773*79c65f3fSEvan Quan 
1774*79c65f3fSEvan Quan 		if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
1775*79c65f3fSEvan Quan 						      level))
1776*79c65f3fSEvan Quan 			return -EINVAL;
1777*79c65f3fSEvan Quan 
1778*79c65f3fSEvan Quan 		adev->pm.dpm.forced_level = level;
1779*79c65f3fSEvan Quan 	}
1780*79c65f3fSEvan Quan 
1781*79c65f3fSEvan Quan 	return 0;
1782*79c65f3fSEvan Quan }
1783*79c65f3fSEvan Quan 
1784*79c65f3fSEvan Quan int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
1785*79c65f3fSEvan Quan 				 struct pp_states_info *states)
1786*79c65f3fSEvan Quan {
1787*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1788*79c65f3fSEvan Quan 
1789*79c65f3fSEvan Quan 	if (!pp_funcs->get_pp_num_states)
1790*79c65f3fSEvan Quan 		return -EOPNOTSUPP;
1791*79c65f3fSEvan Quan 
1792*79c65f3fSEvan Quan 	return pp_funcs->get_pp_num_states(adev->powerplay.pp_handle, states);
1793*79c65f3fSEvan Quan }
1794*79c65f3fSEvan Quan 
1795*79c65f3fSEvan Quan int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
1796*79c65f3fSEvan Quan 			      enum amd_pp_task task_id,
1797*79c65f3fSEvan Quan 			      enum amd_pm_state_type *user_state)
1798*79c65f3fSEvan Quan {
1799*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1800*79c65f3fSEvan Quan 
1801*79c65f3fSEvan Quan 	if (!pp_funcs->dispatch_tasks)
1802*79c65f3fSEvan Quan 		return -EOPNOTSUPP;
1803*79c65f3fSEvan Quan 
1804*79c65f3fSEvan Quan 	return pp_funcs->dispatch_tasks(adev->powerplay.pp_handle, task_id, user_state);
1805*79c65f3fSEvan Quan }
1806*79c65f3fSEvan Quan 
1807*79c65f3fSEvan Quan int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
1808*79c65f3fSEvan Quan {
1809*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1810*79c65f3fSEvan Quan 
1811*79c65f3fSEvan Quan 	if (!pp_funcs->get_pp_table)
1812*79c65f3fSEvan Quan 		return 0;
1813*79c65f3fSEvan Quan 
1814*79c65f3fSEvan Quan 	return pp_funcs->get_pp_table(adev->powerplay.pp_handle, table);
1815*79c65f3fSEvan Quan }
1816*79c65f3fSEvan Quan 
1817*79c65f3fSEvan Quan int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
1818*79c65f3fSEvan Quan 				      uint32_t type,
1819*79c65f3fSEvan Quan 				      long *input,
1820*79c65f3fSEvan Quan 				      uint32_t size)
1821*79c65f3fSEvan Quan {
1822*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1823*79c65f3fSEvan Quan 
1824*79c65f3fSEvan Quan 	if (!pp_funcs->set_fine_grain_clk_vol)
1825*79c65f3fSEvan Quan 		return 0;
1826*79c65f3fSEvan Quan 
1827*79c65f3fSEvan Quan 	return pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
1828*79c65f3fSEvan Quan 						type,
1829*79c65f3fSEvan Quan 						input,
1830*79c65f3fSEvan Quan 						size);
1831*79c65f3fSEvan Quan }
1832*79c65f3fSEvan Quan 
1833*79c65f3fSEvan Quan int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
1834*79c65f3fSEvan Quan 				  uint32_t type,
1835*79c65f3fSEvan Quan 				  long *input,
1836*79c65f3fSEvan Quan 				  uint32_t size)
1837*79c65f3fSEvan Quan {
1838*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1839*79c65f3fSEvan Quan 
1840*79c65f3fSEvan Quan 	if (!pp_funcs->odn_edit_dpm_table)
1841*79c65f3fSEvan Quan 		return 0;
1842*79c65f3fSEvan Quan 
1843*79c65f3fSEvan Quan 	return pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
1844*79c65f3fSEvan Quan 					    type,
1845*79c65f3fSEvan Quan 					    input,
1846*79c65f3fSEvan Quan 					    size);
1847*79c65f3fSEvan Quan }
1848*79c65f3fSEvan Quan 
1849*79c65f3fSEvan Quan int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
1850*79c65f3fSEvan Quan 				  enum pp_clock_type type,
1851*79c65f3fSEvan Quan 				  char *buf)
1852*79c65f3fSEvan Quan {
1853*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1854*79c65f3fSEvan Quan 
1855*79c65f3fSEvan Quan 	if (!pp_funcs->print_clock_levels)
1856*79c65f3fSEvan Quan 		return 0;
1857*79c65f3fSEvan Quan 
1858*79c65f3fSEvan Quan 	return pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
1859*79c65f3fSEvan Quan 					    type,
1860*79c65f3fSEvan Quan 					    buf);
1861*79c65f3fSEvan Quan }
1862*79c65f3fSEvan Quan 
1863*79c65f3fSEvan Quan int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
1864*79c65f3fSEvan Quan 				    uint64_t ppfeature_masks)
1865*79c65f3fSEvan Quan {
1866*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1867*79c65f3fSEvan Quan 
1868*79c65f3fSEvan Quan 	if (!pp_funcs->set_ppfeature_status)
1869*79c65f3fSEvan Quan 		return 0;
1870*79c65f3fSEvan Quan 
1871*79c65f3fSEvan Quan 	return pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
1872*79c65f3fSEvan Quan 					      ppfeature_masks);
1873*79c65f3fSEvan Quan }
1874*79c65f3fSEvan Quan 
1875*79c65f3fSEvan Quan int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
1876*79c65f3fSEvan Quan {
1877*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1878*79c65f3fSEvan Quan 
1879*79c65f3fSEvan Quan 	if (!pp_funcs->get_ppfeature_status)
1880*79c65f3fSEvan Quan 		return 0;
1881*79c65f3fSEvan Quan 
1882*79c65f3fSEvan Quan 	return pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
1883*79c65f3fSEvan Quan 					      buf);
1884*79c65f3fSEvan Quan }
1885*79c65f3fSEvan Quan 
1886*79c65f3fSEvan Quan int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
1887*79c65f3fSEvan Quan 				 enum pp_clock_type type,
1888*79c65f3fSEvan Quan 				 uint32_t mask)
1889*79c65f3fSEvan Quan {
1890*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1891*79c65f3fSEvan Quan 
1892*79c65f3fSEvan Quan 	if (!pp_funcs->force_clock_level)
1893*79c65f3fSEvan Quan 		return 0;
1894*79c65f3fSEvan Quan 
1895*79c65f3fSEvan Quan 	return pp_funcs->force_clock_level(adev->powerplay.pp_handle,
1896*79c65f3fSEvan Quan 					   type,
1897*79c65f3fSEvan Quan 					   mask);
1898*79c65f3fSEvan Quan }
1899*79c65f3fSEvan Quan 
1900*79c65f3fSEvan Quan int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
1901*79c65f3fSEvan Quan {
1902*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1903*79c65f3fSEvan Quan 
1904*79c65f3fSEvan Quan 	if (!pp_funcs->get_sclk_od)
1905*79c65f3fSEvan Quan 		return 0;
1906*79c65f3fSEvan Quan 
1907*79c65f3fSEvan Quan 	return pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
1908*79c65f3fSEvan Quan }
1909*79c65f3fSEvan Quan 
1910*79c65f3fSEvan Quan int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
1911*79c65f3fSEvan Quan {
1912*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1913*79c65f3fSEvan Quan 
1914*79c65f3fSEvan Quan 	if (is_support_sw_smu(adev))
1915*79c65f3fSEvan Quan 		return 0;
1916*79c65f3fSEvan Quan 
1917*79c65f3fSEvan Quan 	if (pp_funcs->set_sclk_od)
1918*79c65f3fSEvan Quan 		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
1919*79c65f3fSEvan Quan 
1920*79c65f3fSEvan Quan 	if (amdgpu_dpm_dispatch_task(adev,
1921*79c65f3fSEvan Quan 				     AMD_PP_TASK_READJUST_POWER_STATE,
1922*79c65f3fSEvan Quan 				     NULL) == -EOPNOTSUPP) {
1923*79c65f3fSEvan Quan 		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1924*79c65f3fSEvan Quan 		amdgpu_pm_compute_clocks(adev);
1925*79c65f3fSEvan Quan 	}
1926*79c65f3fSEvan Quan 
1927*79c65f3fSEvan Quan 	return 0;
1928*79c65f3fSEvan Quan }
1929*79c65f3fSEvan Quan 
1930*79c65f3fSEvan Quan int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
1931*79c65f3fSEvan Quan {
1932*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1933*79c65f3fSEvan Quan 
1934*79c65f3fSEvan Quan 	if (!pp_funcs->get_mclk_od)
1935*79c65f3fSEvan Quan 		return 0;
1936*79c65f3fSEvan Quan 
1937*79c65f3fSEvan Quan 	return pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
1938*79c65f3fSEvan Quan }
1939*79c65f3fSEvan Quan 
1940*79c65f3fSEvan Quan int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
1941*79c65f3fSEvan Quan {
1942*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1943*79c65f3fSEvan Quan 
1944*79c65f3fSEvan Quan 	if (is_support_sw_smu(adev))
1945*79c65f3fSEvan Quan 		return 0;
1946*79c65f3fSEvan Quan 
1947*79c65f3fSEvan Quan 	if (pp_funcs->set_mclk_od)
1948*79c65f3fSEvan Quan 		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
1949*79c65f3fSEvan Quan 
1950*79c65f3fSEvan Quan 	if (amdgpu_dpm_dispatch_task(adev,
1951*79c65f3fSEvan Quan 				     AMD_PP_TASK_READJUST_POWER_STATE,
1952*79c65f3fSEvan Quan 				     NULL) == -EOPNOTSUPP) {
1953*79c65f3fSEvan Quan 		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1954*79c65f3fSEvan Quan 		amdgpu_pm_compute_clocks(adev);
1955*79c65f3fSEvan Quan 	}
1956*79c65f3fSEvan Quan 
1957*79c65f3fSEvan Quan 	return 0;
1958*79c65f3fSEvan Quan }
1959*79c65f3fSEvan Quan 
1960*79c65f3fSEvan Quan int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
1961*79c65f3fSEvan Quan 				      char *buf)
1962*79c65f3fSEvan Quan {
1963*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1964*79c65f3fSEvan Quan 
1965*79c65f3fSEvan Quan 	if (!pp_funcs->get_power_profile_mode)
1966*79c65f3fSEvan Quan 		return -EOPNOTSUPP;
1967*79c65f3fSEvan Quan 
1968*79c65f3fSEvan Quan 	return pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
1969*79c65f3fSEvan Quan 						buf);
1970*79c65f3fSEvan Quan }
1971*79c65f3fSEvan Quan 
1972*79c65f3fSEvan Quan int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
1973*79c65f3fSEvan Quan 				      long *input, uint32_t size)
1974*79c65f3fSEvan Quan {
1975*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1976*79c65f3fSEvan Quan 
1977*79c65f3fSEvan Quan 	if (!pp_funcs->set_power_profile_mode)
1978*79c65f3fSEvan Quan 		return 0;
1979*79c65f3fSEvan Quan 
1980*79c65f3fSEvan Quan 	return pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
1981*79c65f3fSEvan Quan 						input,
1982*79c65f3fSEvan Quan 						size);
1983*79c65f3fSEvan Quan }
1984*79c65f3fSEvan Quan 
1985*79c65f3fSEvan Quan int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
1986*79c65f3fSEvan Quan {
1987*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1988*79c65f3fSEvan Quan 
1989*79c65f3fSEvan Quan 	if (!pp_funcs->get_gpu_metrics)
1990*79c65f3fSEvan Quan 		return 0;
1991*79c65f3fSEvan Quan 
1992*79c65f3fSEvan Quan 	return pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle, table);
1993*79c65f3fSEvan Quan }
1994*79c65f3fSEvan Quan 
1995*79c65f3fSEvan Quan int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
1996*79c65f3fSEvan Quan 				    uint32_t *fan_mode)
1997*79c65f3fSEvan Quan {
1998*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1999*79c65f3fSEvan Quan 
2000*79c65f3fSEvan Quan 	if (!pp_funcs->get_fan_control_mode)
2001*79c65f3fSEvan Quan 		return -EOPNOTSUPP;
2002*79c65f3fSEvan Quan 
2003*79c65f3fSEvan Quan 	*fan_mode = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle);
2004*79c65f3fSEvan Quan 
2005*79c65f3fSEvan Quan 	return 0;
2006*79c65f3fSEvan Quan }
2007*79c65f3fSEvan Quan 
2008*79c65f3fSEvan Quan int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
2009*79c65f3fSEvan Quan 				 uint32_t speed)
2010*79c65f3fSEvan Quan {
2011*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
2012*79c65f3fSEvan Quan 
2013*79c65f3fSEvan Quan 	if (!pp_funcs->set_fan_speed_pwm)
2014*79c65f3fSEvan Quan 		return -EINVAL;
2015*79c65f3fSEvan Quan 
2016*79c65f3fSEvan Quan 	return pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle, speed);
2017*79c65f3fSEvan Quan }
2018*79c65f3fSEvan Quan 
2019*79c65f3fSEvan Quan int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
2020*79c65f3fSEvan Quan 				 uint32_t *speed)
2021*79c65f3fSEvan Quan {
2022*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
2023*79c65f3fSEvan Quan 
2024*79c65f3fSEvan Quan 	if (!pp_funcs->get_fan_speed_pwm)
2025*79c65f3fSEvan Quan 		return -EINVAL;
2026*79c65f3fSEvan Quan 
2027*79c65f3fSEvan Quan 	return pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle, speed);
2028*79c65f3fSEvan Quan }
2029*79c65f3fSEvan Quan 
2030*79c65f3fSEvan Quan int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
2031*79c65f3fSEvan Quan 				 uint32_t *speed)
2032*79c65f3fSEvan Quan {
2033*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
2034*79c65f3fSEvan Quan 
2035*79c65f3fSEvan Quan 	if (!pp_funcs->get_fan_speed_rpm)
2036*79c65f3fSEvan Quan 		return -EINVAL;
2037*79c65f3fSEvan Quan 
2038*79c65f3fSEvan Quan 	return pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle, speed);
2039*79c65f3fSEvan Quan }
2040*79c65f3fSEvan Quan 
2041*79c65f3fSEvan Quan int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
2042*79c65f3fSEvan Quan 				 uint32_t speed)
2043*79c65f3fSEvan Quan {
2044*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
2045*79c65f3fSEvan Quan 
2046*79c65f3fSEvan Quan 	if (!pp_funcs->set_fan_speed_rpm)
2047*79c65f3fSEvan Quan 		return -EINVAL;
2048*79c65f3fSEvan Quan 
2049*79c65f3fSEvan Quan 	return pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle, speed);
2050*79c65f3fSEvan Quan }
2051*79c65f3fSEvan Quan 
2052*79c65f3fSEvan Quan int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
2053*79c65f3fSEvan Quan 				    uint32_t mode)
2054*79c65f3fSEvan Quan {
2055*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
2056*79c65f3fSEvan Quan 
2057*79c65f3fSEvan Quan 	if (!pp_funcs->set_fan_control_mode)
2058*79c65f3fSEvan Quan 		return -EOPNOTSUPP;
2059*79c65f3fSEvan Quan 
2060*79c65f3fSEvan Quan 	pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle, mode);
2061*79c65f3fSEvan Quan 
2062*79c65f3fSEvan Quan 	return 0;
2063*79c65f3fSEvan Quan }
2064*79c65f3fSEvan Quan 
2065*79c65f3fSEvan Quan int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
2066*79c65f3fSEvan Quan 			       uint32_t *limit,
2067*79c65f3fSEvan Quan 			       enum pp_power_limit_level pp_limit_level,
2068*79c65f3fSEvan Quan 			       enum pp_power_type power_type)
2069*79c65f3fSEvan Quan {
2070*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
2071*79c65f3fSEvan Quan 
2072*79c65f3fSEvan Quan 	if (!pp_funcs->get_power_limit)
2073*79c65f3fSEvan Quan 		return -ENODATA;
2074*79c65f3fSEvan Quan 
2075*79c65f3fSEvan Quan 	return pp_funcs->get_power_limit(adev->powerplay.pp_handle,
2076*79c65f3fSEvan Quan 					 limit,
2077*79c65f3fSEvan Quan 					 pp_limit_level,
2078*79c65f3fSEvan Quan 					 power_type);
2079*79c65f3fSEvan Quan }
2080*79c65f3fSEvan Quan 
2081*79c65f3fSEvan Quan int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
2082*79c65f3fSEvan Quan 			       uint32_t limit)
2083*79c65f3fSEvan Quan {
2084*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
2085*79c65f3fSEvan Quan 
2086*79c65f3fSEvan Quan 	if (!pp_funcs->set_power_limit)
2087*79c65f3fSEvan Quan 		return -EINVAL;
2088*79c65f3fSEvan Quan 
2089*79c65f3fSEvan Quan 	return pp_funcs->set_power_limit(adev->powerplay.pp_handle, limit);
2090*79c65f3fSEvan Quan }
2091*79c65f3fSEvan Quan 
2092*79c65f3fSEvan Quan int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
2093*79c65f3fSEvan Quan {
2094*79c65f3fSEvan Quan 	if (!is_support_sw_smu(adev))
2095*79c65f3fSEvan Quan 		return false;
2096*79c65f3fSEvan Quan 
2097*79c65f3fSEvan Quan 	return is_support_cclk_dpm(adev);
2098*79c65f3fSEvan Quan }
2099*79c65f3fSEvan Quan 
2100*79c65f3fSEvan Quan int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
2101*79c65f3fSEvan Quan 						       struct seq_file *m)
2102*79c65f3fSEvan Quan {
2103*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
2104*79c65f3fSEvan Quan 
2105*79c65f3fSEvan Quan 	if (!pp_funcs->debugfs_print_current_performance_level)
2106*79c65f3fSEvan Quan 		return -EOPNOTSUPP;
2107*79c65f3fSEvan Quan 
2108*79c65f3fSEvan Quan 	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
2109*79c65f3fSEvan Quan 							  m);
2110*79c65f3fSEvan Quan 
2111*79c65f3fSEvan Quan 	return 0;
2112*79c65f3fSEvan Quan }
2113*79c65f3fSEvan Quan 
2114*79c65f3fSEvan Quan int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
2115*79c65f3fSEvan Quan 				       void **addr,
2116*79c65f3fSEvan Quan 				       size_t *size)
2117*79c65f3fSEvan Quan {
2118*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
2119*79c65f3fSEvan Quan 
2120*79c65f3fSEvan Quan 	if (!pp_funcs->get_smu_prv_buf_details)
2121*79c65f3fSEvan Quan 		return -ENOSYS;
2122*79c65f3fSEvan Quan 
2123*79c65f3fSEvan Quan 	return pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
2124*79c65f3fSEvan Quan 						 addr,
2125*79c65f3fSEvan Quan 						 size);
2126*79c65f3fSEvan Quan }
2127*79c65f3fSEvan Quan 
2128*79c65f3fSEvan Quan int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
2129*79c65f3fSEvan Quan {
2130*79c65f3fSEvan Quan 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
2131*79c65f3fSEvan Quan 
2132*79c65f3fSEvan Quan 	if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
2133*79c65f3fSEvan Quan 	    (is_support_sw_smu(adev) && adev->smu.is_apu) ||
2134*79c65f3fSEvan Quan 		(!is_support_sw_smu(adev) && hwmgr->od_enabled))
2135*79c65f3fSEvan Quan 		return true;
2136*79c65f3fSEvan Quan 
2137*79c65f3fSEvan Quan 	return false;
2138*79c65f3fSEvan Quan }
2139*79c65f3fSEvan Quan 
2140*79c65f3fSEvan Quan int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
2141*79c65f3fSEvan Quan 			    const char *buf,
2142*79c65f3fSEvan Quan 			    size_t size)
2143*79c65f3fSEvan Quan {
2144*79c65f3fSEvan Quan 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
2145*79c65f3fSEvan Quan 
2146*79c65f3fSEvan Quan 	if (!pp_funcs->set_pp_table)
2147*79c65f3fSEvan Quan 		return -EOPNOTSUPP;
2148*79c65f3fSEvan Quan 
2149*79c65f3fSEvan Quan 	return pp_funcs->set_pp_table(adev->powerplay.pp_handle,
2150*79c65f3fSEvan Quan 				      buf,
2151*79c65f3fSEvan Quan 				      size);
2152*79c65f3fSEvan Quan }
2153*79c65f3fSEvan Quan 
2154*79c65f3fSEvan Quan int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
2155*79c65f3fSEvan Quan {
2156*79c65f3fSEvan Quan 	return adev->smu.cpu_core_num;
2157*79c65f3fSEvan Quan }
2158*79c65f3fSEvan Quan 
2159*79c65f3fSEvan Quan void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
2160*79c65f3fSEvan Quan {
2161*79c65f3fSEvan Quan 	if (!is_support_sw_smu(adev))
2162*79c65f3fSEvan Quan 		return;
2163*79c65f3fSEvan Quan 
2164*79c65f3fSEvan Quan 	amdgpu_smu_stb_debug_fs_init(adev);
2165*79c65f3fSEvan Quan }
2166