xref: /openbmc/linux/drivers/gpu/drm/amd/pm/amdgpu_dpm.c (revision ebfc2533)
1 /*
2  * Copyright 2011 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Alex Deucher
23  */
24 
25 #include "amdgpu.h"
26 #include "amdgpu_atombios.h"
27 #include "amdgpu_i2c.h"
28 #include "amdgpu_dpm.h"
29 #include "atom.h"
30 #include "amd_pcie.h"
31 #include "amdgpu_display.h"
32 #include "hwmgr.h"
33 #include <linux/power_supply.h>
34 #include "amdgpu_smu.h"
35 
/* Forward bidirectional application power management (BAPM) enablement to
 * the powerplay backend.  Callers must verify pp_funcs->enable_bapm is
 * non-NULL first (see amdgpu_pm_acpi_event_handler below). */
#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
38 
39 int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
40 {
41 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
42 
43 	return pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
44 }
45 
46 int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
47 {
48 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
49 
50 	return pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
51 }
52 
/*
 * amdgpu_dpm_set_powergating_by_smu - gate/ungate one IP block via the SMU
 * @adev: amdgpu device
 * @block_type: AMD_IP_BLOCK_TYPE_* identifier of the block to transition
 * @gate: true to power-gate the block, false to ungate it
 *
 * Skips the backend call when the cached state in adev->pm.pwr_state[]
 * already matches the request.  Returns 0 on success or no-op, otherwise
 * the backend's error code.  Note only UVD/VCE take adev->pm.mutex here;
 * see the inline comment for why.
 */
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	/* fast path: nothing to do if the block is already in the target state */
	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
				block_type, gate ? "gate" : "ungate");
		return 0;
	}

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
		if (pp_funcs && pp_funcs->set_powergating_by_smu) {
			/*
			 * TODO: need a better lock mechanism
			 *
			 * Here adev->pm.mutex lock protection is enforced on
			 * UVD and VCE cases only. Since for other cases, there
			 * may be already lock protection in amdgpu_pm.c.
			 * This is a quick fix for the deadlock issue below.
			 *     NFO: task ocltst:2028 blocked for more than 120 seconds.
			 *     Tainted: G           OE     5.0.0-37-generic #40~18.04.1-Ubuntu
			 *     echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
			 *     cltst          D    0  2028   2026 0x00000000
			 *     all Trace:
			 *     __schedule+0x2c0/0x870
			 *     schedule+0x2c/0x70
			 *     schedule_preempt_disabled+0xe/0x10
			 *     __mutex_lock.isra.9+0x26d/0x4e0
			 *     __mutex_lock_slowpath+0x13/0x20
			 *     ? __mutex_lock_slowpath+0x13/0x20
			 *     mutex_lock+0x2f/0x40
			 *     amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
			 *     gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
			 *     gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
			 *     amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
			 *     pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
			 *     amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
			 */
			mutex_lock(&adev->pm.mutex);
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
			mutex_unlock(&adev->pm.mutex);
		}
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		/* these paths rely on locking done by the callers in amdgpu_pm.c */
		if (pp_funcs && pp_funcs->set_powergating_by_smu) {
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		}
		break;
	default:
		break;
	}

	/* only refresh the cached state when the backend call succeeded */
	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	return ret;
}
121 
122 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
123 {
124 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
125 	void *pp_handle = adev->powerplay.pp_handle;
126 	int ret = 0;
127 
128 	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
129 		return -ENOENT;
130 
131 	/* enter BACO state */
132 	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
133 
134 	return ret;
135 }
136 
137 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
138 {
139 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
140 	void *pp_handle = adev->powerplay.pp_handle;
141 	int ret = 0;
142 
143 	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
144 		return -ENOENT;
145 
146 	/* exit BACO state */
147 	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
148 
149 	return ret;
150 }
151 
152 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
153 			     enum pp_mp1_state mp1_state)
154 {
155 	int ret = 0;
156 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
157 
158 	if (pp_funcs && pp_funcs->set_mp1_state) {
159 		ret = pp_funcs->set_mp1_state(
160 				adev->powerplay.pp_handle,
161 				mp1_state);
162 	}
163 
164 	return ret;
165 }
166 
167 bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
168 {
169 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
170 	void *pp_handle = adev->powerplay.pp_handle;
171 	bool baco_cap;
172 
173 	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
174 		return false;
175 
176 	if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
177 		return false;
178 
179 	return baco_cap;
180 }
181 
182 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
183 {
184 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
185 	void *pp_handle = adev->powerplay.pp_handle;
186 
187 	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
188 		return -ENOENT;
189 
190 	return pp_funcs->asic_reset_mode_2(pp_handle);
191 }
192 
193 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
194 {
195 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
196 	void *pp_handle = adev->powerplay.pp_handle;
197 	int ret = 0;
198 
199 	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
200 		return -ENOENT;
201 
202 	/* enter BACO state */
203 	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
204 	if (ret)
205 		return ret;
206 
207 	/* exit BACO state */
208 	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
209 	if (ret)
210 		return ret;
211 
212 	return 0;
213 }
214 
215 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
216 {
217 	struct smu_context *smu = adev->powerplay.pp_handle;
218 
219 	if (is_support_sw_smu(adev))
220 		return smu_mode1_reset_is_support(smu);
221 
222 	return false;
223 }
224 
225 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
226 {
227 	struct smu_context *smu = adev->powerplay.pp_handle;
228 
229 	if (is_support_sw_smu(adev))
230 		return smu_mode1_reset(smu);
231 
232 	return -EOPNOTSUPP;
233 }
234 
235 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
236 				    enum PP_SMC_POWER_PROFILE type,
237 				    bool en)
238 {
239 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
240 	int ret = 0;
241 
242 	if (amdgpu_sriov_vf(adev))
243 		return 0;
244 
245 	if (pp_funcs && pp_funcs->switch_power_profile)
246 		ret = pp_funcs->switch_power_profile(
247 			adev->powerplay.pp_handle, type, en);
248 
249 	return ret;
250 }
251 
252 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
253 			       uint32_t pstate)
254 {
255 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
256 	int ret = 0;
257 
258 	if (pp_funcs && pp_funcs->set_xgmi_pstate)
259 		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
260 								pstate);
261 
262 	return ret;
263 }
264 
265 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
266 			     uint32_t cstate)
267 {
268 	int ret = 0;
269 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
270 	void *pp_handle = adev->powerplay.pp_handle;
271 
272 	if (pp_funcs && pp_funcs->set_df_cstate)
273 		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
274 
275 	return ret;
276 }
277 
278 int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
279 {
280 	struct smu_context *smu = adev->powerplay.pp_handle;
281 
282 	if (is_support_sw_smu(adev))
283 		return smu_allow_xgmi_power_down(smu, en);
284 
285 	return 0;
286 }
287 
288 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
289 {
290 	void *pp_handle = adev->powerplay.pp_handle;
291 	const struct amd_pm_funcs *pp_funcs =
292 			adev->powerplay.pp_funcs;
293 	int ret = 0;
294 
295 	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
296 		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
297 
298 	return ret;
299 }
300 
301 int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
302 				      uint32_t msg_id)
303 {
304 	void *pp_handle = adev->powerplay.pp_handle;
305 	const struct amd_pm_funcs *pp_funcs =
306 			adev->powerplay.pp_funcs;
307 	int ret = 0;
308 
309 	if (pp_funcs && pp_funcs->set_clockgating_by_smu)
310 		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
311 						       msg_id);
312 
313 	return ret;
314 }
315 
316 int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
317 				  bool acquire)
318 {
319 	void *pp_handle = adev->powerplay.pp_handle;
320 	const struct amd_pm_funcs *pp_funcs =
321 			adev->powerplay.pp_funcs;
322 	int ret = -EOPNOTSUPP;
323 
324 	if (pp_funcs && pp_funcs->smu_i2c_bus_access)
325 		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
326 						   acquire);
327 
328 	return ret;
329 }
330 
331 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
332 {
333 	if (adev->pm.dpm_enabled) {
334 		mutex_lock(&adev->pm.mutex);
335 		if (power_supply_is_system_supplied() > 0)
336 			adev->pm.ac_power = true;
337 		else
338 			adev->pm.ac_power = false;
339 		if (adev->powerplay.pp_funcs &&
340 		    adev->powerplay.pp_funcs->enable_bapm)
341 			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
342 		mutex_unlock(&adev->pm.mutex);
343 
344 		if (is_support_sw_smu(adev))
345 			smu_set_ac_dc(adev->powerplay.pp_handle);
346 	}
347 }
348 
349 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
350 			   void *data, uint32_t *size)
351 {
352 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
353 	int ret = 0;
354 
355 	if (!data || !size)
356 		return -EINVAL;
357 
358 	if (pp_funcs && pp_funcs->read_sensor)
359 		ret = pp_funcs->read_sensor((adev)->powerplay.pp_handle,
360 								    sensor, data, size);
361 	else
362 		ret = -EINVAL;
363 
364 	return ret;
365 }
366 
367 void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
368 {
369 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
370 
371 	if (!adev->pm.dpm_enabled)
372 		return;
373 
374 	if (!pp_funcs->pm_compute_clocks)
375 		return;
376 
377 	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
378 }
379 
380 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
381 {
382 	int ret = 0;
383 
384 	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
385 	if (ret)
386 		DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
387 			  enable ? "enable" : "disable", ret);
388 }
389 
390 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
391 {
392 	int ret = 0;
393 
394 	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
395 	if (ret)
396 		DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
397 			  enable ? "enable" : "disable", ret);
398 }
399 
400 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
401 {
402 	int ret = 0;
403 
404 	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
405 	if (ret)
406 		DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
407 			  enable ? "enable" : "disable", ret);
408 }
409 
410 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
411 {
412 	int r;
413 
414 	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
415 		r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
416 		if (r) {
417 			pr_err("smu firmware loading failed\n");
418 			return r;
419 		}
420 
421 		if (smu_version)
422 			*smu_version = adev->pm.fw_version;
423 	}
424 
425 	return 0;
426 }
427 
428 int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
429 {
430 	return smu_handle_passthrough_sbr(adev->powerplay.pp_handle, enable);
431 }
432 
433 int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
434 {
435 	struct smu_context *smu = adev->powerplay.pp_handle;
436 
437 	return smu_send_hbm_bad_pages_num(smu, size);
438 }
439 
440 int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
441 				  enum pp_clock_type type,
442 				  uint32_t *min,
443 				  uint32_t *max)
444 {
445 	if (!is_support_sw_smu(adev))
446 		return -EOPNOTSUPP;
447 
448 	switch (type) {
449 	case PP_SCLK:
450 		return smu_get_dpm_freq_range(adev->powerplay.pp_handle, SMU_SCLK, min, max);
451 	default:
452 		return -EINVAL;
453 	}
454 }
455 
456 int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
457 				   enum pp_clock_type type,
458 				   uint32_t min,
459 				   uint32_t max)
460 {
461 	struct smu_context *smu = adev->powerplay.pp_handle;
462 
463 	if (!is_support_sw_smu(adev))
464 		return -EOPNOTSUPP;
465 
466 	switch (type) {
467 	case PP_SCLK:
468 		return smu_set_soft_freq_range(smu, SMU_SCLK, min, max);
469 	default:
470 		return -EINVAL;
471 	}
472 }
473 
474 int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
475 {
476 	struct smu_context *smu = adev->powerplay.pp_handle;
477 
478 	if (!is_support_sw_smu(adev))
479 		return 0;
480 
481 	return smu_write_watermarks_table(smu);
482 }
483 
484 int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
485 			      enum smu_event_type event,
486 			      uint64_t event_arg)
487 {
488 	struct smu_context *smu = adev->powerplay.pp_handle;
489 
490 	if (!is_support_sw_smu(adev))
491 		return -EOPNOTSUPP;
492 
493 	return smu_wait_for_event(smu, event, event_arg);
494 }
495 
496 int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
497 {
498 	struct smu_context *smu = adev->powerplay.pp_handle;
499 
500 	if (!is_support_sw_smu(adev))
501 		return -EOPNOTSUPP;
502 
503 	return smu_get_status_gfxoff(smu, value);
504 }
505 
/* Return the accumulated thermal-throttle interrupt count.
 * NOTE(review): casts pp_handle to struct smu_context without an
 * is_support_sw_smu() check — callers presumably only use this on SW SMU
 * parts; confirm against callers. */
uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	return atomic64_read(&smu->throttle_int_counter);
}
512 
/**
 * amdgpu_dpm_gfx_state_change - Handle gfx power state change set
 * @adev: amdgpu_device pointer
 * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry)
 *
 * Notifies the powerplay backend of the transition under adev->pm.mutex;
 * a missing gfx_state_change_set callback is silently ignored.
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}
528 
529 int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
530 			    void *umc_ecc)
531 {
532 	struct smu_context *smu = adev->powerplay.pp_handle;
533 
534 	if (!is_support_sw_smu(adev))
535 		return -EOPNOTSUPP;
536 
537 	return smu_get_ecc_info(smu, umc_ecc);
538 }
539 
540 struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
541 						     uint32_t idx)
542 {
543 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
544 
545 	if (!pp_funcs->get_vce_clock_state)
546 		return NULL;
547 
548 	return pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
549 					     idx);
550 }
551 
552 void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
553 					enum amd_pm_state_type *state)
554 {
555 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
556 
557 	if (!pp_funcs->get_current_power_state) {
558 		*state = adev->pm.dpm.user_state;
559 		return;
560 	}
561 
562 	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
563 	if (*state < POWER_STATE_TYPE_DEFAULT ||
564 	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
565 		*state = adev->pm.dpm.user_state;
566 }
567 
568 void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
569 				enum amd_pm_state_type state)
570 {
571 	adev->pm.dpm.user_state = state;
572 
573 	if (is_support_sw_smu(adev))
574 		return;
575 
576 	if (amdgpu_dpm_dispatch_task(adev,
577 				     AMD_PP_TASK_ENABLE_USER_STATE,
578 				     &state) == -EOPNOTSUPP)
579 		amdgpu_dpm_compute_clocks(adev);
580 }
581 
582 enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
583 {
584 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
585 	enum amd_dpm_forced_level level;
586 
587 	if (pp_funcs->get_performance_level)
588 		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
589 	else
590 		level = adev->pm.dpm.forced_level;
591 
592 	return level;
593 }
594 
595 int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
596 				       enum amd_dpm_forced_level level)
597 {
598 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
599 
600 	if (pp_funcs->force_performance_level) {
601 		if (adev->pm.dpm.thermal_active)
602 			return -EINVAL;
603 
604 		if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
605 						      level))
606 			return -EINVAL;
607 
608 		adev->pm.dpm.forced_level = level;
609 	}
610 
611 	return 0;
612 }
613 
614 int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
615 				 struct pp_states_info *states)
616 {
617 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
618 
619 	if (!pp_funcs->get_pp_num_states)
620 		return -EOPNOTSUPP;
621 
622 	return pp_funcs->get_pp_num_states(adev->powerplay.pp_handle, states);
623 }
624 
625 int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
626 			      enum amd_pp_task task_id,
627 			      enum amd_pm_state_type *user_state)
628 {
629 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
630 
631 	if (!pp_funcs->dispatch_tasks)
632 		return -EOPNOTSUPP;
633 
634 	return pp_funcs->dispatch_tasks(adev->powerplay.pp_handle, task_id, user_state);
635 }
636 
637 int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
638 {
639 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
640 
641 	if (!pp_funcs->get_pp_table)
642 		return 0;
643 
644 	return pp_funcs->get_pp_table(adev->powerplay.pp_handle, table);
645 }
646 
647 int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
648 				      uint32_t type,
649 				      long *input,
650 				      uint32_t size)
651 {
652 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
653 
654 	if (!pp_funcs->set_fine_grain_clk_vol)
655 		return 0;
656 
657 	return pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
658 						type,
659 						input,
660 						size);
661 }
662 
663 int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
664 				  uint32_t type,
665 				  long *input,
666 				  uint32_t size)
667 {
668 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
669 
670 	if (!pp_funcs->odn_edit_dpm_table)
671 		return 0;
672 
673 	return pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
674 					    type,
675 					    input,
676 					    size);
677 }
678 
679 int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
680 				  enum pp_clock_type type,
681 				  char *buf)
682 {
683 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
684 
685 	if (!pp_funcs->print_clock_levels)
686 		return 0;
687 
688 	return pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
689 					    type,
690 					    buf);
691 }
692 
693 int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
694 				    uint64_t ppfeature_masks)
695 {
696 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
697 
698 	if (!pp_funcs->set_ppfeature_status)
699 		return 0;
700 
701 	return pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
702 					      ppfeature_masks);
703 }
704 
705 int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
706 {
707 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
708 
709 	if (!pp_funcs->get_ppfeature_status)
710 		return 0;
711 
712 	return pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
713 					      buf);
714 }
715 
716 int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
717 				 enum pp_clock_type type,
718 				 uint32_t mask)
719 {
720 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
721 
722 	if (!pp_funcs->force_clock_level)
723 		return 0;
724 
725 	return pp_funcs->force_clock_level(adev->powerplay.pp_handle,
726 					   type,
727 					   mask);
728 }
729 
730 int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
731 {
732 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
733 
734 	if (!pp_funcs->get_sclk_od)
735 		return 0;
736 
737 	return pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
738 }
739 
740 int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
741 {
742 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
743 
744 	if (is_support_sw_smu(adev))
745 		return 0;
746 
747 	if (pp_funcs->set_sclk_od)
748 		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
749 
750 	if (amdgpu_dpm_dispatch_task(adev,
751 				     AMD_PP_TASK_READJUST_POWER_STATE,
752 				     NULL) == -EOPNOTSUPP) {
753 		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
754 		amdgpu_dpm_compute_clocks(adev);
755 	}
756 
757 	return 0;
758 }
759 
760 int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
761 {
762 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
763 
764 	if (!pp_funcs->get_mclk_od)
765 		return 0;
766 
767 	return pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
768 }
769 
770 int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
771 {
772 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
773 
774 	if (is_support_sw_smu(adev))
775 		return 0;
776 
777 	if (pp_funcs->set_mclk_od)
778 		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
779 
780 	if (amdgpu_dpm_dispatch_task(adev,
781 				     AMD_PP_TASK_READJUST_POWER_STATE,
782 				     NULL) == -EOPNOTSUPP) {
783 		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
784 		amdgpu_dpm_compute_clocks(adev);
785 	}
786 
787 	return 0;
788 }
789 
790 int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
791 				      char *buf)
792 {
793 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
794 
795 	if (!pp_funcs->get_power_profile_mode)
796 		return -EOPNOTSUPP;
797 
798 	return pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
799 						buf);
800 }
801 
802 int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
803 				      long *input, uint32_t size)
804 {
805 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
806 
807 	if (!pp_funcs->set_power_profile_mode)
808 		return 0;
809 
810 	return pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
811 						input,
812 						size);
813 }
814 
815 int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
816 {
817 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
818 
819 	if (!pp_funcs->get_gpu_metrics)
820 		return 0;
821 
822 	return pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle, table);
823 }
824 
825 int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
826 				    uint32_t *fan_mode)
827 {
828 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
829 
830 	if (!pp_funcs->get_fan_control_mode)
831 		return -EOPNOTSUPP;
832 
833 	*fan_mode = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle);
834 
835 	return 0;
836 }
837 
838 int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
839 				 uint32_t speed)
840 {
841 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
842 
843 	if (!pp_funcs->set_fan_speed_pwm)
844 		return -EINVAL;
845 
846 	return pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle, speed);
847 }
848 
849 int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
850 				 uint32_t *speed)
851 {
852 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
853 
854 	if (!pp_funcs->get_fan_speed_pwm)
855 		return -EINVAL;
856 
857 	return pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle, speed);
858 }
859 
860 int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
861 				 uint32_t *speed)
862 {
863 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
864 
865 	if (!pp_funcs->get_fan_speed_rpm)
866 		return -EINVAL;
867 
868 	return pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle, speed);
869 }
870 
871 int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
872 				 uint32_t speed)
873 {
874 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
875 
876 	if (!pp_funcs->set_fan_speed_rpm)
877 		return -EINVAL;
878 
879 	return pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle, speed);
880 }
881 
882 int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
883 				    uint32_t mode)
884 {
885 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
886 
887 	if (!pp_funcs->set_fan_control_mode)
888 		return -EOPNOTSUPP;
889 
890 	pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle, mode);
891 
892 	return 0;
893 }
894 
895 int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
896 			       uint32_t *limit,
897 			       enum pp_power_limit_level pp_limit_level,
898 			       enum pp_power_type power_type)
899 {
900 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
901 
902 	if (!pp_funcs->get_power_limit)
903 		return -ENODATA;
904 
905 	return pp_funcs->get_power_limit(adev->powerplay.pp_handle,
906 					 limit,
907 					 pp_limit_level,
908 					 power_type);
909 }
910 
911 int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
912 			       uint32_t limit)
913 {
914 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
915 
916 	if (!pp_funcs->set_power_limit)
917 		return -EINVAL;
918 
919 	return pp_funcs->set_power_limit(adev->powerplay.pp_handle, limit);
920 }
921 
/* CPU-clock (cclk) dpm only exists on SW SMU platforms. */
int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	return is_support_sw_smu(adev) && is_support_cclk_dpm(adev);
}
929 
930 int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
931 						       struct seq_file *m)
932 {
933 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
934 
935 	if (!pp_funcs->debugfs_print_current_performance_level)
936 		return -EOPNOTSUPP;
937 
938 	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
939 							  m);
940 
941 	return 0;
942 }
943 
944 int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
945 				       void **addr,
946 				       size_t *size)
947 {
948 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
949 
950 	if (!pp_funcs->get_smu_prv_buf_details)
951 		return -ENOSYS;
952 
953 	return pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
954 						 addr,
955 						 size);
956 }
957 
958 int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
959 {
960 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
961 	struct smu_context *smu = adev->powerplay.pp_handle;
962 
963 	if ((is_support_sw_smu(adev) && smu->od_enabled) ||
964 	    (is_support_sw_smu(adev) && smu->is_apu) ||
965 		(!is_support_sw_smu(adev) && hwmgr->od_enabled))
966 		return true;
967 
968 	return false;
969 }
970 
971 int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
972 			    const char *buf,
973 			    size_t size)
974 {
975 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
976 
977 	if (!pp_funcs->set_pp_table)
978 		return -EOPNOTSUPP;
979 
980 	return pp_funcs->set_pp_table(adev->powerplay.pp_handle,
981 				      buf,
982 				      size);
983 }
984 
/* Return the CPU core count known to the SMU.
 * NOTE(review): casts pp_handle to struct smu_context without an
 * is_support_sw_smu() check — presumably only called on APU/SW-SMU
 * parts; confirm against callers. */
int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	return smu->cpu_core_num;
}
991 
/* Create the smart-trace-buffer debugfs entries; SW SMU parts only. */
void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev))
		amdgpu_smu_stb_debug_fs_init(adev);
}
999 
1000 int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
1001 					    const struct amd_pp_display_configuration *input)
1002 {
1003 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1004 
1005 	if (!pp_funcs->display_configuration_change)
1006 		return 0;
1007 
1008 	return pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
1009 						      input);
1010 }
1011 
1012 int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
1013 				 enum amd_pp_clock_type type,
1014 				 struct amd_pp_clocks *clocks)
1015 {
1016 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1017 
1018 	if (!pp_funcs->get_clock_by_type)
1019 		return 0;
1020 
1021 	return pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
1022 					   type,
1023 					   clocks);
1024 }
1025 
1026 int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
1027 						struct amd_pp_simple_clock_info *clocks)
1028 {
1029 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1030 
1031 	if (!pp_funcs->get_display_mode_validation_clocks)
1032 		return 0;
1033 
1034 	return pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
1035 							    clocks);
1036 }
1037 
1038 int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
1039 					      enum amd_pp_clock_type type,
1040 					      struct pp_clock_levels_with_latency *clocks)
1041 {
1042 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1043 
1044 	if (!pp_funcs->get_clock_by_type_with_latency)
1045 		return 0;
1046 
1047 	return pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
1048 							type,
1049 							clocks);
1050 }
1051 
1052 int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
1053 					      enum amd_pp_clock_type type,
1054 					      struct pp_clock_levels_with_voltage *clocks)
1055 {
1056 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1057 
1058 	if (!pp_funcs->get_clock_by_type_with_voltage)
1059 		return 0;
1060 
1061 	return pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
1062 							type,
1063 							clocks);
1064 }
1065 
1066 int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
1067 					       void *clock_ranges)
1068 {
1069 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1070 
1071 	if (!pp_funcs->set_watermarks_for_clocks_ranges)
1072 		return -EOPNOTSUPP;
1073 
1074 	return pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
1075 							  clock_ranges);
1076 }
1077 
1078 int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
1079 					     struct pp_display_clock_request *clock)
1080 {
1081 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1082 
1083 	if (!pp_funcs->display_clock_voltage_request)
1084 		return -EOPNOTSUPP;
1085 
1086 	return pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
1087 						       clock);
1088 }
1089 
1090 int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
1091 				  struct amd_pp_clock_info *clocks)
1092 {
1093 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1094 
1095 	if (!pp_funcs->get_current_clocks)
1096 		return -EOPNOTSUPP;
1097 
1098 	return pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
1099 					    clocks);
1100 }
1101 
1102 void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
1103 {
1104 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1105 
1106 	if (!pp_funcs->notify_smu_enable_pwe)
1107 		return;
1108 
1109 	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
1110 }
1111 
1112 int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
1113 					uint32_t count)
1114 {
1115 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1116 
1117 	if (!pp_funcs->set_active_display_count)
1118 		return -EOPNOTSUPP;
1119 
1120 	return pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
1121 						  count);
1122 }
1123 
1124 int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
1125 					  uint32_t clock)
1126 {
1127 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1128 
1129 	if (!pp_funcs->set_min_deep_sleep_dcefclk)
1130 		return -EOPNOTSUPP;
1131 
1132 	return pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
1133 						    clock);
1134 }
1135 
1136 void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
1137 					     uint32_t clock)
1138 {
1139 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1140 
1141 	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
1142 		return;
1143 
1144 	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
1145 					       clock);
1146 }
1147 
1148 void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
1149 					  uint32_t clock)
1150 {
1151 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1152 
1153 	if (!pp_funcs->set_hard_min_fclk_by_freq)
1154 		return;
1155 
1156 	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
1157 					    clock);
1158 }
1159 
1160 int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
1161 						   bool disable_memory_clock_switch)
1162 {
1163 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1164 
1165 	if (!pp_funcs->display_disable_memory_clock_switch)
1166 		return 0;
1167 
1168 	return pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
1169 							     disable_memory_clock_switch);
1170 }
1171 
1172 int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
1173 						struct pp_smu_nv_clock_table *max_clocks)
1174 {
1175 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1176 
1177 	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
1178 		return -EOPNOTSUPP;
1179 
1180 	return pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
1181 							  max_clocks);
1182 }
1183 
1184 enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
1185 						  unsigned int *clock_values_in_khz,
1186 						  unsigned int *num_states)
1187 {
1188 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1189 
1190 	if (!pp_funcs->get_uclk_dpm_states)
1191 		return -EOPNOTSUPP;
1192 
1193 	return pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
1194 					     clock_values_in_khz,
1195 					     num_states);
1196 }
1197 
1198 int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
1199 				   struct dpm_clocks *clock_table)
1200 {
1201 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1202 
1203 	if (!pp_funcs->get_dpm_clock_table)
1204 		return -EOPNOTSUPP;
1205 
1206 	return pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
1207 					     clock_table);
1208 }
1209