/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

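/*
 * Convenience wrapper around the powerplay enable_bapm callback (BAPM:
 * bidirectional application power management).  Callers must verify that
 * pp_funcs and pp_funcs->enable_bapm are non-NULL before invoking this
 * macro; see amdgpu_pm_acpi_event_handler() below.
 */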
#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

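/*
 * amdgpu_dpm_get_sclk - fetch the engine (shader) clock for the lowest
 * (@low = true) or highest (@low = false) supported level.  Returns 0 when
 * the backend provides no get_sclk hook.  amdgpu_dpm_get_mclk() below is
 * the memory clock counterpart.
 */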
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

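/*
 * amdgpu_dpm_set_powergating_by_smu - gate (@gate = true) or ungate an IP
 * block through the SMU.  The cached pm.pwr_state entry is checked first so
 * requests for a state the block is already in return early; only the IP
 * blocks listed in the switch below are forwarded to the backend, and the
 * cache is updated on success.
 */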
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
		dev_dbg(adev->dev, "IP block %d already in the target %s state!\n",
			block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

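/*
 * BACO ("Bus Active, Chip Off") keeps the PCIe interface alive while the
 * rest of the GPU is powered down.  amdgpu_dpm_baco_enter() and
 * amdgpu_dpm_baco_exit() drive the two halves of that transition through
 * the same set_asic_baco_state callback (1 = enter, 0 = exit).
 */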
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->set_mp1_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	bool baco_cap;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return false;
	/* Don't use BACO for reset in S3.
	 * This is a workaround for some platforms
	 * where entering BACO during suspend
	 * seems to cause reboots or hangs.
	 * This might be related to the fact that BACO controls
	 * power to the whole GPU including devices like audio and USB.
	 * Powering down/up everything may adversely affect these other
	 * devices.  Needs more investigation.
	 */
	if (adev->in_s3)
		return false;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle,
						 &baco_cap);

	mutex_unlock(&adev->pm.mutex);

	return ret ? false : baco_cap;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

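/*
 * amdgpu_dpm_baco_reset - reset the ASIC by cycling it through BACO.  The
 * enter/exit pair is issued under a single pm.mutex hold so no other DPM
 * request can interleave with the reset.
 */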
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

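/*
 * amdgpu_dpm_switch_power_profile - request (@en = true) or drop a workload
 * power profile hint.  This is a no-op under SR-IOV, where the host owns
 * power policy.  A minimal usage sketch (hypothetical caller), bracketing a
 * burst of compute work:
 *
 *	amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_COMPUTE, true);
 *	...submit compute jobs...
 *	amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_COMPUTE, false);
 */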
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_allow_xgmi_power_down(smu, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

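/*
 * amdgpu_dpm_read_sensor - query a single amd_pp_sensors value.  @size is
 * in/out: callers pass the buffer size and the backend writes back how many
 * bytes it filled.  A minimal sketch (hypothetical caller) reading the
 * current graphics clock:
 *
 *	uint32_t sclk;
 *	uint32_t size = sizeof(sclk);
 *
 *	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
 *				    (void *)&sclk, &size))
 *		...sclk now holds the value reported by the backend...
 */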
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

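/*
 * amdgpu_dpm_compute_clocks - re-evaluate the current power state.  Display
 * bandwidth is refreshed and every active ring is drained first, so the
 * backend picks the new state against an idle GPU.  Called whenever the
 * power picture changes (display reconfiguration, UVD/VCE activation on
 * SI parts, etc.).
 */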
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int i;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
	if (ret)
		DRM_ERROR("DPM %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
	if (ret)
		DRM_ERROR("DPM %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("DPM %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

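/*
 * amdgpu_pm_load_smu_firmware - ask the backend to load the SMU firmware.
 * @smu_version is optional; on success it receives adev->pm.fw_version.
 * Backends without a load_firmware hook have nothing to load, so 0 is
 * returned in that case.
 */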
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware)
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_channel_flag(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

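/*
 * The two soft-frequency helpers below currently service PP_SCLK only,
 * mapping it to the SMU-internal SMU_SCLK domain; other clock types get
 * -EINVAL and non-SMU parts -EOPNOTSUPP.  A minimal sketch (hypothetical
 * caller) pinning sclk to its lowest supported frequency:
 *
 *	uint32_t min_clk, max_clk;
 *
 *	if (!amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min_clk, &max_clk))
 *		amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK, min_clk, min_clk);
 */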
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_soft_freq_range(smu,
				      SMU_SCLK,
				      min,
				      max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/**
 * amdgpu_dpm_gfx_state_change - Handle gfx power state change set
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}

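/*
 * amdgpu_dpm_force_performance_level - pin the ASIC to a forced performance
 * level.  The request is rejected while a thermal event is active, gfxoff
 * is toggled around manual mode on first-generation Raven as a workaround,
 * and GFX clock- and powergating are ungated/regated when crossing into or
 * out of the UMD pstate profile levels before the backend is invoked.
 */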
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (!(current_level & profile_mode_mask) &&
	    (level & profile_mode_mask)) {
		/* enter UMD Pstate */
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_UNGATE);
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_UNGATE);
	} else if ((current_level & profile_mode_mask) &&
		   !(level & profile_mode_mask)) {
		/* exit UMD Pstate */
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_GATE);
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_GATE);
	}

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

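/*
 * amdgpu_dpm_dispatch_task - forward an amd_pp_task to the backend.
 * Returns -EOPNOTSUPP when there is no dispatch_tasks hook; callers such as
 * amdgpu_dpm_set_power_state() use that to fall back to a plain
 * amdgpu_dpm_compute_clocks() pass.
 */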
int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 char *buf,
				 int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					  type,
					  buf,
					  offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

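/*
 * amdgpu_dpm_set_sclk_od - set the sclk overdrive percentage.  Ignored on
 * sw-SMU parts, which expose overdrive through other interfaces.  After the
 * value is programmed, a READJUST_POWER_STATE task is dispatched; legacy
 * backends without dispatch_tasks instead roll back to the boot power state
 * and recompute clocks.  amdgpu_dpm_set_mclk_od() below is the memory
 * clock twin.
 */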
int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

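/*
 * Fan control wrappers: the *_fan_control_mode helpers switch between
 * manual and automatic fan management, the *_fan_speed_pwm helpers operate
 * in PWM duty units and the *_fan_speed_rpm helpers in RPM.  Each returns
 * -EOPNOTSUPP when the backend lacks the corresponding hook.
 */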
int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

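/*
 * Power cap wrappers.  A minimal sketch (hypothetical caller) reading the
 * current sustained power limit and halving it:
 *
 *	uint32_t limit;
 *
 *	if (!amdgpu_dpm_get_power_limit(adev, &limit,
 *					PP_PWR_LIMIT_CURRENT,
 *					PP_PWR_TYPE_SUSTAINED))
 *		amdgpu_dpm_set_power_limit(adev, limit / 2);
 */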
int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

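/*
 * amdgpu_dpm_is_overdrive_supported - report whether overdrive controls are
 * usable.  Both possible backends hang off pp_handle, so it is cast both
 * ways and only the branch matching is_support_sw_smu() is dereferenced;
 * APUs on the sw-SMU path are treated as supporting overdrive.
 */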
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if ((is_support_sw_smu(adev) && smu->od_enabled) ||
	    (is_support_sw_smu(adev) && smu->is_apu) ||
	    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
		return true;

	return false;
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

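/*
 * The remaining wrappers service the display stack, letting DC hand display
 * configurations, watermark ranges and clock/voltage requests to the power
 * backend.  Most return 0 or -EOPNOTSUPP when the hook is absent so callers
 * can treat them as optional.
 */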
int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
						void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}