xref: /openbmc/linux/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c (revision d699090510c3223641a23834b4710e2d4309a6ad)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "pp_debug.h"
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/gfp.h>
27 #include <linux/slab.h>
28 #include <linux/firmware.h>
29 #include <linux/reboot.h>
30 #include "amd_shared.h"
31 #include "amd_powerplay.h"
32 #include "power_state.h"
33 #include "amdgpu.h"
34 #include "hwmgr.h"
35 #include "amdgpu_dpm_internal.h"
36 #include "amdgpu_display.h"
37 
38 static const struct amd_pm_funcs pp_dpm_funcs;
39 
amd_powerplay_create(struct amdgpu_device * adev)40 static int amd_powerplay_create(struct amdgpu_device *adev)
41 {
42 	struct pp_hwmgr *hwmgr;
43 
44 	if (adev == NULL)
45 		return -EINVAL;
46 
47 	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
48 	if (hwmgr == NULL)
49 		return -ENOMEM;
50 
51 	hwmgr->adev = adev;
52 	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
53 	hwmgr->device = amdgpu_cgs_create_device(adev);
54 	if (!hwmgr->device) {
55 		kfree(hwmgr);
56 		return -ENOMEM;
57 	}
58 
59 	mutex_init(&hwmgr->msg_lock);
60 	hwmgr->chip_family = adev->family;
61 	hwmgr->chip_id = adev->asic_type;
62 	hwmgr->feature_mask = adev->pm.pp_feature;
63 	hwmgr->display_config = &adev->pm.pm_display_cfg;
64 	adev->powerplay.pp_handle = hwmgr;
65 	adev->powerplay.pp_funcs = &pp_dpm_funcs;
66 	return 0;
67 }
68 
69 
amd_powerplay_destroy(struct amdgpu_device * adev)70 static void amd_powerplay_destroy(struct amdgpu_device *adev)
71 {
72 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
73 
74 	mutex_destroy(&hwmgr->msg_lock);
75 
76 	kfree(hwmgr->hardcode_pp_table);
77 	hwmgr->hardcode_pp_table = NULL;
78 
79 	kfree(hwmgr);
80 	hwmgr = NULL;
81 }
82 
pp_early_init(void * handle)83 static int pp_early_init(void *handle)
84 {
85 	int ret;
86 	struct amdgpu_device *adev = handle;
87 
88 	ret = amd_powerplay_create(adev);
89 
90 	if (ret != 0)
91 		return ret;
92 
93 	ret = hwmgr_early_init(adev->powerplay.pp_handle);
94 	if (ret)
95 		return -EINVAL;
96 
97 	return 0;
98 }
99 
/*
 * Delayed-work handler for SW CTF (software critical temperature fault):
 * after the enforced delay, re-read the GPU temperature and, if it is still
 * at or above the SW CTF threshold, perform a graceful system shutdown.
 */
static void pp_swctf_delayed_work_handler(struct work_struct *work)
{
	struct pp_hwmgr *hwmgr =
		container_of(work, struct pp_hwmgr, swctf_delayed_work.work);
	struct amdgpu_device *adev = hwmgr->adev;
	struct amdgpu_dpm_thermal *range =
				&adev->pm.dpm.thermal;
	uint32_t gpu_temperature, size = sizeof(gpu_temperature);
	int ret;

	/*
	 * If the hotspot/edge temperature is confirmed as below SW CTF setting point
	 * after the delay enforced, nothing will be done.
	 * Otherwise, a graceful shutdown will be performed to prevent further damage.
	 */
	if (range->sw_ctf_threshold &&
	    hwmgr->hwmgr_func->read_sensor) {
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
						     AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
						     &gpu_temperature,
						     &size);
		/*
		 * For some legacy ASICs, hotspot temperature retrieving might be not
		 * supported. Check the edge temperature instead then.
		 */
		if (ret == -EOPNOTSUPP)
			ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
							     AMDGPU_PP_SENSOR_EDGE_TEMP,
							     &gpu_temperature,
							     &size);
		/* /1000 scales the sensor value to the threshold's unit
		 * (presumably millidegrees C -> degrees C — TODO confirm). */
		if (!ret && gpu_temperature / 1000 < range->sw_ctf_threshold)
			return;
	}

	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
	orderly_poweroff(true);
}
138 
pp_sw_init(void * handle)139 static int pp_sw_init(void *handle)
140 {
141 	struct amdgpu_device *adev = handle;
142 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
143 	int ret = 0;
144 
145 	ret = hwmgr_sw_init(hwmgr);
146 
147 	pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
148 
149 	if (!ret)
150 		INIT_DELAYED_WORK(&hwmgr->swctf_delayed_work,
151 				  pp_swctf_delayed_work_handler);
152 
153 	return ret;
154 }
155 
pp_sw_fini(void * handle)156 static int pp_sw_fini(void *handle)
157 {
158 	struct amdgpu_device *adev = handle;
159 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
160 
161 	hwmgr_sw_fini(hwmgr);
162 
163 	amdgpu_ucode_release(&adev->pm.fw);
164 
165 	return 0;
166 }
167 
pp_hw_init(void * handle)168 static int pp_hw_init(void *handle)
169 {
170 	int ret = 0;
171 	struct amdgpu_device *adev = handle;
172 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
173 
174 	ret = hwmgr_hw_init(hwmgr);
175 
176 	if (ret)
177 		pr_err("powerplay hw init failed\n");
178 
179 	return ret;
180 }
181 
pp_hw_fini(void * handle)182 static int pp_hw_fini(void *handle)
183 {
184 	struct amdgpu_device *adev = handle;
185 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
186 
187 	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
188 
189 	hwmgr_hw_fini(hwmgr);
190 
191 	return 0;
192 }
193 
/*
 * Reserve a GTT buffer for the SMU's private use and report its CPU and GPU
 * addresses to the backend via notify_cac_buffer_info. Best-effort: on
 * failure the buffer is freed and an error logged, but init continues.
 */
static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
{
	int r = -EINVAL;
	void *cpu_ptr = NULL;
	uint64_t gpu_addr;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
						PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
						&adev->pm.smu_prv_buffer,
						&gpu_addr,
						&cpu_ptr)) {
		DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
		return;
	}

	/* r stays -EINVAL when the backend has no notify hook, so the
	 * buffer is released below in that case as well. */
	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
					lower_32_bits((unsigned long)cpu_ptr),
					upper_32_bits((unsigned long)cpu_ptr),
					lower_32_bits(gpu_addr),
					upper_32_bits(gpu_addr),
					adev->pm.smu_prv_buffer_size);

	if (r) {
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
		adev->pm.smu_prv_buffer = NULL;
		DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
	}
}
224 
pp_late_init(void * handle)225 static int pp_late_init(void *handle)
226 {
227 	struct amdgpu_device *adev = handle;
228 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
229 
230 	if (hwmgr && hwmgr->pm_en)
231 		hwmgr_handle_task(hwmgr,
232 					AMD_PP_TASK_COMPLETE_INIT, NULL);
233 	if (adev->pm.smu_prv_buffer_size != 0)
234 		pp_reserve_vram_for_smu(adev);
235 
236 	return 0;
237 }
238 
pp_late_fini(void * handle)239 static void pp_late_fini(void *handle)
240 {
241 	struct amdgpu_device *adev = handle;
242 
243 	if (adev->pm.smu_prv_buffer)
244 		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
245 	amd_powerplay_destroy(adev);
246 }
247 
248 
pp_is_idle(void * handle)249 static bool pp_is_idle(void *handle)
250 {
251 	return false;
252 }
253 
/* Nothing to wait for; powerplay has no outstanding-work notion here. */
static int pp_wait_for_idle(void *handle)
{
	return 0;
}
258 
/* Soft reset is a no-op for the powerplay IP block. */
static int pp_sw_reset(void *handle)
{
	return 0;
}
263 
/* Powergating transitions are handled elsewhere; accept and ignore. */
static int pp_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}
269 
pp_suspend(void * handle)270 static int pp_suspend(void *handle)
271 {
272 	struct amdgpu_device *adev = handle;
273 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
274 
275 	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
276 
277 	return hwmgr_suspend(hwmgr);
278 }
279 
pp_resume(void * handle)280 static int pp_resume(void *handle)
281 {
282 	struct amdgpu_device *adev = handle;
283 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
284 
285 	return hwmgr_resume(hwmgr);
286 }
287 
/* Clockgating transitions are handled elsewhere; accept and ignore. */
static int pp_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}
293 
/* IP-block callbacks that powerplay registers with the amdgpu core. */
static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.wait_for_idle = pp_wait_for_idle,
	.soft_reset = pp_sw_reset,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
};
311 
/* SMC IP-block descriptor (v1.0.0) exported to the amdgpu IP framework. */
const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};
320 
321 /* This interface only be supported On Vi,
322  * because only smu7/8 can help to load gfx/sdma fw,
323  * smu need to be enabled before load other ip's fw.
324  * so call start smu to load smu7 fw and other ip's fw
325  */
pp_dpm_load_fw(void * handle)326 static int pp_dpm_load_fw(void *handle)
327 {
328 	struct pp_hwmgr *hwmgr = handle;
329 
330 	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
331 		return -EINVAL;
332 
333 	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
334 		pr_err("fw load failed\n");
335 		return -EINVAL;
336 	}
337 
338 	return 0;
339 }
340 
/* No completion handling needed once firmware loading is done. */
static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}
345 
/* Forward a clockgating message id to the hwmgr backend, if implemented. */
static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->update_clock_gatings) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}
360 
/*
 * Track entry/exit of UMD (userspace) profile pstate levels.
 *
 * On entering any profile level the current dpm level is saved so that a
 * later PROFILE_EXIT request can restore it; in that exit case *level is
 * rewritten to the saved level before the caller applies it.
 */
static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
						enum amd_dpm_forced_level *level)
{
	/* All levels that count as a UMD profile pstate. */
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
		}
	}
}
384 
pp_dpm_force_performance_level(void * handle,enum amd_dpm_forced_level level)385 static int pp_dpm_force_performance_level(void *handle,
386 					enum amd_dpm_forced_level level)
387 {
388 	struct pp_hwmgr *hwmgr = handle;
389 
390 	if (!hwmgr || !hwmgr->pm_en)
391 		return -EINVAL;
392 
393 	if (level == hwmgr->dpm_level)
394 		return 0;
395 
396 	pp_dpm_en_umd_pstate(hwmgr, &level);
397 	hwmgr->request_dpm_level = level;
398 	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
399 
400 	return 0;
401 }
402 
pp_dpm_get_performance_level(void * handle)403 static enum amd_dpm_forced_level pp_dpm_get_performance_level(
404 								void *handle)
405 {
406 	struct pp_hwmgr *hwmgr = handle;
407 
408 	if (!hwmgr || !hwmgr->pm_en)
409 		return -EINVAL;
410 
411 	return hwmgr->dpm_level;
412 }
413 
pp_dpm_get_sclk(void * handle,bool low)414 static uint32_t pp_dpm_get_sclk(void *handle, bool low)
415 {
416 	struct pp_hwmgr *hwmgr = handle;
417 
418 	if (!hwmgr || !hwmgr->pm_en)
419 		return 0;
420 
421 	if (hwmgr->hwmgr_func->get_sclk == NULL) {
422 		pr_info_ratelimited("%s was not implemented.\n", __func__);
423 		return 0;
424 	}
425 	return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
426 }
427 
pp_dpm_get_mclk(void * handle,bool low)428 static uint32_t pp_dpm_get_mclk(void *handle, bool low)
429 {
430 	struct pp_hwmgr *hwmgr = handle;
431 
432 	if (!hwmgr || !hwmgr->pm_en)
433 		return 0;
434 
435 	if (hwmgr->hwmgr_func->get_mclk == NULL) {
436 		pr_info_ratelimited("%s was not implemented.\n", __func__);
437 		return 0;
438 	}
439 	return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
440 }
441 
/* Gate/ungate the VCE block via the hwmgr backend, when implemented. */
static void pp_dpm_powergate_vce(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (!hwmgr->hwmgr_func->powergate_vce) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
}
455 
/* Gate/ungate the UVD block via the hwmgr backend, when implemented. */
static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (!hwmgr->hwmgr_func->powergate_uvd) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
}
469 
pp_dpm_dispatch_tasks(void * handle,enum amd_pp_task task_id,enum amd_pm_state_type * user_state)470 static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
471 		enum amd_pm_state_type *user_state)
472 {
473 	struct pp_hwmgr *hwmgr = handle;
474 
475 	if (!hwmgr || !hwmgr->pm_en)
476 		return -EINVAL;
477 
478 	return hwmgr_handle_task(hwmgr, task_id, user_state);
479 }
480 
/*
 * Map the UI classification of the current power state onto the generic
 * amd_pm_state_type. Returns -EINVAL (through the enum) when powerplay is
 * disabled or no current state exists.
 */
static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	struct pp_power_state *state;
	enum amd_pm_state_type pm_type;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
		return -EINVAL;

	state = hwmgr->current_ps;

	switch (state->classification.ui_label) {
	case PP_StateUILabel_Battery:
		pm_type = POWER_STATE_TYPE_BATTERY;
		break;
	case PP_StateUILabel_Balanced:
		pm_type = POWER_STATE_TYPE_BALANCED;
		break;
	case PP_StateUILabel_Performance:
		pm_type = POWER_STATE_TYPE_PERFORMANCE;
		break;
	default:
		/* Unlabelled states: distinguish the boot state from others. */
		if (state->classification.flags & PP_StateClassificationFlag_Boot)
			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
		else
			pm_type = POWER_STATE_TYPE_DEFAULT;
		break;
	}

	return pm_type;
}
512 
/* Select a fan control mode; U32_MAX is reserved as "invalid". */
static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_fan_control_mode)
		return -EOPNOTSUPP;

	if (mode == U32_MAX)
		return -EINVAL;

	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);

	return 0;
}
530 
/* Report the active fan control mode through *fan_mode. */
static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_fan_control_mode)
		return -EOPNOTSUPP;

	if (!fan_mode)
		return -EINVAL;

	*fan_mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);

	return 0;
}
547 
/* Set fan speed as a PWM duty value; U32_MAX is reserved as "invalid". */
static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	if (speed == U32_MAX)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
}
563 
/* Read the current fan PWM duty value into *speed. */
static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	if (!speed)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
}
579 
/* Read the current fan speed in RPM into *rpm. */
static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	if (!rpm)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
}
595 
/* Set a fan speed target in RPM; U32_MAX is reserved as "invalid". */
static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	if (rpm == U32_MAX)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
}
611 
/*
 * Report the number of power states and the UI class of each into 'data'.
 *
 * Power states live in one contiguous allocation starting at hwmgr->ps,
 * with a per-state stride of hwmgr->ps_size bytes.
 * NOTE(review): 'data' is written before validation — callers must pass a
 * valid pointer.
 */
static int pp_dpm_get_pp_num_states(void *handle,
		struct pp_states_info *data)
{
	struct pp_hwmgr *hwmgr = handle;
	int i;

	memset(data, 0, sizeof(*data));

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->ps)
		return -EINVAL;

	data->nums = hwmgr->num_ps;

	for (i = 0; i < hwmgr->num_ps; i++) {
		/* Manual stride walk: states are ps_size bytes apart. */
		struct pp_power_state *state = (struct pp_power_state *)
				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Battery:
			data->states[i] = POWER_STATE_TYPE_BATTERY;
			break;
		case PP_StateUILabel_Balanced:
			data->states[i] = POWER_STATE_TYPE_BALANCED;
			break;
		case PP_StateUILabel_Performance:
			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
			break;
		default:
			if (state->classification.flags & PP_StateClassificationFlag_Boot)
				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
			else
				data->states[i] = POWER_STATE_TYPE_DEFAULT;
		}
	}
	return 0;
}
647 
pp_dpm_get_pp_table(void * handle,char ** table)648 static int pp_dpm_get_pp_table(void *handle, char **table)
649 {
650 	struct pp_hwmgr *hwmgr = handle;
651 
652 	if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table)
653 		return -EINVAL;
654 
655 	*table = (char *)hwmgr->soft_pp_table;
656 	return hwmgr->soft_pp_table_size;
657 }
658 
amd_powerplay_reset(void * handle)659 static int amd_powerplay_reset(void *handle)
660 {
661 	struct pp_hwmgr *hwmgr = handle;
662 	int ret;
663 
664 	ret = hwmgr_hw_fini(hwmgr);
665 	if (ret)
666 		return ret;
667 
668 	ret = hwmgr_hw_init(hwmgr);
669 	if (ret)
670 		return ret;
671 
672 	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
673 }
674 
/*
 * Install a user-supplied (hardcoded) pp table, reset powerplay so the new
 * table takes effect, then disable AVFS where the backend supports it.
 *
 * Fix: the override buffer is allocated with soft_pp_table_size bytes, but
 * the original memcpy trusted the caller-provided 'size' — an oversized
 * input would write past the allocation. Reject such inputs up front, and
 * reject a NULL 'buf' as well.
 *
 * Returns 0 on success or a negative errno.
 */
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -ENOMEM;

	if (!hwmgr || !hwmgr->pm_en || !buf)
		return -EINVAL;

	/* Never write more than the override buffer can hold. */
	if (size > hwmgr->soft_pp_table_size)
		return -EINVAL;

	if (!hwmgr->hardcode_pp_table) {
		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
						   hwmgr->soft_pp_table_size,
						   GFP_KERNEL);
		if (!hwmgr->hardcode_pp_table)
			return ret;
	}

	memcpy(hwmgr->hardcode_pp_table, buf, size);

	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;

	ret = amd_powerplay_reset(handle);
	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->avfs_control)
		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);

	return ret;
}
704 
/* Pin a clock domain to the DPM levels in 'mask' (manual mode only). */
static int pp_dpm_force_clock_level(void *handle,
		enum pp_clock_type type, uint32_t mask)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->force_clock_level) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	/* Only honoured while the user has selected manual DPM control. */
	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
}
725 
pp_dpm_emit_clock_levels(void * handle,enum pp_clock_type type,char * buf,int * offset)726 static int pp_dpm_emit_clock_levels(void *handle,
727 				    enum pp_clock_type type,
728 				    char *buf,
729 				    int *offset)
730 {
731 	struct pp_hwmgr *hwmgr = handle;
732 
733 	if (!hwmgr || !hwmgr->pm_en)
734 		return -EOPNOTSUPP;
735 
736 	if (!hwmgr->hwmgr_func->emit_clock_levels)
737 		return -ENOENT;
738 
739 	return hwmgr->hwmgr_func->emit_clock_levels(hwmgr, type, buf, offset);
740 }
741 
pp_dpm_print_clock_levels(void * handle,enum pp_clock_type type,char * buf)742 static int pp_dpm_print_clock_levels(void *handle,
743 		enum pp_clock_type type, char *buf)
744 {
745 	struct pp_hwmgr *hwmgr = handle;
746 
747 	if (!hwmgr || !hwmgr->pm_en)
748 		return -EINVAL;
749 
750 	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
751 		pr_info_ratelimited("%s was not implemented.\n", __func__);
752 		return 0;
753 	}
754 	return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
755 }
756 
pp_dpm_get_sclk_od(void * handle)757 static int pp_dpm_get_sclk_od(void *handle)
758 {
759 	struct pp_hwmgr *hwmgr = handle;
760 
761 	if (!hwmgr || !hwmgr->pm_en)
762 		return -EINVAL;
763 
764 	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
765 		pr_info_ratelimited("%s was not implemented.\n", __func__);
766 		return 0;
767 	}
768 	return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
769 }
770 
/* Apply an engine-clock overdrive value via the backend. */
static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_sclk_od) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
}
785 
pp_dpm_get_mclk_od(void * handle)786 static int pp_dpm_get_mclk_od(void *handle)
787 {
788 	struct pp_hwmgr *hwmgr = handle;
789 
790 	if (!hwmgr || !hwmgr->pm_en)
791 		return -EINVAL;
792 
793 	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
794 		pr_info_ratelimited("%s was not implemented.\n", __func__);
795 		return 0;
796 	}
797 	return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
798 }
799 
/* Apply a memory-clock overdrive value via the backend. */
static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_mclk_od) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
}
813 
/*
 * Read one PP sensor value into 'value' ('size' is in/out, in bytes).
 *
 * A few software-maintained values are answered directly from the hwmgr;
 * everything else is forwarded to the backend's read_sensor.
 * NOTE(review): the default path assumes hwmgr_func->read_sensor is
 * non-NULL — confirm every enabled backend implements it.
 */
static int pp_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !value)
		return -EINVAL;

	switch (idx) {
	/* pstate clocks are scaled by 100 for the caller's unit. */
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk * 100;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk * 100;
		return 0;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk_peak * 100;
		return 0;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk_peak * 100;
		return 0;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
		return 0;
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
		return 0;
	default:
		return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
	}
}
845 
846 static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void * handle,unsigned idx)847 pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
848 {
849 	struct pp_hwmgr *hwmgr = handle;
850 
851 	if (!hwmgr || !hwmgr->pm_en)
852 		return NULL;
853 
854 	if (idx < hwmgr->num_vce_state_tables)
855 		return &hwmgr->vce_states[idx];
856 	return NULL;
857 }
858 
pp_get_power_profile_mode(void * handle,char * buf)859 static int pp_get_power_profile_mode(void *handle, char *buf)
860 {
861 	struct pp_hwmgr *hwmgr = handle;
862 
863 	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode)
864 		return -EOPNOTSUPP;
865 	if (!buf)
866 		return -EINVAL;
867 
868 	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
869 }
870 
/* Program a power-profile mode; only honoured in manual DPM mode. */
static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode)
		return -EOPNOTSUPP;

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("power profile setting is for manual dpm mode only.\n");
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
}
885 
/* Forward a fine-grain clock/voltage edit to the backend; no-op otherwise. */
static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_fine_grain_clk_vol)
		return 0;

	return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size);
}
898 
/* Forward an overdrive (ODN) DPM-table edit command to the backend. */
static int pp_odn_edit_dpm_table(void *handle, enum PP_OD_DPM_TABLE_COMMAND type,
				 long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->odn_edit_dpm_table) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}
914 
pp_dpm_set_mp1_state(void * handle,enum pp_mp1_state mp1_state)915 static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
916 {
917 	struct pp_hwmgr *hwmgr = handle;
918 
919 	if (!hwmgr)
920 		return -EINVAL;
921 
922 	if (!hwmgr->pm_en)
923 		return 0;
924 
925 	if (hwmgr->hwmgr_func->set_mp1_state)
926 		return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
927 
928 	return 0;
929 }
930 
/*
 * Enable/disable one of the standard power profiles (type < CUSTOM).
 *
 * Active profiles are tracked as bits in workload_mask, positioned by
 * workload_prority[]; the highest set bit after the update selects the
 * workload_setting[] entry actually programmed. The new setting is only
 * pushed to the backend when DPM is not in manual mode.
 */
static int pp_dpm_switch_power_profile(void *handle,
		enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload[1];
	uint32_t index;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	if (!en) {
		/* Drop this profile's bit; fall back to the highest remaining. */
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload[0] = hwmgr->workload_setting[index];
	} else {
		/* Set this profile's bit; the highest-priority bit wins. */
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload[0] = hwmgr->workload_setting[index];
	}

	/* Compute profile may additionally toggle power features. */
	if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
		hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
			if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en))
				return -EINVAL;
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0);

	return 0;
}
972 
/*
 * Set the sustained power limit. A limit of 0 selects the default limit.
 * The accepted maximum is the default limit, optionally raised by the
 * overdrive TDP percentage when OD is enabled.
 */
static int pp_set_power_limit(void *handle, uint32_t limit)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t max_power_limit;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (limit == 0)
		limit = hwmgr->default_power_limit;

	/* TDPODLimit is a percentage of headroom on top of the default. */
	max_power_limit = hwmgr->default_power_limit;
	if (hwmgr->od_enabled) {
		max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
		max_power_limit /= 100;
	}

	if (limit > max_power_limit)
		return -EINVAL;

	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
	hwmgr->power_limit = limit;
	return 0;
}
1002 
/* Report the current/default/max sustained power limit through *limit. */
static int pp_get_power_limit(void *handle, uint32_t *limit,
			      enum pp_power_limit_level pp_limit_level,
			      enum pp_power_type power_type)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !limit)
		return -EINVAL;

	/* Legacy powerplay only tracks the sustained power limit. */
	if (power_type != PP_PWR_TYPE_SUSTAINED)
		return -EOPNOTSUPP;

	switch (pp_limit_level) {
	case PP_PWR_LIMIT_CURRENT:
		*limit = hwmgr->power_limit;
		break;
	case PP_PWR_LIMIT_DEFAULT:
		*limit = hwmgr->default_power_limit;
		break;
	case PP_PWR_LIMIT_MAX:
		*limit = hwmgr->default_power_limit;
		/* Overdrive adds TDPODLimit percent of headroom. */
		if (hwmgr->od_enabled) {
			*limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
			*limit /= 100;
		}
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
1037 
pp_display_configuration_change(void * handle,const struct amd_pp_display_configuration * display_config)1038 static int pp_display_configuration_change(void *handle,
1039 	const struct amd_pp_display_configuration *display_config)
1040 {
1041 	struct pp_hwmgr *hwmgr = handle;
1042 
1043 	if (!hwmgr || !hwmgr->pm_en)
1044 		return -EINVAL;
1045 
1046 	phm_store_dal_configuration_data(hwmgr, display_config);
1047 	return 0;
1048 }
1049 
pp_get_display_power_level(void * handle,struct amd_pp_simple_clock_info * output)1050 static int pp_get_display_power_level(void *handle,
1051 		struct amd_pp_simple_clock_info *output)
1052 {
1053 	struct pp_hwmgr *hwmgr = handle;
1054 
1055 	if (!hwmgr || !hwmgr->pm_en || !output)
1056 		return -EINVAL;
1057 
1058 	return phm_get_dal_power_level(hwmgr, output);
1059 }
1060 
/*
 * pp_get_current_clocks - fill @clocks with the engine/memory clock ranges
 * and bus bandwidth of the currently active power state.
 *
 * Queries the hwmgr for clock info of the current power state (using the
 * power-containment designation when that platform cap is enabled), then
 * optionally refines the shallow-sleep engine clock range.
 * Returns 0 on success, -EINVAL on a disabled/NULL hwmgr or a failed
 * clock-info query.
 */
static int pp_get_current_clocks(void *handle,
		struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = { 0 };
	struct pp_clock_info hw_clocks;
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	/* DAL power level feeds max_clocks_state below; errors leave level 0 */
	phm_get_dal_power_level(hwmgr, &simple_clocks);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);

	if (ret) {
		pr_debug("Error in phm_get_clock_info \n");
		return -EINVAL;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	/* default shallow-sleep range to the full engine range */
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	/* level 0 means "no restriction" -> report the deepest DAL level */
	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	/* refine shallow-sleep clocks when the backend can report them */
	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
	return 0;
}
1108 
pp_get_clock_by_type(void * handle,enum amd_pp_clock_type type,struct amd_pp_clocks * clocks)1109 static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1110 {
1111 	struct pp_hwmgr *hwmgr = handle;
1112 
1113 	if (!hwmgr || !hwmgr->pm_en)
1114 		return -EINVAL;
1115 
1116 	if (clocks == NULL)
1117 		return -EINVAL;
1118 
1119 	return phm_get_clock_by_type(hwmgr, type, clocks);
1120 }
1121 
pp_get_clock_by_type_with_latency(void * handle,enum amd_pp_clock_type type,struct pp_clock_levels_with_latency * clocks)1122 static int pp_get_clock_by_type_with_latency(void *handle,
1123 		enum amd_pp_clock_type type,
1124 		struct pp_clock_levels_with_latency *clocks)
1125 {
1126 	struct pp_hwmgr *hwmgr = handle;
1127 
1128 	if (!hwmgr || !hwmgr->pm_en || !clocks)
1129 		return -EINVAL;
1130 
1131 	return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1132 }
1133 
pp_get_clock_by_type_with_voltage(void * handle,enum amd_pp_clock_type type,struct pp_clock_levels_with_voltage * clocks)1134 static int pp_get_clock_by_type_with_voltage(void *handle,
1135 		enum amd_pp_clock_type type,
1136 		struct pp_clock_levels_with_voltage *clocks)
1137 {
1138 	struct pp_hwmgr *hwmgr = handle;
1139 
1140 	if (!hwmgr || !hwmgr->pm_en || !clocks)
1141 		return -EINVAL;
1142 
1143 	return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
1144 }
1145 
pp_set_watermarks_for_clocks_ranges(void * handle,void * clock_ranges)1146 static int pp_set_watermarks_for_clocks_ranges(void *handle,
1147 		void *clock_ranges)
1148 {
1149 	struct pp_hwmgr *hwmgr = handle;
1150 
1151 	if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
1152 		return -EINVAL;
1153 
1154 	return phm_set_watermarks_for_clocks_ranges(hwmgr,
1155 						    clock_ranges);
1156 }
1157 
pp_display_clock_voltage_request(void * handle,struct pp_display_clock_request * clock)1158 static int pp_display_clock_voltage_request(void *handle,
1159 		struct pp_display_clock_request *clock)
1160 {
1161 	struct pp_hwmgr *hwmgr = handle;
1162 
1163 	if (!hwmgr || !hwmgr->pm_en || !clock)
1164 		return -EINVAL;
1165 
1166 	return phm_display_clock_voltage_request(hwmgr, clock);
1167 }
1168 
pp_get_display_mode_validation_clocks(void * handle,struct amd_pp_simple_clock_info * clocks)1169 static int pp_get_display_mode_validation_clocks(void *handle,
1170 		struct amd_pp_simple_clock_info *clocks)
1171 {
1172 	struct pp_hwmgr *hwmgr = handle;
1173 	int ret = 0;
1174 
1175 	if (!hwmgr || !hwmgr->pm_en || !clocks)
1176 		return -EINVAL;
1177 
1178 	clocks->level = PP_DAL_POWERLEVEL_7;
1179 
1180 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1181 		ret = phm_get_max_high_clocks(hwmgr, clocks);
1182 
1183 	return ret;
1184 }
1185 
pp_dpm_powergate_mmhub(void * handle)1186 static int pp_dpm_powergate_mmhub(void *handle)
1187 {
1188 	struct pp_hwmgr *hwmgr = handle;
1189 
1190 	if (!hwmgr || !hwmgr->pm_en)
1191 		return -EINVAL;
1192 
1193 	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
1194 		pr_info_ratelimited("%s was not implemented.\n", __func__);
1195 		return 0;
1196 	}
1197 
1198 	return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
1199 }
1200 
/* Gate or ungate the GFX block; silently succeeds when unsupported. */
static int pp_dpm_powergate_gfx(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return 0;

	if (!hwmgr->hwmgr_func->powergate_gfx) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
}
1215 
/* Gate or ungate the ACP audio block; a no-op when unsupported. */
static void pp_dpm_powergate_acp(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return;

	if (!hwmgr->hwmgr_func->powergate_acp) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
}
1230 
/*
 * Gate or ungate the SDMA engines.  Note this path deliberately does not
 * require pm_en, unlike the ACP variant above.
 */
static void pp_dpm_powergate_sdma(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL)
		return;

	if (!hwmgr->hwmgr_func->powergate_sdma) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
}
1245 
/*
 * Dispatch a powergating request from the SMU to the per-IP-block handler.
 * Unknown block types are ignored; only the GFX path propagates an error.
 */
static int pp_set_powergating_by_smu(void *handle,
				uint32_t block_type, bool gate)
{
	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		pp_dpm_powergate_uvd(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		pp_dpm_powergate_vce(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_GMC:
		/*
		 * For now, this is only used on PICASSO.
		 * And only "gate" operation is supported.
		 */
		if (gate)
			pp_dpm_powergate_mmhub(handle);
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		return pp_dpm_powergate_gfx(handle, gate);
	case AMD_IP_BLOCK_TYPE_ACP:
		pp_dpm_powergate_acp(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		pp_dpm_powergate_sdma(handle, gate);
		break;
	default:
		break;
	}

	return 0;
}
1281 
pp_notify_smu_enable_pwe(void * handle)1282 static int pp_notify_smu_enable_pwe(void *handle)
1283 {
1284 	struct pp_hwmgr *hwmgr = handle;
1285 
1286 	if (!hwmgr || !hwmgr->pm_en)
1287 		return -EINVAL;
1288 
1289 	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
1290 		pr_info_ratelimited("%s was not implemented.\n", __func__);
1291 		return -EINVAL;
1292 	}
1293 
1294 	hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
1295 
1296 	return 0;
1297 }
1298 
pp_enable_mgpu_fan_boost(void * handle)1299 static int pp_enable_mgpu_fan_boost(void *handle)
1300 {
1301 	struct pp_hwmgr *hwmgr = handle;
1302 
1303 	if (!hwmgr)
1304 		return -EINVAL;
1305 
1306 	if (!hwmgr->pm_en ||
1307 	     hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
1308 		return 0;
1309 
1310 	hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
1311 
1312 	return 0;
1313 }
1314 
/* Set the minimum DCEF clock used while in deep sleep. */
static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);

	return 0;
}
1331 
/* Apply a hard minimum DCEF clock frequency. */
static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);

	return 0;
}
1348 
/* Apply a hard minimum fabric clock frequency. */
static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_hard_min_fclk_by_freq) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);

	return 0;
}
1365 
/* Tell the hwmgr how many displays are currently active. */
static int pp_set_active_display_count(void *handle, uint32_t count)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	return phm_set_active_display_count(hwmgr, count);
}
1375 
/*
 * Query whether the ASIC supports BACO (Bus Active, Chip Off).
 *
 * Fix: the original wrote *cap before any NULL check of @cap, so a NULL
 * out-pointer would oops before the hwmgr guard was ever reached.  Check
 * @cap first; callers passing a valid pointer see identical behavior
 * (false reported when DPM is unavailable or the hook is absent).
 */
static int pp_get_asic_baco_capability(void *handle, bool *cap)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!cap)
		return -EINVAL;

	*cap = false;
	if (!hwmgr)
		return -EINVAL;

	if (!(hwmgr->not_vf && amdgpu_dpm) ||
		!hwmgr->hwmgr_func->get_asic_baco_capability)
		return 0;

	hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);

	return 0;
}
1392 
pp_get_asic_baco_state(void * handle,int * state)1393 static int pp_get_asic_baco_state(void *handle, int *state)
1394 {
1395 	struct pp_hwmgr *hwmgr = handle;
1396 
1397 	if (!hwmgr)
1398 		return -EINVAL;
1399 
1400 	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
1401 		return 0;
1402 
1403 	hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
1404 
1405 	return 0;
1406 }
1407 
pp_set_asic_baco_state(void * handle,int state)1408 static int pp_set_asic_baco_state(void *handle, int state)
1409 {
1410 	struct pp_hwmgr *hwmgr = handle;
1411 
1412 	if (!hwmgr)
1413 		return -EINVAL;
1414 
1415 	if (!(hwmgr->not_vf && amdgpu_dpm) ||
1416 		!hwmgr->hwmgr_func->set_asic_baco_state)
1417 		return 0;
1418 
1419 	hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
1420 
1421 	return 0;
1422 }
1423 
pp_get_ppfeature_status(void * handle,char * buf)1424 static int pp_get_ppfeature_status(void *handle, char *buf)
1425 {
1426 	struct pp_hwmgr *hwmgr = handle;
1427 
1428 	if (!hwmgr || !hwmgr->pm_en || !buf)
1429 		return -EINVAL;
1430 
1431 	if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
1432 		pr_info_ratelimited("%s was not implemented.\n", __func__);
1433 		return -EINVAL;
1434 	}
1435 
1436 	return hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
1437 }
1438 
/* Apply a new powerplay feature enable mask (sysfs write path). */
static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_ppfeature_status) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
}
1453 
pp_asic_reset_mode_2(void * handle)1454 static int pp_asic_reset_mode_2(void *handle)
1455 {
1456 	struct pp_hwmgr *hwmgr = handle;
1457 
1458 	if (!hwmgr || !hwmgr->pm_en)
1459 		return -EINVAL;
1460 
1461 	if (hwmgr->hwmgr_func->asic_reset == NULL) {
1462 		pr_info_ratelimited("%s was not implemented.\n", __func__);
1463 		return -EINVAL;
1464 	}
1465 
1466 	return hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
1467 }
1468 
/* Acquire or release the SMU-arbitrated I2C bus. */
static int pp_smu_i2c_bus_access(void *handle, bool acquire)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->smu_i2c_bus_access) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
}
1483 
pp_set_df_cstate(void * handle,enum pp_df_cstate state)1484 static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
1485 {
1486 	struct pp_hwmgr *hwmgr = handle;
1487 
1488 	if (!hwmgr)
1489 		return -EINVAL;
1490 
1491 	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
1492 		return 0;
1493 
1494 	hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
1495 
1496 	return 0;
1497 }
1498 
/* Request an XGMI link p-state change; silently ignored when unsupported. */
static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL)
		return -EINVAL;

	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
		return 0;

	hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);

	return 0;
}
1513 
pp_get_gpu_metrics(void * handle,void ** table)1514 static ssize_t pp_get_gpu_metrics(void *handle, void **table)
1515 {
1516 	struct pp_hwmgr *hwmgr = handle;
1517 
1518 	if (!hwmgr)
1519 		return -EINVAL;
1520 
1521 	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
1522 		return -EOPNOTSUPP;
1523 
1524 	return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
1525 }
1526 
/* Notify the backend of a GFX state change request. */
static int pp_gfx_state_change_set(void *handle, uint32_t state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->gfx_state_change) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->gfx_state_change(hwmgr, state);

	return 0;
}
1542 
/*
 * Map the SMU private buffer and report its kernel address and size.
 * When no private buffer exists, reports NULL/0 and returns 0.
 *
 * Fix: the original dereferenced hwmgr->adev in the initializer, before
 * any NULL check of the handle — every sibling entry point here guards
 * against a NULL hwmgr first.  Defer the adev load until after the guard.
 */
static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{
	struct pp_hwmgr *hwmgr = handle;
	struct amdgpu_device *adev;
	int err;

	if (!hwmgr || !addr || !size)
		return -EINVAL;

	adev = hwmgr->adev;

	*addr = NULL;
	*size = 0;
	if (adev->pm.smu_prv_buffer) {
		err = amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
		if (err)
			return err;
		*size = adev->pm.smu_prv_buffer_size;
	}

	return 0;
}
1563 
/*
 * pp_pm_compute_clocks - recompute DPM clocks after a display change.
 *
 * On the non-DC (legacy display) path, refresh the active-display data
 * and push the new display configuration to the hwmgr before dispatching
 * the display-config-change task.  On the DC path only the task dispatch
 * runs, since DC reports its configuration separately.
 */
static void pp_pm_compute_clocks(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	struct amdgpu_device *adev = hwmgr->adev;

	if (!adev->dc_enabled) {
		amdgpu_dpm_get_active_displays(adev);
		adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
		adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
		adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
		/* we have issues with mclk switching with
		 * refresh rates over 120 hz on the non-DC code.
		 */
		if (adev->pm.pm_display_cfg.vrefresh > 120)
			adev->pm.pm_display_cfg.min_vblank_time = 0;

		pp_display_configuration_change(handle,
						&adev->pm.pm_display_cfg);
	}

	/* let the hwmgr re-evaluate DPM levels for the new configuration */
	pp_dpm_dispatch_tasks(handle,
			      AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
			      NULL);
}
1588 
/*
 * Dispatch table binding the generic amd_pm_funcs interface to the
 * legacy powerplay implementations in this file.  Entries after the
 * "export to DC" marker are also consumed by the display core.
 */
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_pwm = pp_dpm_set_fan_speed_pwm,
	.get_fan_speed_pwm = pp_dpm_get_fan_speed_pwm,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.emit_clock_levels = pp_dpm_emit_clock_levels,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.set_powergating_by_smu = pp_set_powergating_by_smu,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.set_fine_grain_clk_vol = pp_set_fine_grain_clk_vol,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_mp1_state = pp_dpm_set_mp1_state,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
	.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
	.set_active_display_count = pp_set_active_display_count,
	.set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
	.set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
	.set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
	.get_asic_baco_capability = pp_get_asic_baco_capability,
	.get_asic_baco_state = pp_get_asic_baco_state,
	.set_asic_baco_state = pp_set_asic_baco_state,
	.get_ppfeature_status = pp_get_ppfeature_status,
	.set_ppfeature_status = pp_set_ppfeature_status,
	.asic_reset_mode_2 = pp_asic_reset_mode_2,
	.smu_i2c_bus_access = pp_smu_i2c_bus_access,
	.set_df_cstate = pp_set_df_cstate,
	.set_xgmi_pstate = pp_set_xgmi_pstate,
	.get_gpu_metrics = pp_get_gpu_metrics,
	.gfx_state_change_set = pp_gfx_state_change_set,
	.get_smu_prv_buf_details = pp_get_prv_buffer_details,
	.pm_compute_clocks = pp_pm_compute_clocks,
};
1656