Lines Matching full:smu
60 static int smu_force_smuclk_levels(struct smu_context *smu,
63 static int smu_handle_task(struct smu_context *smu,
66 static int smu_reset(struct smu_context *smu);
71 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
77 struct smu_context *smu = handle; in smu_sys_get_pp_feature_mask() local
79 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_sys_get_pp_feature_mask()
82 return smu_get_pp_feature_mask(smu, buf); in smu_sys_get_pp_feature_mask()
88 struct smu_context *smu = handle; in smu_sys_set_pp_feature_mask() local
90 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_sys_set_pp_feature_mask()
93 return smu_set_pp_feature_mask(smu, new_mask); in smu_sys_set_pp_feature_mask()
96 int smu_set_residency_gfxoff(struct smu_context *smu, bool value) in smu_set_residency_gfxoff() argument
98 if (!smu->ppt_funcs->set_gfx_off_residency) in smu_set_residency_gfxoff()
101 return smu_set_gfx_off_residency(smu, value); in smu_set_residency_gfxoff()
104 int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value) in smu_get_residency_gfxoff() argument
106 if (!smu->ppt_funcs->get_gfx_off_residency) in smu_get_residency_gfxoff()
109 return smu_get_gfx_off_residency(smu, value); in smu_get_residency_gfxoff()
112 int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value) in smu_get_entrycount_gfxoff() argument
114 if (!smu->ppt_funcs->get_gfx_off_entrycount) in smu_get_entrycount_gfxoff()
117 return smu_get_gfx_off_entrycount(smu, value); in smu_get_entrycount_gfxoff()
120 int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value) in smu_get_status_gfxoff() argument
122 if (!smu->ppt_funcs->get_gfx_off_status) in smu_get_status_gfxoff()
125 *value = smu_get_gfx_off_status(smu); in smu_get_status_gfxoff()
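
Every wrapper above follows the same guarded-dispatch shape: the ppt_funcs hooks are optional per ASIC, so each wrapper checks for the hook before calling it and reports unsupported otherwise. A minimal sketch of the pattern, with hypothetical names (my_wrapper and do_thing stand in for any of the hooks; the exact error code varies per wrapper):

	/* Hypothetical sketch of the guarded-dispatch pattern used throughout. */
	static int my_wrapper(struct smu_context *smu, uint32_t *value)
	{
		if (!smu->ppt_funcs->do_thing)	/* hook not wired for this ASIC */
			return -EOPNOTSUPP;

		return smu->ppt_funcs->do_thing(smu, value);
	}
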
130 int smu_set_soft_freq_range(struct smu_context *smu, in smu_set_soft_freq_range() argument
137 if (smu->ppt_funcs->set_soft_freq_limited_range) in smu_set_soft_freq_range()
138 ret = smu->ppt_funcs->set_soft_freq_limited_range(smu, in smu_set_soft_freq_range()
146 int smu_get_dpm_freq_range(struct smu_context *smu, in smu_get_dpm_freq_range() argument
156 if (smu->ppt_funcs->get_dpm_ultimate_freq) in smu_get_dpm_freq_range()
157 ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu, in smu_get_dpm_freq_range()
165 int smu_set_gfx_power_up_by_imu(struct smu_context *smu) in smu_set_gfx_power_up_by_imu() argument
168 struct amdgpu_device *adev = smu->adev; in smu_set_gfx_power_up_by_imu()
170 if (smu->ppt_funcs->set_gfx_power_up_by_imu) { in smu_set_gfx_power_up_by_imu()
171 ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu); in smu_set_gfx_power_up_by_imu()
180 struct smu_context *smu = handle; in smu_get_mclk() local
184 ret = smu_get_dpm_freq_range(smu, SMU_UCLK, in smu_get_mclk()
194 struct smu_context *smu = handle; in smu_get_sclk() local
198 ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, in smu_get_sclk()
206 static int smu_set_gfx_imu_enable(struct smu_context *smu) in smu_set_gfx_imu_enable() argument
208 struct amdgpu_device *adev = smu->adev; in smu_set_gfx_imu_enable()
213 if (amdgpu_in_reset(smu->adev) || adev->in_s0ix) in smu_set_gfx_imu_enable()
216 return smu_set_gfx_power_up_by_imu(smu); in smu_set_gfx_imu_enable()
219 static int smu_dpm_set_vcn_enable(struct smu_context *smu, in smu_dpm_set_vcn_enable() argument
222 struct smu_power_context *smu_power = &smu->smu_power; in smu_dpm_set_vcn_enable()
226 if (!smu->ppt_funcs->dpm_set_vcn_enable) in smu_dpm_set_vcn_enable()
232 ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable); in smu_dpm_set_vcn_enable()
239 static int smu_dpm_set_jpeg_enable(struct smu_context *smu, in smu_dpm_set_jpeg_enable() argument
242 struct smu_power_context *smu_power = &smu->smu_power; in smu_dpm_set_jpeg_enable()
246 if (!smu->ppt_funcs->dpm_set_jpeg_enable) in smu_dpm_set_jpeg_enable()
252 ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable); in smu_dpm_set_jpeg_enable()
266 * This API uses no smu->mutex lock protection due to:
270 * In this case, the smu->mutex lock protection is already enforced on
277 struct smu_context *smu = handle; in smu_dpm_set_power_gate() local
280 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) { in smu_dpm_set_power_gate()
281 dev_WARN(smu->adev->dev, in smu_dpm_set_power_gate()
282 "SMU uninitialized but power %s requested for %u!\n", in smu_dpm_set_power_gate()
294 ret = smu_dpm_set_vcn_enable(smu, !gate); in smu_dpm_set_power_gate()
296 dev_err(smu->adev->dev, "Failed to power %s VCN!\n", in smu_dpm_set_power_gate()
300 ret = smu_gfx_off_control(smu, gate); in smu_dpm_set_power_gate()
302 dev_err(smu->adev->dev, "Failed to %s gfxoff!\n", in smu_dpm_set_power_gate()
306 ret = smu_powergate_sdma(smu, gate); in smu_dpm_set_power_gate()
308 dev_err(smu->adev->dev, "Failed to power %s SDMA!\n", in smu_dpm_set_power_gate()
312 ret = smu_dpm_set_jpeg_enable(smu, !gate); in smu_dpm_set_power_gate()
314 dev_err(smu->adev->dev, "Failed to power %s JPEG!\n", in smu_dpm_set_power_gate()
318 dev_err(smu->adev->dev, "Unsupported block type!\n"); in smu_dpm_set_power_gate()
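
Pieced together, smu_dpm_set_power_gate() is a switch over the IP block type; note the inverted flag for blocks whose hook takes an enable rather than a gate. A condensed reconstruction from the fragments above (error handling trimmed; the AMD_IP_BLOCK_TYPE_* constants are the usual amd_shared.h ones):

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		ret = smu_dpm_set_vcn_enable(smu, !gate);	/* gate == !enable */
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported block type!\n");
		ret = -EINVAL;
		break;
	}
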
328 * @smu: smu_context pointer
333 static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk) in smu_set_user_clk_dependencies() argument
335 if (smu->adev->in_suspend) in smu_set_user_clk_dependencies()
339 smu->user_dpm_profile.clk_dependency = 0; in smu_set_user_clk_dependencies()
340 smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK); in smu_set_user_clk_dependencies()
343 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK))) in smu_set_user_clk_dependencies()
346 smu->user_dpm_profile.clk_dependency = 0; in smu_set_user_clk_dependencies()
347 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK); in smu_set_user_clk_dependencies()
350 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK))) in smu_set_user_clk_dependencies()
353 smu->user_dpm_profile.clk_dependency = 0; in smu_set_user_clk_dependencies()
354 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK); in smu_set_user_clk_dependencies()
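
The bit twiddling above encodes a precedence rule: forcing MCLK marks FCLK and SOCCLK as dependent, while a later FCLK or SOCCLK force is ignored if MCLK already claimed precedence. A reconstruction of the core logic (comments are editorial):

	if (clk == SMU_MCLK) {
		/* MCLK wins: FCLK and SOCCLK will follow it on restore */
		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_FCLK) {
		/* an earlier MCLK force takes precedence over FCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_SOCCLK) {
		/* an earlier MCLK force takes precedence over SOCCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
	}

smu_restore_dpm_user_profile() then skips any clock whose bit is set in clk_dependency (the check at line 398 above).
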
363 * @smu: smu_context pointer
368 static void smu_restore_dpm_user_profile(struct smu_context *smu) in smu_restore_dpm_user_profile() argument
370 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_restore_dpm_user_profile()
373 if (!smu->adev->in_suspend) in smu_restore_dpm_user_profile()
376 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_restore_dpm_user_profile()
380 smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE; in smu_restore_dpm_user_profile()
383 if (smu->user_dpm_profile.power_limit) { in smu_restore_dpm_user_profile()
384 ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit); in smu_restore_dpm_user_profile()
386 dev_err(smu->adev->dev, "Failed to set power limit value\n"); in smu_restore_dpm_user_profile()
395 * Iterate over smu clk type and force the saved user clk in smu_restore_dpm_user_profile()
398 if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) && in smu_restore_dpm_user_profile()
399 smu->user_dpm_profile.clk_mask[clk_type]) { in smu_restore_dpm_user_profile()
400 ret = smu_force_smuclk_levels(smu, clk_type, in smu_restore_dpm_user_profile()
401 smu->user_dpm_profile.clk_mask[clk_type]); in smu_restore_dpm_user_profile()
403 dev_err(smu->adev->dev, in smu_restore_dpm_user_profile()
410 if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL || in smu_restore_dpm_user_profile()
411 smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) { in smu_restore_dpm_user_profile()
412 ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode); in smu_restore_dpm_user_profile()
414 smu->user_dpm_profile.fan_speed_pwm = 0; in smu_restore_dpm_user_profile()
415 smu->user_dpm_profile.fan_speed_rpm = 0; in smu_restore_dpm_user_profile()
416 smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO; in smu_restore_dpm_user_profile()
417 dev_err(smu->adev->dev, "Failed to set manual fan control mode\n"); in smu_restore_dpm_user_profile()
420 if (smu->user_dpm_profile.fan_speed_pwm) { in smu_restore_dpm_user_profile()
421 ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm); in smu_restore_dpm_user_profile()
423 dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n"); in smu_restore_dpm_user_profile()
426 if (smu->user_dpm_profile.fan_speed_rpm) { in smu_restore_dpm_user_profile()
427 ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm); in smu_restore_dpm_user_profile()
429 dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n"); in smu_restore_dpm_user_profile()
434 if (smu->user_dpm_profile.user_od) { in smu_restore_dpm_user_profile()
435 if (smu->ppt_funcs->restore_user_od_settings) { in smu_restore_dpm_user_profile()
436 ret = smu->ppt_funcs->restore_user_od_settings(smu); in smu_restore_dpm_user_profile()
438 dev_err(smu->adev->dev, "Failed to upload customized OD settings\n"); in smu_restore_dpm_user_profile()
443 smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE; in smu_restore_dpm_user_profile()
474 struct smu_context *smu = adev->powerplay.pp_handle; in is_support_cclk_dpm() local
476 if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT)) in is_support_cclk_dpm()
486 struct smu_context *smu = handle; in smu_sys_get_pp_table() local
487 struct smu_table_context *smu_table = &smu->smu_table; in smu_sys_get_pp_table()
489 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_sys_get_pp_table()
507 struct smu_context *smu = handle; in smu_sys_set_pp_table() local
508 struct smu_table_context *smu_table = &smu->smu_table; in smu_sys_set_pp_table()
512 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_sys_set_pp_table()
516 dev_err(smu->adev->dev, "pp table size mismatch!\n"); in smu_sys_set_pp_table()
535 smu->uploading_custom_pp_table = true; in smu_sys_set_pp_table()
537 ret = smu_reset(smu); in smu_sys_set_pp_table()
539 dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret); in smu_sys_set_pp_table()
541 smu->uploading_custom_pp_table = false; in smu_sys_set_pp_table()
546 static int smu_get_driver_allowed_feature_mask(struct smu_context *smu) in smu_get_driver_allowed_feature_mask() argument
548 struct smu_feature *feature = &smu->smu_feature; in smu_get_driver_allowed_feature_mask()
559 if (smu->adev->scpm_enabled) { in smu_get_driver_allowed_feature_mask()
566 ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask, in smu_get_driver_allowed_feature_mask()
580 struct smu_context *smu = adev->powerplay.pp_handle; in smu_set_funcs() local
583 smu->od_enabled = true; in smu_set_funcs()
589 navi10_set_ppt_funcs(smu); in smu_set_funcs()
595 sienna_cichlid_set_ppt_funcs(smu); in smu_set_funcs()
599 renoir_set_ppt_funcs(smu); in smu_set_funcs()
602 vangogh_set_ppt_funcs(smu); in smu_set_funcs()
607 yellow_carp_set_ppt_funcs(smu); in smu_set_funcs()
611 smu_v13_0_4_set_ppt_funcs(smu); in smu_set_funcs()
614 smu_v13_0_5_set_ppt_funcs(smu); in smu_set_funcs()
617 cyan_skillfish_set_ppt_funcs(smu); in smu_set_funcs()
621 arcturus_set_ppt_funcs(smu); in smu_set_funcs()
623 smu->od_enabled = false; in smu_set_funcs()
626 aldebaran_set_ppt_funcs(smu); in smu_set_funcs()
628 smu->od_enabled = true; in smu_set_funcs()
632 smu_v13_0_0_set_ppt_funcs(smu); in smu_set_funcs()
635 smu_v13_0_6_set_ppt_funcs(smu); in smu_set_funcs()
637 smu->od_enabled = true; in smu_set_funcs()
640 smu_v13_0_7_set_ppt_funcs(smu); in smu_set_funcs()
652 struct smu_context *smu; in smu_early_init() local
655 smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL); in smu_early_init()
656 if (!smu) in smu_early_init()
659 smu->adev = adev; in smu_early_init()
660 smu->pm_enabled = !!amdgpu_dpm; in smu_early_init()
661 smu->is_apu = false; in smu_early_init()
662 smu->smu_baco.state = SMU_BACO_STATE_EXIT; in smu_early_init()
663 smu->smu_baco.platform_support = false; in smu_early_init()
664 smu->user_dpm_profile.fan_mode = -1; in smu_early_init()
666 mutex_init(&smu->message_lock); in smu_early_init()
668 adev->powerplay.pp_handle = smu; in smu_early_init()
674 return smu_init_microcode(smu); in smu_early_init()
677 static int smu_set_default_dpm_table(struct smu_context *smu) in smu_set_default_dpm_table() argument
679 struct smu_power_context *smu_power = &smu->smu_power; in smu_set_default_dpm_table()
684 if (!smu->ppt_funcs->set_default_dpm_table) in smu_set_default_dpm_table()
690 ret = smu_dpm_set_vcn_enable(smu, true); in smu_set_default_dpm_table()
694 ret = smu_dpm_set_jpeg_enable(smu, true); in smu_set_default_dpm_table()
698 ret = smu->ppt_funcs->set_default_dpm_table(smu); in smu_set_default_dpm_table()
700 dev_err(smu->adev->dev, in smu_set_default_dpm_table()
703 smu_dpm_set_jpeg_enable(smu, !jpeg_gate); in smu_set_default_dpm_table()
705 smu_dpm_set_vcn_enable(smu, !vcn_gate); in smu_set_default_dpm_table()
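
smu_set_default_dpm_table() must ungate VCN and JPEG before the ASIC hook samples the default DPM tables, then put the previous gating back. A sketch of that save/enable/work/restore shape, assembled from the fragments above (the atomic reads mirror the vcn_gated/jpeg_gated counters initialized in smu_sw_init()):

	vcn_gate  = atomic_read(&smu_power->power_gate.vcn_gated);
	jpeg_gate = atomic_read(&smu_power->power_gate.jpeg_gated);

	ret = smu_dpm_set_vcn_enable(smu, true);
	if (ret)
		return ret;
	ret = smu_dpm_set_jpeg_enable(smu, true);
	if (ret)
		goto err_out;

	ret = smu->ppt_funcs->set_default_dpm_table(smu);

	smu_dpm_set_jpeg_enable(smu, !jpeg_gate);	/* restore prior state */
err_out:
	smu_dpm_set_vcn_enable(smu, !vcn_gate);
	return ret;
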
709 static int smu_apply_default_config_table_settings(struct smu_context *smu) in smu_apply_default_config_table_settings() argument
711 struct amdgpu_device *adev = smu->adev; in smu_apply_default_config_table_settings()
714 ret = smu_get_default_config_table_settings(smu, in smu_apply_default_config_table_settings()
719 return smu_set_config_table(smu, &adev->pm.config_table); in smu_apply_default_config_table_settings()
725 struct smu_context *smu = adev->powerplay.pp_handle; in smu_late_init() local
728 smu_set_fine_grain_gfx_freq_parameters(smu); in smu_late_init()
730 if (!smu->pm_enabled) in smu_late_init()
733 ret = smu_post_init(smu); in smu_late_init()
735 dev_err(adev->dev, "Failed to post smu init!\n"); in smu_late_init()
747 smu_set_ac_dc(smu); in smu_late_init()
753 if (!amdgpu_sriov_vf(adev) || smu->od_enabled) { in smu_late_init()
754 ret = smu_set_default_od_settings(smu); in smu_late_init()
761 ret = smu_populate_umd_state_clk(smu); in smu_late_init()
767 ret = smu_get_asic_power_limits(smu, in smu_late_init()
768 &smu->current_power_limit, in smu_late_init()
769 &smu->default_power_limit, in smu_late_init()
770 &smu->max_power_limit); in smu_late_init()
777 smu_get_unique_id(smu); in smu_late_init()
779 smu_get_fan_parameters(smu); in smu_late_init()
781 smu_handle_task(smu, in smu_late_init()
782 smu->smu_dpm.dpm_level, in smu_late_init()
785 ret = smu_apply_default_config_table_settings(smu); in smu_late_init()
791 smu_restore_dpm_user_profile(smu); in smu_late_init()
796 static int smu_init_fb_allocations(struct smu_context *smu) in smu_init_fb_allocations() argument
798 struct amdgpu_device *adev = smu->adev; in smu_init_fb_allocations()
799 struct smu_table_context *smu_table = &smu->smu_table; in smu_init_fb_allocations()
862 static int smu_fini_fb_allocations(struct smu_context *smu) in smu_fini_fb_allocations() argument
864 struct smu_table_context *smu_table = &smu->smu_table; in smu_fini_fb_allocations()
883 * @smu: smu_context pointer in smu_alloc_memory_pool()
890 static int smu_alloc_memory_pool(struct smu_context *smu) in smu_alloc_memory_pool() argument
892 struct amdgpu_device *adev = smu->adev; in smu_alloc_memory_pool()
893 struct smu_table_context *smu_table = &smu->smu_table; in smu_alloc_memory_pool()
895 uint64_t pool_size = smu->pool_size; in smu_alloc_memory_pool()
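
The pool itself is a buffer object sized by smu->pool_size (set from adev->pm.smu_prv_buffer_size in smu_sw_init()). A hedged sketch of how such an allocation typically looks with the generic amdgpu helper; the GTT domain and page alignment here are assumptions, not necessarily this function's actual choices:

	/* Sketch: allocate the SMU memory pool as a kernel-owned BO. */
	ret = amdgpu_bo_create_kernel(adev, pool_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &smu_table->memory_pool.bo,
				      &smu_table->memory_pool.mc_address,
				      &smu_table->memory_pool.cpu_addr);
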
927 static int smu_free_memory_pool(struct smu_context *smu) in smu_free_memory_pool() argument
929 struct smu_table_context *smu_table = &smu->smu_table; in smu_free_memory_pool()
944 static int smu_alloc_dummy_read_table(struct smu_context *smu) in smu_alloc_dummy_read_table() argument
946 struct smu_table_context *smu_table = &smu->smu_table; in smu_alloc_dummy_read_table()
949 struct amdgpu_device *adev = smu->adev; in smu_alloc_dummy_read_table()
968 static void smu_free_dummy_read_table(struct smu_context *smu) in smu_free_dummy_read_table() argument
970 struct smu_table_context *smu_table = &smu->smu_table; in smu_free_dummy_read_table()
982 static int smu_smc_table_sw_init(struct smu_context *smu) in smu_smc_table_sw_init() argument
990 ret = smu_init_smc_tables(smu); in smu_smc_table_sw_init()
992 dev_err(smu->adev->dev, "Failed to init smc tables!\n"); in smu_smc_table_sw_init()
1000 ret = smu_init_power(smu); in smu_smc_table_sw_init()
1002 dev_err(smu->adev->dev, "Failed to init smu power!\n"); in smu_smc_table_sw_init()
1009 ret = smu_init_fb_allocations(smu); in smu_smc_table_sw_init()
1013 ret = smu_alloc_memory_pool(smu); in smu_smc_table_sw_init()
1017 ret = smu_alloc_dummy_read_table(smu); in smu_smc_table_sw_init()
1021 ret = smu_i2c_init(smu); in smu_smc_table_sw_init()
1028 static int smu_smc_table_sw_fini(struct smu_context *smu) in smu_smc_table_sw_fini() argument
1032 smu_i2c_fini(smu); in smu_smc_table_sw_fini()
1034 smu_free_dummy_read_table(smu); in smu_smc_table_sw_fini()
1036 ret = smu_free_memory_pool(smu); in smu_smc_table_sw_fini()
1040 ret = smu_fini_fb_allocations(smu); in smu_smc_table_sw_fini()
1044 ret = smu_fini_power(smu); in smu_smc_table_sw_fini()
1046 dev_err(smu->adev->dev, "Failed to fini smu power!\n"); in smu_smc_table_sw_fini()
1050 ret = smu_fini_smc_tables(smu); in smu_smc_table_sw_fini()
1052 dev_err(smu->adev->dev, "Failed to fini smc tables!\n"); in smu_smc_table_sw_fini()
1061 struct smu_context *smu = container_of(work, struct smu_context, in smu_throttling_logging_work_fn() local
1064 smu_log_thermal_throttling(smu); in smu_throttling_logging_work_fn()
1069 struct smu_context *smu = container_of(work, struct smu_context, in smu_interrupt_work_fn() local
1072 if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work) in smu_interrupt_work_fn()
1073 smu->ppt_funcs->interrupt_work(smu); in smu_interrupt_work_fn()
1078 struct smu_context *smu = in smu_swctf_delayed_work_handler() local
1081 &smu->thermal_range; in smu_swctf_delayed_work_handler()
1082 struct amdgpu_device *adev = smu->adev; in smu_swctf_delayed_work_handler()
1091 smu->ppt_funcs->read_sensor && in smu_swctf_delayed_work_handler()
1092 !smu->ppt_funcs->read_sensor(smu, in smu_swctf_delayed_work_handler()
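
The SWCTF delayed work re-reads the hotspot temperature after the enforced delay and only powers off when the software CTF threshold is genuinely exceeded; a transient spike that has settled is ignored. A reconstruction from the fragments above (the dev_emerg wording is approximate):

	if (range->software_shutdown_temp &&
	    smu->ppt_funcs->read_sensor &&
	    !smu->ppt_funcs->read_sensor(smu, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
					 &hotspot_tmp, &size) &&
	    hotspot_tmp / 1000 < range->software_shutdown_temp)
		return;	/* back under the limit: nothing to do */

	dev_emerg(adev->dev, "ERROR: GPU over temperature range (SW CTF) detected!\n");
	orderly_poweroff(true);
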
1107 struct smu_context *smu = adev->powerplay.pp_handle; in smu_sw_init() local
1110 smu->pool_size = adev->pm.smu_prv_buffer_size; in smu_sw_init()
1111 smu->smu_feature.feature_num = SMU_FEATURE_MAX; in smu_sw_init()
1112 bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX); in smu_sw_init()
1113 bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX); in smu_sw_init()
1115 INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn); in smu_sw_init()
1116 INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn); in smu_sw_init()
1117 atomic64_set(&smu->throttle_int_counter, 0); in smu_sw_init()
1118 smu->watermarks_bitmap = 0; in smu_sw_init()
1119 smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; in smu_sw_init()
1120 smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; in smu_sw_init()
1122 atomic_set(&smu->smu_power.power_gate.vcn_gated, 1); in smu_sw_init()
1123 atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); in smu_sw_init()
1125 smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; in smu_sw_init()
1126 smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; in smu_sw_init()
1127 smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; in smu_sw_init()
1128 smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; in smu_sw_init()
1129 smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3; in smu_sw_init()
1130 smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; in smu_sw_init()
1131 smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; in smu_sw_init()
1132 smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; in smu_sw_init()
1134 smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; in smu_sw_init()
1135 smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; in smu_sw_init()
1136 smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING; in smu_sw_init()
1137 smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO; in smu_sw_init()
1138 smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR; in smu_sw_init()
1139 smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE; in smu_sw_init()
1140 smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM; in smu_sw_init()
1141 smu->display_config = &adev->pm.pm_display_cfg; in smu_sw_init()
1143 smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; in smu_sw_init()
1144 smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; in smu_sw_init()
1146 INIT_DELAYED_WORK(&smu->swctf_delayed_work, in smu_sw_init()
1149 ret = smu_smc_table_sw_init(smu); in smu_sw_init()
1156 ret = smu_get_vbios_bootup_values(smu); in smu_sw_init()
1162 ret = smu_init_pptable_microcode(smu); in smu_sw_init()
1168 ret = smu_register_irq_handler(smu); in smu_sw_init()
1175 if (!smu->ppt_funcs->get_fan_control_mode) in smu_sw_init()
1176 smu->adev->pm.no_fan = true; in smu_sw_init()
1184 struct smu_context *smu = adev->powerplay.pp_handle; in smu_sw_fini() local
1187 ret = smu_smc_table_sw_fini(smu); in smu_sw_fini()
1193 smu_fini_microcode(smu); in smu_sw_fini()
1198 static int smu_get_thermal_temperature_range(struct smu_context *smu) in smu_get_thermal_temperature_range() argument
1200 struct amdgpu_device *adev = smu->adev; in smu_get_thermal_temperature_range()
1202 &smu->thermal_range; in smu_get_thermal_temperature_range()
1205 if (!smu->ppt_funcs->get_thermal_temperature_range) in smu_get_thermal_temperature_range()
1208 ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range); in smu_get_thermal_temperature_range()
1225 static int smu_smc_hw_setup(struct smu_context *smu) in smu_smc_hw_setup() argument
1227 struct smu_feature *feature = &smu->smu_feature; in smu_smc_hw_setup()
1228 struct amdgpu_device *adev = smu->adev; in smu_smc_hw_setup()
1238 if (adev->in_suspend && smu_is_dpm_running(smu)) { in smu_smc_hw_setup()
1240 ret = smu_system_features_control(smu, true); in smu_smc_hw_setup()
1250 ret = smu_init_display_count(smu, 0); in smu_smc_hw_setup()
1256 ret = smu_set_driver_table_location(smu); in smu_smc_hw_setup()
1265 ret = smu_set_tool_table_location(smu); in smu_smc_hw_setup()
1275 ret = smu_notify_memory_pool_location(smu); in smu_smc_hw_setup()
1287 ret = smu_setup_pptable(smu); in smu_smc_hw_setup()
1294 /* smu_dump_pptable(smu); */ in smu_smc_hw_setup()
1298 * (to SMU). Driver involvement is neither needed nor permitted. in smu_smc_hw_setup()
1302 * Copy pptable bo in the vram to smc with SMU MSGs such as in smu_smc_hw_setup()
1305 ret = smu_write_pptable(smu); in smu_smc_hw_setup()
1313 ret = smu_run_btc(smu); in smu_smc_hw_setup()
1322 ret = smu_feature_set_allowed_mask(smu); in smu_smc_hw_setup()
1329 ret = smu_system_features_control(smu, true); in smu_smc_hw_setup()
1335 ret = smu_feature_get_enabled_mask(smu, &features_supported); in smu_smc_hw_setup()
1344 if (!smu_is_dpm_running(smu)) in smu_smc_hw_setup()
1352 ret = smu_set_default_dpm_table(smu); in smu_smc_hw_setup()
1383 ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width); in smu_smc_hw_setup()
1389 ret = smu_get_thermal_temperature_range(smu); in smu_smc_hw_setup()
1395 ret = smu_enable_thermal_alert(smu); in smu_smc_hw_setup()
1401 ret = smu_notify_display_change(smu); in smu_smc_hw_setup()
1411 ret = smu_set_min_dcef_deep_sleep(smu, in smu_smc_hw_setup()
1412 smu->smu_table.boot_values.dcefclk / 100); in smu_smc_hw_setup()
1417 static int smu_start_smc_engine(struct smu_context *smu) in smu_start_smc_engine() argument
1419 struct amdgpu_device *adev = smu->adev; in smu_start_smc_engine()
1424 if (smu->ppt_funcs->load_microcode) { in smu_start_smc_engine()
1425 ret = smu->ppt_funcs->load_microcode(smu); in smu_start_smc_engine()
1432 if (smu->ppt_funcs->check_fw_status) { in smu_start_smc_engine()
1433 ret = smu->ppt_funcs->check_fw_status(smu); in smu_start_smc_engine()
1444 ret = smu_check_fw_version(smu); in smu_start_smc_engine()
1455 struct smu_context *smu = adev->powerplay.pp_handle; in smu_hw_init() local
1458 smu->pm_enabled = false; in smu_hw_init()
1462 ret = smu_start_smc_engine(smu); in smu_hw_init()
1468 if (smu->is_apu) { in smu_hw_init()
1469 ret = smu_set_gfx_imu_enable(smu); in smu_hw_init()
1472 smu_dpm_set_vcn_enable(smu, true); in smu_hw_init()
1473 smu_dpm_set_jpeg_enable(smu, true); in smu_hw_init()
1474 smu_set_gfx_cgpg(smu, true); in smu_hw_init()
1477 if (!smu->pm_enabled) in smu_hw_init()
1480 ret = smu_get_driver_allowed_feature_mask(smu); in smu_hw_init()
1484 ret = smu_smc_hw_setup(smu); in smu_hw_init()
1493 * 2. DAL settings come between .hw_init and .late_init of SMU. in smu_hw_init()
1497 ret = smu_init_max_sustainable_clocks(smu); in smu_hw_init()
1505 dev_info(adev->dev, "SMU is initialized successfully!\n"); in smu_hw_init()
1510 static int smu_disable_dpms(struct smu_context *smu) in smu_disable_dpms() argument
1512 struct amdgpu_device *adev = smu->adev; in smu_disable_dpms()
1514 bool use_baco = !smu->is_apu && in smu_disable_dpms()
1520 * For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features(disablement or others) in smu_disable_dpms()
1540 * - SMU firmware can handle the DPM reenablement in smu_disable_dpms()
1543 if (smu->uploading_custom_pp_table) { in smu_disable_dpms()
1577 * For SMU 13.0.4/11, PMFW will handle the features disablement properly in smu_disable_dpms()
1594 if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) { in smu_disable_dpms()
1595 ret = smu_disable_all_features_with_exception(smu, in smu_disable_dpms()
1598 dev_err(adev->dev, "Failed to disable smu features except BACO.\n"); in smu_disable_dpms()
1602 ret = smu_system_features_control(smu, false); in smu_disable_dpms()
1604 dev_err(adev->dev, "Failed to disable smu features.\n"); in smu_disable_dpms()
1615 static int smu_smc_hw_cleanup(struct smu_context *smu) in smu_smc_hw_cleanup() argument
1617 struct amdgpu_device *adev = smu->adev; in smu_smc_hw_cleanup()
1620 cancel_work_sync(&smu->throttling_logging_work); in smu_smc_hw_cleanup()
1621 cancel_work_sync(&smu->interrupt_work); in smu_smc_hw_cleanup()
1623 ret = smu_disable_thermal_alert(smu); in smu_smc_hw_cleanup()
1629 cancel_delayed_work_sync(&smu->swctf_delayed_work); in smu_smc_hw_cleanup()
1631 ret = smu_disable_dpms(smu); in smu_smc_hw_cleanup()
1643 struct smu_context *smu = adev->powerplay.pp_handle; in smu_hw_fini() local
1648 smu_dpm_set_vcn_enable(smu, false); in smu_hw_fini()
1649 smu_dpm_set_jpeg_enable(smu, false); in smu_hw_fini()
1654 if (!smu->pm_enabled) in smu_hw_fini()
1659 return smu_smc_hw_cleanup(smu); in smu_hw_fini()
1665 struct smu_context *smu = adev->powerplay.pp_handle; in smu_late_fini() local
1667 kfree(smu); in smu_late_fini()
1670 static int smu_reset(struct smu_context *smu) in smu_reset() argument
1672 struct amdgpu_device *adev = smu->adev; in smu_reset()
1693 struct smu_context *smu = adev->powerplay.pp_handle; in smu_suspend() local
1700 if (!smu->pm_enabled) in smu_suspend()
1705 ret = smu_smc_hw_cleanup(smu); in smu_suspend()
1709 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED); in smu_suspend()
1711 smu_set_gfx_cgpg(smu, false); in smu_suspend()
1717 ret = smu_get_entrycount_gfxoff(smu, &count); in smu_suspend()
1728 struct smu_context *smu = adev->powerplay.pp_handle; in smu_resume() local
1733 if (!smu->pm_enabled) in smu_resume()
1736 dev_info(adev->dev, "SMU is resuming...\n"); in smu_resume()
1738 ret = smu_start_smc_engine(smu); in smu_resume()
1744 ret = smu_smc_hw_setup(smu); in smu_resume()
1750 ret = smu_set_gfx_imu_enable(smu); in smu_resume()
1754 smu_set_gfx_cgpg(smu, true); in smu_resume()
1756 smu->disable_uclk_switch = 0; in smu_resume()
1760 dev_info(adev->dev, "SMU is resumed successfully!\n"); in smu_resume()
1768 struct smu_context *smu = handle; in smu_display_configuration_change() local
1770 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_display_configuration_change()
1776 smu_set_min_dcef_deep_sleep(smu, in smu_display_configuration_change()
1802 struct smu_context *smu = (struct smu_context*)(handle); in smu_enable_umd_pstate() local
1803 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_enable_umd_pstate()
1805 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) in smu_enable_umd_pstate()
1812 smu_gpo_control(smu, false); in smu_enable_umd_pstate()
1813 smu_gfx_ulv_control(smu, false); in smu_enable_umd_pstate()
1814 smu_deep_sleep_control(smu, false); in smu_enable_umd_pstate()
1815 amdgpu_asic_update_umd_stable_pstate(smu->adev, true); in smu_enable_umd_pstate()
1822 amdgpu_asic_update_umd_stable_pstate(smu->adev, false); in smu_enable_umd_pstate()
1823 smu_deep_sleep_control(smu, true); in smu_enable_umd_pstate()
1824 smu_gfx_ulv_control(smu, true); in smu_enable_umd_pstate()
1825 smu_gpo_control(smu, true); in smu_enable_umd_pstate()
1832 static int smu_bump_power_profile_mode(struct smu_context *smu, in smu_bump_power_profile_mode() argument
1838 if (smu->ppt_funcs->set_power_profile_mode) in smu_bump_power_profile_mode()
1839 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size); in smu_bump_power_profile_mode()
1844 static int smu_adjust_power_state_dynamic(struct smu_context *smu, in smu_adjust_power_state_dynamic() argument
1852 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_adjust_power_state_dynamic()
1855 ret = smu_display_config_changed(smu); in smu_adjust_power_state_dynamic()
1857 dev_err(smu->adev->dev, "Failed to change display config!"); in smu_adjust_power_state_dynamic()
1862 ret = smu_apply_clocks_adjust_rules(smu); in smu_adjust_power_state_dynamic()
1864 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!"); in smu_adjust_power_state_dynamic()
1869 ret = smu_notify_smc_display_config(smu); in smu_adjust_power_state_dynamic()
1871 dev_err(smu->adev->dev, "Failed to notify smc display config!"); in smu_adjust_power_state_dynamic()
1877 ret = smu_asic_set_performance_level(smu, level); in smu_adjust_power_state_dynamic()
1879 dev_err(smu->adev->dev, "Failed to set performance level!"); in smu_adjust_power_state_dynamic()
1889 index = fls(smu->workload_mask); in smu_adjust_power_state_dynamic()
1891 workload[0] = smu->workload_setting[index]; in smu_adjust_power_state_dynamic()
1893 if (init || smu->power_profile_mode != workload[0]) in smu_adjust_power_state_dynamic()
1894 smu_bump_power_profile_mode(smu, workload, 0); in smu_adjust_power_state_dynamic()
1900 static int smu_handle_task(struct smu_context *smu, in smu_handle_task() argument
1906 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_handle_task()
1911 ret = smu_pre_display_config_changed(smu); in smu_handle_task()
1914 ret = smu_adjust_power_state_dynamic(smu, level, false, false); in smu_handle_task()
1917 ret = smu_adjust_power_state_dynamic(smu, level, true, true); in smu_handle_task()
1920 ret = smu_adjust_power_state_dynamic(smu, level, true, false); in smu_handle_task()
1933 struct smu_context *smu = handle; in smu_handle_dpm_task() local
1934 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; in smu_handle_dpm_task()
1936 return smu_handle_task(smu, smu_dpm->dpm_level, task_id); in smu_handle_dpm_task()
1944 struct smu_context *smu = handle; in smu_switch_power_profile() local
1945 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_switch_power_profile()
1949 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_switch_power_profile()
1956 smu->workload_mask &= ~(1 << smu->workload_prority[type]); in smu_switch_power_profile()
1957 index = fls(smu->workload_mask); in smu_switch_power_profile()
1959 workload[0] = smu->workload_setting[index]; in smu_switch_power_profile()
1961 smu->workload_mask |= (1 << smu->workload_prority[type]); in smu_switch_power_profile()
1962 index = fls(smu->workload_mask); in smu_switch_power_profile()
1964 workload[0] = smu->workload_setting[index]; in smu_switch_power_profile()
1969 smu_bump_power_profile_mode(smu, workload, 0); in smu_switch_power_profile()
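
Workload selection is highest-priority-wins: each profile owns a bit position (assigned in smu_sw_init() above), fls() finds the highest set bit in workload_mask, and workload_setting[] maps that index back to a profile. A reconstruction of the switch path; the index clamping is my assumption about how the off-by-one from fls() is handled:

	if (!en) {
		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload[0] = smu->workload_setting[index];
	} else {
		smu->workload_mask |= (1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload[0] = smu->workload_setting[index];
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		smu_bump_power_profile_mode(smu, workload, 0);
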
1976 struct smu_context *smu = handle; in smu_get_performance_level() local
1977 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_get_performance_level()
1979 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_performance_level()
1982 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) in smu_get_performance_level()
1991 struct smu_context *smu = handle; in smu_force_performance_level() local
1992 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_force_performance_level()
1995 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_force_performance_level()
1998 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) in smu_force_performance_level()
2001 ret = smu_enable_umd_pstate(smu, &level); in smu_force_performance_level()
2005 ret = smu_handle_task(smu, level, in smu_force_performance_level()
2010 memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask)); in smu_force_performance_level()
2011 smu->user_dpm_profile.clk_dependency = 0; in smu_force_performance_level()
2019 struct smu_context *smu = handle; in smu_set_display_count() local
2021 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_display_count()
2024 return smu_init_display_count(smu, count); in smu_set_display_count()
2027 static int smu_force_smuclk_levels(struct smu_context *smu, in smu_force_smuclk_levels() argument
2031 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_force_smuclk_levels()
2034 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_force_smuclk_levels()
2038 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n"); in smu_force_smuclk_levels()
2042 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) { in smu_force_smuclk_levels()
2043 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask); in smu_force_smuclk_levels()
2044 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { in smu_force_smuclk_levels()
2045 smu->user_dpm_profile.clk_mask[clk_type] = mask; in smu_force_smuclk_levels()
2046 smu_set_user_clk_dependencies(smu, clk_type); in smu_force_smuclk_levels()
2057 struct smu_context *smu = handle; in smu_force_ppclk_levels() local
2093 return smu_force_smuclk_levels(smu, clk_type, mask); in smu_force_ppclk_levels()
2098 * flag will be cleared, so those SMU services which
2106 struct smu_context *smu = handle; in smu_set_mp1_state() local
2109 if (!smu->pm_enabled) in smu_set_mp1_state()
2112 if (smu->ppt_funcs && in smu_set_mp1_state()
2113 smu->ppt_funcs->set_mp1_state) in smu_set_mp1_state()
2114 ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state); in smu_set_mp1_state()
2122 struct smu_context *smu = handle; in smu_set_df_cstate() local
2125 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_df_cstate()
2128 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate) in smu_set_df_cstate()
2131 ret = smu->ppt_funcs->set_df_cstate(smu, state); in smu_set_df_cstate()
2133 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n"); in smu_set_df_cstate()
2138 int smu_allow_xgmi_power_down(struct smu_context *smu, bool en) in smu_allow_xgmi_power_down() argument
2142 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_allow_xgmi_power_down()
2145 if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down) in smu_allow_xgmi_power_down()
2148 ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en); in smu_allow_xgmi_power_down()
2150 dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n"); in smu_allow_xgmi_power_down()
2155 int smu_write_watermarks_table(struct smu_context *smu) in smu_write_watermarks_table() argument
2157 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_write_watermarks_table()
2160 return smu_set_watermarks_table(smu, NULL); in smu_write_watermarks_table()
2166 struct smu_context *smu = handle; in smu_set_watermarks_for_clock_ranges() local
2168 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_watermarks_for_clock_ranges()
2171 if (smu->disable_watermark) in smu_set_watermarks_for_clock_ranges()
2174 return smu_set_watermarks_table(smu, clock_ranges); in smu_set_watermarks_for_clock_ranges()
2177 int smu_set_ac_dc(struct smu_context *smu) in smu_set_ac_dc() argument
2181 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_ac_dc()
2185 if (smu->dc_controlled_by_gpio) in smu_set_ac_dc()
2188 ret = smu_set_power_source(smu, in smu_set_ac_dc()
2189 smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC : in smu_set_ac_dc()
2192 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n", in smu_set_ac_dc()
2193 smu->adev->pm.ac_power ? "AC" : "DC"); in smu_set_ac_dc()
2199 .name = "smu",
2243 struct smu_context *smu = handle; in smu_load_microcode() local
2244 struct amdgpu_device *adev = smu->adev; in smu_load_microcode()
2247 if (!smu->pm_enabled) in smu_load_microcode()
2254 if (smu->ppt_funcs->load_microcode) { in smu_load_microcode()
2255 ret = smu->ppt_funcs->load_microcode(smu); in smu_load_microcode()
2262 if (smu->ppt_funcs->check_fw_status) { in smu_load_microcode()
2263 ret = smu->ppt_funcs->check_fw_status(smu); in smu_load_microcode()
2273 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) in smu_set_gfx_cgpg() argument
2277 if (smu->ppt_funcs->set_gfx_cgpg) in smu_set_gfx_cgpg()
2278 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled); in smu_set_gfx_cgpg()
2285 struct smu_context *smu = handle; in smu_set_fan_speed_rpm() local
2288 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_fan_speed_rpm()
2291 if (!smu->ppt_funcs->set_fan_speed_rpm) in smu_set_fan_speed_rpm()
2297 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed); in smu_set_fan_speed_rpm()
2298 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { in smu_set_fan_speed_rpm()
2299 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM; in smu_set_fan_speed_rpm()
2300 smu->user_dpm_profile.fan_speed_rpm = speed; in smu_set_fan_speed_rpm()
2303 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM; in smu_set_fan_speed_rpm()
2304 smu->user_dpm_profile.fan_speed_pwm = 0; in smu_set_fan_speed_rpm()
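
Note the bookkeeping: a successful manual RPM write records the value and raises SMU_CUSTOM_FAN_SPEED_RPM while clearing the PWM flag and value, so only one custom fan setting survives into smu_restore_dpm_user_profile(). Stitching the fragments together:

	ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
		smu->user_dpm_profile.fan_speed_rpm = speed;

		/* an RPM setting invalidates any earlier PWM setting */
		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
		smu->user_dpm_profile.fan_speed_pwm = 0;
	}

The PWM setter around line 2744 below mirrors this with the roles swapped.
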
2311 * smu_get_power_limit - Request one of the SMU Power Limits
2313 * @handle: pointer to smu context
2325 struct smu_context *smu = handle; in smu_get_power_limit() local
2326 struct amdgpu_device *adev = smu->adev; in smu_get_power_limit()
2331 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_power_limit()
2363 if (smu->ppt_funcs->get_ppt_limit) in smu_get_power_limit()
2364 ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level); in smu_get_power_limit()
2374 ret = smu_get_asic_power_limits(smu, in smu_get_power_limit()
2375 &smu->current_power_limit, in smu_get_power_limit()
2382 *limit = smu->current_power_limit; in smu_get_power_limit()
2385 *limit = smu->default_power_limit; in smu_get_power_limit()
2388 *limit = smu->max_power_limit; in smu_get_power_limit()
2400 struct smu_context *smu = handle; in smu_set_power_limit() local
2404 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_power_limit()
2409 if (smu->ppt_funcs->set_power_limit) in smu_set_power_limit()
2410 return smu->ppt_funcs->set_power_limit(smu, limit_type, limit); in smu_set_power_limit()
2412 if (limit > smu->max_power_limit) { in smu_set_power_limit()
2413 dev_err(smu->adev->dev, in smu_set_power_limit()
2415 limit, smu->max_power_limit); in smu_set_power_limit()
2420 limit = smu->current_power_limit; in smu_set_power_limit()
2422 if (smu->ppt_funcs->set_power_limit) { in smu_set_power_limit()
2423 ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit); in smu_set_power_limit()
2424 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) in smu_set_power_limit()
2425 smu->user_dpm_profile.power_limit = limit; in smu_set_power_limit()
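
Read together, the fragments show smu_set_power_limit() rejecting values above max_power_limit, treating zero as "keep the current limit", and caching the value into the user profile only outside restore mode. A reconstruction (the exact dev_err wording is approximate):

	if (limit > smu->max_power_limit) {
		dev_err(smu->adev->dev,
			"New power limit (%d) is over the max allowed %d\n",
			limit, smu->max_power_limit);
		return -EINVAL;
	}

	if (!limit)
		limit = smu->current_power_limit;

	if (smu->ppt_funcs->set_power_limit) {
		ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
			smu->user_dpm_profile.power_limit = limit;
	}
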
2431 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) in smu_print_smuclk_levels() argument
2435 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_print_smuclk_levels()
2438 if (smu->ppt_funcs->print_clk_levels) in smu_print_smuclk_levels()
2439 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf); in smu_print_smuclk_levels()
2492 struct smu_context *smu = handle; in smu_print_ppclk_levels() local
2499 return smu_print_smuclk_levels(smu, clk_type, buf); in smu_print_ppclk_levels()
2504 struct smu_context *smu = handle; in smu_emit_ppclk_levels() local
2511 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_emit_ppclk_levels()
2514 if (!smu->ppt_funcs->emit_clk_levels) in smu_emit_ppclk_levels()
2517 return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset); in smu_emit_ppclk_levels()
2525 struct smu_context *smu = handle; in smu_od_edit_dpm_table() local
2528 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_od_edit_dpm_table()
2531 if (smu->ppt_funcs->od_edit_dpm_table) { in smu_od_edit_dpm_table()
2532 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size); in smu_od_edit_dpm_table()
2543 struct smu_context *smu = handle; in smu_read_sensor() local
2545 &smu->pstate_table; in smu_read_sensor()
2549 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_read_sensor()
2558 if (smu->ppt_funcs->read_sensor) in smu_read_sensor()
2559 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size)) in smu_read_sensor()
2580 ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data); in smu_read_sensor()
2584 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0; in smu_read_sensor()
2588 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0; in smu_read_sensor()
2592 *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1; in smu_read_sensor()
2615 struct smu_context *smu = handle; in smu_get_apu_thermal_limit() local
2617 if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit) in smu_get_apu_thermal_limit()
2618 ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit); in smu_get_apu_thermal_limit()
2626 struct smu_context *smu = handle; in smu_set_apu_thermal_limit() local
2628 if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit) in smu_set_apu_thermal_limit()
2629 ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit); in smu_set_apu_thermal_limit()
2636 struct smu_context *smu = handle; in smu_get_power_profile_mode() local
2638 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || in smu_get_power_profile_mode()
2639 !smu->ppt_funcs->get_power_profile_mode) in smu_get_power_profile_mode()
2644 return smu->ppt_funcs->get_power_profile_mode(smu, buf); in smu_get_power_profile_mode()
2651 struct smu_context *smu = handle; in smu_set_power_profile_mode() local
2653 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || in smu_set_power_profile_mode()
2654 !smu->ppt_funcs->set_power_profile_mode) in smu_set_power_profile_mode()
2657 return smu_bump_power_profile_mode(smu, param, param_size); in smu_set_power_profile_mode()
2662 struct smu_context *smu = handle; in smu_get_fan_control_mode() local
2664 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_fan_control_mode()
2667 if (!smu->ppt_funcs->get_fan_control_mode) in smu_get_fan_control_mode()
2673 *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu); in smu_get_fan_control_mode()
2680 struct smu_context *smu = handle; in smu_set_fan_control_mode() local
2683 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_fan_control_mode()
2686 if (!smu->ppt_funcs->set_fan_control_mode) in smu_set_fan_control_mode()
2692 ret = smu->ppt_funcs->set_fan_control_mode(smu, value); in smu_set_fan_control_mode()
2696 if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { in smu_set_fan_control_mode()
2697 smu->user_dpm_profile.fan_mode = value; in smu_set_fan_control_mode()
2701 smu->user_dpm_profile.fan_speed_pwm = 0; in smu_set_fan_control_mode()
2702 smu->user_dpm_profile.fan_speed_rpm = 0; in smu_set_fan_control_mode()
2703 smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM); in smu_set_fan_control_mode()
2713 struct smu_context *smu = handle; in smu_get_fan_speed_pwm() local
2716 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_fan_speed_pwm()
2719 if (!smu->ppt_funcs->get_fan_speed_pwm) in smu_get_fan_speed_pwm()
2725 ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed); in smu_get_fan_speed_pwm()
2732 struct smu_context *smu = handle; in smu_set_fan_speed_pwm() local
2735 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_fan_speed_pwm()
2738 if (!smu->ppt_funcs->set_fan_speed_pwm) in smu_set_fan_speed_pwm()
2744 ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed); in smu_set_fan_speed_pwm()
2745 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { in smu_set_fan_speed_pwm()
2746 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM; in smu_set_fan_speed_pwm()
2747 smu->user_dpm_profile.fan_speed_pwm = speed; in smu_set_fan_speed_pwm()
2750 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM; in smu_set_fan_speed_pwm()
2751 smu->user_dpm_profile.fan_speed_rpm = 0; in smu_set_fan_speed_pwm()
2759 struct smu_context *smu = handle; in smu_get_fan_speed_rpm() local
2762 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_fan_speed_rpm()
2765 if (!smu->ppt_funcs->get_fan_speed_rpm) in smu_get_fan_speed_rpm()
2771 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed); in smu_get_fan_speed_rpm()
2778 struct smu_context *smu = handle; in smu_set_deep_sleep_dcefclk() local
2780 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_deep_sleep_dcefclk()
2783 return smu_set_min_dcef_deep_sleep(smu, clk); in smu_set_deep_sleep_dcefclk()
2790 struct smu_context *smu = handle; in smu_get_clock_by_type_with_latency() local
2794 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_clock_by_type_with_latency()
2797 if (smu->ppt_funcs->get_clock_by_type_with_latency) { in smu_get_clock_by_type_with_latency()
2812 dev_err(smu->adev->dev, "Invalid clock type!\n"); in smu_get_clock_by_type_with_latency()
2816 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks); in smu_get_clock_by_type_with_latency()
2825 struct smu_context *smu = handle; in smu_display_clock_voltage_request() local
2828 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_display_clock_voltage_request()
2831 if (smu->ppt_funcs->display_clock_voltage_request) in smu_display_clock_voltage_request()
2832 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req); in smu_display_clock_voltage_request()
2841 struct smu_context *smu = handle; in smu_display_disable_memory_clock_switch() local
2844 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_display_disable_memory_clock_switch()
2847 if (smu->ppt_funcs->display_disable_memory_clock_switch) in smu_display_disable_memory_clock_switch()
2848 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch); in smu_display_disable_memory_clock_switch()
2856 struct smu_context *smu = handle; in smu_set_xgmi_pstate() local
2859 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_xgmi_pstate()
2862 if (smu->ppt_funcs->set_xgmi_pstate) in smu_set_xgmi_pstate()
2863 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate); in smu_set_xgmi_pstate()
2866 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n"); in smu_set_xgmi_pstate()
2873 struct smu_context *smu = handle; in smu_get_baco_capability() local
2877 if (!smu->pm_enabled) in smu_get_baco_capability()
2880 if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support) in smu_get_baco_capability()
2881 *cap = smu->ppt_funcs->baco_is_support(smu); in smu_get_baco_capability()
2888 struct smu_context *smu = handle; in smu_baco_set_state() local
2891 if (!smu->pm_enabled) in smu_baco_set_state()
2895 if (smu->ppt_funcs->baco_exit) in smu_baco_set_state()
2896 ret = smu->ppt_funcs->baco_exit(smu); in smu_baco_set_state()
2898 if (smu->ppt_funcs->baco_enter) in smu_baco_set_state()
2899 ret = smu->ppt_funcs->baco_enter(smu); in smu_baco_set_state()
2905 dev_err(smu->adev->dev, "Failed to %s BACO state!\n", in smu_baco_set_state()
2911 bool smu_mode1_reset_is_support(struct smu_context *smu) in smu_mode1_reset_is_support() argument
2915 if (!smu->pm_enabled) in smu_mode1_reset_is_support()
2918 if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support) in smu_mode1_reset_is_support()
2919 ret = smu->ppt_funcs->mode1_reset_is_support(smu); in smu_mode1_reset_is_support()
2924 bool smu_mode2_reset_is_support(struct smu_context *smu) in smu_mode2_reset_is_support() argument
2928 if (!smu->pm_enabled) in smu_mode2_reset_is_support()
2931 if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support) in smu_mode2_reset_is_support()
2932 ret = smu->ppt_funcs->mode2_reset_is_support(smu); in smu_mode2_reset_is_support()
2937 int smu_mode1_reset(struct smu_context *smu) in smu_mode1_reset() argument
2941 if (!smu->pm_enabled) in smu_mode1_reset()
2944 if (smu->ppt_funcs->mode1_reset) in smu_mode1_reset()
2945 ret = smu->ppt_funcs->mode1_reset(smu); in smu_mode1_reset()
2952 struct smu_context *smu = handle; in smu_mode2_reset() local
2955 if (!smu->pm_enabled) in smu_mode2_reset()
2958 if (smu->ppt_funcs->mode2_reset) in smu_mode2_reset()
2959 ret = smu->ppt_funcs->mode2_reset(smu); in smu_mode2_reset()
2962 dev_err(smu->adev->dev, "Mode2 reset failed!\n"); in smu_mode2_reset()
2969 struct smu_context *smu = handle; in smu_enable_gfx_features() local
2972 if (!smu->pm_enabled) in smu_enable_gfx_features()
2975 if (smu->ppt_funcs->enable_gfx_features) in smu_enable_gfx_features()
2976 ret = smu->ppt_funcs->enable_gfx_features(smu); in smu_enable_gfx_features()
2979 dev_err(smu->adev->dev, "enable gfx features failed!\n"); in smu_enable_gfx_features()
2987 struct smu_context *smu = handle; in smu_get_max_sustainable_clocks_by_dc() local
2990 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_max_sustainable_clocks_by_dc()
2993 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc) in smu_get_max_sustainable_clocks_by_dc()
2994 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks); in smu_get_max_sustainable_clocks_by_dc()
3003 struct smu_context *smu = handle; in smu_get_uclk_dpm_states() local
3006 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_uclk_dpm_states()
3009 if (smu->ppt_funcs->get_uclk_dpm_states) in smu_get_uclk_dpm_states()
3010 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states); in smu_get_uclk_dpm_states()
3017 struct smu_context *smu = handle; in smu_get_current_power_state() local
3020 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_current_power_state()
3023 if (smu->ppt_funcs->get_current_power_state) in smu_get_current_power_state()
3024 pm_state = smu->ppt_funcs->get_current_power_state(smu); in smu_get_current_power_state()
3032 struct smu_context *smu = handle; in smu_get_dpm_clock_table() local
3035 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_dpm_clock_table()
3038 if (smu->ppt_funcs->get_dpm_clock_table) in smu_get_dpm_clock_table()
3039 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table); in smu_get_dpm_clock_table()
3046 struct smu_context *smu = handle; in smu_sys_get_gpu_metrics() local
3048 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_sys_get_gpu_metrics()
3051 if (!smu->ppt_funcs->get_gpu_metrics) in smu_sys_get_gpu_metrics()
3054 return smu->ppt_funcs->get_gpu_metrics(smu, table); in smu_sys_get_gpu_metrics()
3059 struct smu_context *smu = handle; in smu_enable_mgpu_fan_boost() local
3062 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_enable_mgpu_fan_boost()
3065 if (smu->ppt_funcs->enable_mgpu_fan_boost) in smu_enable_mgpu_fan_boost()
3066 ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu); in smu_enable_mgpu_fan_boost()
3074 struct smu_context *smu = handle; in smu_gfx_state_change_set() local
3077 if (smu->ppt_funcs->gfx_state_change_set) in smu_gfx_state_change_set()
3078 ret = smu->ppt_funcs->gfx_state_change_set(smu, state); in smu_gfx_state_change_set()
3083 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable) in smu_handle_passthrough_sbr() argument
3087 if (smu->ppt_funcs->smu_handle_passthrough_sbr) in smu_handle_passthrough_sbr()
3088 ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable); in smu_handle_passthrough_sbr()
3093 int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc) in smu_get_ecc_info() argument
3097 if (smu->ppt_funcs && in smu_get_ecc_info()
3098 smu->ppt_funcs->get_ecc_info) in smu_get_ecc_info()
3099 ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc); in smu_get_ecc_info()
3107 struct smu_context *smu = handle; in smu_get_prv_buffer_details() local
3108 struct smu_table_context *smu_table = &smu->smu_table; in smu_get_prv_buffer_details()
3182 int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event, in smu_wait_for_event() argument
3187 if (smu->ppt_funcs->wait_for_event) in smu_wait_for_event()
3188 ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg); in smu_wait_for_event()
3193 int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size) in smu_stb_collect_info() argument
3196 if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled) in smu_stb_collect_info()
3200 if (size != smu->stb_context.stb_buf_size) in smu_stb_collect_info()
3204 * No need to lock the smu mutex as we access STB directly through MMIO in smu_stb_collect_info()
3205 * rather than through the SMU messaging route (for now at least). in smu_stb_collect_info()
3208 return smu->ppt_funcs->stb_collect_info(smu, buf, size); in smu_stb_collect_info()
3216 struct smu_context *smu = adev->powerplay.pp_handle; in smu_stb_debugfs_open() local
3220 buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL); in smu_stb_debugfs_open()
3224 r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size); in smu_stb_debugfs_open()
3241 struct smu_context *smu = adev->powerplay.pp_handle; in smu_stb_debugfs_read() local
3250 smu->stb_context.stb_buf_size); in smu_stb_debugfs_read()
3282 struct smu_context *smu = adev->powerplay.pp_handle; in amdgpu_smu_stb_debug_fs_init() local
3284 if (!smu || (!smu->stb_context.stb_buf_size)) in amdgpu_smu_stb_debug_fs_init()
3292 smu->stb_context.stb_buf_size); in amdgpu_smu_stb_debug_fs_init()
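
The STB debugfs path snapshots the buffer once at open (kvmalloc_array() + smu_stb_collect_info()) and serves all reads from that copy, so the MMIO window is touched once per open. A sketch of the read side under that assumption, using the stock helper:

	static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf,
					    size_t size, loff_t *pos)
	{
		struct amdgpu_device *adev = filp->f_inode->i_private;
		struct smu_context *smu = adev->powerplay.pp_handle;

		/* filp->private_data holds the snapshot taken at open time */
		return simple_read_from_buffer(buf, size, pos,
					       filp->private_data,
					       smu->stb_context.stb_buf_size);
	}
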
3296 int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size) in smu_send_hbm_bad_pages_num() argument
3300 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num) in smu_send_hbm_bad_pages_num()
3301 ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size); in smu_send_hbm_bad_pages_num()
3306 int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size) in smu_send_hbm_bad_channel_flag() argument
3310 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag) in smu_send_hbm_bad_channel_flag()
3311 ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size); in smu_send_hbm_bad_channel_flag()