Lines matching refs:smu — cross-reference listing; the symbols and line numbers correspond to the Linux kernel's drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
60 static int smu_force_smuclk_levels(struct smu_context *smu,
63 static int smu_handle_task(struct smu_context *smu,
66 static int smu_reset(struct smu_context *smu);
71 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
77 struct smu_context *smu = handle; in smu_sys_get_pp_feature_mask() local
79 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_sys_get_pp_feature_mask()
82 return smu_get_pp_feature_mask(smu, buf); in smu_sys_get_pp_feature_mask()
88 struct smu_context *smu = handle; in smu_sys_set_pp_feature_mask() local
90 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_sys_set_pp_feature_mask()
93 return smu_set_pp_feature_mask(smu, new_mask); in smu_sys_set_pp_feature_mask()
96 int smu_set_residency_gfxoff(struct smu_context *smu, bool value) in smu_set_residency_gfxoff() argument
98 if (!smu->ppt_funcs->set_gfx_off_residency) in smu_set_residency_gfxoff()
101 return smu_set_gfx_off_residency(smu, value); in smu_set_residency_gfxoff()
104 int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value) in smu_get_residency_gfxoff() argument
106 if (!smu->ppt_funcs->get_gfx_off_residency) in smu_get_residency_gfxoff()
109 return smu_get_gfx_off_residency(smu, value); in smu_get_residency_gfxoff()
112 int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value) in smu_get_entrycount_gfxoff() argument
114 if (!smu->ppt_funcs->get_gfx_off_entrycount) in smu_get_entrycount_gfxoff()
117 return smu_get_gfx_off_entrycount(smu, value); in smu_get_entrycount_gfxoff()
120 int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value) in smu_get_status_gfxoff() argument
122 if (!smu->ppt_funcs->get_gfx_off_status) in smu_get_status_gfxoff()
125 *value = smu_get_gfx_off_status(smu); in smu_get_status_gfxoff()
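The gfxoff wrappers above all share one shape: bail out unless the ASIC's ppt_funcs table provides the callback, then dispatch through it. A minimal userspace sketch of that guard-then-dispatch pattern, with mock structs standing in for the kernel's smu_context and pptable_funcs (the -EINVAL-style fallback matches what these wrappers return; everything else is illustrative):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    struct mock_ppt_funcs {
        int (*get_gfx_off_residency)(void *smu, uint32_t *value);  /* may be NULL */
    };

    struct mock_smu_context {
        const struct mock_ppt_funcs *ppt_funcs;
    };

    /* Mirrors smu_get_residency_gfxoff(): refuse unsupported hooks up front. */
    static int mock_get_residency_gfxoff(struct mock_smu_context *smu, uint32_t *value)
    {
        if (!smu->ppt_funcs->get_gfx_off_residency)
            return -EINVAL;
        return smu->ppt_funcs->get_gfx_off_residency(smu, value);
    }

    int main(void)
    {
        static const struct mock_ppt_funcs no_hooks = { 0 };
        struct mock_smu_context smu = { .ppt_funcs = &no_hooks };
        uint32_t v = 0;
        printf("ret = %d\n", mock_get_residency_gfxoff(&smu, &v));  /* -EINVAL path */
        return 0;
    }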
130 int smu_set_soft_freq_range(struct smu_context *smu, in smu_set_soft_freq_range() argument
137 if (smu->ppt_funcs->set_soft_freq_limited_range) in smu_set_soft_freq_range()
138 ret = smu->ppt_funcs->set_soft_freq_limited_range(smu, in smu_set_soft_freq_range()
146 int smu_get_dpm_freq_range(struct smu_context *smu, in smu_get_dpm_freq_range() argument
156 if (smu->ppt_funcs->get_dpm_ultimate_freq) in smu_get_dpm_freq_range()
157 ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu, in smu_get_dpm_freq_range()
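smu_get_dpm_freq_range() and smu_set_soft_freq_range() are natural companions: read the ultimate min/max for a clock domain, then request a narrower soft window inside it. The pairing below is an illustrative usage sketch, not code from the driver; both functions are stubbed with invented frequencies and the halving policy is made up:

    #include <stdint.h>
    #include <stdio.h>

    enum mock_clk_type { MOCK_SMU_UCLK };  /* stands in for smu_clk_type */

    /* Stubs standing in for the two wrappers listed above. */
    static int mock_get_dpm_freq_range(enum mock_clk_type clk, uint32_t *min, uint32_t *max)
    {
        (void)clk;
        *min = 500;   /* MHz, invented values */
        *max = 1000;
        return 0;
    }

    static int mock_set_soft_freq_range(enum mock_clk_type clk, uint32_t min, uint32_t max)
    {
        printf("soft range for clk %d: %u-%u MHz\n", clk, min, max);
        return 0;
    }

    int main(void)
    {
        uint32_t min = 0, max = 0;
        int ret = mock_get_dpm_freq_range(MOCK_SMU_UCLK, &min, &max);
        if (!ret)  /* cap the ceiling at the midpoint, purely as an example */
            ret = mock_set_soft_freq_range(MOCK_SMU_UCLK, min, min + (max - min) / 2);
        return ret;
    }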
165 int smu_set_gfx_power_up_by_imu(struct smu_context *smu) in smu_set_gfx_power_up_by_imu() argument
168 struct amdgpu_device *adev = smu->adev; in smu_set_gfx_power_up_by_imu()
170 if (smu->ppt_funcs->set_gfx_power_up_by_imu) { in smu_set_gfx_power_up_by_imu()
171 ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu); in smu_set_gfx_power_up_by_imu()
180 struct smu_context *smu = handle; in smu_get_mclk() local
184 ret = smu_get_dpm_freq_range(smu, SMU_UCLK, in smu_get_mclk()
194 struct smu_context *smu = handle; in smu_get_sclk() local
198 ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, in smu_get_sclk()
206 static int smu_set_gfx_imu_enable(struct smu_context *smu) in smu_set_gfx_imu_enable() argument
208 struct amdgpu_device *adev = smu->adev; in smu_set_gfx_imu_enable()
213 if (amdgpu_in_reset(smu->adev) || adev->in_s0ix) in smu_set_gfx_imu_enable()
216 return smu_set_gfx_power_up_by_imu(smu); in smu_set_gfx_imu_enable()
219 static int smu_dpm_set_vcn_enable(struct smu_context *smu, in smu_dpm_set_vcn_enable() argument
222 struct smu_power_context *smu_power = &smu->smu_power; in smu_dpm_set_vcn_enable()
226 if (!smu->ppt_funcs->dpm_set_vcn_enable) in smu_dpm_set_vcn_enable()
232 ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable); in smu_dpm_set_vcn_enable()
239 static int smu_dpm_set_jpeg_enable(struct smu_context *smu, in smu_dpm_set_jpeg_enable() argument
242 struct smu_power_context *smu_power = &smu->smu_power; in smu_dpm_set_jpeg_enable()
246 if (!smu->ppt_funcs->dpm_set_jpeg_enable) in smu_dpm_set_jpeg_enable()
252 ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable); in smu_dpm_set_jpeg_enable()
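In the kernel source, smu_dpm_set_vcn_enable()/smu_dpm_set_jpeg_enable() also skip the SMU message when the block is already in the requested state, and update the atomic vcn_gated/jpeg_gated counters (initialized in smu_sw_init() below) only on success. A userspace sketch of that idempotence guard, with a plain int standing in for atomic_t:

    #include <stdbool.h>
    #include <stdio.h>

    struct mock_power_gate {
        int vcn_gated;  /* 1 = powered down; atomic_t in the kernel */
    };

    static int messages_sent;

    static int mock_send_vcn_enable(bool enable)
    {
        messages_sent++;
        printf("SMU message: VCN %s\n", enable ? "on" : "off");
        return 0;
    }

    static int mock_dpm_set_vcn_enable(struct mock_power_gate *pg, bool enable)
    {
        /* Proceed only when gated == enable: power up a gated block or
         * power down a running one; anything else is already done. */
        if (pg->vcn_gated ^ (int)enable)
            return 0;
        int ret = mock_send_vcn_enable(enable);
        if (!ret)
            pg->vcn_gated = !enable;
        return ret;
    }

    int main(void)
    {
        struct mock_power_gate pg = { .vcn_gated = 1 };
        mock_dpm_set_vcn_enable(&pg, true);   /* sends the message */
        mock_dpm_set_vcn_enable(&pg, true);   /* no-op, already enabled */
        printf("messages sent: %d\n", messages_sent);  /* 1 */
        return 0;
    }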
277 struct smu_context *smu = handle; in smu_dpm_set_power_gate() local
280 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) { in smu_dpm_set_power_gate()
281 dev_WARN(smu->adev->dev, in smu_dpm_set_power_gate()
294 ret = smu_dpm_set_vcn_enable(smu, !gate); in smu_dpm_set_power_gate()
296 dev_err(smu->adev->dev, "Failed to power %s VCN!\n", in smu_dpm_set_power_gate()
300 ret = smu_gfx_off_control(smu, gate); in smu_dpm_set_power_gate()
302 dev_err(smu->adev->dev, "Failed to %s gfxoff!\n", in smu_dpm_set_power_gate()
306 ret = smu_powergate_sdma(smu, gate); in smu_dpm_set_power_gate()
308 dev_err(smu->adev->dev, "Failed to power %s SDMA!\n", in smu_dpm_set_power_gate()
312 ret = smu_dpm_set_jpeg_enable(smu, !gate); in smu_dpm_set_power_gate()
314 dev_err(smu->adev->dev, "Failed to power %s JPEG!\n", in smu_dpm_set_power_gate()
318 dev_err(smu->adev->dev, "Unsupported block type!\n"); in smu_dpm_set_power_gate()
333 static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk) in smu_set_user_clk_dependencies() argument
335 if (smu->adev->in_suspend) in smu_set_user_clk_dependencies()
339 smu->user_dpm_profile.clk_dependency = 0; in smu_set_user_clk_dependencies()
340 smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK); in smu_set_user_clk_dependencies()
343 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK))) in smu_set_user_clk_dependencies()
346 smu->user_dpm_profile.clk_dependency = 0; in smu_set_user_clk_dependencies()
347 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK); in smu_set_user_clk_dependencies()
350 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK))) in smu_set_user_clk_dependencies()
353 smu->user_dpm_profile.clk_dependency = 0; in smu_set_user_clk_dependencies()
354 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK); in smu_set_user_clk_dependencies()
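smu_set_user_clk_dependencies() encodes precedence in bitmasks: forcing MCLK marks FCLK and SOCCLK as dependent, while a later FCLK or SOCCLK force is ignored if the MCLK-driven dependency set is already in place. A compact sketch with mock clock ids and a local BIT() macro; the branch structure follows the listing:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))
    enum { MOCK_SMU_MCLK, MOCK_SMU_FCLK, MOCK_SMU_SOCCLK };  /* mock clk ids */

    static uint32_t clk_dependency;  /* smu->user_dpm_profile.clk_dependency */

    static void mock_set_user_clk_dependencies(int clk)
    {
        if (clk == MOCK_SMU_MCLK) {
            clk_dependency = BIT(MOCK_SMU_FCLK) | BIT(MOCK_SMU_SOCCLK);
        } else if (clk == MOCK_SMU_FCLK) {
            /* MCLK took precedence already: leave its dependency set alone. */
            if (clk_dependency == (BIT(MOCK_SMU_FCLK) | BIT(MOCK_SMU_SOCCLK)))
                return;
            clk_dependency = BIT(MOCK_SMU_MCLK) | BIT(MOCK_SMU_SOCCLK);
        } else if (clk == MOCK_SMU_SOCCLK) {
            if (clk_dependency == (BIT(MOCK_SMU_FCLK) | BIT(MOCK_SMU_SOCCLK)))
                return;
            clk_dependency = BIT(MOCK_SMU_MCLK) | BIT(MOCK_SMU_FCLK);
        }
    }

    int main(void)
    {
        mock_set_user_clk_dependencies(MOCK_SMU_MCLK);
        mock_set_user_clk_dependencies(MOCK_SMU_FCLK);   /* ignored: MCLK wins */
        printf("dependency mask: 0x%x\n", clk_dependency);
        return 0;
    }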
368 static void smu_restore_dpm_user_profile(struct smu_context *smu) in smu_restore_dpm_user_profile() argument
370 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_restore_dpm_user_profile()
373 if (!smu->adev->in_suspend) in smu_restore_dpm_user_profile()
376 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_restore_dpm_user_profile()
380 smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE; in smu_restore_dpm_user_profile()
383 if (smu->user_dpm_profile.power_limit) { in smu_restore_dpm_user_profile()
384 ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit); in smu_restore_dpm_user_profile()
386 dev_err(smu->adev->dev, "Failed to set power limit value\n"); in smu_restore_dpm_user_profile()
398 if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) && in smu_restore_dpm_user_profile()
399 smu->user_dpm_profile.clk_mask[clk_type]) { in smu_restore_dpm_user_profile()
400 ret = smu_force_smuclk_levels(smu, clk_type, in smu_restore_dpm_user_profile()
401 smu->user_dpm_profile.clk_mask[clk_type]); in smu_restore_dpm_user_profile()
403 dev_err(smu->adev->dev, in smu_restore_dpm_user_profile()
410 if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL || in smu_restore_dpm_user_profile()
411 smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) { in smu_restore_dpm_user_profile()
412 ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode); in smu_restore_dpm_user_profile()
414 smu->user_dpm_profile.fan_speed_pwm = 0; in smu_restore_dpm_user_profile()
415 smu->user_dpm_profile.fan_speed_rpm = 0; in smu_restore_dpm_user_profile()
416 smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO; in smu_restore_dpm_user_profile()
417 dev_err(smu->adev->dev, "Failed to set manual fan control mode\n"); in smu_restore_dpm_user_profile()
420 if (smu->user_dpm_profile.fan_speed_pwm) { in smu_restore_dpm_user_profile()
421 ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm); in smu_restore_dpm_user_profile()
423 dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n"); in smu_restore_dpm_user_profile()
426 if (smu->user_dpm_profile.fan_speed_rpm) { in smu_restore_dpm_user_profile()
427 ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm); in smu_restore_dpm_user_profile()
429 dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n"); in smu_restore_dpm_user_profile()
434 if (smu->user_dpm_profile.user_od) { in smu_restore_dpm_user_profile()
435 if (smu->ppt_funcs->restore_user_od_settings) { in smu_restore_dpm_user_profile()
436 ret = smu->ppt_funcs->restore_user_od_settings(smu); in smu_restore_dpm_user_profile()
438 dev_err(smu->adev->dev, "Failed to upload customized OD settings\n"); in smu_restore_dpm_user_profile()
443 smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE; in smu_restore_dpm_user_profile()
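The restore path brackets everything it replays with SMU_DPM_USER_PROFILE_RESTORE so the setters it calls (smu_set_power_limit(), smu_force_smuclk_levels(), the fan helpers) do not re-cache the values being restored. A sketch of that flag bracket, with the flag name kept and all state mocked:

    #include <stdio.h>

    #define SMU_DPM_USER_PROFILE_RESTORE (1u << 0)

    static unsigned int flags;
    static unsigned int cached_limit, hw_limit;

    /* Mirrors the caching side of smu_set_power_limit(): record the value
     * only when this is a real user write, not a restore replay. */
    static void mock_set_power_limit(unsigned int limit)
    {
        hw_limit = limit;
        if (!(flags & SMU_DPM_USER_PROFILE_RESTORE))
            cached_limit = limit;
    }

    static void mock_restore_dpm_user_profile(void)
    {
        flags |= SMU_DPM_USER_PROFILE_RESTORE;
        if (cached_limit)
            mock_set_power_limit(cached_limit);  /* replay without re-caching */
        flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
    }

    int main(void)
    {
        mock_set_power_limit(200);   /* user sets 200; gets cached */
        hw_limit = 0;                /* pretend a suspend wiped the HW value */
        mock_restore_dpm_user_profile();
        printf("hw=%u cached=%u\n", hw_limit, cached_limit);  /* 200 200 */
        return 0;
    }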
474 struct smu_context *smu = adev->powerplay.pp_handle; in is_support_cclk_dpm() local
476 if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT)) in is_support_cclk_dpm()
486 struct smu_context *smu = handle; in smu_sys_get_pp_table() local
487 struct smu_table_context *smu_table = &smu->smu_table; in smu_sys_get_pp_table()
489 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_sys_get_pp_table()
507 struct smu_context *smu = handle; in smu_sys_set_pp_table() local
508 struct smu_table_context *smu_table = &smu->smu_table; in smu_sys_set_pp_table()
512 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_sys_set_pp_table()
516 dev_err(smu->adev->dev, "pp table size not matched !\n"); in smu_sys_set_pp_table()
534 smu->uploading_custom_pp_table = true; in smu_sys_set_pp_table()
536 ret = smu_reset(smu); in smu_sys_set_pp_table()
538 dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret); in smu_sys_set_pp_table()
540 smu->uploading_custom_pp_table = false; in smu_sys_set_pp_table()
545 static int smu_get_driver_allowed_feature_mask(struct smu_context *smu) in smu_get_driver_allowed_feature_mask() argument
547 struct smu_feature *feature = &smu->smu_feature; in smu_get_driver_allowed_feature_mask()
558 if (smu->adev->scpm_enabled) { in smu_get_driver_allowed_feature_mask()
565 ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask, in smu_get_driver_allowed_feature_mask()
579 struct smu_context *smu = adev->powerplay.pp_handle; in smu_set_funcs() local
582 smu->od_enabled = true; in smu_set_funcs()
588 navi10_set_ppt_funcs(smu); in smu_set_funcs()
594 sienna_cichlid_set_ppt_funcs(smu); in smu_set_funcs()
598 renoir_set_ppt_funcs(smu); in smu_set_funcs()
601 vangogh_set_ppt_funcs(smu); in smu_set_funcs()
606 yellow_carp_set_ppt_funcs(smu); in smu_set_funcs()
610 smu_v13_0_4_set_ppt_funcs(smu); in smu_set_funcs()
613 smu_v13_0_5_set_ppt_funcs(smu); in smu_set_funcs()
616 cyan_skillfish_set_ppt_funcs(smu); in smu_set_funcs()
620 arcturus_set_ppt_funcs(smu); in smu_set_funcs()
622 smu->od_enabled = false; in smu_set_funcs()
625 aldebaran_set_ppt_funcs(smu); in smu_set_funcs()
627 smu->od_enabled = true; in smu_set_funcs()
631 smu_v13_0_0_set_ppt_funcs(smu); in smu_set_funcs()
634 smu_v13_0_6_set_ppt_funcs(smu); in smu_set_funcs()
636 smu->od_enabled = true; in smu_set_funcs()
639 smu_v13_0_7_set_ppt_funcs(smu); in smu_set_funcs()
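smu_set_funcs() is one switch over the ASIC's IP version that installs the matching *_set_ppt_funcs() table and flips per-ASIC defaults such as od_enabled (true by default, cleared for Arcturus). A reduced sketch of the dispatch shape with mock version codes and a one-field ops struct:

    #include <stdbool.h>
    #include <stdio.h>

    struct mock_ops { const char *name; };  /* stands in for pptable_funcs */

    static const struct mock_ops navi10_ops   = { "navi10" };
    static const struct mock_ops arcturus_ops = { "arcturus" };

    struct mock_smu {
        const struct mock_ops *ppt_funcs;
        bool od_enabled;
    };

    enum mock_ip { MOCK_IP_NAVI10, MOCK_IP_ARCTURUS };

    static int mock_set_funcs(struct mock_smu *smu, enum mock_ip ip)
    {
        smu->od_enabled = true;  /* default, as at the top of smu_set_funcs() */
        switch (ip) {
        case MOCK_IP_NAVI10:
            smu->ppt_funcs = &navi10_ops;
            break;
        case MOCK_IP_ARCTURUS:
            smu->ppt_funcs = &arcturus_ops;
            smu->od_enabled = false;  /* OD not supported on this ASIC */
            break;
        default:
            return -1;  /* unknown IP version */
        }
        return 0;
    }

    int main(void)
    {
        struct mock_smu smu = { 0 };
        mock_set_funcs(&smu, MOCK_IP_ARCTURUS);
        printf("%s od=%d\n", smu.ppt_funcs->name, smu.od_enabled);
        return 0;
    }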
651 struct smu_context *smu; in smu_early_init() local
654 smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL); in smu_early_init()
655 if (!smu) in smu_early_init()
658 smu->adev = adev; in smu_early_init()
659 smu->pm_enabled = !!amdgpu_dpm; in smu_early_init()
660 smu->is_apu = false; in smu_early_init()
661 smu->smu_baco.state = SMU_BACO_STATE_EXIT; in smu_early_init()
662 smu->smu_baco.platform_support = false; in smu_early_init()
663 smu->user_dpm_profile.fan_mode = -1; in smu_early_init()
665 mutex_init(&smu->message_lock); in smu_early_init()
667 adev->powerplay.pp_handle = smu; in smu_early_init()
673 return smu_init_microcode(smu); in smu_early_init()
676 static int smu_set_default_dpm_table(struct smu_context *smu) in smu_set_default_dpm_table() argument
678 struct smu_power_context *smu_power = &smu->smu_power; in smu_set_default_dpm_table()
683 if (!smu->ppt_funcs->set_default_dpm_table) in smu_set_default_dpm_table()
689 ret = smu_dpm_set_vcn_enable(smu, true); in smu_set_default_dpm_table()
693 ret = smu_dpm_set_jpeg_enable(smu, true); in smu_set_default_dpm_table()
697 ret = smu->ppt_funcs->set_default_dpm_table(smu); in smu_set_default_dpm_table()
699 dev_err(smu->adev->dev, in smu_set_default_dpm_table()
702 smu_dpm_set_jpeg_enable(smu, !jpeg_gate); in smu_set_default_dpm_table()
704 smu_dpm_set_vcn_enable(smu, !vcn_gate); in smu_set_default_dpm_table()
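smu_set_default_dpm_table() briefly ungates VCN and JPEG so the default DPM tables can be read back, then restores the gate state it found on entry. A sketch of that save/ungate/restore bracket; the kernel snapshots the state from the atomic power_gate counters, mocked here as plain bools:

    #include <stdbool.h>
    #include <stdio.h>

    static bool vcn_gated = true, jpeg_gated = true;  /* mock power_gate state */

    static void mock_set_vcn(bool enable)  { vcn_gated  = !enable; }
    static void mock_set_jpeg(bool enable) { jpeg_gated = !enable; }

    static int mock_read_dpm_tables(void)
    {
        printf("reading DPM tables with VCN/JPEG up\n");
        return 0;
    }

    static int mock_set_default_dpm_table(void)
    {
        bool saved_vcn = vcn_gated, saved_jpeg = jpeg_gated;
        int ret;

        mock_set_vcn(true);            /* ungate for the table read */
        mock_set_jpeg(true);
        ret = mock_read_dpm_tables();
        mock_set_jpeg(!saved_jpeg);    /* restore the original gate state */
        mock_set_vcn(!saved_vcn);
        return ret;
    }

    int main(void)
    {
        mock_set_default_dpm_table();
        printf("vcn_gated=%d jpeg_gated=%d\n", vcn_gated, jpeg_gated);  /* 1 1 */
        return 0;
    }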
708 static int smu_apply_default_config_table_settings(struct smu_context *smu) in smu_apply_default_config_table_settings() argument
710 struct amdgpu_device *adev = smu->adev; in smu_apply_default_config_table_settings()
713 ret = smu_get_default_config_table_settings(smu, in smu_apply_default_config_table_settings()
718 return smu_set_config_table(smu, &adev->pm.config_table); in smu_apply_default_config_table_settings()
724 struct smu_context *smu = adev->powerplay.pp_handle; in smu_late_init() local
727 smu_set_fine_grain_gfx_freq_parameters(smu); in smu_late_init()
729 if (!smu->pm_enabled) in smu_late_init()
732 ret = smu_post_init(smu); in smu_late_init()
746 smu_set_ac_dc(smu); in smu_late_init()
752 if (!amdgpu_sriov_vf(adev) || smu->od_enabled) { in smu_late_init()
753 ret = smu_set_default_od_settings(smu); in smu_late_init()
760 ret = smu_populate_umd_state_clk(smu); in smu_late_init()
766 ret = smu_get_asic_power_limits(smu, in smu_late_init()
767 &smu->current_power_limit, in smu_late_init()
768 &smu->default_power_limit, in smu_late_init()
769 &smu->max_power_limit); in smu_late_init()
776 smu_get_unique_id(smu); in smu_late_init()
778 smu_get_fan_parameters(smu); in smu_late_init()
780 smu_handle_task(smu, in smu_late_init()
781 smu->smu_dpm.dpm_level, in smu_late_init()
784 ret = smu_apply_default_config_table_settings(smu); in smu_late_init()
790 smu_restore_dpm_user_profile(smu); in smu_late_init()
795 static int smu_init_fb_allocations(struct smu_context *smu) in smu_init_fb_allocations() argument
797 struct amdgpu_device *adev = smu->adev; in smu_init_fb_allocations()
798 struct smu_table_context *smu_table = &smu->smu_table; in smu_init_fb_allocations()
861 static int smu_fini_fb_allocations(struct smu_context *smu) in smu_fini_fb_allocations() argument
863 struct smu_table_context *smu_table = &smu->smu_table; in smu_fini_fb_allocations()
889 static int smu_alloc_memory_pool(struct smu_context *smu) in smu_alloc_memory_pool() argument
891 struct amdgpu_device *adev = smu->adev; in smu_alloc_memory_pool()
892 struct smu_table_context *smu_table = &smu->smu_table; in smu_alloc_memory_pool()
894 uint64_t pool_size = smu->pool_size; in smu_alloc_memory_pool()
926 static int smu_free_memory_pool(struct smu_context *smu) in smu_free_memory_pool() argument
928 struct smu_table_context *smu_table = &smu->smu_table; in smu_free_memory_pool()
943 static int smu_alloc_dummy_read_table(struct smu_context *smu) in smu_alloc_dummy_read_table() argument
945 struct smu_table_context *smu_table = &smu->smu_table; in smu_alloc_dummy_read_table()
948 struct amdgpu_device *adev = smu->adev; in smu_alloc_dummy_read_table()
967 static void smu_free_dummy_read_table(struct smu_context *smu) in smu_free_dummy_read_table() argument
969 struct smu_table_context *smu_table = &smu->smu_table; in smu_free_dummy_read_table()
981 static int smu_smc_table_sw_init(struct smu_context *smu) in smu_smc_table_sw_init() argument
989 ret = smu_init_smc_tables(smu); in smu_smc_table_sw_init()
991 dev_err(smu->adev->dev, "Failed to init smc tables!\n"); in smu_smc_table_sw_init()
999 ret = smu_init_power(smu); in smu_smc_table_sw_init()
1001 dev_err(smu->adev->dev, "Failed to init smu_init_power!\n"); in smu_smc_table_sw_init()
1008 ret = smu_init_fb_allocations(smu); in smu_smc_table_sw_init()
1012 ret = smu_alloc_memory_pool(smu); in smu_smc_table_sw_init()
1016 ret = smu_alloc_dummy_read_table(smu); in smu_smc_table_sw_init()
1020 ret = smu_i2c_init(smu); in smu_smc_table_sw_init()
1027 static int smu_smc_table_sw_fini(struct smu_context *smu) in smu_smc_table_sw_fini() argument
1031 smu_i2c_fini(smu); in smu_smc_table_sw_fini()
1033 smu_free_dummy_read_table(smu); in smu_smc_table_sw_fini()
1035 ret = smu_free_memory_pool(smu); in smu_smc_table_sw_fini()
1039 ret = smu_fini_fb_allocations(smu); in smu_smc_table_sw_fini()
1043 ret = smu_fini_power(smu); in smu_smc_table_sw_fini()
1045 dev_err(smu->adev->dev, "Failed to init smu_fini_power!\n"); in smu_smc_table_sw_fini()
1049 ret = smu_fini_smc_tables(smu); in smu_smc_table_sw_fini()
1051 dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n"); in smu_smc_table_sw_fini()
1060 struct smu_context *smu = container_of(work, struct smu_context, in smu_throttling_logging_work_fn() local
1063 smu_log_thermal_throttling(smu); in smu_throttling_logging_work_fn()
1068 struct smu_context *smu = container_of(work, struct smu_context, in smu_interrupt_work_fn() local
1071 if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work) in smu_interrupt_work_fn()
1072 smu->ppt_funcs->interrupt_work(smu); in smu_interrupt_work_fn()
1077 struct smu_context *smu = in smu_swctf_delayed_work_handler() local
1080 &smu->thermal_range; in smu_swctf_delayed_work_handler()
1081 struct amdgpu_device *adev = smu->adev; in smu_swctf_delayed_work_handler()
1090 smu->ppt_funcs->read_sensor && in smu_swctf_delayed_work_handler()
1091 !smu->ppt_funcs->read_sensor(smu, in smu_swctf_delayed_work_handler()
1106 struct smu_context *smu = adev->powerplay.pp_handle; in smu_sw_init() local
1109 smu->pool_size = adev->pm.smu_prv_buffer_size; in smu_sw_init()
1110 smu->smu_feature.feature_num = SMU_FEATURE_MAX; in smu_sw_init()
1111 bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX); in smu_sw_init()
1112 bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX); in smu_sw_init()
1114 INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn); in smu_sw_init()
1115 INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn); in smu_sw_init()
1116 atomic64_set(&smu->throttle_int_counter, 0); in smu_sw_init()
1117 smu->watermarks_bitmap = 0; in smu_sw_init()
1118 smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; in smu_sw_init()
1119 smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; in smu_sw_init()
1121 atomic_set(&smu->smu_power.power_gate.vcn_gated, 1); in smu_sw_init()
1122 atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); in smu_sw_init()
1124 smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; in smu_sw_init()
1125 smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; in smu_sw_init()
1126 smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; in smu_sw_init()
1127 smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; in smu_sw_init()
1128 smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3; in smu_sw_init()
1129 smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; in smu_sw_init()
1130 smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; in smu_sw_init()
1131 smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; in smu_sw_init()
1133 smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; in smu_sw_init()
1134 smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; in smu_sw_init()
1135 smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING; in smu_sw_init()
1136 smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO; in smu_sw_init()
1137 smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR; in smu_sw_init()
1138 smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE; in smu_sw_init()
1139 smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM; in smu_sw_init()
1140 smu->display_config = &adev->pm.pm_display_cfg; in smu_sw_init()
1142 smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; in smu_sw_init()
1143 smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; in smu_sw_init()
1145 INIT_DELAYED_WORK(&smu->swctf_delayed_work, in smu_sw_init()
1148 ret = smu_smc_table_sw_init(smu); in smu_sw_init()
1155 ret = smu_get_vbios_bootup_values(smu); in smu_sw_init()
1161 ret = smu_init_pptable_microcode(smu); in smu_sw_init()
1167 ret = smu_register_irq_handler(smu); in smu_sw_init()
1174 if (!smu->ppt_funcs->get_fan_control_mode) in smu_sw_init()
1175 smu->adev->pm.no_fan = true; in smu_sw_init()
1183 struct smu_context *smu = adev->powerplay.pp_handle; in smu_sw_fini() local
1186 ret = smu_smc_table_sw_fini(smu); in smu_sw_fini()
1192 smu_fini_microcode(smu); in smu_sw_fini()
1197 static int smu_get_thermal_temperature_range(struct smu_context *smu) in smu_get_thermal_temperature_range() argument
1199 struct amdgpu_device *adev = smu->adev; in smu_get_thermal_temperature_range()
1201 &smu->thermal_range; in smu_get_thermal_temperature_range()
1204 if (!smu->ppt_funcs->get_thermal_temperature_range) in smu_get_thermal_temperature_range()
1207 ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range); in smu_get_thermal_temperature_range()
1224 static int smu_smc_hw_setup(struct smu_context *smu) in smu_smc_hw_setup() argument
1226 struct smu_feature *feature = &smu->smu_feature; in smu_smc_hw_setup()
1227 struct amdgpu_device *adev = smu->adev; in smu_smc_hw_setup()
1237 if (adev->in_suspend && smu_is_dpm_running(smu)) { in smu_smc_hw_setup()
1239 ret = smu_system_features_control(smu, true); in smu_smc_hw_setup()
1249 ret = smu_init_display_count(smu, 0); in smu_smc_hw_setup()
1255 ret = smu_set_driver_table_location(smu); in smu_smc_hw_setup()
1264 ret = smu_set_tool_table_location(smu); in smu_smc_hw_setup()
1274 ret = smu_notify_memory_pool_location(smu); in smu_smc_hw_setup()
1286 ret = smu_setup_pptable(smu); in smu_smc_hw_setup()
1304 ret = smu_write_pptable(smu); in smu_smc_hw_setup()
1312 ret = smu_run_btc(smu); in smu_smc_hw_setup()
1321 ret = smu_feature_set_allowed_mask(smu); in smu_smc_hw_setup()
1328 ret = smu_system_features_control(smu, true); in smu_smc_hw_setup()
1334 ret = smu_feature_get_enabled_mask(smu, &features_supported); in smu_smc_hw_setup()
1343 if (!smu_is_dpm_running(smu)) in smu_smc_hw_setup()
1351 ret = smu_set_default_dpm_table(smu); in smu_smc_hw_setup()
1382 ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width); in smu_smc_hw_setup()
1388 ret = smu_get_thermal_temperature_range(smu); in smu_smc_hw_setup()
1394 ret = smu_enable_thermal_alert(smu); in smu_smc_hw_setup()
1400 ret = smu_notify_display_change(smu); in smu_smc_hw_setup()
1410 ret = smu_set_min_dcef_deep_sleep(smu, in smu_smc_hw_setup()
1411 smu->smu_table.boot_values.dcefclk / 100); in smu_smc_hw_setup()
1416 static int smu_start_smc_engine(struct smu_context *smu) in smu_start_smc_engine() argument
1418 struct amdgpu_device *adev = smu->adev; in smu_start_smc_engine()
1423 if (smu->ppt_funcs->load_microcode) { in smu_start_smc_engine()
1424 ret = smu->ppt_funcs->load_microcode(smu); in smu_start_smc_engine()
1431 if (smu->ppt_funcs->check_fw_status) { in smu_start_smc_engine()
1432 ret = smu->ppt_funcs->check_fw_status(smu); in smu_start_smc_engine()
1443 ret = smu_check_fw_version(smu); in smu_start_smc_engine()
1454 struct smu_context *smu = adev->powerplay.pp_handle; in smu_hw_init() local
1457 smu->pm_enabled = false; in smu_hw_init()
1461 ret = smu_start_smc_engine(smu); in smu_hw_init()
1467 if (smu->is_apu) { in smu_hw_init()
1468 ret = smu_set_gfx_imu_enable(smu); in smu_hw_init()
1471 smu_dpm_set_vcn_enable(smu, true); in smu_hw_init()
1472 smu_dpm_set_jpeg_enable(smu, true); in smu_hw_init()
1473 smu_set_gfx_cgpg(smu, true); in smu_hw_init()
1476 if (!smu->pm_enabled) in smu_hw_init()
1479 ret = smu_get_driver_allowed_feature_mask(smu); in smu_hw_init()
1483 ret = smu_smc_hw_setup(smu); in smu_hw_init()
1496 ret = smu_init_max_sustainable_clocks(smu); in smu_hw_init()
1509 static int smu_disable_dpms(struct smu_context *smu) in smu_disable_dpms() argument
1511 struct amdgpu_device *adev = smu->adev; in smu_disable_dpms()
1513 bool use_baco = !smu->is_apu && in smu_disable_dpms()
1542 if (smu->uploading_custom_pp_table) { in smu_disable_dpms()
1593 if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) { in smu_disable_dpms()
1594 ret = smu_disable_all_features_with_exception(smu, in smu_disable_dpms()
1601 ret = smu_system_features_control(smu, false); in smu_disable_dpms()
1614 static int smu_smc_hw_cleanup(struct smu_context *smu) in smu_smc_hw_cleanup() argument
1616 struct amdgpu_device *adev = smu->adev; in smu_smc_hw_cleanup()
1619 cancel_work_sync(&smu->throttling_logging_work); in smu_smc_hw_cleanup()
1620 cancel_work_sync(&smu->interrupt_work); in smu_smc_hw_cleanup()
1622 ret = smu_disable_thermal_alert(smu); in smu_smc_hw_cleanup()
1628 cancel_delayed_work_sync(&smu->swctf_delayed_work); in smu_smc_hw_cleanup()
1630 ret = smu_disable_dpms(smu); in smu_smc_hw_cleanup()
1642 struct smu_context *smu = adev->powerplay.pp_handle; in smu_hw_fini() local
1647 smu_dpm_set_vcn_enable(smu, false); in smu_hw_fini()
1648 smu_dpm_set_jpeg_enable(smu, false); in smu_hw_fini()
1653 if (!smu->pm_enabled) in smu_hw_fini()
1658 return smu_smc_hw_cleanup(smu); in smu_hw_fini()
1664 struct smu_context *smu = adev->powerplay.pp_handle; in smu_late_fini() local
1666 kfree(smu); in smu_late_fini()
1669 static int smu_reset(struct smu_context *smu) in smu_reset() argument
1671 struct amdgpu_device *adev = smu->adev; in smu_reset()
1692 struct smu_context *smu = adev->powerplay.pp_handle; in smu_suspend() local
1699 if (!smu->pm_enabled) in smu_suspend()
1704 ret = smu_smc_hw_cleanup(smu); in smu_suspend()
1708 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED); in smu_suspend()
1710 smu_set_gfx_cgpg(smu, false); in smu_suspend()
1716 ret = smu_get_entrycount_gfxoff(smu, &count); in smu_suspend()
1727 struct smu_context *smu = adev->powerplay.pp_handle; in smu_resume() local
1732 if (!smu->pm_enabled) in smu_resume()
1737 ret = smu_start_smc_engine(smu); in smu_resume()
1743 ret = smu_smc_hw_setup(smu); in smu_resume()
1749 ret = smu_set_gfx_imu_enable(smu); in smu_resume()
1753 smu_set_gfx_cgpg(smu, true); in smu_resume()
1755 smu->disable_uclk_switch = 0; in smu_resume()
1767 struct smu_context *smu = handle; in smu_display_configuration_change() local
1769 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_display_configuration_change()
1775 smu_set_min_dcef_deep_sleep(smu, in smu_display_configuration_change()
1801 struct smu_context *smu = (struct smu_context*)(handle); in smu_enable_umd_pstate() local
1802 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_enable_umd_pstate()
1804 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) in smu_enable_umd_pstate()
1811 smu_gpo_control(smu, false); in smu_enable_umd_pstate()
1812 smu_gfx_ulv_control(smu, false); in smu_enable_umd_pstate()
1813 smu_deep_sleep_control(smu, false); in smu_enable_umd_pstate()
1814 amdgpu_asic_update_umd_stable_pstate(smu->adev, true); in smu_enable_umd_pstate()
1821 amdgpu_asic_update_umd_stable_pstate(smu->adev, false); in smu_enable_umd_pstate()
1822 smu_deep_sleep_control(smu, true); in smu_enable_umd_pstate()
1823 smu_gfx_ulv_control(smu, true); in smu_enable_umd_pstate()
1824 smu_gpo_control(smu, true); in smu_enable_umd_pstate()
1831 static int smu_bump_power_profile_mode(struct smu_context *smu, in smu_bump_power_profile_mode() argument
1837 if (smu->ppt_funcs->set_power_profile_mode) in smu_bump_power_profile_mode()
1838 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size); in smu_bump_power_profile_mode()
1843 static int smu_adjust_power_state_dynamic(struct smu_context *smu, in smu_adjust_power_state_dynamic() argument
1851 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_adjust_power_state_dynamic()
1854 ret = smu_display_config_changed(smu); in smu_adjust_power_state_dynamic()
1856 dev_err(smu->adev->dev, "Failed to change display config!"); in smu_adjust_power_state_dynamic()
1861 ret = smu_apply_clocks_adjust_rules(smu); in smu_adjust_power_state_dynamic()
1863 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!"); in smu_adjust_power_state_dynamic()
1868 ret = smu_notify_smc_display_config(smu); in smu_adjust_power_state_dynamic()
1870 dev_err(smu->adev->dev, "Failed to notify smc display config!"); in smu_adjust_power_state_dynamic()
1876 ret = smu_asic_set_performance_level(smu, level); in smu_adjust_power_state_dynamic()
1878 dev_err(smu->adev->dev, "Failed to set performance level!"); in smu_adjust_power_state_dynamic()
1888 index = fls(smu->workload_mask); in smu_adjust_power_state_dynamic()
1890 workload[0] = smu->workload_setting[index]; in smu_adjust_power_state_dynamic()
1892 if (init || smu->power_profile_mode != workload[0]) in smu_adjust_power_state_dynamic()
1893 smu_bump_power_profile_mode(smu, workload, 0); in smu_adjust_power_state_dynamic()
1899 static int smu_handle_task(struct smu_context *smu, in smu_handle_task() argument
1905 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_handle_task()
1910 ret = smu_pre_display_config_changed(smu); in smu_handle_task()
1913 ret = smu_adjust_power_state_dynamic(smu, level, false, false); in smu_handle_task()
1916 ret = smu_adjust_power_state_dynamic(smu, level, true, true); in smu_handle_task()
1919 ret = smu_adjust_power_state_dynamic(smu, level, true, false); in smu_handle_task()
1932 struct smu_context *smu = handle; in smu_handle_dpm_task() local
1933 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; in smu_handle_dpm_task()
1935 return smu_handle_task(smu, smu_dpm->dpm_level, task_id); in smu_handle_dpm_task()
1943 struct smu_context *smu = handle; in smu_switch_power_profile() local
1944 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_switch_power_profile()
1948 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_switch_power_profile()
1955 smu->workload_mask &= ~(1 << smu->workload_prority[type]); in smu_switch_power_profile()
1956 index = fls(smu->workload_mask); in smu_switch_power_profile()
1958 workload[0] = smu->workload_setting[index]; in smu_switch_power_profile()
1960 smu->workload_mask |= (1 << smu->workload_prority[type]); in smu_switch_power_profile()
1961 index = fls(smu->workload_mask); in smu_switch_power_profile()
1963 workload[0] = smu->workload_setting[index]; in smu_switch_power_profile()
1968 smu_bump_power_profile_mode(smu, workload, 0); in smu_switch_power_profile()
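smu_adjust_power_state_dynamic() and smu_switch_power_profile() pick the active profile the same way: each profile owns the bit at its priority position in workload_mask, and fls() selects the highest-priority bit still set. A userspace sketch with a hand-rolled fls() (the index clamp is how I read the surrounding kernel code; the priority ordering matches the smu_sw_init() listing above):

    #include <stdio.h>

    /* Priorities from smu_sw_init(): BOOTUP_DEFAULT lowest, CUSTOM highest. */
    enum { PRIO_BOOTUP = 0, PRIO_FULLSCREEN3D, PRIO_POWERSAVING, PRIO_VIDEO,
           PRIO_VR, PRIO_COMPUTE, PRIO_CUSTOM, PRIO_MAX };

    static const char * const setting[PRIO_MAX] = {
        "BOOTUP_DEFAULT", "FULLSCREEN3D", "POWERSAVING",
        "VIDEO", "VR", "COMPUTE", "CUSTOM",
    };

    static unsigned int workload_mask = 1u << PRIO_BOOTUP;

    /* find-last-set: 1-based index of the highest set bit; 0 if mask == 0 */
    static int mock_fls(unsigned int x)
    {
        int i = 0;
        while (x) { x >>= 1; i++; }
        return i;
    }

    static const char *active_profile(void)
    {
        int index = mock_fls(workload_mask);
        index = (index > 0 && index <= PRIO_MAX) ? index - 1 : 0;
        return setting[index];
    }

    int main(void)
    {
        workload_mask |= 1u << PRIO_COMPUTE;   /* a client enables COMPUTE */
        printf("active: %s\n", active_profile());  /* COMPUTE outranks BOOTUP */
        workload_mask &= ~(1u << PRIO_COMPUTE);
        printf("active: %s\n", active_profile());  /* back to BOOTUP_DEFAULT */
        return 0;
    }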
1975 struct smu_context *smu = handle; in smu_get_performance_level() local
1976 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_get_performance_level()
1978 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_performance_level()
1981 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) in smu_get_performance_level()
1990 struct smu_context *smu = handle; in smu_force_performance_level() local
1991 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_force_performance_level()
1994 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_force_performance_level()
1997 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) in smu_force_performance_level()
2000 ret = smu_enable_umd_pstate(smu, &level); in smu_force_performance_level()
2004 ret = smu_handle_task(smu, level, in smu_force_performance_level()
2009 memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask)); in smu_force_performance_level()
2010 smu->user_dpm_profile.clk_dependency = 0; in smu_force_performance_level()
2018 struct smu_context *smu = handle; in smu_set_display_count() local
2020 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_display_count()
2023 return smu_init_display_count(smu, count); in smu_set_display_count()
2026 static int smu_force_smuclk_levels(struct smu_context *smu, in smu_force_smuclk_levels() argument
2030 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_force_smuclk_levels()
2033 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_force_smuclk_levels()
2037 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n"); in smu_force_smuclk_levels()
2041 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) { in smu_force_smuclk_levels()
2042 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask); in smu_force_smuclk_levels()
2043 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { in smu_force_smuclk_levels()
2044 smu->user_dpm_profile.clk_mask[clk_type] = mask; in smu_force_smuclk_levels()
2045 smu_set_user_clk_dependencies(smu, clk_type); in smu_force_smuclk_levels()
2056 struct smu_context *smu = handle; in smu_force_ppclk_levels() local
2092 return smu_force_smuclk_levels(smu, clk_type, mask); in smu_force_ppclk_levels()
2105 struct smu_context *smu = handle; in smu_set_mp1_state() local
2108 if (!smu->pm_enabled) in smu_set_mp1_state()
2111 if (smu->ppt_funcs && in smu_set_mp1_state()
2112 smu->ppt_funcs->set_mp1_state) in smu_set_mp1_state()
2113 ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state); in smu_set_mp1_state()
2121 struct smu_context *smu = handle; in smu_set_df_cstate() local
2124 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_df_cstate()
2127 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate) in smu_set_df_cstate()
2130 ret = smu->ppt_funcs->set_df_cstate(smu, state); in smu_set_df_cstate()
2132 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n"); in smu_set_df_cstate()
2137 int smu_allow_xgmi_power_down(struct smu_context *smu, bool en) in smu_allow_xgmi_power_down() argument
2141 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_allow_xgmi_power_down()
2144 if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down) in smu_allow_xgmi_power_down()
2147 ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en); in smu_allow_xgmi_power_down()
2149 dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n"); in smu_allow_xgmi_power_down()
2154 int smu_write_watermarks_table(struct smu_context *smu) in smu_write_watermarks_table() argument
2156 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_write_watermarks_table()
2159 return smu_set_watermarks_table(smu, NULL); in smu_write_watermarks_table()
2165 struct smu_context *smu = handle; in smu_set_watermarks_for_clock_ranges() local
2167 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_watermarks_for_clock_ranges()
2170 if (smu->disable_watermark) in smu_set_watermarks_for_clock_ranges()
2173 return smu_set_watermarks_table(smu, clock_ranges); in smu_set_watermarks_for_clock_ranges()
2176 int smu_set_ac_dc(struct smu_context *smu) in smu_set_ac_dc() argument
2180 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_ac_dc()
2184 if (smu->dc_controlled_by_gpio) in smu_set_ac_dc()
2187 ret = smu_set_power_source(smu, in smu_set_ac_dc()
2188 smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC : in smu_set_ac_dc()
2191 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n", in smu_set_ac_dc()
2192 smu->adev->pm.ac_power ? "AC" : "DC"); in smu_set_ac_dc()
2242 struct smu_context *smu = handle; in smu_load_microcode() local
2243 struct amdgpu_device *adev = smu->adev; in smu_load_microcode()
2246 if (!smu->pm_enabled) in smu_load_microcode()
2253 if (smu->ppt_funcs->load_microcode) { in smu_load_microcode()
2254 ret = smu->ppt_funcs->load_microcode(smu); in smu_load_microcode()
2261 if (smu->ppt_funcs->check_fw_status) { in smu_load_microcode()
2262 ret = smu->ppt_funcs->check_fw_status(smu); in smu_load_microcode()
2272 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) in smu_set_gfx_cgpg() argument
2276 if (smu->ppt_funcs->set_gfx_cgpg) in smu_set_gfx_cgpg()
2277 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled); in smu_set_gfx_cgpg()
2284 struct smu_context *smu = handle; in smu_set_fan_speed_rpm() local
2287 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_fan_speed_rpm()
2290 if (!smu->ppt_funcs->set_fan_speed_rpm) in smu_set_fan_speed_rpm()
2296 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed); in smu_set_fan_speed_rpm()
2297 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { in smu_set_fan_speed_rpm()
2298 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM; in smu_set_fan_speed_rpm()
2299 smu->user_dpm_profile.fan_speed_rpm = speed; in smu_set_fan_speed_rpm()
2302 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM; in smu_set_fan_speed_rpm()
2303 smu->user_dpm_profile.fan_speed_pwm = 0; in smu_set_fan_speed_rpm()
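The fan-speed setters cache the user's value in one unit at a time: a successful RPM write records SMU_CUSTOM_FAN_SPEED_RPM and zeroes the cached PWM value, and smu_set_fan_speed_pwm() below does the mirror image. A small sketch of that either/or caching (flag names kept, hardware writes assumed to succeed):

    #include <stdint.h>
    #include <stdio.h>

    #define SMU_CUSTOM_FAN_SPEED_RPM (1u << 1)
    #define SMU_CUSTOM_FAN_SPEED_PWM (1u << 2)

    static unsigned int flags;
    static uint32_t fan_speed_rpm, fan_speed_pwm;

    static void mock_set_fan_speed_rpm(uint32_t speed)
    {
        flags |= SMU_CUSTOM_FAN_SPEED_RPM;
        fan_speed_rpm = speed;
        flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;  /* the units are exclusive */
        fan_speed_pwm = 0;
    }

    static void mock_set_fan_speed_pwm(uint32_t speed)
    {
        flags |= SMU_CUSTOM_FAN_SPEED_PWM;
        fan_speed_pwm = speed;
        flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
        fan_speed_rpm = 0;
    }

    int main(void)
    {
        mock_set_fan_speed_rpm(1500);
        mock_set_fan_speed_pwm(128);  /* overrides: the RPM cache is cleared */
        printf("rpm=%u pwm=%u flags=0x%x\n", fan_speed_rpm, fan_speed_pwm, flags);
        return 0;
    }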
2324 struct smu_context *smu = handle; in smu_get_power_limit() local
2325 struct amdgpu_device *adev = smu->adev; in smu_get_power_limit()
2330 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_power_limit()
2362 if (smu->ppt_funcs->get_ppt_limit) in smu_get_power_limit()
2363 ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level); in smu_get_power_limit()
2373 ret = smu_get_asic_power_limits(smu, in smu_get_power_limit()
2374 &smu->current_power_limit, in smu_get_power_limit()
2381 *limit = smu->current_power_limit; in smu_get_power_limit()
2384 *limit = smu->default_power_limit; in smu_get_power_limit()
2387 *limit = smu->max_power_limit; in smu_get_power_limit()
2399 struct smu_context *smu = handle; in smu_set_power_limit() local
2403 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_power_limit()
2408 if (smu->ppt_funcs->set_power_limit) in smu_set_power_limit()
2409 return smu->ppt_funcs->set_power_limit(smu, limit_type, limit); in smu_set_power_limit()
2411 if (limit > smu->max_power_limit) { in smu_set_power_limit()
2412 dev_err(smu->adev->dev, in smu_set_power_limit()
2414 limit, smu->max_power_limit); in smu_set_power_limit()
2419 limit = smu->current_power_limit; in smu_set_power_limit()
2421 if (smu->ppt_funcs->set_power_limit) { in smu_set_power_limit()
2422 ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit); in smu_set_power_limit()
2423 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) in smu_set_power_limit()
2424 smu->user_dpm_profile.power_limit = limit; in smu_set_power_limit()
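For the default PPT limit, smu_set_power_limit() rejects requests above max_power_limit and maps limit == 0 onto the current limit before applying. A sketch of that validate/default/apply flow with invented wattage values and a plain -1 standing in for the kernel's error code:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t max_power_limit = 300, current_power_limit = 250;
    static uint32_t applied_limit;

    static int mock_set_power_limit(uint32_t limit)
    {
        if (limit > max_power_limit) {
            fprintf(stderr, "limit %u over the max allowed %u\n",
                    limit, max_power_limit);
            return -1;
        }
        if (!limit)
            limit = current_power_limit;  /* 0 means "keep the current limit" */
        applied_limit = limit;
        return 0;
    }

    int main(void)
    {
        mock_set_power_limit(400);  /* rejected */
        mock_set_power_limit(0);    /* falls back to the current 250 */
        printf("applied=%u\n", applied_limit);
        return 0;
    }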
2430 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) in smu_print_smuclk_levels() argument
2434 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_print_smuclk_levels()
2437 if (smu->ppt_funcs->print_clk_levels) in smu_print_smuclk_levels()
2438 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf); in smu_print_smuclk_levels()
2491 struct smu_context *smu = handle; in smu_print_ppclk_levels() local
2498 return smu_print_smuclk_levels(smu, clk_type, buf); in smu_print_ppclk_levels()
2503 struct smu_context *smu = handle; in smu_emit_ppclk_levels() local
2510 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_emit_ppclk_levels()
2513 if (!smu->ppt_funcs->emit_clk_levels) in smu_emit_ppclk_levels()
2516 return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset); in smu_emit_ppclk_levels()
2524 struct smu_context *smu = handle; in smu_od_edit_dpm_table() local
2527 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_od_edit_dpm_table()
2530 if (smu->ppt_funcs->od_edit_dpm_table) { in smu_od_edit_dpm_table()
2531 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size); in smu_od_edit_dpm_table()
2542 struct smu_context *smu = handle; in smu_read_sensor() local
2544 &smu->pstate_table; in smu_read_sensor()
2548 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_read_sensor()
2557 if (smu->ppt_funcs->read_sensor) in smu_read_sensor()
2558 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size)) in smu_read_sensor()
2579 ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data); in smu_read_sensor()
2583 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0; in smu_read_sensor()
2587 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0; in smu_read_sensor()
2591 *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1; in smu_read_sensor()
2614 struct smu_context *smu = handle; in smu_get_apu_thermal_limit() local
2616 if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit) in smu_get_apu_thermal_limit()
2617 ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit); in smu_get_apu_thermal_limit()
2625 struct smu_context *smu = handle; in smu_set_apu_thermal_limit() local
2627 if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit) in smu_set_apu_thermal_limit()
2628 ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit); in smu_set_apu_thermal_limit()
2635 struct smu_context *smu = handle; in smu_get_power_profile_mode() local
2637 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || in smu_get_power_profile_mode()
2638 !smu->ppt_funcs->get_power_profile_mode) in smu_get_power_profile_mode()
2643 return smu->ppt_funcs->get_power_profile_mode(smu, buf); in smu_get_power_profile_mode()
2650 struct smu_context *smu = handle; in smu_set_power_profile_mode() local
2652 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || in smu_set_power_profile_mode()
2653 !smu->ppt_funcs->set_power_profile_mode) in smu_set_power_profile_mode()
2656 return smu_bump_power_profile_mode(smu, param, param_size); in smu_set_power_profile_mode()
2661 struct smu_context *smu = handle; in smu_get_fan_control_mode() local
2663 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_fan_control_mode()
2666 if (!smu->ppt_funcs->get_fan_control_mode) in smu_get_fan_control_mode()
2672 *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu); in smu_get_fan_control_mode()
2679 struct smu_context *smu = handle; in smu_set_fan_control_mode() local
2682 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_fan_control_mode()
2685 if (!smu->ppt_funcs->set_fan_control_mode) in smu_set_fan_control_mode()
2691 ret = smu->ppt_funcs->set_fan_control_mode(smu, value); in smu_set_fan_control_mode()
2695 if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { in smu_set_fan_control_mode()
2696 smu->user_dpm_profile.fan_mode = value; in smu_set_fan_control_mode()
2700 smu->user_dpm_profile.fan_speed_pwm = 0; in smu_set_fan_control_mode()
2701 smu->user_dpm_profile.fan_speed_rpm = 0; in smu_set_fan_control_mode()
2702 smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM); in smu_set_fan_control_mode()
2712 struct smu_context *smu = handle; in smu_get_fan_speed_pwm() local
2715 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_fan_speed_pwm()
2718 if (!smu->ppt_funcs->get_fan_speed_pwm) in smu_get_fan_speed_pwm()
2724 ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed); in smu_get_fan_speed_pwm()
2731 struct smu_context *smu = handle; in smu_set_fan_speed_pwm() local
2734 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_fan_speed_pwm()
2737 if (!smu->ppt_funcs->set_fan_speed_pwm) in smu_set_fan_speed_pwm()
2743 ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed); in smu_set_fan_speed_pwm()
2744 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { in smu_set_fan_speed_pwm()
2745 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM; in smu_set_fan_speed_pwm()
2746 smu->user_dpm_profile.fan_speed_pwm = speed; in smu_set_fan_speed_pwm()
2749 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM; in smu_set_fan_speed_pwm()
2750 smu->user_dpm_profile.fan_speed_rpm = 0; in smu_set_fan_speed_pwm()
2758 struct smu_context *smu = handle; in smu_get_fan_speed_rpm() local
2761 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_fan_speed_rpm()
2764 if (!smu->ppt_funcs->get_fan_speed_rpm) in smu_get_fan_speed_rpm()
2770 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed); in smu_get_fan_speed_rpm()
2777 struct smu_context *smu = handle; in smu_set_deep_sleep_dcefclk() local
2779 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_deep_sleep_dcefclk()
2782 return smu_set_min_dcef_deep_sleep(smu, clk); in smu_set_deep_sleep_dcefclk()
2789 struct smu_context *smu = handle; in smu_get_clock_by_type_with_latency() local
2793 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_clock_by_type_with_latency()
2796 if (smu->ppt_funcs->get_clock_by_type_with_latency) { in smu_get_clock_by_type_with_latency()
2811 dev_err(smu->adev->dev, "Invalid clock type!\n"); in smu_get_clock_by_type_with_latency()
2815 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks); in smu_get_clock_by_type_with_latency()
2824 struct smu_context *smu = handle; in smu_display_clock_voltage_request() local
2827 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_display_clock_voltage_request()
2830 if (smu->ppt_funcs->display_clock_voltage_request) in smu_display_clock_voltage_request()
2831 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req); in smu_display_clock_voltage_request()
2840 struct smu_context *smu = handle; in smu_display_disable_memory_clock_switch() local
2843 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_display_disable_memory_clock_switch()
2846 if (smu->ppt_funcs->display_disable_memory_clock_switch) in smu_display_disable_memory_clock_switch()
2847 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch); in smu_display_disable_memory_clock_switch()
2855 struct smu_context *smu = handle; in smu_set_xgmi_pstate() local
2858 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_xgmi_pstate()
2861 if (smu->ppt_funcs->set_xgmi_pstate) in smu_set_xgmi_pstate()
2862 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate); in smu_set_xgmi_pstate()
2865 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n"); in smu_set_xgmi_pstate()
2872 struct smu_context *smu = handle; in smu_get_baco_capability() local
2876 if (!smu->pm_enabled) in smu_get_baco_capability()
2879 if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support) in smu_get_baco_capability()
2880 *cap = smu->ppt_funcs->baco_is_support(smu); in smu_get_baco_capability()
2887 struct smu_context *smu = handle; in smu_baco_set_state() local
2890 if (!smu->pm_enabled) in smu_baco_set_state()
2894 if (smu->ppt_funcs->baco_exit) in smu_baco_set_state()
2895 ret = smu->ppt_funcs->baco_exit(smu); in smu_baco_set_state()
2897 if (smu->ppt_funcs->baco_enter) in smu_baco_set_state()
2898 ret = smu->ppt_funcs->baco_enter(smu); in smu_baco_set_state()
2904 dev_err(smu->adev->dev, "Failed to %s BACO state!\n", in smu_baco_set_state()
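smu_baco_set_state() translates the requested state into a call to either the baco_enter or baco_exit hook rather than passing the state through, and logs the direction on failure. A mock two-way dispatch in the same shape:

    #include <stdio.h>

    enum mock_baco_state { MOCK_BACO_ENTER, MOCK_BACO_EXIT };

    static int mock_baco_enter(void) { printf("enter BACO\n"); return 0; }
    static int mock_baco_exit(void)  { printf("exit BACO\n");  return 0; }

    static int mock_baco_set_state(enum mock_baco_state state)
    {
        int ret = (state == MOCK_BACO_EXIT) ? mock_baco_exit()
                                            : mock_baco_enter();
        if (ret)
            fprintf(stderr, "Failed to %s BACO state!\n",
                    state == MOCK_BACO_EXIT ? "exit" : "enter");
        return ret;
    }

    int main(void)
    {
        mock_baco_set_state(MOCK_BACO_ENTER);
        mock_baco_set_state(MOCK_BACO_EXIT);
        return 0;
    }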
2910 bool smu_mode1_reset_is_support(struct smu_context *smu) in smu_mode1_reset_is_support() argument
2914 if (!smu->pm_enabled) in smu_mode1_reset_is_support()
2917 if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support) in smu_mode1_reset_is_support()
2918 ret = smu->ppt_funcs->mode1_reset_is_support(smu); in smu_mode1_reset_is_support()
2923 bool smu_mode2_reset_is_support(struct smu_context *smu) in smu_mode2_reset_is_support() argument
2927 if (!smu->pm_enabled) in smu_mode2_reset_is_support()
2930 if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support) in smu_mode2_reset_is_support()
2931 ret = smu->ppt_funcs->mode2_reset_is_support(smu); in smu_mode2_reset_is_support()
2936 int smu_mode1_reset(struct smu_context *smu) in smu_mode1_reset() argument
2940 if (!smu->pm_enabled) in smu_mode1_reset()
2943 if (smu->ppt_funcs->mode1_reset) in smu_mode1_reset()
2944 ret = smu->ppt_funcs->mode1_reset(smu); in smu_mode1_reset()
2951 struct smu_context *smu = handle; in smu_mode2_reset() local
2954 if (!smu->pm_enabled) in smu_mode2_reset()
2957 if (smu->ppt_funcs->mode2_reset) in smu_mode2_reset()
2958 ret = smu->ppt_funcs->mode2_reset(smu); in smu_mode2_reset()
2961 dev_err(smu->adev->dev, "Mode2 reset failed!\n"); in smu_mode2_reset()
2968 struct smu_context *smu = handle; in smu_enable_gfx_features() local
2971 if (!smu->pm_enabled) in smu_enable_gfx_features()
2974 if (smu->ppt_funcs->enable_gfx_features) in smu_enable_gfx_features()
2975 ret = smu->ppt_funcs->enable_gfx_features(smu); in smu_enable_gfx_features()
2978 dev_err(smu->adev->dev, "enable gfx features failed!\n"); in smu_enable_gfx_features()
2986 struct smu_context *smu = handle; in smu_get_max_sustainable_clocks_by_dc() local
2989 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_max_sustainable_clocks_by_dc()
2992 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc) in smu_get_max_sustainable_clocks_by_dc()
2993 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks); in smu_get_max_sustainable_clocks_by_dc()
3002 struct smu_context *smu = handle; in smu_get_uclk_dpm_states() local
3005 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_uclk_dpm_states()
3008 if (smu->ppt_funcs->get_uclk_dpm_states) in smu_get_uclk_dpm_states()
3009 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states); in smu_get_uclk_dpm_states()
3016 struct smu_context *smu = handle; in smu_get_current_power_state() local
3019 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_current_power_state()
3022 if (smu->ppt_funcs->get_current_power_state) in smu_get_current_power_state()
3023 pm_state = smu->ppt_funcs->get_current_power_state(smu); in smu_get_current_power_state()
3031 struct smu_context *smu = handle; in smu_get_dpm_clock_table() local
3034 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_dpm_clock_table()
3037 if (smu->ppt_funcs->get_dpm_clock_table) in smu_get_dpm_clock_table()
3038 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table); in smu_get_dpm_clock_table()
3045 struct smu_context *smu = handle; in smu_sys_get_gpu_metrics() local
3047 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_sys_get_gpu_metrics()
3050 if (!smu->ppt_funcs->get_gpu_metrics) in smu_sys_get_gpu_metrics()
3053 return smu->ppt_funcs->get_gpu_metrics(smu, table); in smu_sys_get_gpu_metrics()
3058 struct smu_context *smu = handle; in smu_enable_mgpu_fan_boost() local
3061 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_enable_mgpu_fan_boost()
3064 if (smu->ppt_funcs->enable_mgpu_fan_boost) in smu_enable_mgpu_fan_boost()
3065 ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu); in smu_enable_mgpu_fan_boost()
3073 struct smu_context *smu = handle; in smu_gfx_state_change_set() local
3076 if (smu->ppt_funcs->gfx_state_change_set) in smu_gfx_state_change_set()
3077 ret = smu->ppt_funcs->gfx_state_change_set(smu, state); in smu_gfx_state_change_set()
3082 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable) in smu_handle_passthrough_sbr() argument
3086 if (smu->ppt_funcs->smu_handle_passthrough_sbr) in smu_handle_passthrough_sbr()
3087 ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable); in smu_handle_passthrough_sbr()
3092 int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc) in smu_get_ecc_info() argument
3096 if (smu->ppt_funcs && in smu_get_ecc_info()
3097 smu->ppt_funcs->get_ecc_info) in smu_get_ecc_info()
3098 ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc); in smu_get_ecc_info()
3106 struct smu_context *smu = handle; in smu_get_prv_buffer_details() local
3107 struct smu_table_context *smu_table = &smu->smu_table; in smu_get_prv_buffer_details()
3181 int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event, in smu_wait_for_event() argument
3186 if (smu->ppt_funcs->wait_for_event) in smu_wait_for_event()
3187 ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg); in smu_wait_for_event()
3192 int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size) in smu_stb_collect_info() argument
3195 if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled) in smu_stb_collect_info()
3199 if (size != smu->stb_context.stb_buf_size) in smu_stb_collect_info()
3207 return smu->ppt_funcs->stb_collect_info(smu, buf, size); in smu_stb_collect_info()
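smu_stb_collect_info() enforces an exact-size contract: the caller's buffer must match stb_context.stb_buf_size, which is also how the debugfs open path below sizes its kvmalloc'd buffer. A sketch of that contract with a mock buffer; the memcpy body and error code are illustrative, since the real copy happens inside the per-ASIC hook:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #define MOCK_STB_BUF_SIZE 64u  /* stands in for smu->stb_context.stb_buf_size */

    static const unsigned char stb_data[MOCK_STB_BUF_SIZE] = { 0xab };

    static int mock_stb_collect_info(void *buf, unsigned int size)
    {
        if (size != MOCK_STB_BUF_SIZE)  /* exact-size contract */
            return -EINVAL;
        memcpy(buf, stb_data, size);
        return 0;
    }

    int main(void)
    {
        unsigned char buf[MOCK_STB_BUF_SIZE];
        printf("short read: %d\n", mock_stb_collect_info(buf, 16));
        printf("exact read: %d\n", mock_stb_collect_info(buf, sizeof(buf)));
        return 0;
    }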
3215 struct smu_context *smu = adev->powerplay.pp_handle; in smu_stb_debugfs_open() local
3219 buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL); in smu_stb_debugfs_open()
3223 r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size); in smu_stb_debugfs_open()
3240 struct smu_context *smu = adev->powerplay.pp_handle; in smu_stb_debugfs_read() local
3249 smu->stb_context.stb_buf_size); in smu_stb_debugfs_read()
3281 struct smu_context *smu = adev->powerplay.pp_handle; in amdgpu_smu_stb_debug_fs_init() local
3283 if (!smu || (!smu->stb_context.stb_buf_size)) in amdgpu_smu_stb_debug_fs_init()
3291 smu->stb_context.stb_buf_size); in amdgpu_smu_stb_debug_fs_init()
3295 int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size) in smu_send_hbm_bad_pages_num() argument
3299 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num) in smu_send_hbm_bad_pages_num()
3300 ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size); in smu_send_hbm_bad_pages_num()
3305 int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size) in smu_send_hbm_bad_channel_flag() argument
3309 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag) in smu_send_hbm_bad_channel_flag()
3310 ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size); in smu_send_hbm_bad_channel_flag()