/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "hwmgr.h"
#include "amd_powerplay.h"
#include "vega20_smumgr.h"
#include "hardwaremanager.h"
#include "ppatomfwctrl.h"
#include "atomfirmware.h"
#include "cgs_common.h"
#include "vega20_powertune.h"
#include "vega20_inc.h"
#include "pppcielanes.h"
#include "vega20_hwmgr.h"
#include "vega20_processpptables.h"
#include "vega20_pptable.h"
#include "vega20_thermal.h"
#include "vega20_ppsmc.h"
#include "pp_debug.h"
#include "amd_pcie_helpers.h"
#include "ppinterrupt.h"
#include "pp_overdriver.h"
#include "pp_thermal.h"
#include "soc15_common.h"
#include "vega20_baco.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
#include "nbio/nbio_7_4_sh_mask.h"

#define smnPCIE_LC_SPEED_CNTL		0x11140290
#define smnPCIE_LC_LINK_WIDTH_CNTL	0x11140288

#define LINK_WIDTH_MAX			6
#define LINK_SPEED_MAX			3
static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
static int link_speed[] = {25, 50, 80, 160};

static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);

	data->gfxclk_average_alpha = PPVEGA20_VEGA20GFXCLKAVERAGEALPHA_DFLT;
	data->socclk_average_alpha = PPVEGA20_VEGA20SOCCLKAVERAGEALPHA_DFLT;
	data->uclk_average_alpha = PPVEGA20_VEGA20UCLKCLKAVERAGEALPHA_DFLT;
	data->gfx_activity_average_alpha = PPVEGA20_VEGA20GFXACTIVITYAVERAGEALPHA_DFLT;
	data->lowest_uclk_reserved_for_ulv = PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT;

	data->display_voltage_mode = PPVEGA20_VEGA20DISPLAYVOLTAGEMODE_DFLT;
	data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;

	/*
	 * Disable the following features for now:
	 *   GFXCLK DS
	 *   SOCCLK DS
	 *   LCLK DS
	 *   DCEFCLK DS
	 *   FCLK DS
	 *   MP1CLK DS
	 *   MP0CLK DS
	 */
	data->registry_data.disallowed_features = 0xE0041C00;
	/* ECC feature should be disabled on old SMUs */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version);
	if (hwmgr->smu_version < 0x282100)
		data->registry_data.disallowed_features |= FEATURE_ECC_MASK;

	if (!(hwmgr->feature_mask & PP_PCIE_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_LINK_MASK;

	if (!(hwmgr->feature_mask & PP_SCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_GFXCLK_MASK;

	if (!(hwmgr->feature_mask & PP_SOCCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_SOCCLK_MASK;

	if (!(hwmgr->feature_mask & PP_MCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_UCLK_MASK;

	if (!(hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_DCEFCLK_MASK;

	if (!(hwmgr->feature_mask & PP_ULV_MASK))
		data->registry_data.disallowed_features |= FEATURE_ULV_MASK;

	if (!(hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK))
		data->registry_data.disallowed_features |= FEATURE_DS_GFXCLK_MASK;

	data->registry_data.od_state_in_dc_support = 0;
	data->registry_data.thermal_support = 1;
	data->registry_data.skip_baco_hardware = 0;

	data->registry_data.log_avfs_param = 0;
	data->registry_data.sclk_throttle_low_notification = 1;
	data->registry_data.force_dpm_high = 0;
	data->registry_data.stable_pstate_sclk_dpm_percentage = 75;

	data->registry_data.didt_support = 0;
	if (data->registry_data.didt_support) {
		data->registry_data.didt_mode = 6;
		data->registry_data.sq_ramping_support = 1;
		data->registry_data.db_ramping_support = 0;
		data->registry_data.td_ramping_support = 0;
		data->registry_data.tcp_ramping_support = 0;
		data->registry_data.dbr_ramping_support = 0;
		data->registry_data.edc_didt_support = 1;
		data->registry_data.gc_didt_support = 0;
		data->registry_data.psm_didt_support = 0;
	}

	data->registry_data.pcie_lane_override = 0xff;
	data->registry_data.pcie_speed_override = 0xff;
	data->registry_data.pcie_clock_override = 0xffffffff;
	data->registry_data.regulator_hot_gpio_support = 1;
	data->registry_data.ac_dc_switch_gpio_support = 0;
	data->registry_data.quick_transition_support = 0;
	data->registry_data.zrpm_start_temp = 0xffff;
	data->registry_data.zrpm_stop_temp = 0xffff;
	data->registry_data.od8_feature_enable = 1;
	data->registry_data.disable_water_mark = 0;
	data->registry_data.disable_pp_tuning = 0;
	data->registry_data.disable_xlpp_tuning = 0;
	data->registry_data.disable_workload_policy = 0;
	data->registry_data.perf_ui_tuning_profile_turbo = 0x19190F0F;
	data->registry_data.perf_ui_tuning_profile_powerSave = 0x19191919;
	data->registry_data.perf_ui_tuning_profile_xl = 0x00000F0A;
	data->registry_data.force_workload_policy_mask = 0;
	data->registry_data.disable_3d_fs_detection = 0;
	data->registry_data.fps_support = 1;
	data->registry_data.disable_auto_wattman = 1;
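	/* Auto Wattman is disabled by default; its tuning parameters below
	 * are left at their stock values.
	 */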
	data->registry_data.auto_wattman_debug = 0;
	data->registry_data.auto_wattman_sample_period = 100;
	data->registry_data.fclk_gfxclk_ratio = 0;
	data->registry_data.auto_wattman_threshold = 50;
	data->registry_data.gfxoff_controlled_by_driver = 1;
	data->gfxoff_allowed = false;
	data->counter_gfxoff = 0;
}

static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;

	if (data->vddci_control == VEGA20_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TablelessHardwareInterface);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_BACO);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableSMU7ThermalManagement);

	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDPowerGating);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_VCEPowerGating);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UnTabledHardwareInterface);

	if (data->registry_data.od8_feature_enable)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_OD8inACSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ActivityReporting);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_FanSpeedInTableIsRPM);

	if (data->registry_data.od_state_in_dc_support) {
		if (data->registry_data.od8_feature_enable)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_OD8inDCSupport);
	}

	if (data->registry_data.thermal_support &&
	    data->registry_data.fuzzy_fan_control_support &&
	    hwmgr->thermal_controller.advanceFanControlParameters.usTMax)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ODFuzzyFanControlSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPowerManagement);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SMC);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalPolicyDelay);

	if (data->registry_data.force_dpm_high)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ExclusiveModeAlwaysHigh);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicUVDState);

	if (data->registry_data.sclk_throttle_low_notification)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SclkThrottleLowNotification);

	/* power tune caps */
	/* assume disabled */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtSupport);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtEDCEnable);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_GCEDC);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PSM);

	if (data->registry_data.didt_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_DiDtSupport);
		if (data->registry_data.sq_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_SQRamping);
		if (data->registry_data.db_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DBRamping);
		if (data->registry_data.td_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_TDRamping);
		if (data->registry_data.tcp_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_TCPRamping);
		if (data->registry_data.dbr_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DBRRamping);
		if (data->registry_data.edc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DiDtEDCEnable);
		if (data->registry_data.gc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_GCEDC);
		if (data->registry_data.psm_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PSM);
	}

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot);

	if (data->registry_data.ac_dc_switch_gpio_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
	}

	if (data->registry_data.quick_transition_support) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition);
	}

	if (data->lowest_uclk_reserved_for_ulv != PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_LowestUclkReservedForUlv);
		if (data->lowest_uclk_reserved_for_ulv == 1)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_LowestUclkReservedForUlv);
	}

	if (data->registry_data.custom_fan_support)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_CustomFanControlSupport);

	return 0;
}

static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t top32, bottom32;
	int i;

	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
			FEATURE_DPM_PREFETCHER_BIT;
	data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
			FEATURE_DPM_GFXCLK_BIT;
	data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
			FEATURE_DPM_UCLK_BIT;
	data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
			FEATURE_DPM_SOCCLK_BIT;
	data->smu_features[GNLD_DPM_UVD].smu_feature_id =
			FEATURE_DPM_UVD_BIT;
	data->smu_features[GNLD_DPM_VCE].smu_feature_id =
			FEATURE_DPM_VCE_BIT;
	data->smu_features[GNLD_ULV].smu_feature_id =
			FEATURE_ULV_BIT;
	data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
			FEATURE_DPM_MP0CLK_BIT;
	data->smu_features[GNLD_DPM_LINK].smu_feature_id =
			FEATURE_DPM_LINK_BIT;
	data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
			FEATURE_DPM_DCEFCLK_BIT;
	data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
			FEATURE_DS_GFXCLK_BIT;
	data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
			FEATURE_DS_SOCCLK_BIT;
	data->smu_features[GNLD_DS_LCLK].smu_feature_id =
			FEATURE_DS_LCLK_BIT;
	data->smu_features[GNLD_PPT].smu_feature_id =
			FEATURE_PPT_BIT;
	data->smu_features[GNLD_TDC].smu_feature_id =
			FEATURE_TDC_BIT;
	data->smu_features[GNLD_THERMAL].smu_feature_id =
			FEATURE_THERMAL_BIT;
	data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
			FEATURE_GFX_PER_CU_CG_BIT;
	data->smu_features[GNLD_RM].smu_feature_id =
			FEATURE_RM_BIT;
	data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
			FEATURE_DS_DCEFCLK_BIT;
	data->smu_features[GNLD_ACDC].smu_feature_id =
			FEATURE_ACDC_BIT;
	data->smu_features[GNLD_VR0HOT].smu_feature_id =
			FEATURE_VR0HOT_BIT;
	data->smu_features[GNLD_VR1HOT].smu_feature_id =
			FEATURE_VR1HOT_BIT;
	data->smu_features[GNLD_FW_CTF].smu_feature_id =
			FEATURE_FW_CTF_BIT;
	data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
			FEATURE_LED_DISPLAY_BIT;
	data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
			FEATURE_FAN_CONTROL_BIT;
	data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
	data->smu_features[GNLD_GFXOFF].smu_feature_id = FEATURE_GFXOFF_BIT;
	data->smu_features[GNLD_CG].smu_feature_id = FEATURE_CG_BIT;
	data->smu_features[GNLD_DPM_FCLK].smu_feature_id = FEATURE_DPM_FCLK_BIT;
	data->smu_features[GNLD_DS_FCLK].smu_feature_id = FEATURE_DS_FCLK_BIT;
	data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
	data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
	data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
	data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT;

	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		data->smu_features[i].smu_feature_bitmap =
			(uint64_t)(1ULL << data->smu_features[i].smu_feature_id);
		data->smu_features[i].allowed =
			((data->registry_data.disallowed_features >> i) & 1) ?
				false : true;
	}

	/* Get the SN to turn into a Unique ID */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);

	adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}

static int vega20_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
{
	return 0;
}

static int vega20_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	kfree(hwmgr->backend);
	hwmgr->backend = NULL;

	return 0;
}

static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data;
	struct amdgpu_device *adev = hwmgr->adev;

	data = kzalloc(sizeof(struct vega20_hwmgr), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	hwmgr->backend = data;

	hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	vega20_set_default_registry_data(hwmgr);

	data->disable_dpm_mask = 0xff;

	/* need to set voltage control types before EVV patching */
	data->vddc_control = VEGA20_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = VEGA20_VOLTAGE_CONTROL_NONE;
	data->vddci_control = VEGA20_VOLTAGE_CONTROL_NONE;

	data->water_marks_bitmap = 0;
	data->avfs_exist = false;

	vega20_set_features_platform_caps(hwmgr);

	vega20_init_dpm_defaults(hwmgr);

	/* Parse pptable data read from VBIOS */
	vega20_set_private_data_based_on_pptable(hwmgr);

	data->is_tlu_enabled = false;

	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
			VEGA20_MAX_HARDWARE_POWERLEVELS;
	hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

	hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
	/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
	hwmgr->platform_descriptor.clockStep.engineClock = 500;
	hwmgr->platform_descriptor.clockStep.memoryClock = 500;

	data->total_active_cus = adev->gfx.cu_info.number;
	data->is_custom_profile_set = false;

	return 0;
}

static int vega20_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);

	data->low_sclk_interrupt_threshold = 0;

	return 0;
}

static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
	int ret = 0;
	bool use_baco = (amdgpu_in_reset(adev) &&
			 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
			(adev->in_runpm && amdgpu_asic_supports_baco(adev));

	ret = vega20_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to init sclk threshold!",
			return ret);

	if (use_baco) {
		ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
		if (ret)
			pr_err("Failed to apply vega20 baco workaround!\n");
	}

	return ret;
}

/*
 * @fn vega20_init_dpm_state
 * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
 *
 * @param dpm_state - the address of the DPM Table to initialize.
 * @return None.
 */
static void vega20_init_dpm_state(struct vega20_dpm_state *dpm_state)
{
	dpm_state->soft_min_level = 0x0;
	dpm_state->soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_state->hard_min_level = 0x0;
	dpm_state->hard_max_level = VG20_CLOCK_MAX_DEFAULT;
}

static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
		PPCLK_e clk_id, uint32_t *num_of_levels)
{
	int ret = 0;

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetDpmFreqByIndex,
			(clk_id << 16 | 0xFF),
			num_of_levels);
	PP_ASSERT_WITH_CODE(!ret,
			"[GetNumOfDpmLevel] failed to get dpm levels!",
			return ret);

	return ret;
}

static int vega20_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
		PPCLK_e clk_id, uint32_t index, uint32_t *clk)
{
	int ret = 0;

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetDpmFreqByIndex,
			(clk_id << 16 | index),
			clk);
	PP_ASSERT_WITH_CODE(!ret,
			"[GetDpmFreqByIndex] failed to get dpm freq by index!",
			return ret);

	return ret;
}

static int vega20_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
		struct vega20_single_dpm_table *dpm_table, PPCLK_e clk_id)
{
	int ret = 0;
	uint32_t i, num_of_levels, clk;

	ret = vega20_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetupSingleDpmTable] failed to get clk levels!",
			return ret);

	dpm_table->count = num_of_levels;

	for (i = 0; i < num_of_levels; i++) {
		ret = vega20_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupSingleDpmTable] failed to get clk of specific level!",
				return ret);
		dpm_table->dpm_levels[i].value = clk;
		dpm_table->dpm_levels[i].enabled = true;
	}

	return ret;
}

static int vega20_setup_gfxclk_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table;
	int ret = 0;

	dpm_table = &(data->dpm_table.gfx_table);
	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
	}

	return ret;
}

static int vega20_setup_memclk_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table;
	int ret = 0;

	dpm_table = &(data->dpm_table.mem_table);
	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get memclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
	}

	return ret;
}

/*
 * Initialize all DPM state tables for the SMU based on the dependency table.
 * The dynamic state patching function will then trim these state tables to
 * the allowed range based on the power policy or external client requests,
 * such as UVD requests, etc.
 */
static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table;
	int ret = 0;

	memset(&data->dpm_table, 0, sizeof(data->dpm_table));

	/* socclk */
	dpm_table = &(data->dpm_table.soc_table);
	if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get socclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* gfxclk */
	dpm_table = &(data->dpm_table.gfx_table);
	ret = vega20_setup_gfxclk_dpm_table(hwmgr);
	if (ret)
		return ret;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* memclk */
	dpm_table = &(data->dpm_table.mem_table);
	ret = vega20_setup_memclk_dpm_table(hwmgr);
	if (ret)
		return ret;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* eclk */
	dpm_table = &(data->dpm_table.eclk_table);
	if (data->smu_features[GNLD_DPM_VCE].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get eclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* vclk */
	dpm_table = &(data->dpm_table.vclk_table);
	if (data->smu_features[GNLD_DPM_UVD].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get vclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* dclk */
	dpm_table = &(data->dpm_table.dclk_table);
	if (data->smu_features[GNLD_DPM_UVD].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* dcefclk */
	dpm_table = &(data->dpm_table.dcef_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* pixclk */
	dpm_table = &(data->dpm_table.pixel_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* dispclk */
	dpm_table = &(data->dpm_table.display_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* phyclk */
	dpm_table = &(data->dpm_table.phy_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* fclk */
	dpm_table = &(data->dpm_table.fclk_table);
	if (data->smu_features[GNLD_DPM_FCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_FCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get fclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.fclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* save a copy of the default DPM table */
	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
			sizeof(struct vega20_dpm_table));

	return 0;
}

/**
 * Initializes the SMC table and uploads it
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * Return: always 0
 */
static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;

	result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
	PP_ASSERT_WITH_CODE(!result,
			"[InitSMCTable] Failed to get vbios bootup values!",
			return result);

	data->vbios_boot_state.vddc = boot_up_values.usVddc;
	data->vbios_boot_state.vddci = boot_up_values.usVddci;
	data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
	data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
	data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
	data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
	data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
	data->vbios_boot_state.eclock = boot_up_values.ulEClk;
	data->vbios_boot_state.vclock = boot_up_values.ulVClk;
	data->vbios_boot_state.dclock = boot_up_values.ulDClk;
	data->vbios_boot_state.fclock = boot_up_values.ulFClk;
	data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;

	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetMinDeepSleepDcefclk,
			(uint32_t)(data->vbios_boot_state.dcef_clock / 100),
			NULL);

	memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));

	result = smum_smc_table_manager(hwmgr,
					(uint8_t *)pp_table, TABLE_PPTABLE, false);
	PP_ASSERT_WITH_CODE(!result,
			"[InitSMCTable] Failed to upload PPtable!",
			return result);

	return 0;
}

/*
 * Override PCIe link speed and link width for DPM Level 1. PPTable entries
 * reflect the ASIC capabilities and not the system capabilities, e.g. a
 * Vega20 board in a PCIe Gen3 system.
 * In this case, when the SMU tries to switch to DPM1, it fails as the
 * system doesn't support Gen4.
 */
static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
	int ret;

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
			NULL);
	PP_ASSERT_WITH_CODE(!ret,
			"[OverridePcieParameters] Attempt to override pcie params failed!",
			return ret);

	data->pcie_parameters_override = true;
	data->pcie_gen_level1 = pcie_gen;
	data->pcie_width_level1 = pcie_width;

	return 0;
}

static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint32_t allowed_features_low = 0, allowed_features_high = 0;
	int i;
	int ret = 0;

	for (i = 0; i < GNLD_FEATURES_MAX; i++)
		if (data->smu_features[i].allowed)
			data->smu_features[i].smu_feature_id > 31 ?
				(allowed_features_high |=
				 ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_HIGH_SHIFT)
				  & 0xFFFFFFFF)) :
				(allowed_features_low |=
				 ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT)
				  & 0xFFFFFFFF));

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high, NULL);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetAllowedFeaturesMask] Attempt to set allowed features mask(high) failed!",
			return ret);

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low, NULL);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
			return ret);

	return 0;
}

static int vega20_run_btc(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc, NULL);
}

static int vega20_run_btc_afll(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc, NULL);
}

static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint64_t features_enabled;
	int i;
	bool enabled;
	int ret = 0;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_EnableAllSmuFeatures,
			NULL)) == 0,
			"[EnableAllSMUFeatures] Failed to enable all smu features!",
			return ret);

	ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
	PP_ASSERT_WITH_CODE(!ret,
			"[EnableAllSmuFeatures] Failed to get enabled smc features!",
			return ret);

	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
			true : false;
		data->smu_features[i].enabled = enabled;
		data->smu_features[i].supported = enabled;

#if 0
		if (data->smu_features[i].allowed && !enabled)
			pr_info("[EnableAllSMUFeatures] feature %d is expected enabled!", i);
		else if (!data->smu_features[i].allowed && enabled)
			pr_info("[EnableAllSMUFeatures] feature %d is expected disabled!", i);
#endif
	}

	return 0;
}

static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);

	if (data->smu_features[GNLD_DPM_UCLK].enabled)
		return smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetUclkFastSwitch,
			1,
			NULL);

	return 0;
}

static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);

	return smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetFclkGfxClkRatio,
			data->registry_data.fclk_gfxclk_ratio,
			NULL);
}

static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	int i, ret = 0;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_DisableAllSmuFeatures,
			NULL)) == 0,
			"[DisableAllSMUFeatures] Failed to disable all smu features!",
			return ret);

	for (i = 0; i < GNLD_FEATURES_MAX; i++)
		data->smu_features[i].enabled = 0;

	return 0;
}

static int vega20_od8_set_feature_capabilities(
		struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega20_od8_settings *od_settings = &(data->od8_settings);

	od_settings->overdrive8_capabilities = 0;

	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_LIMITS] &&
		    pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] > 0 &&
		    pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] >=
		     pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN]))
			od_settings->overdrive8_capabilities |= OD8_GFXCLK_LIMITS;

		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_CURVE] &&
		    (pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1] >=
		     pp_table->MinVoltageGfx / VOLTAGE_SCALE) &&
		    (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] <=
		     pp_table->MaxVoltageGfx / VOLTAGE_SCALE) &&
		    (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] >=
		     pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1]))
			od_settings->overdrive8_capabilities |= OD8_GFXCLK_CURVE;
	}

	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] =
			data->dpm_table.mem_table.dpm_levels[data->dpm_table.mem_table.count - 2].value;
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] &&
		    pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 &&
		    pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] >=
		     pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX]))
			od_settings->overdrive8_capabilities |= OD8_UCLK_MAX;
	}

	if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_POWER_LIMIT] &&
	    pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
	    pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] <= 100 &&
	    pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
	    pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] <= 100)
		od_settings->overdrive8_capabilities |= OD8_POWER_LIMIT;

	if (data->smu_features[GNLD_FAN_CONTROL].enabled) {
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ACOUSTIC_LIMIT] &&
		    pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
		    pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] >=
		     pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT]))
			od_settings->overdrive8_capabilities |= OD8_ACOUSTIC_LIMIT_SCLK;

		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_SPEED_MIN] &&
		    (pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED] >=
		     (pp_table->FanPwmMin * pp_table->FanMaximumRpm / 100)) &&
		    pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] >=
		     pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED]))
			od_settings->overdrive8_capabilities |= OD8_FAN_SPEED_MIN;
	}

	if (data->smu_features[GNLD_THERMAL].enabled) {
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_FAN] &&
		    pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
		    pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] >=
		     pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP]))
			od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_FAN;

		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_SYSTEM] &&
		    pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
		    pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] >=
		     pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX]))
			od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_SYSTEM;
	}

	if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_MEMORY_TIMING_TUNE])
		od_settings->overdrive8_capabilities |= OD8_MEMORY_TIMING_TUNE;

	if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ZERO_RPM_CONTROL] &&
	    pp_table->FanZeroRpmEnable)
		od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL;

	if (!od_settings->overdrive8_capabilities)
		hwmgr->od_enabled = false;

	return 0;
}

static int vega20_od8_set_feature_id(
		struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_od8_settings *od_settings = &(data->od8_settings);

	if (od_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
			OD8_GFXCLK_LIMITS;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
			OD8_GFXCLK_LIMITS;
	} else {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
			0;
	}

	if (od_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
			OD8_GFXCLK_CURVE;
	} else {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
			0;
	}

	if (od_settings->overdrive8_capabilities & OD8_UCLK_MAX)
		od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = OD8_UCLK_MAX;
	else
		od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = 0;

	if (od_settings->overdrive8_capabilities & OD8_POWER_LIMIT)
		od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = OD8_POWER_LIMIT;
	else
		od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = 0;

	if (od_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK)
		od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
			OD8_ACOUSTIC_LIMIT_SCLK;
	else
		od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
			0;

	if (od_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
		od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
			OD8_FAN_SPEED_MIN;
	else
		od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
			0;

	if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN)
		od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
			OD8_TEMPERATURE_FAN;
	else
		od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
			0;

	if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM)
		od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
			OD8_TEMPERATURE_SYSTEM;
	else
		od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
			0;

	return 0;
}

static int vega20_od8_get_gfx_clock_base_voltage(
		struct pp_hwmgr *hwmgr,
		uint32_t *voltage,
		uint32_t freq)
{
	int ret = 0;

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetAVFSVoltageByDpm,
			((AVFS_CURVE << 24) |
			 (OD8_HOTCURVE_TEMPERATURE << 16) | freq),
			voltage);
	PP_ASSERT_WITH_CODE(!ret,
			"[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!",
			return ret);

	*voltage = *voltage / VOLTAGE_SCALE;

	return 0;
}

static int vega20_od8_initialize_default_settings(
		struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_od8_settings *od8_settings = &(data->od8_settings);
	OverDriveTable_t *od_table = &(data->smc_state_table.overdrive_table);
	int i, ret = 0;

	/* Set Feature Capabilities */
	vega20_od8_set_feature_capabilities(hwmgr);

	/* Map FeatureID to individual settings */
	vega20_od8_set_feature_id(hwmgr);

	/* Set default values */
	ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, true);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to export over drive table!",
			return ret);

	if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) {
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
			od_table->GfxclkFmin;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
			od_table->GfxclkFmax;
	} else {
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
			0;
	}

	if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
		od_table->GfxclkFreq1 = od_table->GfxclkFmin;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
			od_table->GfxclkFreq1;

		od_table->GfxclkFreq3 = od_table->GfxclkFmax;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
			od_table->GfxclkFreq3;

		od_table->GfxclkFreq2 = (od_table->GfxclkFreq1 + od_table->GfxclkFreq3) / 2;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
			od_table->GfxclkFreq2;

		PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
				&(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value),
				od_table->GfxclkFreq1),
				"[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
				od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value = 0);
		od_table->GfxclkVolt1 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value
			* VOLTAGE_SCALE;

		PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
				&(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value),
				od_table->GfxclkFreq2),
				"[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
				od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value = 0);
		od_table->GfxclkVolt2 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value
			* VOLTAGE_SCALE;

		PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
				&(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value),
				od_table->GfxclkFreq3),
				"[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
				od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value = 0);
		od_table->GfxclkVolt3 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value
			* VOLTAGE_SCALE;
	} else {
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value =
			0;
	}

	if (od8_settings->overdrive8_capabilities & OD8_UCLK_MAX)
		od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
			od_table->UclkFmax;
	else
		od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_POWER_LIMIT)
		od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
			od_table->OverDrivePct;
	else
		od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK)
		od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
			od_table->FanMaximumRpm;
	else
		od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
		od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
			od_table->FanMinimumPwm * data->smc_state_table.pp_table.FanMaximumRpm / 100;
	else
		od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN)
		od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
			od_table->FanTargetTemperature;
	else
		od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM)
		od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
			od_table->MaxOpTemp;
	else
		od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
			0;

	for (i = 0; i < OD8_SETTING_COUNT; i++) {
		if (od8_settings->od8_settings_array[i].feature_id) {
			od8_settings->od8_settings_array[i].min_value =
				pptable_information->od_settings_min[i];
			od8_settings->od8_settings_array[i].max_value =
				pptable_information->od_settings_max[i];
			od8_settings->od8_settings_array[i].current_value =
				od8_settings->od8_settings_array[i].default_value;
		} else {
			od8_settings->od8_settings_array[i].min_value =
				0;
			od8_settings->od8_settings_array[i].max_value =
				0;
			od8_settings->od8_settings_array[i].current_value =
				0;
		}
	}

	ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, false);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to import over drive table!",
			return ret);

	return 0;
}

static int vega20_od8_set_settings(
		struct pp_hwmgr *hwmgr,
		uint32_t index,
		uint32_t value)
{
	OverDriveTable_t od_table;
	int ret = 0;
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_od8_single_setting *od8_settings =
			data->od8_settings.od8_settings_array;

	ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, true);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to export over drive table!",
			return ret);

	switch (index) {
	case OD8_SETTING_GFXCLK_FMIN:
		od_table.GfxclkFmin = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_FMAX:
		if (value < od8_settings[OD8_SETTING_GFXCLK_FMAX].min_value ||
		    value > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value)
			return -EINVAL;

		od_table.GfxclkFmax = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_FREQ1:
		od_table.GfxclkFreq1 = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_VOLTAGE1:
		od_table.GfxclkVolt1 = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_FREQ2:
		od_table.GfxclkFreq2 = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_VOLTAGE2:
		od_table.GfxclkVolt2 = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_FREQ3:
		od_table.GfxclkFreq3 = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_VOLTAGE3:
		od_table.GfxclkVolt3 = (uint16_t)value;
		break;
	case OD8_SETTING_UCLK_FMAX:
		if (value < od8_settings[OD8_SETTING_UCLK_FMAX].min_value ||
		    value > od8_settings[OD8_SETTING_UCLK_FMAX].max_value)
			return -EINVAL;
		od_table.UclkFmax = (uint16_t)value;
		break;
	case OD8_SETTING_POWER_PERCENTAGE:
		od_table.OverDrivePct = (int16_t)value;
		break;
	case OD8_SETTING_FAN_ACOUSTIC_LIMIT:
		od_table.FanMaximumRpm = (uint16_t)value;
		break;
	case OD8_SETTING_FAN_MIN_SPEED:
		od_table.FanMinimumPwm = (uint16_t)value;
		break;
	case OD8_SETTING_FAN_TARGET_TEMP:
		od_table.FanTargetTemperature = (uint16_t)value;
		break;
	case OD8_SETTING_OPERATING_TEMP_MAX:
		od_table.MaxOpTemp = (uint16_t)value;
		break;
	}

	ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, false);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to import over drive table!",
			return ret);

	return 0;
}

static int vega20_get_sclk_od(
		struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = hwmgr->backend;
	struct vega20_single_dpm_table *sclk_table =
			&(data->dpm_table.gfx_table);
	struct vega20_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.gfx_table);
	int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
	int golden_value = golden_sclk_table->dpm_levels
			[golden_sclk_table->count - 1].value;

	/* od percentage */
	value -= golden_value;
	value = DIV_ROUND_UP(value * 100, golden_value);

	return value;
}

static int vega20_set_sclk_od(
		struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct vega20_hwmgr *data = hwmgr->backend;
	struct vega20_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.gfx_table);
	uint32_t od_sclk;
	int ret = 0;

	od_sclk = golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * value;
	od_sclk /= 100;
	od_sclk += golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_GFXCLK_FMAX, od_sclk);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetSclkOD] failed to set od gfxclk!",
			return ret);

	/* retrieve updated gfxclk table */
	ret = vega20_setup_gfxclk_dpm_table(hwmgr);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetSclkOD] failed to refresh gfxclk table!",
			return ret);

	return 0;
}

static int vega20_get_mclk_od(
		struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = hwmgr->backend;
	struct vega20_single_dpm_table *mclk_table =
			&(data->dpm_table.mem_table);
	struct vega20_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mem_table);
	int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
	int golden_value = golden_mclk_table->dpm_levels
			[golden_mclk_table->count - 1].value;

	/* od percentage */
	value -= golden_value;
	value = DIV_ROUND_UP(value * 100, golden_value);

	return value;
}

static int vega20_set_mclk_od(
		struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct vega20_hwmgr *data = hwmgr->backend;
	struct vega20_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mem_table);
	uint32_t od_mclk;
	int ret = 0;

	od_mclk = golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * value;
	od_mclk /= 100;
	od_mclk += golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_UCLK_FMAX, od_mclk);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetMclkOD] failed to set od memclk!",
			return ret);

	/* retrieve updated memclk table */
	ret = vega20_setup_memclk_dpm_table(hwmgr);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetMclkOD] failed to refresh memclk table!",
			return ret);

	return 0;
}

static int vega20_populate_umdpstate_clocks(
		struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *gfx_table = &(data->dpm_table.gfx_table);
	struct vega20_single_dpm_table *mem_table = &(data->dpm_table.mem_table);

	hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value;
	hwmgr->pstate_mclk = mem_table->dpm_levels[0].value;

	if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
	    mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) {
		hwmgr->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
		hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
	}

	hwmgr->pstate_sclk = hwmgr->pstate_sclk * 100;
	hwmgr->pstate_mclk = hwmgr->pstate_mclk * 100;

	return 0;
}

static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
		PP_Clock *clock, PPCLK_e clock_select)
{
	int ret = 0;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetDcModeMaxDpmFreq,
			(clock_select << 16),
			clock)) == 0,
			"[GetMaxSustainableClock] Failed to get max DC clock from SMC!",
			return ret);

	/* if DC limit is zero, return AC limit */
	if (*clock == 0) {
		PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetMaxDpmFreq,
			(clock_select << 16),
			clock)) == 0,
			"[GetMaxSustainableClock] failed to get max AC clock from SMC!",
			return ret);
	}

	return 0;
}

static int vega20_init_max_sustainable_clocks(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_max_sustainable_clocks *max_sustainable_clocks =
			&(data->max_sustainable_clocks);
	int ret = 0;

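	/* Start from the VBIOS boot-up clocks; when the corresponding DPM
	 * feature is enabled, these are overwritten below with the
	 * SMU-reported sustainable maximums.
	 */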
max_sustainable_clocks->uclock = data->vbios_boot_state.mem_clock / 100; 1577 max_sustainable_clocks->soc_clock = data->vbios_boot_state.soc_clock / 100; 1578 max_sustainable_clocks->dcef_clock = data->vbios_boot_state.dcef_clock / 100; 1579 max_sustainable_clocks->display_clock = 0xFFFFFFFF; 1580 max_sustainable_clocks->phy_clock = 0xFFFFFFFF; 1581 max_sustainable_clocks->pixel_clock = 0xFFFFFFFF; 1582 1583 if (data->smu_features[GNLD_DPM_UCLK].enabled) 1584 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1585 &(max_sustainable_clocks->uclock), 1586 PPCLK_UCLK)) == 0, 1587 "[InitMaxSustainableClocks] failed to get max UCLK from SMC!", 1588 return ret); 1589 1590 if (data->smu_features[GNLD_DPM_SOCCLK].enabled) 1591 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1592 &(max_sustainable_clocks->soc_clock), 1593 PPCLK_SOCCLK)) == 0, 1594 "[InitMaxSustainableClocks] failed to get max SOCCLK from SMC!", 1595 return ret); 1596 1597 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { 1598 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1599 &(max_sustainable_clocks->dcef_clock), 1600 PPCLK_DCEFCLK)) == 0, 1601 "[InitMaxSustainableClocks] failed to get max DCEFCLK from SMC!", 1602 return ret); 1603 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1604 &(max_sustainable_clocks->display_clock), 1605 PPCLK_DISPCLK)) == 0, 1606 "[InitMaxSustainableClocks] failed to get max DISPCLK from SMC!", 1607 return ret); 1608 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1609 &(max_sustainable_clocks->phy_clock), 1610 PPCLK_PHYCLK)) == 0, 1611 "[InitMaxSustainableClocks] failed to get max PHYCLK from SMC!", 1612 return ret); 1613 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1614 &(max_sustainable_clocks->pixel_clock), 1615 PPCLK_PIXCLK)) == 0, 1616 "[InitMaxSustainableClocks] failed to get max PIXCLK from SMC!", 1617 return ret); 1618 } 1619 1620 if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock) 1621 max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock; 1622 1623 return 0; 1624 } 1625 1626 static int vega20_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr) 1627 { 1628 int result; 1629 1630 result = smum_send_msg_to_smc(hwmgr, 1631 PPSMC_MSG_SetMGpuFanBoostLimitRpm, 1632 NULL); 1633 PP_ASSERT_WITH_CODE(!result, 1634 "[EnableMgpuFan] Failed to enable mgpu fan boost!", 1635 return result); 1636 1637 return 0; 1638 } 1639 1640 static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr) 1641 { 1642 struct vega20_hwmgr *data = 1643 (struct vega20_hwmgr *)(hwmgr->backend); 1644 1645 data->uvd_power_gated = true; 1646 data->vce_power_gated = true; 1647 } 1648 1649 static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr) 1650 { 1651 int result = 0; 1652 1653 smum_send_msg_to_smc_with_parameter(hwmgr, 1654 PPSMC_MSG_NumOfDisplays, 0, NULL); 1655 1656 result = vega20_set_allowed_featuresmask(hwmgr); 1657 PP_ASSERT_WITH_CODE(!result, 1658 "[EnableDPMTasks] Failed to set allowed featuresmask!\n", 1659 return result); 1660 1661 result = vega20_init_smc_table(hwmgr); 1662 PP_ASSERT_WITH_CODE(!result, 1663 "[EnableDPMTasks] Failed to initialize SMC table!", 1664 return result); 1665 1666 result = vega20_run_btc(hwmgr); 1667 PP_ASSERT_WITH_CODE(!result, 1668 "[EnableDPMTasks] Failed to run btc!", 1669 return result); 1670 1671 result = vega20_run_btc_afll(hwmgr); 1672 PP_ASSERT_WITH_CODE(!result, 1673 "[EnableDPMTasks] Failed to run btc afll!", 1674 return result); 1675 1676 
result = vega20_enable_all_smu_features(hwmgr); 1677 PP_ASSERT_WITH_CODE(!result, 1678 "[EnableDPMTasks] Failed to enable all smu features!", 1679 return result); 1680 1681 result = vega20_override_pcie_parameters(hwmgr); 1682 PP_ASSERT_WITH_CODE(!result, 1683 "[EnableDPMTasks] Failed to override pcie parameters!", 1684 return result); 1685 1686 result = vega20_notify_smc_display_change(hwmgr); 1687 PP_ASSERT_WITH_CODE(!result, 1688 "[EnableDPMTasks] Failed to notify smc display change!", 1689 return result); 1690 1691 result = vega20_send_clock_ratio(hwmgr); 1692 PP_ASSERT_WITH_CODE(!result, 1693 "[EnableDPMTasks] Failed to send clock ratio!", 1694 return result); 1695 1696 /* Initialize UVD/VCE powergating state */ 1697 vega20_init_powergate_state(hwmgr); 1698 1699 result = vega20_setup_default_dpm_tables(hwmgr); 1700 PP_ASSERT_WITH_CODE(!result, 1701 "[EnableDPMTasks] Failed to setup default DPM tables!", 1702 return result); 1703 1704 result = vega20_init_max_sustainable_clocks(hwmgr); 1705 PP_ASSERT_WITH_CODE(!result, 1706 "[EnableDPMTasks] Failed to get maximum sustainable clocks!", 1707 return result); 1708 1709 result = vega20_power_control_set_level(hwmgr); 1710 PP_ASSERT_WITH_CODE(!result, 1711 "[EnableDPMTasks] Failed to power control set level!", 1712 return result); 1713 1714 result = vega20_od8_initialize_default_settings(hwmgr); 1715 PP_ASSERT_WITH_CODE(!result, 1716 "[EnableDPMTasks] Failed to initialize odn settings!", 1717 return result); 1718 1719 result = vega20_populate_umdpstate_clocks(hwmgr); 1720 PP_ASSERT_WITH_CODE(!result, 1721 "[EnableDPMTasks] Failed to populate umdpstate clocks!", 1722 return result); 1723 1724 result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit, 1725 POWER_SOURCE_AC << 16, &hwmgr->default_power_limit); 1726 PP_ASSERT_WITH_CODE(!result, 1727 "[GetPptLimit] get default PPT limit failed!", 1728 return result); 1729 hwmgr->power_limit = 1730 hwmgr->default_power_limit; 1731 1732 return 0; 1733 } 1734 1735 static uint32_t vega20_find_lowest_dpm_level( 1736 struct vega20_single_dpm_table *table) 1737 { 1738 uint32_t i; 1739 1740 for (i = 0; i < table->count; i++) { 1741 if (table->dpm_levels[i].enabled) 1742 break; 1743 } 1744 if (i >= table->count) { 1745 i = 0; 1746 table->dpm_levels[i].enabled = true; 1747 } 1748 1749 return i; 1750 } 1751 1752 static uint32_t vega20_find_highest_dpm_level( 1753 struct vega20_single_dpm_table *table) 1754 { 1755 int i = 0; 1756 1757 PP_ASSERT_WITH_CODE(table != NULL, 1758 "[FindHighestDPMLevel] DPM Table does not exist!", 1759 return 0); 1760 PP_ASSERT_WITH_CODE(table->count > 0, 1761 "[FindHighestDPMLevel] DPM Table has no entry!", 1762 return 0); 1763 PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER, 1764 "[FindHighestDPMLevel] DPM Table has too many entries!", 1765 return MAX_REGULAR_DPM_NUMBER - 1); 1766 1767 for (i = table->count - 1; i >= 0; i--) { 1768 if (table->dpm_levels[i].enabled) 1769 break; 1770 } 1771 if (i < 0) { 1772 i = 0; 1773 table->dpm_levels[i].enabled = true; 1774 } 1775 1776 return i; 1777 } 1778 1779 static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask) 1780 { 1781 struct vega20_hwmgr *data = 1782 (struct vega20_hwmgr *)(hwmgr->backend); 1783 uint32_t min_freq; 1784 int ret = 0; 1785 1786 if (data->smu_features[GNLD_DPM_GFXCLK].enabled && 1787 (feature_mask & FEATURE_DPM_GFXCLK_MASK)) { 1788 min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level; 1789 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 
1790 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1791 (PPCLK_GFXCLK << 16) | (min_freq & 0xffff), 1792 NULL)), 1793 "Failed to set soft min gfxclk !", 1794 return ret); 1795 } 1796 1797 if (data->smu_features[GNLD_DPM_UCLK].enabled && 1798 (feature_mask & FEATURE_DPM_UCLK_MASK)) { 1799 min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level; 1800 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1801 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1802 (PPCLK_UCLK << 16) | (min_freq & 0xffff), 1803 NULL)), 1804 "Failed to set soft min memclk !", 1805 return ret); 1806 } 1807 1808 if (data->smu_features[GNLD_DPM_UVD].enabled && 1809 (feature_mask & FEATURE_DPM_UVD_MASK)) { 1810 min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level; 1811 1812 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1813 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1814 (PPCLK_VCLK << 16) | (min_freq & 0xffff), 1815 NULL)), 1816 "Failed to set soft min vclk!", 1817 return ret); 1818 1819 min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level; 1820 1821 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1822 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1823 (PPCLK_DCLK << 16) | (min_freq & 0xffff), 1824 NULL)), 1825 "Failed to set soft min dclk!", 1826 return ret); 1827 } 1828 1829 if (data->smu_features[GNLD_DPM_VCE].enabled && 1830 (feature_mask & FEATURE_DPM_VCE_MASK)) { 1831 min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level; 1832 1833 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1834 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1835 (PPCLK_ECLK << 16) | (min_freq & 0xffff), 1836 NULL)), 1837 "Failed to set soft min eclk!", 1838 return ret); 1839 } 1840 1841 if (data->smu_features[GNLD_DPM_SOCCLK].enabled && 1842 (feature_mask & FEATURE_DPM_SOCCLK_MASK)) { 1843 min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level; 1844 1845 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1846 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1847 (PPCLK_SOCCLK << 16) | (min_freq & 0xffff), 1848 NULL)), 1849 "Failed to set soft min socclk!", 1850 return ret); 1851 } 1852 1853 if (data->smu_features[GNLD_DPM_FCLK].enabled && 1854 (feature_mask & FEATURE_DPM_FCLK_MASK)) { 1855 min_freq = data->dpm_table.fclk_table.dpm_state.soft_min_level; 1856 1857 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1858 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1859 (PPCLK_FCLK << 16) | (min_freq & 0xffff), 1860 NULL)), 1861 "Failed to set soft min fclk!", 1862 return ret); 1863 } 1864 1865 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled && 1866 (feature_mask & FEATURE_DPM_DCEFCLK_MASK)) { 1867 min_freq = data->dpm_table.dcef_table.dpm_state.hard_min_level; 1868 1869 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1870 hwmgr, PPSMC_MSG_SetHardMinByFreq, 1871 (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff), 1872 NULL)), 1873 "Failed to set hard min dcefclk!", 1874 return ret); 1875 } 1876 1877 return ret; 1878 } 1879 1880 static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask) 1881 { 1882 struct vega20_hwmgr *data = 1883 (struct vega20_hwmgr *)(hwmgr->backend); 1884 uint32_t max_freq; 1885 int ret = 0; 1886 1887 if (data->smu_features[GNLD_DPM_GFXCLK].enabled && 1888 (feature_mask & FEATURE_DPM_GFXCLK_MASK)) { 1889 max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level; 1890 1891 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1892 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1893 (PPCLK_GFXCLK << 16) | (max_freq & 0xffff), 1894 
NULL)), 1895 "Failed to set soft max gfxclk!", 1896 return ret); 1897 } 1898 1899 if (data->smu_features[GNLD_DPM_UCLK].enabled && 1900 (feature_mask & FEATURE_DPM_UCLK_MASK)) { 1901 max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level; 1902 1903 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1904 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1905 (PPCLK_UCLK << 16) | (max_freq & 0xffff), 1906 NULL)), 1907 "Failed to set soft max memclk!", 1908 return ret); 1909 } 1910 1911 if (data->smu_features[GNLD_DPM_UVD].enabled && 1912 (feature_mask & FEATURE_DPM_UVD_MASK)) { 1913 max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level; 1914 1915 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1916 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1917 (PPCLK_VCLK << 16) | (max_freq & 0xffff), 1918 NULL)), 1919 "Failed to set soft max vclk!", 1920 return ret); 1921 1922 max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level; 1923 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1924 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1925 (PPCLK_DCLK << 16) | (max_freq & 0xffff), 1926 NULL)), 1927 "Failed to set soft max dclk!", 1928 return ret); 1929 } 1930 1931 if (data->smu_features[GNLD_DPM_VCE].enabled && 1932 (feature_mask & FEATURE_DPM_VCE_MASK)) { 1933 max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level; 1934 1935 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1936 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1937 (PPCLK_ECLK << 16) | (max_freq & 0xffff), 1938 NULL)), 1939 "Failed to set soft max eclk!", 1940 return ret); 1941 } 1942 1943 if (data->smu_features[GNLD_DPM_SOCCLK].enabled && 1944 (feature_mask & FEATURE_DPM_SOCCLK_MASK)) { 1945 max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level; 1946 1947 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1948 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1949 (PPCLK_SOCCLK << 16) | (max_freq & 0xffff), 1950 NULL)), 1951 "Failed to set soft max socclk!", 1952 return ret); 1953 } 1954 1955 if (data->smu_features[GNLD_DPM_FCLK].enabled && 1956 (feature_mask & FEATURE_DPM_FCLK_MASK)) { 1957 max_freq = data->dpm_table.fclk_table.dpm_state.soft_max_level; 1958 1959 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1960 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1961 (PPCLK_FCLK << 16) | (max_freq & 0xffff), 1962 NULL)), 1963 "Failed to set soft max fclk!", 1964 return ret); 1965 } 1966 1967 return ret; 1968 } 1969 1970 static int vega20_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) 1971 { 1972 struct vega20_hwmgr *data = 1973 (struct vega20_hwmgr *)(hwmgr->backend); 1974 int ret = 0; 1975 1976 if (data->smu_features[GNLD_DPM_VCE].supported) { 1977 if (data->smu_features[GNLD_DPM_VCE].enabled == enable) { 1978 if (enable) 1979 PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already enabled!\n"); 1980 else 1981 PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already disabled!\n"); 1982 } 1983 1984 ret = vega20_enable_smc_features(hwmgr, 1985 enable, 1986 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap); 1987 PP_ASSERT_WITH_CODE(!ret, 1988 "Attempt to Enable/Disable DPM VCE Failed!", 1989 return ret); 1990 data->smu_features[GNLD_DPM_VCE].enabled = enable; 1991 } 1992 1993 return 0; 1994 } 1995 1996 static int vega20_get_clock_ranges(struct pp_hwmgr *hwmgr, 1997 uint32_t *clock, 1998 PPCLK_e clock_select, 1999 bool max) 2000 { 2001 int ret; 2002 *clock = 0; 2003 2004 if (max) { 2005 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2006 
PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16), 2007 clock)) == 0, 2008 "[GetClockRanges] Failed to get max clock from SMC!", 2009 return ret); 2010 } else { 2011 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2012 PPSMC_MSG_GetMinDpmFreq, 2013 (clock_select << 16), 2014 clock)) == 0, 2015 "[GetClockRanges] Failed to get min clock from SMC!", 2016 return ret); 2017 } 2018 2019 return 0; 2020 } 2021 2022 static uint32_t vega20_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) 2023 { 2024 struct vega20_hwmgr *data = 2025 (struct vega20_hwmgr *)(hwmgr->backend); 2026 uint32_t gfx_clk; 2027 int ret = 0; 2028 2029 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled, 2030 "[GetSclks]: gfxclk dpm not enabled!\n", 2031 return -EPERM); 2032 2033 if (low) { 2034 ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, false); 2035 PP_ASSERT_WITH_CODE(!ret, 2036 "[GetSclks]: failed to get min PPCLK_GFXCLK\n", 2037 return ret); 2038 } else { 2039 ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, true); 2040 PP_ASSERT_WITH_CODE(!ret, 2041 "[GetSclks]: failed to get max PPCLK_GFXCLK\n", 2042 return ret); 2043 } 2044 2045 return (gfx_clk * 100); 2046 } 2047 2048 static uint32_t vega20_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) 2049 { 2050 struct vega20_hwmgr *data = 2051 (struct vega20_hwmgr *)(hwmgr->backend); 2052 uint32_t mem_clk; 2053 int ret = 0; 2054 2055 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled, 2056 "[GetMclks]: memclk dpm not enabled!\n", 2057 return -EPERM); 2058 2059 if (low) { 2060 ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, false); 2061 PP_ASSERT_WITH_CODE(!ret, 2062 "[GetMclks]: failed to get min PPCLK_UCLK\n", 2063 return ret); 2064 } else { 2065 ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, true); 2066 PP_ASSERT_WITH_CODE(!ret, 2067 "[GetMclks]: failed to get max PPCLK_UCLK\n", 2068 return ret); 2069 } 2070 2071 return (mem_clk * 100); 2072 } 2073 2074 static int vega20_get_metrics_table(struct pp_hwmgr *hwmgr, 2075 SmuMetrics_t *metrics_table, 2076 bool bypass_cache) 2077 { 2078 struct vega20_hwmgr *data = 2079 (struct vega20_hwmgr *)(hwmgr->backend); 2080 int ret = 0; 2081 2082 if (bypass_cache || 2083 !data->metrics_time || 2084 time_after(jiffies, data->metrics_time + msecs_to_jiffies(1))) { 2085 ret = smum_smc_table_manager(hwmgr, 2086 (uint8_t *)(&data->metrics_table), 2087 TABLE_SMU_METRICS, 2088 true); 2089 if (ret) { 2090 pr_info("Failed to export SMU metrics table!\n"); 2091 return ret; 2092 } 2093 data->metrics_time = jiffies; 2094 } 2095 2096 if (metrics_table) 2097 memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t)); 2098 2099 return ret; 2100 } 2101 2102 static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr, 2103 uint32_t *query) 2104 { 2105 int ret = 0; 2106 SmuMetrics_t metrics_table; 2107 2108 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); 2109 if (ret) 2110 return ret; 2111 2112 /* SMU firmware 40.46 (version 0x282e00) reports this value in AverageSocketPower instead of CurrSocketPower */ 2113 if (hwmgr->smu_version == 0x282e00) 2114 *query = metrics_table.AverageSocketPower << 8; 2115 else 2116 *query = metrics_table.CurrSocketPower << 8; 2117 2118 return ret; 2119 } 2120 2121 static int vega20_get_current_clk_freq(struct pp_hwmgr *hwmgr, 2122 PPCLK_e clk_id, uint32_t *clk_freq) 2123 { 2124 int ret = 0; 2125 2126 *clk_freq = 0; 2127 2128 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2129 PPSMC_MSG_GetDpmClockFreq, (clk_id << 16), 2130 clk_freq)) == 0, 2131 "[GetCurrentClkFreq]
Attempt to get Current Frequency Failed!", 2132 return ret); 2133 2134 *clk_freq = *clk_freq * 100; 2135 2136 return 0; 2137 } 2138 2139 static int vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr, 2140 int idx, 2141 uint32_t *activity_percent) 2142 { 2143 int ret = 0; 2144 SmuMetrics_t metrics_table; 2145 2146 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); 2147 if (ret) 2148 return ret; 2149 2150 switch (idx) { 2151 case AMDGPU_PP_SENSOR_GPU_LOAD: 2152 *activity_percent = metrics_table.AverageGfxActivity; 2153 break; 2154 case AMDGPU_PP_SENSOR_MEM_LOAD: 2155 *activity_percent = metrics_table.AverageUclkActivity; 2156 break; 2157 default: 2158 pr_err("Invalid index for retrieving clock activity\n"); 2159 return -EINVAL; 2160 } 2161 2162 return ret; 2163 } 2164 2165 static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx, 2166 void *value, int *size) 2167 { 2168 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2169 struct amdgpu_device *adev = hwmgr->adev; 2170 SmuMetrics_t metrics_table; 2171 uint32_t val_vid; 2172 int ret = 0; 2173 2174 switch (idx) { 2175 case AMDGPU_PP_SENSOR_GFX_SCLK: 2176 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); 2177 if (ret) 2178 return ret; 2179 2180 *((uint32_t *)value) = metrics_table.AverageGfxclkFrequency * 100; 2181 *size = 4; 2182 break; 2183 case AMDGPU_PP_SENSOR_GFX_MCLK: 2184 ret = vega20_get_current_clk_freq(hwmgr, 2185 PPCLK_UCLK, 2186 (uint32_t *)value); 2187 if (!ret) 2188 *size = 4; 2189 break; 2190 case AMDGPU_PP_SENSOR_GPU_LOAD: 2191 case AMDGPU_PP_SENSOR_MEM_LOAD: 2192 ret = vega20_get_current_activity_percent(hwmgr, idx, (uint32_t *)value); 2193 if (!ret) 2194 *size = 4; 2195 break; 2196 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 2197 *((uint32_t *)value) = vega20_thermal_get_temperature(hwmgr); 2198 *size = 4; 2199 break; 2200 case AMDGPU_PP_SENSOR_EDGE_TEMP: 2201 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); 2202 if (ret) 2203 return ret; 2204 2205 *((uint32_t *)value) = metrics_table.TemperatureEdge * 2206 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 2207 *size = 4; 2208 break; 2209 case AMDGPU_PP_SENSOR_MEM_TEMP: 2210 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); 2211 if (ret) 2212 return ret; 2213 2214 *((uint32_t *)value) = metrics_table.TemperatureHBM * 2215 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 2216 *size = 4; 2217 break; 2218 case AMDGPU_PP_SENSOR_UVD_POWER: 2219 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1; 2220 *size = 4; 2221 break; 2222 case AMDGPU_PP_SENSOR_VCE_POWER: 2223 *((uint32_t *)value) = data->vce_power_gated ? 
0 : 1; 2224 *size = 4; 2225 break; 2226 case AMDGPU_PP_SENSOR_GPU_POWER: 2227 *size = 16; 2228 ret = vega20_get_gpu_power(hwmgr, (uint32_t *)value); 2229 break; 2230 case AMDGPU_PP_SENSOR_VDDGFX: 2231 val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) & 2232 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >> 2233 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT; 2234 *((uint32_t *)value) = 2235 (uint32_t)convert_to_vddc((uint8_t)val_vid); 2236 break; 2237 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK: 2238 ret = vega20_get_enabled_smc_features(hwmgr, (uint64_t *)value); 2239 if (!ret) 2240 *size = 8; 2241 break; 2242 default: 2243 ret = -EINVAL; 2244 break; 2245 } 2246 return ret; 2247 } 2248 2249 static int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr, 2250 struct pp_display_clock_request *clock_req) 2251 { 2252 int result = 0; 2253 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2254 enum amd_pp_clock_type clk_type = clock_req->clock_type; 2255 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; 2256 PPCLK_e clk_select = 0; 2257 uint32_t clk_request = 0; 2258 2259 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { 2260 switch (clk_type) { 2261 case amd_pp_dcef_clock: 2262 clk_select = PPCLK_DCEFCLK; 2263 break; 2264 case amd_pp_disp_clock: 2265 clk_select = PPCLK_DISPCLK; 2266 break; 2267 case amd_pp_pixel_clock: 2268 clk_select = PPCLK_PIXCLK; 2269 break; 2270 case amd_pp_phy_clock: 2271 clk_select = PPCLK_PHYCLK; 2272 break; 2273 default: 2274 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!"); 2275 result = -EINVAL; 2276 break; 2277 } 2278 2279 if (!result) { 2280 clk_request = (clk_select << 16) | clk_freq; 2281 result = smum_send_msg_to_smc_with_parameter(hwmgr, 2282 PPSMC_MSG_SetHardMinByFreq, 2283 clk_request, 2284 NULL); 2285 } 2286 } 2287 2288 return result; 2289 } 2290 2291 static int vega20_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, 2292 PHM_PerformanceLevelDesignation designation, uint32_t index, 2293 PHM_PerformanceLevel *level) 2294 { 2295 return 0; 2296 } 2297 2298 static int vega20_notify_smc_display_config_after_ps_adjustment( 2299 struct pp_hwmgr *hwmgr) 2300 { 2301 struct vega20_hwmgr *data = 2302 (struct vega20_hwmgr *)(hwmgr->backend); 2303 struct vega20_single_dpm_table *dpm_table = 2304 &data->dpm_table.mem_table; 2305 struct PP_Clocks min_clocks = {0}; 2306 struct pp_display_clock_request clock_req; 2307 int ret = 0; 2308 2309 min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk; 2310 min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk; 2311 min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; 2312 2313 if (data->smu_features[GNLD_DPM_DCEFCLK].supported) { 2314 clock_req.clock_type = amd_pp_dcef_clock; 2315 clock_req.clock_freq_in_khz = min_clocks.dcefClock * 10; 2316 if (!vega20_display_clock_voltage_request(hwmgr, &clock_req)) { 2317 if (data->smu_features[GNLD_DS_DCEFCLK].supported) 2318 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter( 2319 hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, 2320 min_clocks.dcefClockInSR / 100, 2321 NULL)) == 0, 2322 "Attempt to set divider for DCEFCLK Failed!", 2323 return ret); 2324 } else { 2325 pr_info("Attempt to set Hard Min for DCEFCLK Failed!"); 2326 } 2327 } 2328 2329 if (data->smu_features[GNLD_DPM_UCLK].enabled) { 2330 dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100; 2331 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2332 
PPSMC_MSG_SetHardMinByFreq, 2333 (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level, 2334 NULL)), 2335 "[SetHardMinFreq] Set hard min uclk failed!", 2336 return ret); 2337 } 2338 2339 return 0; 2340 } 2341 2342 static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr) 2343 { 2344 struct vega20_hwmgr *data = 2345 (struct vega20_hwmgr *)(hwmgr->backend); 2346 uint32_t soft_level; 2347 int ret = 0; 2348 2349 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table)); 2350 2351 data->dpm_table.gfx_table.dpm_state.soft_min_level = 2352 data->dpm_table.gfx_table.dpm_state.soft_max_level = 2353 data->dpm_table.gfx_table.dpm_levels[soft_level].value; 2354 2355 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.mem_table)); 2356 2357 data->dpm_table.mem_table.dpm_state.soft_min_level = 2358 data->dpm_table.mem_table.dpm_state.soft_max_level = 2359 data->dpm_table.mem_table.dpm_levels[soft_level].value; 2360 2361 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.soc_table)); 2362 2363 data->dpm_table.soc_table.dpm_state.soft_min_level = 2364 data->dpm_table.soc_table.dpm_state.soft_max_level = 2365 data->dpm_table.soc_table.dpm_levels[soft_level].value; 2366 2367 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2368 FEATURE_DPM_UCLK_MASK | 2369 FEATURE_DPM_SOCCLK_MASK); 2370 PP_ASSERT_WITH_CODE(!ret, 2371 "Failed to upload boot level to highest!", 2372 return ret); 2373 2374 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2375 FEATURE_DPM_UCLK_MASK | 2376 FEATURE_DPM_SOCCLK_MASK); 2377 PP_ASSERT_WITH_CODE(!ret, 2378 "Failed to upload dpm max level to highest!", 2379 return ret); 2380 2381 return 0; 2382 } 2383 2384 static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr) 2385 { 2386 struct vega20_hwmgr *data = 2387 (struct vega20_hwmgr *)(hwmgr->backend); 2388 uint32_t soft_level; 2389 int ret = 0; 2390 2391 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); 2392 2393 data->dpm_table.gfx_table.dpm_state.soft_min_level = 2394 data->dpm_table.gfx_table.dpm_state.soft_max_level = 2395 data->dpm_table.gfx_table.dpm_levels[soft_level].value; 2396 2397 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table)); 2398 2399 data->dpm_table.mem_table.dpm_state.soft_min_level = 2400 data->dpm_table.mem_table.dpm_state.soft_max_level = 2401 data->dpm_table.mem_table.dpm_levels[soft_level].value; 2402 2403 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table)); 2404 2405 data->dpm_table.soc_table.dpm_state.soft_min_level = 2406 data->dpm_table.soc_table.dpm_state.soft_max_level = 2407 data->dpm_table.soc_table.dpm_levels[soft_level].value; 2408 2409 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2410 FEATURE_DPM_UCLK_MASK | 2411 FEATURE_DPM_SOCCLK_MASK); 2412 PP_ASSERT_WITH_CODE(!ret, 2413 "Failed to upload boot level to lowest!", 2414 return ret); 2415 2416 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2417 FEATURE_DPM_UCLK_MASK | 2418 FEATURE_DPM_SOCCLK_MASK); 2419 PP_ASSERT_WITH_CODE(!ret, 2420 "Failed to upload dpm max level to lowest!", 2421 return ret); 2422 2423 return 0; 2424 2425 } 2426 2427 static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr) 2428 { 2429 struct vega20_hwmgr *data = 2430 (struct vega20_hwmgr *)(hwmgr->backend); 2431 uint32_t soft_min_level, soft_max_level; 2432 int ret = 0; 2433 2434 /* gfxclk soft min/max settings */ 2435 soft_min_level = 2436 vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
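/* pairing the lowest enabled level (soft min) with the highest (soft max) restores each domain's full range */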
2437 soft_max_level = 2438 vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table)); 2439 2440 data->dpm_table.gfx_table.dpm_state.soft_min_level = 2441 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; 2442 data->dpm_table.gfx_table.dpm_state.soft_max_level = 2443 data->dpm_table.gfx_table.dpm_levels[soft_max_level].value; 2444 2445 /* uclk soft min/max settings */ 2446 soft_min_level = 2447 vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table)); 2448 soft_max_level = 2449 vega20_find_highest_dpm_level(&(data->dpm_table.mem_table)); 2450 2451 data->dpm_table.mem_table.dpm_state.soft_min_level = 2452 data->dpm_table.mem_table.dpm_levels[soft_min_level].value; 2453 data->dpm_table.mem_table.dpm_state.soft_max_level = 2454 data->dpm_table.mem_table.dpm_levels[soft_max_level].value; 2455 2456 /* socclk soft min/max settings */ 2457 soft_min_level = 2458 vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table)); 2459 soft_max_level = 2460 vega20_find_highest_dpm_level(&(data->dpm_table.soc_table)); 2461 2462 data->dpm_table.soc_table.dpm_state.soft_min_level = 2463 data->dpm_table.soc_table.dpm_levels[soft_min_level].value; 2464 data->dpm_table.soc_table.dpm_state.soft_max_level = 2465 data->dpm_table.soc_table.dpm_levels[soft_max_level].value; 2466 2467 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2468 FEATURE_DPM_UCLK_MASK | 2469 FEATURE_DPM_SOCCLK_MASK); 2470 PP_ASSERT_WITH_CODE(!ret, 2471 "Failed to upload DPM Bootup Levels!", 2472 return ret); 2473 2474 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2475 FEATURE_DPM_UCLK_MASK | 2476 FEATURE_DPM_SOCCLK_MASK); 2477 PP_ASSERT_WITH_CODE(!ret, 2478 "Failed to upload DPM Max Levels!", 2479 return ret); 2480 2481 return 0; 2482 } 2483 2484 static int vega20_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level, 2485 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask) 2486 { 2487 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2488 struct vega20_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table); 2489 struct vega20_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table); 2490 struct vega20_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table); 2491 2492 *sclk_mask = 0; 2493 *mclk_mask = 0; 2494 *soc_mask = 0; 2495 2496 if (gfx_dpm_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL && 2497 mem_dpm_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL && 2498 soc_dpm_table->count > VEGA20_UMD_PSTATE_SOCCLK_LEVEL) { 2499 *sclk_mask = VEGA20_UMD_PSTATE_GFXCLK_LEVEL; 2500 *mclk_mask = VEGA20_UMD_PSTATE_MCLK_LEVEL; 2501 *soc_mask = VEGA20_UMD_PSTATE_SOCCLK_LEVEL; 2502 } 2503 2504 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { 2505 *sclk_mask = 0; 2506 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { 2507 *mclk_mask = 0; 2508 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 2509 *sclk_mask = gfx_dpm_table->count - 1; 2510 *mclk_mask = mem_dpm_table->count - 1; 2511 *soc_mask = soc_dpm_table->count - 1; 2512 } 2513 2514 return 0; 2515 } 2516 2517 static int vega20_force_clock_level(struct pp_hwmgr *hwmgr, 2518 enum pp_clock_type type, uint32_t mask) 2519 { 2520 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2521 uint32_t soft_min_level, soft_max_level, hard_min_level; 2522 int ret = 0; 2523 2524 switch (type) { 2525 case PP_SCLK: 2526 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2527 soft_max_level = mask ? 
(fls(mask) - 1) : 0; 2528 2529 if (soft_max_level >= data->dpm_table.gfx_table.count) { 2530 pr_err("Clock level specified %d is over max allowed %d\n", 2531 soft_max_level, 2532 data->dpm_table.gfx_table.count - 1); 2533 return -EINVAL; 2534 } 2535 2536 data->dpm_table.gfx_table.dpm_state.soft_min_level = 2537 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; 2538 data->dpm_table.gfx_table.dpm_state.soft_max_level = 2539 data->dpm_table.gfx_table.dpm_levels[soft_max_level].value; 2540 2541 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK); 2542 PP_ASSERT_WITH_CODE(!ret, 2543 "Failed to upload boot level to lowest!", 2544 return ret); 2545 2546 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK); 2547 PP_ASSERT_WITH_CODE(!ret, 2548 "Failed to upload dpm max level to highest!", 2549 return ret); 2550 break; 2551 2552 case PP_MCLK: 2553 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2554 soft_max_level = mask ? (fls(mask) - 1) : 0; 2555 2556 if (soft_max_level >= data->dpm_table.mem_table.count) { 2557 pr_err("Clock level specified %d is over max allowed %d\n", 2558 soft_max_level, 2559 data->dpm_table.mem_table.count - 1); 2560 return -EINVAL; 2561 } 2562 2563 data->dpm_table.mem_table.dpm_state.soft_min_level = 2564 data->dpm_table.mem_table.dpm_levels[soft_min_level].value; 2565 data->dpm_table.mem_table.dpm_state.soft_max_level = 2566 data->dpm_table.mem_table.dpm_levels[soft_max_level].value; 2567 2568 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_UCLK_MASK); 2569 PP_ASSERT_WITH_CODE(!ret, 2570 "Failed to upload boot level to lowest!", 2571 return ret); 2572 2573 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_UCLK_MASK); 2574 PP_ASSERT_WITH_CODE(!ret, 2575 "Failed to upload dpm max level to highest!", 2576 return ret); 2577 2578 break; 2579 2580 case PP_SOCCLK: 2581 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2582 soft_max_level = mask ? (fls(mask) - 1) : 0; 2583 2584 if (soft_max_level >= data->dpm_table.soc_table.count) { 2585 pr_err("Clock level specified %d is over max allowed %d\n", 2586 soft_max_level, 2587 data->dpm_table.soc_table.count - 1); 2588 return -EINVAL; 2589 } 2590 2591 data->dpm_table.soc_table.dpm_state.soft_min_level = 2592 data->dpm_table.soc_table.dpm_levels[soft_min_level].value; 2593 data->dpm_table.soc_table.dpm_state.soft_max_level = 2594 data->dpm_table.soc_table.dpm_levels[soft_max_level].value; 2595 2596 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_SOCCLK_MASK); 2597 PP_ASSERT_WITH_CODE(!ret, 2598 "Failed to upload boot level to lowest!", 2599 return ret); 2600 2601 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_SOCCLK_MASK); 2602 PP_ASSERT_WITH_CODE(!ret, 2603 "Failed to upload dpm max level to highest!", 2604 return ret); 2605 2606 break; 2607 2608 case PP_FCLK: 2609 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2610 soft_max_level = mask ? 
(fls(mask) - 1) : 0; 2611 2612 if (soft_max_level >= data->dpm_table.fclk_table.count) { 2613 pr_err("Clock level specified %d is over max allowed %d\n", 2614 soft_max_level, 2615 data->dpm_table.fclk_table.count - 1); 2616 return -EINVAL; 2617 } 2618 2619 data->dpm_table.fclk_table.dpm_state.soft_min_level = 2620 data->dpm_table.fclk_table.dpm_levels[soft_min_level].value; 2621 data->dpm_table.fclk_table.dpm_state.soft_max_level = 2622 data->dpm_table.fclk_table.dpm_levels[soft_max_level].value; 2623 2624 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_FCLK_MASK); 2625 PP_ASSERT_WITH_CODE(!ret, 2626 "Failed to upload boot level to lowest!", 2627 return ret); 2628 2629 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_FCLK_MASK); 2630 PP_ASSERT_WITH_CODE(!ret, 2631 "Failed to upload dpm max level to highest!", 2632 return ret); 2633 2634 break; 2635 2636 case PP_DCEFCLK: 2637 hard_min_level = mask ? (ffs(mask) - 1) : 0; 2638 2639 if (hard_min_level >= data->dpm_table.dcef_table.count) { 2640 pr_err("Clock level specified %d is over max allowed %d\n", 2641 hard_min_level, 2642 data->dpm_table.dcef_table.count - 1); 2643 return -EINVAL; 2644 } 2645 2646 data->dpm_table.dcef_table.dpm_state.hard_min_level = 2647 data->dpm_table.dcef_table.dpm_levels[hard_min_level].value; 2648 2649 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_DCEFCLK_MASK); 2650 PP_ASSERT_WITH_CODE(!ret, 2651 "Failed to upload boot level to lowest!", 2652 return ret); 2653 2654 //TODO: Setting DCEFCLK max dpm level is not supported 2655 2656 break; 2657 2658 case PP_PCIE: 2659 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2660 soft_max_level = mask ? (fls(mask) - 1) : 0; 2661 if (soft_min_level >= NUM_LINK_LEVELS || 2662 soft_max_level >= NUM_LINK_LEVELS) 2663 return -EINVAL; 2664 2665 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2666 PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level, 2667 NULL); 2668 PP_ASSERT_WITH_CODE(!ret, 2669 "Failed to set min link dpm level!", 2670 return ret); 2671 2672 break; 2673 2674 default: 2675 break; 2676 } 2677 2678 return 0; 2679 } 2680 2681 static int vega20_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, 2682 enum amd_dpm_forced_level level) 2683 { 2684 int ret = 0; 2685 uint32_t sclk_mask, mclk_mask, soc_mask; 2686 2687 switch (level) { 2688 case AMD_DPM_FORCED_LEVEL_HIGH: 2689 ret = vega20_force_dpm_highest(hwmgr); 2690 break; 2691 2692 case AMD_DPM_FORCED_LEVEL_LOW: 2693 ret = vega20_force_dpm_lowest(hwmgr); 2694 break; 2695 2696 case AMD_DPM_FORCED_LEVEL_AUTO: 2697 ret = vega20_unforce_dpm_levels(hwmgr); 2698 break; 2699 2700 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 2701 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 2702 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 2703 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 2704 ret = vega20_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask); 2705 if (ret) 2706 return ret; 2707 vega20_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask); 2708 vega20_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask); 2709 vega20_force_clock_level(hwmgr, PP_SOCCLK, 1 << soc_mask); 2710 break; 2711 2712 case AMD_DPM_FORCED_LEVEL_MANUAL: 2713 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 2714 default: 2715 break; 2716 } 2717 2718 return ret; 2719 } 2720 2721 static uint32_t vega20_get_fan_control_mode(struct pp_hwmgr *hwmgr) 2722 { 2723 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2724 2725 if (data->smu_features[GNLD_FAN_CONTROL].enabled == false) 2726 return AMD_FAN_CTRL_MANUAL; 2727 else 2728 return AMD_FAN_CTRL_AUTO; 
2729 } 2730 2731 static void vega20_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) 2732 { 2733 switch (mode) { 2734 case AMD_FAN_CTRL_NONE: 2735 vega20_fan_ctrl_set_fan_speed_percent(hwmgr, 100); 2736 break; 2737 case AMD_FAN_CTRL_MANUAL: 2738 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) 2739 vega20_fan_ctrl_stop_smc_fan_control(hwmgr); 2740 break; 2741 case AMD_FAN_CTRL_AUTO: 2742 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) 2743 vega20_fan_ctrl_start_smc_fan_control(hwmgr); 2744 break; 2745 default: 2746 break; 2747 } 2748 } 2749 2750 static int vega20_get_dal_power_level(struct pp_hwmgr *hwmgr, 2751 struct amd_pp_simple_clock_info *info) 2752 { 2753 #if 0 2754 struct phm_ppt_v2_information *table_info = 2755 (struct phm_ppt_v2_information *)hwmgr->pptable; 2756 struct phm_clock_and_voltage_limits *max_limits = 2757 &table_info->max_clock_voltage_on_ac; 2758 2759 info->engine_max_clock = max_limits->sclk; 2760 info->memory_max_clock = max_limits->mclk; 2761 #endif 2762 return 0; 2763 } 2764 2765 2766 static int vega20_get_sclks(struct pp_hwmgr *hwmgr, 2767 struct pp_clock_levels_with_latency *clocks) 2768 { 2769 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2770 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table); 2771 int i, count; 2772 2773 if (!data->smu_features[GNLD_DPM_GFXCLK].enabled) 2774 return -1; 2775 2776 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; 2777 clocks->num_levels = count; 2778 2779 for (i = 0; i < count; i++) { 2780 clocks->data[i].clocks_in_khz = 2781 dpm_table->dpm_levels[i].value * 1000; 2782 clocks->data[i].latency_in_us = 0; 2783 } 2784 2785 return 0; 2786 } 2787 2788 static uint32_t vega20_get_mem_latency(struct pp_hwmgr *hwmgr, 2789 uint32_t clock) 2790 { 2791 return 25; 2792 } 2793 2794 static int vega20_get_memclocks(struct pp_hwmgr *hwmgr, 2795 struct pp_clock_levels_with_latency *clocks) 2796 { 2797 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2798 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.mem_table); 2799 int i, count; 2800 2801 if (!data->smu_features[GNLD_DPM_UCLK].enabled) 2802 return -1; 2803 2804 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; 2805 clocks->num_levels = data->mclk_latency_table.count = count; 2806 2807 for (i = 0; i < count; i++) { 2808 clocks->data[i].clocks_in_khz = 2809 data->mclk_latency_table.entries[i].frequency = 2810 dpm_table->dpm_levels[i].value * 1000; 2811 clocks->data[i].latency_in_us = 2812 data->mclk_latency_table.entries[i].latency = 2813 vega20_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value); 2814 } 2815 2816 return 0; 2817 } 2818 2819 static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr, 2820 struct pp_clock_levels_with_latency *clocks) 2821 { 2822 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2823 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.dcef_table); 2824 int i, count; 2825 2826 if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled) 2827 return -1; 2828 2829 count = (dpm_table->count > MAX_NUM_CLOCKS) ? 
MAX_NUM_CLOCKS : dpm_table->count; 2830 clocks->num_levels = count; 2831 2832 for (i = 0; i < count; i++) { 2833 clocks->data[i].clocks_in_khz = 2834 dpm_table->dpm_levels[i].value * 1000; 2835 clocks->data[i].latency_in_us = 0; 2836 } 2837 2838 return 0; 2839 } 2840 2841 static int vega20_get_socclocks(struct pp_hwmgr *hwmgr, 2842 struct pp_clock_levels_with_latency *clocks) 2843 { 2844 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2845 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.soc_table); 2846 int i, count; 2847 2848 if (!data->smu_features[GNLD_DPM_SOCCLK].enabled) 2849 return -1; 2850 2851 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; 2852 clocks->num_levels = count; 2853 2854 for (i = 0; i < count; i++) { 2855 clocks->data[i].clocks_in_khz = 2856 dpm_table->dpm_levels[i].value * 1000; 2857 clocks->data[i].latency_in_us = 0; 2858 } 2859 2860 return 0; 2861 2862 } 2863 2864 static int vega20_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, 2865 enum amd_pp_clock_type type, 2866 struct pp_clock_levels_with_latency *clocks) 2867 { 2868 int ret; 2869 2870 switch (type) { 2871 case amd_pp_sys_clock: 2872 ret = vega20_get_sclks(hwmgr, clocks); 2873 break; 2874 case amd_pp_mem_clock: 2875 ret = vega20_get_memclocks(hwmgr, clocks); 2876 break; 2877 case amd_pp_dcef_clock: 2878 ret = vega20_get_dcefclocks(hwmgr, clocks); 2879 break; 2880 case amd_pp_soc_clock: 2881 ret = vega20_get_socclocks(hwmgr, clocks); 2882 break; 2883 default: 2884 return -EINVAL; 2885 } 2886 2887 return ret; 2888 } 2889 2890 static int vega20_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, 2891 enum amd_pp_clock_type type, 2892 struct pp_clock_levels_with_voltage *clocks) 2893 { 2894 clocks->num_levels = 0; 2895 2896 return 0; 2897 } 2898 2899 static int vega20_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, 2900 void *clock_ranges) 2901 { 2902 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2903 Watermarks_t *table = &(data->smc_state_table.water_marks_table); 2904 struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges; 2905 2906 if (!data->registry_data.disable_water_mark && 2907 data->smu_features[GNLD_DPM_DCEFCLK].supported && 2908 data->smu_features[GNLD_DPM_SOCCLK].supported) { 2909 smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges); 2910 data->water_marks_bitmap |= WaterMarksExist; 2911 data->water_marks_bitmap &= ~WaterMarksLoaded; 2912 } 2913 2914 return 0; 2915 } 2916 2917 static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, 2918 enum PP_OD_DPM_TABLE_COMMAND type, 2919 long *input, uint32_t size) 2920 { 2921 struct vega20_hwmgr *data = 2922 (struct vega20_hwmgr *)(hwmgr->backend); 2923 struct vega20_od8_single_setting *od8_settings = 2924 data->od8_settings.od8_settings_array; 2925 OverDriveTable_t *od_table = 2926 &(data->smc_state_table.overdrive_table); 2927 int32_t input_index, input_clk, input_vol, i; 2928 int od8_id; 2929 int ret; 2930 2931 PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage", 2932 return -EINVAL); 2933 2934 switch (type) { 2935 case PP_OD_EDIT_SCLK_VDDC_TABLE: 2936 if (!(od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && 2937 od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id)) { 2938 pr_info("Sclk min/max frequency overdrive not supported\n"); 2939 return -EOPNOTSUPP; 2940 } 2941 2942 for (i = 0; i < size; i += 2) { 2943 if (i + 2 > size) { 2944 pr_info("invalid number of input parameters %d\n", 2945 
size); 2946 return -EINVAL; 2947 } 2948 2949 input_index = input[i]; 2950 input_clk = input[i + 1]; 2951 2952 if (input_index != 0 && input_index != 1) { 2953 pr_info("Invalid index %d\n", input_index); 2954 pr_info("Support min/max sclk frequency setting only which index by 0/1\n"); 2955 return -EINVAL; 2956 } 2957 2958 if (input_clk < od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value || 2959 input_clk > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value) { 2960 pr_info("clock freq %d is not within allowed range [%d - %d]\n", 2961 input_clk, 2962 od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value, 2963 od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value); 2964 return -EINVAL; 2965 } 2966 2967 if ((input_index == 0 && od_table->GfxclkFmin != input_clk) || 2968 (input_index == 1 && od_table->GfxclkFmax != input_clk)) 2969 data->gfxclk_overdrive = true; 2970 2971 if (input_index == 0) 2972 od_table->GfxclkFmin = input_clk; 2973 else 2974 od_table->GfxclkFmax = input_clk; 2975 } 2976 2977 break; 2978 2979 case PP_OD_EDIT_MCLK_VDDC_TABLE: 2980 if (!od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { 2981 pr_info("Mclk max frequency overdrive not supported\n"); 2982 return -EOPNOTSUPP; 2983 } 2984 2985 for (i = 0; i < size; i += 2) { 2986 if (i + 2 > size) { 2987 pr_info("invalid number of input parameters %d\n", 2988 size); 2989 return -EINVAL; 2990 } 2991 2992 input_index = input[i]; 2993 input_clk = input[i + 1]; 2994 2995 if (input_index != 1) { 2996 pr_info("Invalid index %d\n", input_index); 2997 pr_info("Support max Mclk frequency setting only which index by 1\n"); 2998 return -EINVAL; 2999 } 3000 3001 if (input_clk < od8_settings[OD8_SETTING_UCLK_FMAX].min_value || 3002 input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) { 3003 pr_info("clock freq %d is not within allowed range [%d - %d]\n", 3004 input_clk, 3005 od8_settings[OD8_SETTING_UCLK_FMAX].min_value, 3006 od8_settings[OD8_SETTING_UCLK_FMAX].max_value); 3007 return -EINVAL; 3008 } 3009 3010 if (input_index == 1 && od_table->UclkFmax != input_clk) 3011 data->memclk_overdrive = true; 3012 3013 od_table->UclkFmax = input_clk; 3014 } 3015 3016 break; 3017 3018 case PP_OD_EDIT_VDDC_CURVE: 3019 if (!(od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id && 3020 od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id && 3021 od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id && 3022 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && 3023 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && 3024 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id)) { 3025 pr_info("Voltage curve calibrate not supported\n"); 3026 return -EOPNOTSUPP; 3027 } 3028 3029 for (i = 0; i < size; i += 3) { 3030 if (i + 3 > size) { 3031 pr_info("invalid number of input parameters %d\n", 3032 size); 3033 return -EINVAL; 3034 } 3035 3036 input_index = input[i]; 3037 input_clk = input[i + 1]; 3038 input_vol = input[i + 2]; 3039 3040 if (input_index > 2) { 3041 pr_info("Setting for point %d is not supported\n", 3042 input_index + 1); 3043 pr_info("Three supported points index by 0, 1, 2\n"); 3044 return -EINVAL; 3045 } 3046 3047 od8_id = OD8_SETTING_GFXCLK_FREQ1 + 2 * input_index; 3048 if (input_clk < od8_settings[od8_id].min_value || 3049 input_clk > od8_settings[od8_id].max_value) { 3050 pr_info("clock freq %d is not within allowed range [%d - %d]\n", 3051 input_clk, 3052 od8_settings[od8_id].min_value, 3053 od8_settings[od8_id].max_value); 3054 return -EINVAL; 3055 } 3056 3057 od8_id = OD8_SETTING_GFXCLK_VOLTAGE1 + 2 * input_index; 3058 if (input_vol < 
od8_settings[od8_id].min_value || 3059 input_vol > od8_settings[od8_id].max_value) { 3060 pr_info("clock voltage %d is not within allowed range [%d - %d]\n", 3061 input_vol, 3062 od8_settings[od8_id].min_value, 3063 od8_settings[od8_id].max_value); 3064 return -EINVAL; 3065 } 3066 3067 switch (input_index) { 3068 case 0: 3069 od_table->GfxclkFreq1 = input_clk; 3070 od_table->GfxclkVolt1 = input_vol * VOLTAGE_SCALE; 3071 break; 3072 case 1: 3073 od_table->GfxclkFreq2 = input_clk; 3074 od_table->GfxclkVolt2 = input_vol * VOLTAGE_SCALE; 3075 break; 3076 case 2: 3077 od_table->GfxclkFreq3 = input_clk; 3078 od_table->GfxclkVolt3 = input_vol * VOLTAGE_SCALE; 3079 break; 3080 } 3081 } 3082 break; 3083 3084 case PP_OD_RESTORE_DEFAULT_TABLE: 3085 data->gfxclk_overdrive = false; 3086 data->memclk_overdrive = false; 3087 3088 ret = smum_smc_table_manager(hwmgr, 3089 (uint8_t *)od_table, 3090 TABLE_OVERDRIVE, true); 3091 PP_ASSERT_WITH_CODE(!ret, 3092 "Failed to export overdrive table!", 3093 return ret); 3094 break; 3095 3096 case PP_OD_COMMIT_DPM_TABLE: 3097 ret = smum_smc_table_manager(hwmgr, 3098 (uint8_t *)od_table, 3099 TABLE_OVERDRIVE, false); 3100 PP_ASSERT_WITH_CODE(!ret, 3101 "Failed to import overdrive table!", 3102 return ret); 3103 3104 /* retrieve updated gfxclk table */ 3105 if (data->gfxclk_overdrive) { 3106 data->gfxclk_overdrive = false; 3107 3108 ret = vega20_setup_gfxclk_dpm_table(hwmgr); 3109 if (ret) 3110 return ret; 3111 } 3112 3113 /* retrieve updated memclk table */ 3114 if (data->memclk_overdrive) { 3115 data->memclk_overdrive = false; 3116 3117 ret = vega20_setup_memclk_dpm_table(hwmgr); 3118 if (ret) 3119 return ret; 3120 } 3121 break; 3122 3123 default: 3124 return -EINVAL; 3125 } 3126 3127 return 0; 3128 } 3129 3130 static int vega20_set_mp1_state(struct pp_hwmgr *hwmgr, 3131 enum pp_mp1_state mp1_state) 3132 { 3133 uint16_t msg; 3134 int ret; 3135 3136 switch (mp1_state) { 3137 case PP_MP1_STATE_SHUTDOWN: 3138 msg = PPSMC_MSG_PrepareMp1ForShutdown; 3139 break; 3140 case PP_MP1_STATE_UNLOAD: 3141 msg = PPSMC_MSG_PrepareMp1ForUnload; 3142 break; 3143 case PP_MP1_STATE_RESET: 3144 msg = PPSMC_MSG_PrepareMp1ForReset; 3145 break; 3146 case PP_MP1_STATE_NONE: 3147 default: 3148 return 0; 3149 } 3150 3151 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0, 3152 "[PrepareMp1] Failed!", 3153 return ret); 3154 3155 return 0; 3156 } 3157 3158 static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) 3159 { 3160 static const char *ppfeature_name[] = { 3161 "DPM_PREFETCHER", 3162 "GFXCLK_DPM", 3163 "UCLK_DPM", 3164 "SOCCLK_DPM", 3165 "UVD_DPM", 3166 "VCE_DPM", 3167 "ULV", 3168 "MP0CLK_DPM", 3169 "LINK_DPM", 3170 "DCEFCLK_DPM", 3171 "GFXCLK_DS", 3172 "SOCCLK_DS", 3173 "LCLK_DS", 3174 "PPT", 3175 "TDC", 3176 "THERMAL", 3177 "GFX_PER_CU_CG", 3178 "RM", 3179 "DCEFCLK_DS", 3180 "ACDC", 3181 "VR0HOT", 3182 "VR1HOT", 3183 "FW_CTF", 3184 "LED_DISPLAY", 3185 "FAN_CONTROL", 3186 "GFX_EDC", 3187 "GFXOFF", 3188 "CG", 3189 "FCLK_DPM", 3190 "FCLK_DS", 3191 "MP1CLK_DS", 3192 "MP0CLK_DS", 3193 "XGMI", 3194 "ECC"}; 3195 static const char *output_title[] = { 3196 "FEATURES", 3197 "BITMASK", 3198 "ENABLEMENT"}; 3199 uint64_t features_enabled; 3200 int i; 3201 int ret = 0; 3202 int size = 0; 3203 3204 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); 3205 PP_ASSERT_WITH_CODE(!ret, 3206 "[EnableAllSmuFeatures] Failed to get enabled smc features!", 3207 return ret); 3208 3209 size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", 
			features_enabled);
	size += sprintf(buf + size, "%-19s %-22s %s\n",
			output_title[0],
			output_title[1],
			output_title[2]);
	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
				ppfeature_name[i],
				1ULL << i,
				(features_enabled & (1ULL << i)) ? "Y" : "N");
	}

	return size;
}

static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint64_t features_enabled, features_to_enable, features_to_disable;
	int i, ret = 0;
	bool enabled;

	if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
		return -EINVAL;

	ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
	if (ret)
		return ret;

	features_to_disable =
		features_enabled & ~new_ppfeature_masks;
	features_to_enable =
		~features_enabled & new_ppfeature_masks;

	pr_debug("features_to_disable 0x%llx\n", features_to_disable);
	pr_debug("features_to_enable 0x%llx\n", features_to_enable);

	if (features_to_disable) {
		ret = vega20_enable_smc_features(hwmgr, false, features_to_disable);
		if (ret)
			return ret;
	}

	if (features_to_enable) {
		ret = vega20_enable_smc_features(hwmgr, true, features_to_enable);
		if (ret)
			return ret;
	}

	/* Update the cached feature enablement state */
	ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
	if (ret)
		return ret;

	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
			true : false;
		data->smu_features[i].enabled = enabled;
	}

	return 0;
}

static int vega20_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
		>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
}

static int vega20_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
{
	uint32_t width_level;

	width_level = vega20_get_current_pcie_link_width_level(hwmgr);
	if (width_level > LINK_WIDTH_MAX)
		width_level = 0;

	return link_width[width_level];
}

static int vega20_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
		PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
		>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
}

static int vega20_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
{
	uint32_t speed_level;

	speed_level = vega20_get_current_pcie_link_speed_level(hwmgr);
	if (speed_level > LINK_SPEED_MAX)
		speed_level = 0;

	return link_speed[speed_level];
}

static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_od8_single_setting *od8_settings =
			data->od8_settings.od8_settings_array;
	OverDriveTable_t *od_table =
			&(data->smc_state_table.overdrive_table);
	struct phm_ppt_v3_information *pptable_information =
			(struct phm_ppt_v3_information *)hwmgr->pptable;
	PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable;
	struct pp_clock_levels_with_latency clocks;
	struct vega20_single_dpm_table *fclk_dpm_table =
			&(data->dpm_table.fclk_table);
	int i, now, size = 0;
	int ret = 0;
	uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;

	switch (type) {
	case PP_SCLK:
		ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now);
		PP_ASSERT_WITH_CODE(!ret,
				"Attempt to get current gfx clk Failed!",
				return ret);

		if (vega20_get_sclks(hwmgr, &clocks)) {
			size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
				now / 100);
			break;
		}

		for (i = 0; i < clocks.num_levels; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
				i, clocks.data[i].clocks_in_khz / 1000,
				(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
		break;

	case PP_MCLK:
		ret = vega20_get_current_clk_freq(hwmgr, PPCLK_UCLK, &now);
		PP_ASSERT_WITH_CODE(!ret,
				"Attempt to get current mclk freq Failed!",
				return ret);

		if (vega20_get_memclocks(hwmgr, &clocks)) {
			size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
				now / 100);
			break;
		}

		for (i = 0; i < clocks.num_levels; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
				i, clocks.data[i].clocks_in_khz / 1000,
				(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
		break;

	case PP_SOCCLK:
		ret = vega20_get_current_clk_freq(hwmgr, PPCLK_SOCCLK, &now);
		PP_ASSERT_WITH_CODE(!ret,
				"Attempt to get current socclk freq Failed!",
				return ret);

		if (vega20_get_socclocks(hwmgr, &clocks)) {
			size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
				now / 100);
			break;
		}

		for (i = 0; i < clocks.num_levels; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
				i, clocks.data[i].clocks_in_khz / 1000,
				(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
		break;

	case PP_FCLK:
		ret = vega20_get_current_clk_freq(hwmgr, PPCLK_FCLK, &now);
		PP_ASSERT_WITH_CODE(!ret,
				"Attempt to get current fclk freq Failed!",
				return ret);

		for (i = 0; i < fclk_dpm_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
				i, fclk_dpm_table->dpm_levels[i].value,
				fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : "");
		break;

	case PP_DCEFCLK:
		ret = vega20_get_current_clk_freq(hwmgr, PPCLK_DCEFCLK, &now);
		PP_ASSERT_WITH_CODE(!ret,
				"Attempt to get current dcefclk freq Failed!",
				return ret);

		if (vega20_get_dcefclocks(hwmgr, &clocks)) {
			size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
				now / 100);
			break;
		}

		for (i = 0; i < clocks.num_levels; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
				i, clocks.data[i].clocks_in_khz / 1000,
				(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
		break;

	case PP_PCIE:
		current_gen_speed =
			vega20_get_current_pcie_link_speed_level(hwmgr);
		current_lane_width =
			vega20_get_current_pcie_link_width_level(hwmgr);
		for (i = 0; i < NUM_LINK_LEVELS; i++) {
			if (i == 1 && data->pcie_parameters_override) {
				gen_speed = data->pcie_gen_level1;
				lane_width = data->pcie_width_level1;
			} else {
				gen_speed = pptable->PcieGenSpeed[i];
				lane_width = pptable->PcieLaneCount[i];
			}
			size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
					(gen_speed == 0) ? "2.5GT/s," :
					(gen_speed == 1) ? "5.0GT/s," :
					(gen_speed == 2) ? "8.0GT/s," :
					(gen_speed == 3) ? "16.0GT/s," : "",
					(lane_width == 1) ? "x1" :
					(lane_width == 2) ? "x2" :
					(lane_width == 3) ? "x4" :
					(lane_width == 4) ? "x8" :
					(lane_width == 5) ? "x12" :
					(lane_width == 6) ? "x16" : "",
					pptable->LclkFreq[i],
					(current_gen_speed == gen_speed) &&
					(current_lane_width == lane_width) ?
					"*" : "");
		}
		break;

	case OD_SCLK:
		if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
			size = sprintf(buf, "%s:\n", "OD_SCLK");
			size += sprintf(buf + size, "0: %10uMhz\n",
				od_table->GfxclkFmin);
			size += sprintf(buf + size, "1: %10uMhz\n",
				od_table->GfxclkFmax);
		}
		break;

	case OD_MCLK:
		if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
			size = sprintf(buf, "%s:\n", "OD_MCLK");
			size += sprintf(buf + size, "1: %10uMhz\n",
				od_table->UclkFmax);
		}

		break;

	case OD_VDDC_CURVE:
		if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
			size = sprintf(buf, "%s:\n", "OD_VDDC_CURVE");
			size += sprintf(buf + size, "0: %10uMhz %10dmV\n",
				od_table->GfxclkFreq1,
				od_table->GfxclkVolt1 / VOLTAGE_SCALE);
			size += sprintf(buf + size, "1: %10uMhz %10dmV\n",
				od_table->GfxclkFreq2,
				od_table->GfxclkVolt2 / VOLTAGE_SCALE);
			size += sprintf(buf + size, "2: %10uMhz %10dmV\n",
				od_table->GfxclkFreq3,
				od_table->GfxclkVolt3 / VOLTAGE_SCALE);
		}

		break;

	case OD_RANGE:
		size = sprintf(buf, "%s:\n", "OD_RANGE");

		if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
			size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
				od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
				od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
		}

		if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
			size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
				od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
				od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
		}

		if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
			size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
				od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value,
				od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value);
			size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
			size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
				od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value,
				od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value);
			size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value,
				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value);
			size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
				od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value,
				od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value);
			size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value,
				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value);
		}

		break;
	default:
		break;
	}
	return size;
}

static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
		struct vega20_single_dpm_table *dpm_table)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	int ret = 0;

	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		PP_ASSERT_WITH_CODE(dpm_table->count > 0,
				"[SetUclkToHighestDpmLevel] Dpm table has no entry!",
				return -EINVAL);
		PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS,
				"[SetUclkToHighestDpmLevel] Dpm table has too many entries!",
				return -EINVAL);

		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetHardMinByFreq,
				(PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level,
				NULL)),
				"[SetUclkToHighestDpmLevel] Set hard min uclk failed!",
				return ret);
	}

	return ret;
}

static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.fclk_table);
	int ret = 0;

	if (data->smu_features[GNLD_DPM_FCLK].enabled) {
		PP_ASSERT_WITH_CODE(dpm_table->count > 0,
				"[SetFclkToHighestDpmLevel] Dpm table has no entry!",
				return -EINVAL);
		PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_FCLK_DPM_LEVELS,
				"[SetFclkToHighestDpmLevel] Dpm table has too many entries!",
				return -EINVAL);

		dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSoftMinByFreq,
				(PPCLK_FCLK << 16) | dpm_table->dpm_state.soft_min_level,
				NULL)),
				"[SetFclkToHighestDpmLevel] Set soft min fclk failed!",
				return ret);
	}

	return ret;
}

static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	int ret = 0;

	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_NumOfDisplays, 0, NULL);

	ret = vega20_set_uclk_to_highest_dpm_level(hwmgr,
			&data->dpm_table.mem_table);
	if (ret)
		return ret;

	return vega20_set_fclk_to_highest_dpm_level(hwmgr);
}

static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	int result = 0;
	Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);

	if ((data->water_marks_bitmap & WaterMarksExist) &&
	    !(data->water_marks_bitmap & WaterMarksLoaded)) {
		result = smum_smc_table_manager(hwmgr,
				(uint8_t *)wm_table, TABLE_WATERMARKS, false);
		PP_ASSERT_WITH_CODE(!result,
				"Failed to update WMTABLE!",
				return result);
		data->water_marks_bitmap |= WaterMarksLoaded;
	}

	if ((data->water_marks_bitmap & WaterMarksExist) &&
	    data->smu_features[GNLD_DPM_DCEFCLK].supported &&
	    data->smu_features[GNLD_DPM_SOCCLK].supported) {
		result = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_NumOfDisplays,
				hwmgr->display_config->num_display,
				NULL);
	}

	return result;
}

static int vega20_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	int ret = 0;

	if (data->smu_features[GNLD_DPM_UVD].supported) {
		if (data->smu_features[GNLD_DPM_UVD].enabled == enable) {
			if (enable)
				PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already enabled!\n");
			else
				PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already disabled!\n");
		}

		ret = vega20_enable_smc_features(hwmgr,
				enable,
				data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap);
		PP_ASSERT_WITH_CODE(!ret,
				"[EnableDisableUVDDPM] Attempt to Enable/Disable DPM UVD Failed!",
				return ret);
		data->smu_features[GNLD_DPM_UVD].enabled = enable;
	}

	return 0;
}

static void vega20_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);

	if (data->vce_power_gated == bgate)
		return;

	data->vce_power_gated = bgate;
	if (bgate) {
		vega20_enable_disable_vce_dpm(hwmgr, !bgate);
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_VCE,
						AMD_PG_STATE_GATE);
	} else {
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_VCE,
						AMD_PG_STATE_UNGATE);
		vega20_enable_disable_vce_dpm(hwmgr, !bgate);
	}
}

static void vega20_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);

	if (data->uvd_power_gated == bgate)
		return;

	data->uvd_power_gated = bgate;
	vega20_enable_disable_uvd_dpm(hwmgr, !bgate);
}

static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table;
	bool vblank_too_short = false;
	bool disable_mclk_switching;
	bool disable_fclk_switching;
	uint32_t i, latency;

	disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
				  !hwmgr->display_config->multi_monitor_in_sync) ||
				  vblank_too_short;
	latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;

	/* gfxclk */
	dpm_table = &(data->dpm_table.gfx_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* memclk */
	dpm_table = &(data->dpm_table.mem_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* honour DAL's UCLK Hardmin */
	if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
		dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;

	/* Hardmin is dependent on displayconfig */
	if (disable_mclk_switching) {
		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
			if (data->mclk_latency_table.entries[i].latency <= latency) {
				if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
					dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
					break;
				}
			}
		}
	}

	if (hwmgr->display_config->nb_pstate_switch_disable)
		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	if ((disable_mclk_switching &&
	    (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) ||
	     hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value)
		disable_fclk_switching = true;
	else
		disable_fclk_switching = false;

	/* fclk */
	dpm_table = &(data->dpm_table.fclk_table);
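	/*
	 * Note: when NB p-state switching is disabled or fclk switching was
	 * ruled out above, the soft minimum below is raised to the top fclk
	 * DPM level, effectively pinning fclk at its highest state.
	 */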
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
	if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching)
		dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	/* vclk */
	dpm_table = &(data->dpm_table.vclk_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* dclk */
	dpm_table = &(data->dpm_table.dclk_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* socclk */
	dpm_table = &(data->dpm_table.soc_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* eclk */
	dpm_table = &(data->dpm_table.eclk_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	return 0;
}

static bool
vega20_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	bool is_update_required = false;

	if (data->display_timing.num_existing_displays !=
	    hwmgr->display_config->num_display)
		is_update_required = true;

	if (data->registry_data.gfx_clk_deep_sleep_support &&
	    (data->display_timing.min_clock_in_sr !=
	     hwmgr->display_config->min_core_set_clock_in_sr))
		is_update_required = true;

	return is_update_required;
}

static int vega20_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	ret = vega20_disable_all_smu_features(hwmgr);
	PP_ASSERT_WITH_CODE(!ret,
			"[DisableDpmTasks] Failed to disable all smu features!",
			return ret);

	return 0;
}

static int vega20_power_off_asic(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	int result;

	result = vega20_disable_dpm_tasks(hwmgr);
	PP_ASSERT_WITH_CODE((0 == result),
			"[PowerOffAsic] Failed to disable DPM!",
			);
	data->water_marks_bitmap &= ~(WaterMarksLoaded);

	return result;
}

static int conv_power_profile_to_pplib_workload(int power_profile)
{
	int pplib_workload = 0;

	switch (power_profile) {
	case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
		pplib_workload = WORKLOAD_DEFAULT_BIT;
		break;
	case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
		pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
		break;
	case PP_SMC_POWER_PROFILE_POWERSAVING:
		pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
		break;
	case PP_SMC_POWER_PROFILE_VIDEO:
		pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
		break;
	case PP_SMC_POWER_PROFILE_VR:
		pplib_workload = WORKLOAD_PPLIB_VR_BIT;
		break;
	case PP_SMC_POWER_PROFILE_COMPUTE:
		pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
		break;
	case PP_SMC_POWER_PROFILE_CUSTOM:
		pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
		break;
	}

	return pplib_workload;
}

static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
	DpmActivityMonitorCoeffInt_t activity_monitor;
	uint32_t i, size = 0;
	uint16_t workload_type = 0;
	static const char *profile_name[] = {
					"BOOTUP_DEFAULT",
					"3D_FULL_SCREEN",
					"POWER_SAVING",
					"VIDEO",
					"VR",
					"COMPUTE",
					"CUSTOM"};
	static const char *title[] = {
			"PROFILE_INDEX(NAME)",
			"CLOCK_TYPE(NAME)",
			"FPS",
			"UseRlcBusy",
			"MinActiveFreqType",
			"MinActiveFreq",
			"BoosterFreqType",
			"BoosterFreq",
			"PD_Data_limit_c",
			"PD_Data_error_coeff",
			"PD_Data_error_rate_coeff"};
	int result = 0;

	if (!buf)
		return -EINVAL;

	size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
			title[0], title[1], title[2], title[3], title[4], title[5],
			title[6], title[7], title[8], title[9], title[10]);

	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
		workload_type = conv_power_profile_to_pplib_workload(i);
		result = vega20_get_activity_monitor_coeff(hwmgr,
				(uint8_t *)(&activity_monitor), workload_type);
		PP_ASSERT_WITH_CODE(!result,
				"[GetPowerProfile] Failed to get activity monitor!",
				return result);

		size += sprintf(buf + size, "%2d %14s%s:\n",
			i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ");

		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			0,
			"GFXCLK",
			activity_monitor.Gfx_FPS,
			activity_monitor.Gfx_UseRlcBusy,
			activity_monitor.Gfx_MinActiveFreqType,
			activity_monitor.Gfx_MinActiveFreq,
			activity_monitor.Gfx_BoosterFreqType,
			activity_monitor.Gfx_BoosterFreq,
			activity_monitor.Gfx_PD_Data_limit_c,
			activity_monitor.Gfx_PD_Data_error_coeff,
			activity_monitor.Gfx_PD_Data_error_rate_coeff);

		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			1,
			"SOCCLK",
			activity_monitor.Soc_FPS,
			activity_monitor.Soc_UseRlcBusy,
			activity_monitor.Soc_MinActiveFreqType,
			activity_monitor.Soc_MinActiveFreq,
			activity_monitor.Soc_BoosterFreqType,
			activity_monitor.Soc_BoosterFreq,
			activity_monitor.Soc_PD_Data_limit_c,
			activity_monitor.Soc_PD_Data_error_coeff,
			activity_monitor.Soc_PD_Data_error_rate_coeff);

		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			2,
			"UCLK",
			activity_monitor.Mem_FPS,
			activity_monitor.Mem_UseRlcBusy,
			activity_monitor.Mem_MinActiveFreqType,
			activity_monitor.Mem_MinActiveFreq,
			activity_monitor.Mem_BoosterFreqType,
			activity_monitor.Mem_BoosterFreq,
			activity_monitor.Mem_PD_Data_limit_c,
			activity_monitor.Mem_PD_Data_error_coeff,
			activity_monitor.Mem_PD_Data_error_rate_coeff);

		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			3,
			"FCLK",
			activity_monitor.Fclk_FPS,
			activity_monitor.Fclk_UseRlcBusy,
			activity_monitor.Fclk_MinActiveFreqType,
			activity_monitor.Fclk_MinActiveFreq,
			activity_monitor.Fclk_BoosterFreqType,
			activity_monitor.Fclk_BoosterFreq,
			activity_monitor.Fclk_PD_Data_limit_c,
			activity_monitor.Fclk_PD_Data_error_coeff,
			activity_monitor.Fclk_PD_Data_error_rate_coeff);
	}

	return size;
}

static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
	DpmActivityMonitorCoeffInt_t activity_monitor;
	int workload_type, result = 0;
	uint32_t power_profile_mode = input[size];

	if (power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
		pr_err("Invalid power profile mode %d\n", power_profile_mode);
		return -EINVAL;
	}

	if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);

		if (size == 0 && !data->is_custom_profile_set)
			return -EINVAL;
		if (size < 10 && size != 0)
			return -EINVAL;

		result = vega20_get_activity_monitor_coeff(hwmgr,
				(uint8_t *)(&activity_monitor),
				WORKLOAD_PPLIB_CUSTOM_BIT);
		PP_ASSERT_WITH_CODE(!result,
				"[SetPowerProfile] Failed to get activity monitor!",
				return result);

		/* If size == 0, then we want to apply the already-configured
		 * CUSTOM profile again. Just apply it, since we checked its
		 * validity above.
		 */
		if (size == 0)
			goto out;

		switch (input[0]) {
		case 0: /* Gfxclk */
			activity_monitor.Gfx_FPS = input[1];
			activity_monitor.Gfx_UseRlcBusy = input[2];
			activity_monitor.Gfx_MinActiveFreqType = input[3];
			activity_monitor.Gfx_MinActiveFreq = input[4];
			activity_monitor.Gfx_BoosterFreqType = input[5];
			activity_monitor.Gfx_BoosterFreq = input[6];
			activity_monitor.Gfx_PD_Data_limit_c = input[7];
			activity_monitor.Gfx_PD_Data_error_coeff = input[8];
			activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
			break;
		case 1: /* Socclk */
			activity_monitor.Soc_FPS = input[1];
			activity_monitor.Soc_UseRlcBusy = input[2];
			activity_monitor.Soc_MinActiveFreqType = input[3];
			activity_monitor.Soc_MinActiveFreq = input[4];
			activity_monitor.Soc_BoosterFreqType = input[5];
			activity_monitor.Soc_BoosterFreq = input[6];
			activity_monitor.Soc_PD_Data_limit_c = input[7];
			activity_monitor.Soc_PD_Data_error_coeff = input[8];
			activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
			break;
		case 2: /* Uclk */
			activity_monitor.Mem_FPS = input[1];
			activity_monitor.Mem_UseRlcBusy = input[2];
			activity_monitor.Mem_MinActiveFreqType = input[3];
			activity_monitor.Mem_MinActiveFreq = input[4];
			activity_monitor.Mem_BoosterFreqType = input[5];
			activity_monitor.Mem_BoosterFreq = input[6];
			activity_monitor.Mem_PD_Data_limit_c = input[7];
			activity_monitor.Mem_PD_Data_error_coeff = input[8];
			activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
			break;
		case 3: /* Fclk */
			activity_monitor.Fclk_FPS = input[1];
			activity_monitor.Fclk_UseRlcBusy = input[2];
			activity_monitor.Fclk_MinActiveFreqType = input[3];
			activity_monitor.Fclk_MinActiveFreq = input[4];
			activity_monitor.Fclk_BoosterFreqType = input[5];
			activity_monitor.Fclk_BoosterFreq = input[6];
			activity_monitor.Fclk_PD_Data_limit_c = input[7];
			activity_monitor.Fclk_PD_Data_error_coeff = input[8];
			activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
			break;
		}

		result = vega20_set_activity_monitor_coeff(hwmgr,
				(uint8_t *)(&activity_monitor),
				WORKLOAD_PPLIB_CUSTOM_BIT);
		data->is_custom_profile_set = true;
		PP_ASSERT_WITH_CODE(!result,
				"[SetPowerProfile] Failed to set activity monitor!",
				return result);
	}

out:
	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
	workload_type =
		conv_power_profile_to_pplib_workload(power_profile_mode);
	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
					    1 << workload_type,
					    NULL);

	hwmgr->power_profile_mode = power_profile_mode;

	return 0;
}

static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
					 uint32_t virtual_addr_low,
					 uint32_t virtual_addr_hi,
					 uint32_t mc_addr_low,
					 uint32_t mc_addr_hi,
					 uint32_t size)
{
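	/*
	 * Hand the CAC buffer location to the SMU: the system virtual address,
	 * the MC (DRAM) address and the buffer size are passed down through
	 * the SetSystemVirtualDramAddr* and DramLogSetDram* messages below.
	 */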
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSystemVirtualDramAddrHigh,
					virtual_addr_hi,
					NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSystemVirtualDramAddrLow,
					virtual_addr_low,
					NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramAddrHigh,
					mc_addr_hi,
					NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramAddrLow,
					mc_addr_low,
					NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramSize,
					size,
					NULL);
	return 0;
}

static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
		struct PP_TemperatureRange *thermal_data)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);

	memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));

	thermal_data->max = pp_table->TedgeLimit *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->hotspot_crit_max = pp_table->ThotspotLimit *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->mem_crit_max = pp_table->ThbmLimit *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM) *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return 0;
}

static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire)
{
	int res;

	/* I2C bus access can happen very early, when SMU not loaded yet */
	if (!vega20_is_smc_ram_running(hwmgr))
		return 0;

	res = smum_send_msg_to_smc_with_parameter(hwmgr,
						  (acquire ?
						  PPSMC_MSG_RequestI2CBus :
						  PPSMC_MSG_ReleaseI2CBus),
						  0,
						  NULL);

	PP_ASSERT_WITH_CODE(!res, "[SmuI2CAccessBus] Failed to access bus!", return res);
	return res;
}

static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr,
				enum pp_df_cstate state)
{
	int ret;

	/* PPSMC_MSG_DFCstateControl is supported with 40.50 and later fws */
	if (hwmgr->smu_version < 0x283200) {
		pr_err("Df cstate control is supported with 40.50 and later SMC fw!\n");
		return -EINVAL;
	}

	ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state,
						  NULL);
	if (ret)
		pr_err("SetDfCstate failed!\n");

	return ret;
}

static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr,
				  uint32_t pstate)
{
	int ret;

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
						  PPSMC_MSG_SetXgmiMode,
						  pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
						  NULL);
	if (ret)
		pr_err("SetXgmiPstate failed!\n");

	return ret;
}

static void vega20_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics)
{
	memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0));

	gpu_metrics->common_header.structure_size =
			sizeof(struct gpu_metrics_v1_0);
	gpu_metrics->common_header.format_revision = 1;
	gpu_metrics->common_header.content_revision = 0;

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
}

static ssize_t vega20_get_gpu_metrics(struct pp_hwmgr *hwmgr,
				      void **table)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct gpu_metrics_v1_0 *gpu_metrics =
			&data->gpu_metrics_table;
	SmuMetrics_t metrics;
	uint32_t fan_speed_rpm;
	int ret;

	ret = vega20_get_metrics_table(hwmgr, &metrics, true);
	if (ret)
		return ret;

	vega20_init_gpu_metrics_v1_0(gpu_metrics);

	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
	gpu_metrics->temperature_mem = metrics.TemperatureHBM;
	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
	gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;

	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;

	gpu_metrics->average_socket_power = metrics.AverageSocketPower;

	gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;

	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;

	vega20_fan_ctrl_get_fan_speed_rpm(hwmgr, &fan_speed_rpm);
	gpu_metrics->current_fan_speed = (uint16_t)fan_speed_rpm;

	gpu_metrics->pcie_link_width =
			vega20_get_current_pcie_link_width(hwmgr);
	gpu_metrics->pcie_link_speed =
			vega20_get_current_pcie_link_speed(hwmgr);

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_0);
}

static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
	/* init/fini related */
	.backend_init = vega20_hwmgr_backend_init,
	.backend_fini = vega20_hwmgr_backend_fini,
	.asic_setup = vega20_setup_asic_task,
	.power_off_asic = vega20_power_off_asic,
	.dynamic_state_management_enable = vega20_enable_dpm_tasks,
	.dynamic_state_management_disable = vega20_disable_dpm_tasks,
	/* power state related */
	.apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules,
	.pre_display_config_changed = vega20_pre_display_configuration_changed_task,
	.display_config_changed = vega20_display_configuration_changed_task,
	.check_smc_update_required_for_display_configuration =
		vega20_check_smc_update_required_for_display_configuration,
	.notify_smc_display_config_after_ps_adjustment =
		vega20_notify_smc_display_config_after_ps_adjustment,
	/* export to DAL */
	.get_sclk = vega20_dpm_get_sclk,
	.get_mclk = vega20_dpm_get_mclk,
	.get_dal_power_level = vega20_get_dal_power_level,
	.get_clock_by_type_with_latency = vega20_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = vega20_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = vega20_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = vega20_display_clock_voltage_request,
	.get_performance_level = vega20_get_performance_level,
	/* UMD pstate, profile related */
	.force_dpm_level = vega20_dpm_force_dpm_level,
	.get_power_profile_mode = vega20_get_power_profile_mode,
	.set_power_profile_mode = vega20_set_power_profile_mode,
	/* od related */
	.set_power_limit = vega20_set_power_limit,
	.get_sclk_od = vega20_get_sclk_od,
	.set_sclk_od = vega20_set_sclk_od,
	.get_mclk_od = vega20_get_mclk_od,
	.set_mclk_od = vega20_set_mclk_od,
	.odn_edit_dpm_table = vega20_odn_edit_dpm_table,
	/* for sysfs to retrieve/set gfxclk/memclk */
	.force_clock_level = vega20_force_clock_level,
	.print_clock_levels = vega20_print_clock_levels,
	.read_sensor = vega20_read_sensor,
	.get_ppfeature_status = vega20_get_ppfeature_status,
	.set_ppfeature_status = vega20_set_ppfeature_status,
	/* powergate related */
	.powergate_uvd = vega20_power_gate_uvd,
	.powergate_vce = vega20_power_gate_vce,
	/* thermal related */
	.start_thermal_controller = vega20_start_thermal_controller,
	.stop_thermal_controller = vega20_thermal_stop_thermal_controller,
	.get_thermal_temperature_range = vega20_get_thermal_temperature_range,
	.register_irq_handlers = smu9_register_irq_handlers,
	.disable_smc_firmware_ctf = vega20_thermal_disable_alert,
	/* fan control related */
	.get_fan_speed_percent = vega20_fan_ctrl_get_fan_speed_percent,
	.set_fan_speed_percent = vega20_fan_ctrl_set_fan_speed_percent,
	.get_fan_speed_info = vega20_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_rpm = vega20_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm = vega20_fan_ctrl_set_fan_speed_rpm,
	.get_fan_control_mode = vega20_get_fan_control_mode,
	.set_fan_control_mode = vega20_set_fan_control_mode,
	/* smu memory related */
	.notify_cac_buffer_info = vega20_notify_cac_buffer_info,
	.enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost,
	/* BACO related */
	.get_asic_baco_capability = vega20_baco_get_capability,
	.get_asic_baco_state = vega20_baco_get_state,
	.set_asic_baco_state = vega20_baco_set_state,
	.set_mp1_state = vega20_set_mp1_state,
	.smu_i2c_bus_access = vega20_smu_i2c_bus_access,
	.set_df_cstate = vega20_set_df_cstate,
	.set_xgmi_pstate = vega20_set_xgmi_pstate,
	.get_gpu_metrics = vega20_get_gpu_metrics,
};

int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &vega20_hwmgr_funcs;
	hwmgr->pptable_func = &vega20_pptable_funcs;

	return 0;
}