/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include "hwmgr.h"
#include "amd_powerplay.h"
#include "hardwaremanager.h"
#include "ppatomfwctrl.h"
#include "atomfirmware.h"
#include "cgs_common.h"
#include "vega10_powertune.h"
#include "smu9.h"
#include "smu9_driver_if.h"
#include "vega10_inc.h"
#include "soc15_common.h"
#include "pppcielanes.h"
#include "vega10_hwmgr.h"
#include "vega10_smumgr.h"
#include "vega10_processpptables.h"
#include "vega10_pptable.h"
#include "vega10_thermal.h"
#include "pp_debug.h"
#include "amd_pcie_helpers.h"
#include "ppinterrupt.h"
#include "pp_overdriver.h"
#include "pp_thermal.h"
#include "vega10_baco.h"

#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"

#define HBM_MEMORY_CHANNEL_WIDTH	128

/*
 * Lookup from the DF IntLvNumChan field value (see mask/shift below) to the
 * number of HBM memory channels; zero entries are presumably reserved
 * encodings -- TODO confirm against DF register documentation.
 */
static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};

/* Data-fabric register used to derive the memory channel count. */
#define mmDF_CS_AON0_DramBaseAddress0					0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX				0

//DF_CS_AON0_DramBaseAddress0
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT			0x0
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT		0x1
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT		0x4
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT		0x8
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT		0xc
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK			0x00000001L
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK		0x00000002L
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK			0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK			0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK			0xFFFFF000L

/* Internal clock-domain identifiers used by this hwmgr. */
typedef enum {
	CLK_SMNCLK = 0,
	CLK_SOCCLK,
	CLK_MP0CLK,
	CLK_MP1CLK,
	CLK_LCLK,
	CLK_DCEFCLK,
	CLK_VCLK,
	CLK_DCLK,
	CLK_ECLK,
	CLK_UCLK,
	CLK_GFXCLK,
	CLK_COUNT,
} CLOCK_ID_e;

static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);

/*
 * Downcast a generic hw power state to the Vega10 one after validating its
 * magic tag; returns NULL on a type mismatch.
 */
static struct vega10_power_state *cast_phw_vega10_power_state(
				  struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				 return NULL;);

	return (struct vega10_power_state *)hw_ps;
}

/* Const variant of cast_phw_vega10_power_state(); returns NULL on mismatch. */
static const struct vega10_power_state *cast_const_phw_vega10_power_state(
			const struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				 return NULL;);

	return (const struct vega10_power_state *)hw_ps;
}

/*
 * Populate driver-default "registry" settings in the backend: the DPM
 * key_disabled flags are derived from hwmgr->feature_mask, the remaining
 * tunables (fan/thermal/DiDt/quadratic-equation defaults, averaging alphas)
 * are hard-coded driver defaults.
 */
static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	data->registry_data.sclk_dpm_key_disabled =
			hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
	data->registry_data.socclk_dpm_key_disabled =
			hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true;
	data->registry_data.mclk_dpm_key_disabled =
			hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
	data->registry_data.pcie_dpm_key_disabled =
			hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;

	data->registry_data.dcefclk_dpm_key_disabled =
			hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? false : true;

	if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
		data->registry_data.power_containment_support = 1;
		data->registry_data.enable_pkg_pwr_tracking_feature = 1;
		data->registry_data.enable_tdc_limit_feature = 1;
	}

	data->registry_data.clock_stretcher_support =
			hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK ? true : false;

	data->registry_data.ulv_support =
			hwmgr->feature_mask & PP_ULV_MASK ? true : false;

	data->registry_data.sclk_deep_sleep_support =
			hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK ? true : false;

	data->registry_data.disable_water_mark = 0;

	data->registry_data.fan_control_support = 1;
	data->registry_data.thermal_support = 1;
	data->registry_data.fw_ctf_enabled = 1;

	data->registry_data.avfs_support =
		hwmgr->feature_mask & PP_AVFS_MASK ? true : false;
	data->registry_data.led_dpm_enabled = 1;

	data->registry_data.vr0hot_enabled = 1;
	data->registry_data.vr1hot_enabled = 1;
	data->registry_data.regulator_hot_gpio_support = 1;

	/* DiDt on by default, but only SQ ramping and EDC sub-features. */
	data->registry_data.didt_support = 1;
	if (data->registry_data.didt_support) {
		data->registry_data.didt_mode = 6;
		data->registry_data.sq_ramping_support = 1;
		data->registry_data.db_ramping_support = 0;
		data->registry_data.td_ramping_support = 0;
		data->registry_data.tcp_ramping_support = 0;
		data->registry_data.dbr_ramping_support = 0;
		data->registry_data.edc_didt_support = 1;
		data->registry_data.gc_didt_support = 0;
		data->registry_data.psm_didt_support = 0;
	}

	data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT;
	data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
	data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
	data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;

	data->gfxclk_average_alpha = PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT;
	data->socclk_average_alpha = PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT;
	data->uclk_average_alpha = PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT;
	data->gfx_activity_average_alpha = PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT;
}

/*
 * Translate the backend registry settings, power-gating flags, and pptable
 * contents into PHM platform capability bits. Power-tune caps start unset
 * and are enabled selectively from registry_data. Always returns 0.
 */
static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct amdgpu_device *adev = hwmgr->adev;

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPatchPowerState);

	if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableSMU7ThermalManagement);

	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDPowerGating);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_VCEPowerGating);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UnTabledHardwareInterface);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_FanSpeedInTableIsRPM);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ODFuzzyFanControlSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPowerManagement);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SMC);

	/* power tune caps */
	/* assume disabled */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtSupport);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtEDCEnable);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_GCEDC);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PSM);

	/* Re-enable the DiDt sub-caps the registry defaults asked for. */
	if (data->registry_data.didt_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
		if (data->registry_data.sq_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
		if (data->registry_data.db_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
		if (data->registry_data.td_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
		if (data->registry_data.tcp_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
		if (data->registry_data.dbr_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
		if (data->registry_data.edc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
		if (data->registry_data.gc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
		if (data->registry_data.psm_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
	}

	if (data->registry_data.power_containment_support)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_PowerContainment);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_CAC);

	if (table_info->tdp_table->usClockStretchAmount &&
			data->registry_data.clock_stretcher_support)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ClockStretcher);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_AutomaticDCTransition);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UVDDPM);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_VCEDPM);

	return 0;
}

/*
 * Seed the OverDrive (ODN) tables from the stock pptable: copy the VDDC
 * lookup table and the sclk/mclk/socclk dependency tables, clamp min/max
 * VDDC from AVFS data (sanity-capped at 2000), and make sure the last
 * socclk entry covers the advertised memory overdrive limit.
 * Always returns 0; AVFS query failure just leaves min/max at zero so
 * the dependency-table fallback below applies.
 */
static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct vega10_odn_vddc_lookup_table *od_lookup_table;
	struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3];
	struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3];
	struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
	uint32_t i;
	int result;

	result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
	if (!result) {
		data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
		data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
	}

	od_lookup_table = &odn_table->vddc_lookup_table;
	vddc_lookup_table = table_info->vddc_lookup_table;

	for (i = 0; i < vddc_lookup_table->count; i++)
		od_lookup_table->entries[i].us_vdd = vddc_lookup_table->entries[i].us_vdd;

	od_lookup_table->count = vddc_lookup_table->count;

	dep_table[0] = table_info->vdd_dep_on_sclk;
	dep_table[1] = table_info->vdd_dep_on_mclk;
	dep_table[2] = table_info->vdd_dep_on_socclk;
	od_table[0] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_sclk;
	od_table[1] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_mclk;
	od_table[2] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_socclk;

	for (i = 0; i < 3; i++)
		smu_get_voltage_dependency_table_ppt_v1(dep_table[i], od_table[i]);

	/* Fall back to sclk dependency-table bounds when AVFS data is absent
	 * or implausible (>2V).
	 */
	if (odn_table->max_vddc == 0 || odn_table->max_vddc > 2000)
		odn_table->max_vddc = dep_table[0]->entries[dep_table[0]->count - 1].vddc;
	if (odn_table->min_vddc == 0 || odn_table->min_vddc > 2000)
		odn_table->min_vddc = dep_table[0]->entries[0].vddc;

	/* Stretch the top socclk entry to at least the OD memory clock limit. */
	i = od_table[2]->count - 1;
	od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock > od_table[2]->entries[i].clk ?
					hwmgr->platform_descriptor.overdriveLimit.memoryClock :
					od_table[2]->entries[i].clk;
	od_table[2]->entries[i].vddc = odn_table->max_vddc > od_table[2]->entries[i].vddc ?
					odn_table->max_vddc :
					od_table[2]->entries[i].vddc;

	return 0;
}

/*
 * Map each driver feature slot (GNLD_*) to its SMU firmware feature bit and
 * mark which features are supported based on registry defaults, platform
 * caps, SMU firmware version, and PCI identity. Also reads the serial
 * number from the SMU to build adev->unique_id.
 */
static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	int i;
	uint32_t sub_vendor_id, hw_revision;
	uint32_t top32, bottom32;
	struct amdgpu_device *adev = hwmgr->adev;

	vega10_initialize_power_tune_defaults(hwmgr);

	/* Start with every feature unmapped, disabled and unsupported. */
	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		data->smu_features[i].smu_feature_id = 0xffff;
		data->smu_features[i].smu_feature_bitmap = 1 << i;
		data->smu_features[i].enabled = false;
		data->smu_features[i].supported = false;
	}

	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
			FEATURE_DPM_PREFETCHER_BIT;
	data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
			FEATURE_DPM_GFXCLK_BIT;
	data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
			FEATURE_DPM_UCLK_BIT;
	data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
			FEATURE_DPM_SOCCLK_BIT;
	data->smu_features[GNLD_DPM_UVD].smu_feature_id =
			FEATURE_DPM_UVD_BIT;
	data->smu_features[GNLD_DPM_VCE].smu_feature_id =
			FEATURE_DPM_VCE_BIT;
	data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
			FEATURE_DPM_MP0CLK_BIT;
	data->smu_features[GNLD_DPM_LINK].smu_feature_id =
			FEATURE_DPM_LINK_BIT;
	data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
			FEATURE_DPM_DCEFCLK_BIT;
	data->smu_features[GNLD_ULV].smu_feature_id =
			FEATURE_ULV_BIT;
	data->smu_features[GNLD_AVFS].smu_feature_id =
			FEATURE_AVFS_BIT;
	data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
			FEATURE_DS_GFXCLK_BIT;
	data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
			FEATURE_DS_SOCCLK_BIT;
	data->smu_features[GNLD_DS_LCLK].smu_feature_id =
			FEATURE_DS_LCLK_BIT;
	data->smu_features[GNLD_PPT].smu_feature_id =
			FEATURE_PPT_BIT;
	data->smu_features[GNLD_TDC].smu_feature_id =
			FEATURE_TDC_BIT;
	data->smu_features[GNLD_THERMAL].smu_feature_id =
			FEATURE_THERMAL_BIT;
	data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
			FEATURE_GFX_PER_CU_CG_BIT;
	data->smu_features[GNLD_RM].smu_feature_id =
			FEATURE_RM_BIT;
	data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
			FEATURE_DS_DCEFCLK_BIT;
	data->smu_features[GNLD_ACDC].smu_feature_id =
			FEATURE_ACDC_BIT;
	data->smu_features[GNLD_VR0HOT].smu_feature_id =
			FEATURE_VR0HOT_BIT;
	data->smu_features[GNLD_VR1HOT].smu_feature_id =
			FEATURE_VR1HOT_BIT;
	data->smu_features[GNLD_FW_CTF].smu_feature_id =
			FEATURE_FW_CTF_BIT;
	data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
			FEATURE_LED_DISPLAY_BIT;
	data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
			FEATURE_FAN_CONTROL_BIT;
	data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
	data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
	data->smu_features[GNLD_PCC_LIMIT].smu_feature_id = FEATURE_PCC_LIMIT_CONTROL_BIT;

	if (!data->registry_data.prefetcher_dpm_key_disabled)
		data->smu_features[GNLD_DPM_PREFETCHER].supported = true;

	if (!data->registry_data.sclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_GFXCLK].supported = true;

	if (!data->registry_data.mclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_UCLK].supported = true;

	if (!data->registry_data.socclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_SOCCLK].supported = true;

	if (PP_CAP(PHM_PlatformCaps_UVDDPM))
		data->smu_features[GNLD_DPM_UVD].supported = true;

	if (PP_CAP(PHM_PlatformCaps_VCEDPM))
		data->smu_features[GNLD_DPM_VCE].supported = true;

	if (!data->registry_data.pcie_dpm_key_disabled)
		data->smu_features[GNLD_DPM_LINK].supported = true;

	if (!data->registry_data.dcefclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_DCEFCLK].supported = true;

	if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep) &&
	    data->registry_data.sclk_deep_sleep_support) {
		data->smu_features[GNLD_DS_GFXCLK].supported = true;
		data->smu_features[GNLD_DS_SOCCLK].supported = true;
		data->smu_features[GNLD_DS_LCLK].supported = true;
		data->smu_features[GNLD_DS_DCEFCLK].supported = true;
	}

	if (data->registry_data.enable_pkg_pwr_tracking_feature)
		data->smu_features[GNLD_PPT].supported = true;

	if (data->registry_data.enable_tdc_limit_feature)
		data->smu_features[GNLD_TDC].supported = true;

	if (data->registry_data.thermal_support)
		data->smu_features[GNLD_THERMAL].supported = true;

	if (data->registry_data.fan_control_support)
		data->smu_features[GNLD_FAN_CONTROL].supported = true;

	if (data->registry_data.fw_ctf_enabled)
		data->smu_features[GNLD_FW_CTF].supported = true;

	if (data->registry_data.avfs_support)
		data->smu_features[GNLD_AVFS].supported = true;

	if (data->registry_data.led_dpm_enabled)
		data->smu_features[GNLD_LED_DISPLAY].supported = true;

	if (data->registry_data.vr1hot_enabled)
		data->smu_features[GNLD_VR1HOT].supported = true;

	if (data->registry_data.vr0hot_enabled)
		data->smu_features[GNLD_VR0HOT].supported = true;

	smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_GetSmuVersion,
			&hwmgr->smu_version);
	/* ACG firmware has major version 5 */
	if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
		data->smu_features[GNLD_ACG].supported = true;
	if (data->registry_data.didt_support)
		data->smu_features[GNLD_DIDT].supported = true;

	hw_revision = adev->pdev->revision;
	sub_vendor_id = adev->pdev->subsystem_vendor;

	/* PCC limit control only on specific non-AMD-branded SKUs. */
	if ((hwmgr->chip_id == 0x6862 ||
		hwmgr->chip_id == 0x6861 ||
		hwmgr->chip_id == 0x6868) &&
		(hw_revision == 0) &&
		(sub_vendor_id != 0x1002))
		data->smu_features[GNLD_PCC_LIMIT].supported = true;

	/* Get the SN to turn into a Unique ID */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);

	/* NOTE(review): bottom32 lands in the HIGH word and top32 in the low
	 * word here, the opposite of what the names suggest -- confirm this
	 * ordering is intentional before changing it (user-visible ID).
	 */
	adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}

#ifdef PPLIB_VEGA10_EVV_SUPPORT
/*
 * Find the socclk associated with a virtual (leakage) voltage ID by scanning
 * the socclk dependency table. Returns 0 and the clock via *socclk, or
 * -EINVAL if the ID is not present.
 *
 * NOTE(review): the loop bound uses vdd_dep_on_sclk->count while the body
 * indexes vdd_dep_on_socclk->entries[] -- this only works if both tables
 * have the same length; confirm, or bound by vdd_dep_on_socclk->count.
 */
static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
	phm_ppt_v1_voltage_lookup_table *lookup_table,
	uint16_t virtual_voltage_id, int32_t *socclk)
{
	uint8_t entry_id;
	uint8_t voltage_id;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);

	PP_ASSERT_WITH_CODE(lookup_table->count != 0,
			"Lookup table is empty",
			return -EINVAL);

	/* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
	for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
		voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd;
		if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
			break;
	}

	PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count,
			"Can't find requested voltage id in vdd_dep_on_socclk table!",
			return -EINVAL);

	*socclk = table_info->vdd_dep_on_socclk->entries[entry_id].clk;

	return 0;
}

#define ATOM_VIRTUAL_VOLTAGE_ID0	0xff01
/**
 * Get Leakage VDDC based on leakage ID.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * return: always 0.
 */
static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint16_t vv_id;
	uint32_t vddc = 0;
	uint16_t i, j;
	uint32_t sclk = 0;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
			table_info->vdd_dep_on_socclk;
	int result;

	for (i = 0; i < VEGA10_MAX_LEAKAGE_COUNT; i++) {
		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;

		if (!vega10_get_socclk_for_voltage_evv(hwmgr,
				table_info->vddc_lookup_table, vv_id, &sclk)) {
			/* With clock stretching, bump past a CKS-disabled
			 * entry so EVV is evaluated at a stretched clock.
			 */
			if (PP_CAP(PHM_PlatformCaps_ClockStretcher)) {
				for (j = 1; j < socclk_table->count; j++) {
					if (socclk_table->entries[j].clk == sclk &&
							socclk_table->entries[j].cks_enable == 0) {
						sclk += 5000;
						break;
					}
				}
			}

			PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
					VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
					"Error retrieving EVV voltage value!",
					continue);


			/* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
			PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
					"Invalid VDDC value", result = -EINVAL;);

			/* the voltage should not be zero nor equal to leakage ID */
			if (vddc != 0 && vddc != vv_id) {
				data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
				data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
				data->vddc_leakage.count++;
			}
		}
	}

	return 0;
}

/**
 * Change virtual leakage voltage to actual value.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @voltage: pointer to changing voltage
 * @leakage_table: pointer to leakage table
 */
static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
		uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
{
	uint32_t index;

	/* search for leakage voltage ID 0xff01 ~ 0xff08 */
	for (index = 0; index < leakage_table->count; index++) {
		/* if this voltage matches a leakage voltage ID */
		/* patch with actual leakage voltage */
		if (leakage_table->leakage_id[index] == *voltage) {
			*voltage = leakage_table->actual_voltage[index];
			break;
		}
	}

	if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
		pr_info("Voltage value looks like a Leakage ID but it's not patched\n");
}

/**
 * Patch voltage lookup table by EVV leakages.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @lookup_table: pointer to voltage lookup table
 * @leakage_table: pointer to leakage table
 * return: always 0
 */
static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
		phm_ppt_v1_voltage_lookup_table *lookup_table,
		struct vega10_leakage_voltage *leakage_table)
{
	uint32_t i;

	for (i = 0; i < lookup_table->count; i++)
		vega10_patch_with_vdd_leakage(hwmgr,
				&lookup_table->entries[i].us_vdd, leakage_table);

	return 0;
}

/* Patch a single clock-voltage limit against the leakage table; always 0. */
static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
		struct pp_hwmgr *hwmgr, struct vega10_leakage_voltage *leakage_table,
		uint16_t *vddc)
{
	vega10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);

	return 0;
}
#endif

/*
 * Resolve the voltage *indices* stored in the clock dependency tables
 * (socclk/sclk/dcefclk/pixclk/dispclk/phyclk, the MM table, and the mclk
 * table's vddc/vddci/mvdd) into actual voltages via the lookup tables.
 * Always returns 0.
 */
static int vega10_patch_voltage_dependency_tables_with_lookup_table(
		struct pp_hwmgr *hwmgr)
{
	uint8_t entry_id, voltage_id;
	unsigned i;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
			table_info->mm_dep_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
			table_info->vdd_dep_on_mclk;

	for (i = 0; i < 6; i++) {
		struct phm_ppt_v1_clock_voltage_dependency_table *vdt;
		switch (i) {
		case 0: vdt = table_info->vdd_dep_on_socclk; break;
		case 1: vdt = table_info->vdd_dep_on_sclk; break;
		case 2: vdt = table_info->vdd_dep_on_dcefclk; break;
		case 3: vdt = table_info->vdd_dep_on_pixclk; break;
		case 4: vdt = table_info->vdd_dep_on_dispclk; break;
		case 5: vdt = table_info->vdd_dep_on_phyclk; break;
		}

		for (entry_id = 0; entry_id < vdt->count; entry_id++) {
			voltage_id = vdt->entries[entry_id].vddInd;
			vdt->entries[entry_id].vddc =
					table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
		}
	}

	for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
		voltage_id = mm_table->entries[entry_id].vddcInd;
		mm_table->entries[entry_id].vddc =
			table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
	}

	for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
		voltage_id = mclk_table->entries[entry_id].vddInd;
		mclk_table->entries[entry_id].vddc =
			table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
		voltage_id = mclk_table->entries[entry_id].vddciInd;
		mclk_table->entries[entry_id].vddci =
			table_info->vddci_lookup_table->entries[voltage_id].us_vdd;
		voltage_id = mclk_table->entries[entry_id].mvddInd;
		mclk_table->entries[entry_id].mvdd =
			table_info->vddmem_lookup_table->entries[voltage_id].us_vdd;
	}


	return 0;

}

/*
 * In-place insertion sort of the voltage lookup table into ascending us_vdd
 * order. Returns -EINVAL on a NULL or empty table, 0 otherwise.
 */
static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
		struct phm_ppt_v1_voltage_lookup_table *lookup_table)
{
	uint32_t table_size, i, j;

	PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
		"Lookup table is empty", return -EINVAL);

	table_size = lookup_table->count;

	/* Sorting voltages */
	for (i = 0; i < table_size - 1; i++) {
		for (j = i + 1; j > 0; j--) {
			if (lookup_table->entries[j].us_vdd <
					lookup_table->entries[j - 1].us_vdd) {
				swap(lookup_table->entries[j - 1],
				     lookup_table->entries[j]);
			}
		}
	}

	return 0;
}

/*
 * Run all pptable post-processing steps (leakage patching when EVV support
 * is built in, voltage-index resolution, lookup-table sort). Collects the
 * last non-zero step result but keeps going, so one failing step does not
 * skip the rest.
 */
static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	int tmp_result;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
#ifdef PPLIB_VEGA10_EVV_SUPPORT
	struct vega10_hwmgr *data = hwmgr->backend;

	tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr,
			table_info->vddc_lookup_table, &(data->vddc_leakage));
	if (tmp_result)
		result = tmp_result;

	tmp_result = vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
			&(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
	if (tmp_result)
		result = tmp_result;
#endif

	tmp_result = vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
	if (tmp_result)
		result = tmp_result;

	tmp_result = vega10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
	if (tmp_result)
		result = tmp_result;

	return result;
}

/*
 * Cache the AC max clock/voltage limits from the top entries of the
 * dependency tables into both table_info and hwmgr->dyn_state.
 * Returns -EINVAL if a mandatory table is missing or empty.
 *
 * NOTE(review): "allowed_sclk_vdd_table" is initialized from
 * vdd_dep_on_socclk, not vdd_dep_on_sclk -- confirm this is intended.
 */
static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
			table_info->vdd_dep_on_socclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
			table_info->vdd_dep_on_mclk;

	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table,
		"VDD dependency on SCLK table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
		"VDD dependency on SCLK table is empty. This table is mandatory", return -EINVAL);

	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table,
		"VDD dependency on MCLK table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
		"VDD dependency on MCLK table is empty. This table is mandatory", return -EINVAL);

	table_info->max_clock_voltage_on_ac.sclk =
		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
	table_info->max_clock_voltage_on_ac.mclk =
		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
	table_info->max_clock_voltage_on_ac.vddc =
		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
	table_info->max_clock_voltage_on_ac.vddci =
		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;

	hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
		table_info->max_clock_voltage_on_ac.sclk;
	hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
		table_info->max_clock_voltage_on_ac.mclk;
	hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
		table_info->max_clock_voltage_on_ac.vddc;
	hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
		table_info->max_clock_voltage_on_ac.vddci;

	return 0;
}

/* Free backend allocations made in vega10_hwmgr_backend_init(); always 0. */
static int vega10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;

	kfree(hwmgr->backend);
	hwmgr->backend = NULL;

	return 0;
}

/*
 * Allocate and initialize the Vega10 hwmgr backend: registry defaults,
 * voltage-control discovery (VDDCR_SOC must be SVID2 or init fails),
 * platform caps, DPM feature defaults, pptable post-processing, and --
 * for bare-metal only (hwmgr->not_vf) -- overdrive fan defaults and the
 * memory-channel count read from the data fabric.
 * Returns 0 on success, -ENOMEM/-EINVAL/-1 on failure.
 */
static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	struct vega10_hwmgr *data;
	uint32_t config_telemetry = 0;
	struct pp_atomfwctrl_voltage_table vol_table;
	struct amdgpu_device *adev = hwmgr->adev;

	data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	hwmgr->backend = data;

	/* "workload_prority" [sic] is the field name declared in hwmgr.h. */
	hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	vega10_set_default_registry_data(hwmgr);
	data->disable_dpm_mask = 0xff;

	/* need to set voltage control types before EVV patching */
	data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE;
	data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE;

	/* VDDCR_SOC */
	if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
		if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
				VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2,
				&vol_table)) {
			/* VDDC telemetry goes in the low 16 bits. */
			config_telemetry = ((vol_table.telemetry_slope << 8) & 0xff00) |
					(vol_table.telemetry_offset & 0xff);
			data->vddc_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
		}
	} else {
		kfree(hwmgr->backend);
		hwmgr->backend = NULL;
		PP_ASSERT_WITH_CODE(false,
				"VDDCR_SOC is not SVID2!",
				return -1);
	}

	/* MVDDC */
	if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
			VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) {
		if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2,
				&vol_table)) {
			/* MVDDC telemetry goes in the high 16 bits. */
			config_telemetry |=
					((vol_table.telemetry_slope << 24) & 0xff000000) |
					((vol_table.telemetry_offset << 16) & 0xff0000);
			data->mvdd_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
		}
	}

	/* VDDCI_MEM */
	if (PP_CAP(PHM_PlatformCaps_ControlVDDCI)) {
		if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
	}

	data->config_telemetry = config_telemetry;

	vega10_set_features_platform_caps(hwmgr);

	vega10_init_dpm_defaults(hwmgr);

#ifdef PPLIB_VEGA10_EVV_SUPPORT
	/* Get leakage voltage based on leakage ID. */
	PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr),
			"Get EVV Voltage Failed. Abort Driver loading!",
			return -1);
#endif

	/* Patch our voltage dependency table with actual leakage voltage
	 * We need to perform leakage translation before it's used by other functions
	 */
	vega10_complete_dependency_tables(hwmgr);

	/* Parse pptable data read from VBIOS */
	vega10_set_private_data_based_on_pptable(hwmgr);

	data->is_tlu_enabled = false;

	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
			VEGA10_MAX_HARDWARE_POWERLEVELS;
	hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

	hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
	/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
	hwmgr->platform_descriptor.clockStep.engineClock = 500;
	hwmgr->platform_descriptor.clockStep.memoryClock = 500;

	data->total_active_cus = adev->gfx.cu_info.number;
	/* VF path stops here; the remaining setup touches fan and DF regs. */
	if (!hwmgr->not_vf)
		return result;

	/* Setup default Overdrive Fan control settings */
	data->odn_fan_table.target_fan_speed =
			hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
	data->odn_fan_table.target_temperature =
			hwmgr->thermal_controller.
			advanceFanControlParameters.ucTargetTemperature;
	data->odn_fan_table.min_performance_clock =
			hwmgr->thermal_controller.advanceFanControlParameters.
			ulMinFanSCLKAcousticLimit;
	data->odn_fan_table.min_fan_limit =
			hwmgr->thermal_controller.
			advanceFanControlParameters.usFanPWMMinLimit *
			hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;

	data->mem_channels = (RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0) &
			DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
			DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
	PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number),
			"Mem Channel Index Exceeded maximum!",
			return -EINVAL);

	return result;
}

/* Reset the low-SCLK interrupt threshold; always 0. */
static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	data->low_sclk_interrupt_threshold = 0;

	return 0;
}

/*
 * Pack up to three LED GPIO pin numbers, taken from the set bits of the
 * LED-DPM GPIO LUT mask, one per byte into pp_table->LedPin0..2. If the
 * LUT query fails the pins are left at 0. Always returns 0.
 */
static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);

	struct pp_atomfwctrl_voltage_table table;
	uint8_t i, j;
	uint32_t mask = 0;
	uint32_t tmp;
	int32_t ret = 0;

	ret = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_LEDDPM,
						VOLTAGE_OBJ_GPIO_LUT, &table);

	if (!ret) {
		tmp = table.mask_low;
		for (i = 0, j = 0; i < 32; i++) {
			if (tmp & 1) {
				mask |= (uint32_t)(i << (8 * j));
				if (++j >= 3)
					break;
			}
			tmp >>= 1;
		}
	}

	pp_table->LedPin0 = (uint8_t)(mask & 0xff);
	pp_table->LedPin1 = (uint8_t)((mask >> 8) & 0xff);
	pp_table->LedPin2 = (uint8_t)((mask >> 16) & 0xff);
	return 0;
}

/*
 * One-time ASIC setup on load: sclk threshold, LED DPM pins, and telling the
 * SMU there are no displays yet. Skipped entirely on virtual functions.
 */
static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	if (!hwmgr->not_vf)
		return 0;

	PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
			"Failed to init sclk threshold!",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr),
			"Failed to set up led dpm config!",
			return -EINVAL);

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_NumOfDisplays,
				0,
				NULL);

	return 0;
}

/**
 * Remove repeated
voltage values and create table with unique values.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @vol_table: the pointer to changing voltage table
 * return: 0 in success
 */
static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
		struct pp_atomfwctrl_voltage_table *vol_table)
{
	uint32_t i, j;
	uint16_t vvalue;
	bool found = false;
	struct pp_atomfwctrl_voltage_table *table;

	PP_ASSERT_WITH_CODE(vol_table,
			"Voltage Table empty.", return -EINVAL);
	/* build the de-duplicated copy in a scratch table, then copy back */
	table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table),
			GFP_KERNEL);

	if (!table)
		return -ENOMEM;

	table->mask_low = vol_table->mask_low;
	table->phase_delay = vol_table->phase_delay;

	for (i = 0; i < vol_table->count; i++) {
		vvalue = vol_table->entries[i].value;
		found = false;

		/* keep only the first occurrence of each voltage value */
		for (j = 0; j < table->count; j++) {
			if (vvalue == table->entries[j].value) {
				found = true;
				break;
			}
		}

		if (!found) {
			table->entries[table->count].value = vvalue;
			table->entries[table->count].smio_low =
					vol_table->entries[i].smio_low;
			table->count++;
		}
	}

	memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table));
	kfree(table);

	return 0;
}

/* Build the MVDD voltage table from the MCLK dependency table and
 * de-duplicate it.  Returns 0 on success, negative error otherwise.
 */
static int vega10_get_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
		phm_ppt_v1_clock_voltage_dependency_table *dep_table,
		struct pp_atomfwctrl_voltage_table *vol_table)
{
	int i;

	PP_ASSERT_WITH_CODE(dep_table->count,
			"Voltage Dependency Table empty.",
			return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;
	vol_table->count = dep_table->count;

	for (i = 0; i < vol_table->count; i++) {
		vol_table->entries[i].value = dep_table->entries[i].mvdd;
		vol_table->entries[i].smio_low = 0;
	}

	PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr,
			vol_table),
			"Failed to trim MVDD Table!",
			return -1);

	return 0;
}

/* Build the VDDCI voltage table from the MCLK dependency table and
 * de-duplicate it.  Returns 0 on success, negative error otherwise.
 */
static int vega10_get_vddci_voltage_table(struct pp_hwmgr *hwmgr,
		phm_ppt_v1_clock_voltage_dependency_table *dep_table,
		struct pp_atomfwctrl_voltage_table *vol_table)
{
	uint32_t i;

	PP_ASSERT_WITH_CODE(dep_table->count,
			"Voltage Dependency Table empty.",
			return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;
	vol_table->count = dep_table->count;

	for (i = 0; i < dep_table->count; i++) {
		vol_table->entries[i].value = dep_table->entries[i].vddci;
		vol_table->entries[i].smio_low = 0;
	}

	PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, vol_table),
			"Failed to trim VDDCI table.",
			return -1);

	return 0;
}

/* Build the VDDC voltage table from the SCLK dependency table.  Unlike the
 * MVDD/VDDCI variants this table is intentionally NOT trimmed: one entry
 * per dependency entry is preserved.
 */
static int vega10_get_vdd_voltage_table(struct pp_hwmgr *hwmgr,
		phm_ppt_v1_clock_voltage_dependency_table *dep_table,
		struct pp_atomfwctrl_voltage_table *vol_table)
{
	int i;

	PP_ASSERT_WITH_CODE(dep_table->count,
			"Voltage Dependency Table empty.",
			return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;
	vol_table->count = dep_table->count;

	for (i = 0; i < vol_table->count; i++) {
		vol_table->entries[i].value = dep_table->entries[i].vddc;
		vol_table->entries[i].smio_low = 0;
	}

	return 0;
}

/* ---- Voltage Tables ----
 * If the voltage table would be bigger than
 * what will fit into the state table on
 * the SMC keep only the higher entries.
 */
static void vega10_trim_voltage_table_to_fit_state_table(
		struct pp_hwmgr *hwmgr,
		uint32_t max_vol_steps,
		struct pp_atomfwctrl_voltage_table *vol_table)
{
	unsigned int i, diff;

	if (vol_table->count <= max_vol_steps)
		return;

	diff = vol_table->count - max_vol_steps;

	/* shift entries down, discarding the lowest 'diff' voltages so the
	 * highest max_vol_steps entries survive
	 */
	for (i = 0; i < max_vol_steps; i++)
		vol_table->entries[i] = vol_table->entries[i + diff];

	vol_table->count = max_vol_steps;
}

/**
 * Create Voltage Tables.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * return: always 0
 */
static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	int result;

	/* MVDD: derived from the MCLK dependency table */
	if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
			data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) {
		result = vega10_get_mvdd_voltage_table(hwmgr,
				table_info->vdd_dep_on_mclk,
				&(data->mvdd_voltage_table));
		PP_ASSERT_WITH_CODE(!result,
				"Failed to retrieve MVDDC table!",
				return result);
	}

	/* VDDCI: only built when there is no dedicated control */
	if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) {
		result = vega10_get_vddci_voltage_table(hwmgr,
				table_info->vdd_dep_on_mclk,
				&(data->vddci_voltage_table));
		PP_ASSERT_WITH_CODE(!result,
				"Failed to retrieve VDDCI_MEM table!",
				return result);
	}

	/* VDDC: derived from the SCLK dependency table */
	if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
			data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) {
		result = vega10_get_vdd_voltage_table(hwmgr,
				table_info->vdd_dep_on_sclk,
				&(data->vddc_voltage_table));
		PP_ASSERT_WITH_CODE(!result,
				"Failed to retrieve VDDCR_SOC table!",
				return result);
	}

	/* The SMC state table holds at most 16 voltage levels per rail;
	 * trim from the bottom if the VBIOS supplied more.
	 */
	PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16,
			"Too many voltage values for VDDC. Trimming to fit state table.",
			vega10_trim_voltage_table_to_fit_state_table(hwmgr,
					16, &(data->vddc_voltage_table)));

	PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16,
			"Too many voltage values for VDDCI. Trimming to fit state table.",
			vega10_trim_voltage_table_to_fit_state_table(hwmgr,
					16, &(data->vddci_voltage_table)));

	PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16,
			"Too many voltage values for MVDD. Trimming to fit state table.",
			vega10_trim_voltage_table_to_fit_state_table(hwmgr,
					16, &(data->mvdd_voltage_table)));


	return 0;
}

/*
 * vega10_init_dpm_state
 * Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
 *
 * @dpm_state: - the address of the DPM Table to initialize.
 * return: None.
 */
static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
{
	dpm_state->soft_min_level = 0xff;
	dpm_state->soft_max_level = 0xff;
	dpm_state->hard_min_level = 0xff;
	dpm_state->hard_max_level = 0xff;
}

/* Copy a clock dependency table into a single DPM table, skipping entries
 * that would make the level sequence non-monotonic (each accepted entry's
 * clock must be >= the previously accepted one).
 */
static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
		struct vega10_single_dpm_table *dpm_table,
		struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
	int i;

	dpm_table->count = 0;

	for (i = 0; i < dep_table->count; i++) {
		if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
				dep_table->entries[i].clk) {
			dpm_table->dpm_levels[dpm_table->count].value =
					dep_table->entries[i].clk;
			dpm_table->dpm_levels[dpm_table->count].enabled = true;
			dpm_table->count++;
		}
	}
}

/* Fill the PCIe link-level table from the VBIOS PCIe table, honouring any
 * registry overrides for gen speed, lane width and LCLK.
 *
 * NOTE(review): the loop indexes bios_pcie_table->entries[i] for all
 * NUM_LINK_LEVELS levels but only asserts count != 0 — this assumes the
 * VBIOS table has at least NUM_LINK_LEVELS entries; confirm against
 * vega10_processpptables.c.
 */
static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_pcie_table *bios_pcie_table =
			table_info->pcie_table;
	uint32_t i;

	PP_ASSERT_WITH_CODE(bios_pcie_table->count,
			"Incorrect number of PCIE States from VBIOS!",
			return -1);

	for (i = 0; i < NUM_LINK_LEVELS; i++) {
		if (data->registry_data.pcieSpeedOverride)
			pcie_table->pcie_gen[i] =
					data->registry_data.pcieSpeedOverride;
		else
			pcie_table->pcie_gen[i] =
					bios_pcie_table->entries[i].gen_speed;

		if (data->registry_data.pcieLaneOverride)
			pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
					data->registry_data.pcieLaneOverride);
		else
			pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
					bios_pcie_table->entries[i].lane_width);
		if (data->registry_data.pcieClockOverride)
			pcie_table->lclk[i] =
					data->registry_data.pcieClockOverride;
		else
			pcie_table->lclk[i] =
					bios_pcie_table->entries[i].pcie_sclk;
	}

	pcie_table->count = NUM_LINK_LEVELS;

	return 0;
}

/*
 * This function is to initialize all DPM state tables
 * for SMU based on the dependency table.
 * Dynamic state patching function will then trim these
 * state tables to the allowed range based
 * on the power policy or external client requests,
 * such as UVD request, etc.
 */
static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct vega10_single_dpm_table *dpm_table;
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_soc_table =
			table_info->vdd_dep_on_socclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_gfx_table =
			table_info->vdd_dep_on_sclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
			table_info->vdd_dep_on_mclk;
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_mm_table =
			table_info->mm_dep_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_dcef_table =
			table_info->vdd_dep_on_dcefclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_pix_table =
			table_info->vdd_dep_on_pixclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_disp_table =
			table_info->vdd_dep_on_dispclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_phy_table =
			table_info->vdd_dep_on_phyclk;

	/* SOCCLK, GFXCLK and MCLK dependency tables are mandatory */
	PP_ASSERT_WITH_CODE(dep_soc_table,
			"SOCCLK dependency table is missing. This table is mandatory",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1,
			"SOCCLK dependency table is empty. This table is mandatory",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(dep_gfx_table,
			"GFXCLK dependency table is missing. This table is mandatory",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1,
			"GFXCLK dependency table is empty. This table is mandatory",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(dep_mclk_table,
			"MCLK dependency table is missing. This table is mandatory",
			return -EINVAL);
	/* NOTE(review): the message below is garbled upstream ("has to have
	 * is missing"); it fires when the MCLK table is empty.
	 */
	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
			"MCLK dependency table has to have is missing. This table is mandatory",
			return -EINVAL);

	/* Initialize Sclk DPM table based on allow Sclk values */
	dpm_table = &(data->dpm_table.soc_table);
	vega10_setup_default_single_dpm_table(hwmgr,
			dpm_table,
			dep_soc_table);

	vega10_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.gfx_table);
	vega10_setup_default_single_dpm_table(hwmgr,
			dpm_table,
			dep_gfx_table);
	/* default the overdrive ceiling to the highest stock level */
	if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
		hwmgr->platform_descriptor.overdriveLimit.engineClock =
				dpm_table->dpm_levels[dpm_table->count-1].value;
	vega10_init_dpm_state(&(dpm_table->dpm_state));

	/* Initialize Mclk DPM table based on allow Mclk values */
	data->dpm_table.mem_table.count = 0;
	dpm_table = &(data->dpm_table.mem_table);
	vega10_setup_default_single_dpm_table(hwmgr,
			dpm_table,
			dep_mclk_table);
	if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
		hwmgr->platform_descriptor.overdriveLimit.memoryClock =
				dpm_table->dpm_levels[dpm_table->count-1].value;
	vega10_init_dpm_state(&(dpm_table->dpm_state));

	/* ECLK/VCLK/DCLK come from the multimedia dependency table; keep
	 * only monotonically non-decreasing entries, and enable only the
	 * first level of each.
	 */
	data->dpm_table.eclk_table.count = 0;
	dpm_table = &(data->dpm_table.eclk_table);
	for (i = 0; i < dep_mm_table->count; i++) {
		if (i == 0 || dpm_table->dpm_levels
				[dpm_table->count - 1].value <=
				dep_mm_table->entries[i].eclk) {
			dpm_table->dpm_levels[dpm_table->count].value =
					dep_mm_table->entries[i].eclk;
			dpm_table->dpm_levels[dpm_table->count].enabled =
					(i == 0) ? true : false;
			dpm_table->count++;
		}
	}
	vega10_init_dpm_state(&(dpm_table->dpm_state));

	data->dpm_table.vclk_table.count = 0;
	data->dpm_table.dclk_table.count = 0;
	dpm_table = &(data->dpm_table.vclk_table);
	for (i = 0; i < dep_mm_table->count; i++) {
		if (i == 0 || dpm_table->dpm_levels
				[dpm_table->count - 1].value <=
				dep_mm_table->entries[i].vclk) {
			dpm_table->dpm_levels[dpm_table->count].value =
					dep_mm_table->entries[i].vclk;
			dpm_table->dpm_levels[dpm_table->count].enabled =
					(i == 0) ? true : false;
			dpm_table->count++;
		}
	}
	vega10_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.dclk_table);
	for (i = 0; i < dep_mm_table->count; i++) {
		if (i == 0 || dpm_table->dpm_levels
				[dpm_table->count - 1].value <=
				dep_mm_table->entries[i].dclk) {
			dpm_table->dpm_levels[dpm_table->count].value =
					dep_mm_table->entries[i].dclk;
			dpm_table->dpm_levels[dpm_table->count].enabled =
					(i == 0) ? true : false;
			dpm_table->count++;
		}
	}
	vega10_init_dpm_state(&(dpm_table->dpm_state));

	/* Assume there is no headless Vega10 for now */
	dpm_table = &(data->dpm_table.dcef_table);
	vega10_setup_default_single_dpm_table(hwmgr,
			dpm_table,
			dep_dcef_table);

	vega10_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.pixel_table);
	vega10_setup_default_single_dpm_table(hwmgr,
			dpm_table,
			dep_pix_table);

	vega10_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.display_table);
	vega10_setup_default_single_dpm_table(hwmgr,
			dpm_table,
			dep_disp_table);

	vega10_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.phy_table);
	vega10_setup_default_single_dpm_table(hwmgr,
			dpm_table,
			dep_phy_table);

	vega10_init_dpm_state(&(dpm_table->dpm_state));

	vega10_setup_default_pcie_table(hwmgr);

	/* Zero out the saved copy of the CUSTOM profile
	 * This will be checked when trying to set the profile
	 * and will require that new values be passed in
	 */
	data->custom_profile_mode[0] = 0;
	data->custom_profile_mode[1] = 0;
	data->custom_profile_mode[2] = 0;
	data->custom_profile_mode[3] = 0;

	/* save a copy of the default DPM table */
	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
			sizeof(struct vega10_dpm_table));

	return 0;
}

/*
 * vega10_populate_ulv_state
 * Function to provide parameters for Ultra Low Voltage state to SMC.
 *
 * @hwmgr: - the address of the hardware manager.
 * return: Always 0.
 */
static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);

	data->smc_state_table.pp_table.UlvOffsetVid =
			(uint8_t)table_info->us_ulv_voltage_offset;

	data->smc_state_table.pp_table.UlvSmnclkDid =
			(uint8_t)(table_info->us_ulv_smnclk_did);
	data->smc_state_table.pp_table.UlvMp1clkDid =
			(uint8_t)(table_info->us_ulv_mp1clk_did);
	data->smc_state_table.pp_table.UlvGfxclkBypass =
			(uint8_t)(table_info->us_ulv_gfxclk_bypass);
	data->smc_state_table.pp_table.UlvPhaseSheddingPsi0 =
			(uint8_t)(data->vddc_voltage_table.psi0_enable);
	data->smc_state_table.pp_table.UlvPhaseSheddingPsi1 =
			(uint8_t)(data->vddc_voltage_table.psi1_enable);

	return 0;
}

/* Convert one LCLK frequency into its DFS divider ID via the VBIOS PLL
 * divider tables.  Returns 0 on success.
 */
static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
		uint32_t lclock, uint8_t *curr_lclk_did)
{
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;

	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
			hwmgr,
			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
			lclock, &dividers),
			"Failed to get LCLK clock settings from VBIOS!",
			return -1);

	*curr_lclk_did = dividers.ulDid;

	return 0;
}

/* Populate the SMC PCIe link levels from the driver's pcie_table.  Any
 * remaining SMC slots beyond pcie_table->count are padded with the last
 * valid level.
 */
static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
{
	int result = -1;
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega10_pcie_table *pcie_table =
			&(data->dpm_table.pcie_table);
	uint32_t i, j;

	for (i = 0; i < pcie_table->count; i++) {
		pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[i];
		pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[i];

		result = vega10_populate_single_lclk_level(hwmgr,
				pcie_table->lclk[i], &(pp_table->LclkDid[i]));
		if (result) {
			pr_info("Populate LClock Level %d Failed!\n", i);
			return result;
		}
	}

	/* pad the remaining SMC link slots with the last real level */
	j = i - 1;
	while (i < NUM_LINK_LEVELS) {
		pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[j];
		pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[j];

		result = vega10_populate_single_lclk_level(hwmgr,
				pcie_table->lclk[j], &(pp_table->LclkDid[i]));
		if (result) {
			pr_info("Populate LClock Level %d Failed!\n", i);
			return result;
		}
		i++;
	}

	return result;
}

/**
 * Populates single SMC GFXSCLK structure using the provided engine clock
 *
 * @hwmgr: the address of the hardware manager
 * @gfx_clock: the GFX clock to use to populate the structure.
 * @current_gfxclk_level: location in PPTable for the SMC GFXCLK structure.
 * @acg_freq: ACG frequency to return (MHz)
 */
static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
		uint32_t gfx_clock, PllSetting_t *current_gfxclk_level,
		uint32_t *acg_freq)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk;
	struct vega10_hwmgr *data = hwmgr->backend;
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
	uint32_t gfx_max_clock =
			hwmgr->platform_descriptor.overdriveLimit.engineClock;
	uint32_t i = 0;

	/* overdrive mode reads the (possibly user-edited) ODN table instead
	 * of the stock VBIOS dependency table
	 */
	if (hwmgr->od_enabled)
		dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
				&(data->odn_dpm_table.vdd_dep_on_sclk);
	else
		dep_on_sclk = table_info->vdd_dep_on_sclk;

	PP_ASSERT_WITH_CODE(dep_on_sclk,
			"Invalid SOC_VDD-GFX_CLK Dependency Table!",
			return -EINVAL);

	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
		/* user-overclocked: just clamp to the OD ceiling */
		gfx_clock = gfx_clock > gfx_max_clock ?
				gfx_max_clock : gfx_clock;
	else {
		/* otherwise the clock must match a dependency table entry */
		for (i = 0; i < dep_on_sclk->count; i++) {
			if (dep_on_sclk->entries[i].clk == gfx_clock)
				break;
		}
		PP_ASSERT_WITH_CODE(dep_on_sclk->count > i,
				"Cannot find gfx_clk in SOC_VDD-GFX_CLK!",
				return -EINVAL);
	}

	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
			COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK,
			gfx_clock, &dividers),
			"Failed to get GFX Clock settings from VBIOS!",
			return -EINVAL);

	/* Feedback Multiplier: bit 0:8 int, bit 15:12 post_div, bit 31:16 frac */
	current_gfxclk_level->FbMult =
			cpu_to_le32(dividers.ulPll_fb_mult);
	/* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */
	current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
	current_gfxclk_level->SsFbMult =
			cpu_to_le32(dividers.ulPll_ss_fbsmult);
	current_gfxclk_level->SsSlewFrac =
			cpu_to_le16(dividers.usPll_ss_slew_frac);
	current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);

	*acg_freq = gfx_clock / 100; /* clocks are in 10 kHz units; /100 gives MHz */

	return 0;
}

/**
 * Populates single SMC SOCCLK structure using the provided clock.
 *
 * @hwmgr: the address of the hardware manager.
 * @soc_clock: the SOC clock to use to populate the structure.
 * @current_soc_did: DFS divider to pass back to caller
 * @current_vol_index: index of current VDD to pass back to caller
 * return: 0 on success
 */
static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
		uint32_t soc_clock, uint8_t *current_soc_did,
		uint8_t *current_vol_index)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc;
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
	uint32_t i;

	if (hwmgr->od_enabled) {
		/* overdrive: accept the first ODN entry at or above the
		 * requested clock
		 */
		dep_on_soc = (struct phm_ppt_v1_clock_voltage_dependency_table *)
						&data->odn_dpm_table.vdd_dep_on_socclk;
		for (i = 0; i < dep_on_soc->count; i++) {
			if (dep_on_soc->entries[i].clk >= soc_clock)
				break;
		}
	} else {
		/* stock: the clock must match a dependency entry exactly */
		dep_on_soc = table_info->vdd_dep_on_socclk;
		for (i = 0; i < dep_on_soc->count; i++) {
			if (dep_on_soc->entries[i].clk == soc_clock)
				break;
		}
	}

	PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
			"Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
			soc_clock, &dividers),
			"Failed to get SOC Clock settings from VBIOS!",
			return -EINVAL);

	*current_soc_did = (uint8_t)dividers.ulDid;
	*current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);
	return 0;
}

/**
 * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
 *
 * @hwmgr: the address of the hardware manager
 */
static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
	int result = 0;
	uint32_t i, j;

	for (i = 0; i < dpm_table->count; i++) {
		result = vega10_populate_single_gfx_level(hwmgr,
				dpm_table->dpm_levels[i].value,
				&(pp_table->GfxclkLevel[i]),
				&(pp_table->AcgFreqTable[i]));
		if (result)
			return result;
	}

	/* pad the remaining SMC GFXCLK slots with the last real level */
	j = i - 1;
	while (i < NUM_GFXCLK_DPM_LEVELS) {
		result = vega10_populate_single_gfx_level(hwmgr,
				dpm_table->dpm_levels[j].value,
				&(pp_table->GfxclkLevel[i]),
				&(pp_table->AcgFreqTable[i]));
		if (result)
			return result;
		i++;
	}

	pp_table->GfxclkSlewRate =
			cpu_to_le16(table_info->us_gfxclk_slew_rate);

	/* same fill-then-pad scheme for SOCCLK levels */
	dpm_table = &(data->dpm_table.soc_table);
	for (i = 0; i < dpm_table->count; i++) {
		result = vega10_populate_single_soc_level(hwmgr,
				dpm_table->dpm_levels[i].value,
				&(pp_table->SocclkDid[i]),
				&(pp_table->SocDpmVoltageIndex[i]));
		if (result)
			return result;
	}

	j = i - 1;
	while (i < NUM_SOCCLK_DPM_LEVELS) {
		result = vega10_populate_single_soc_level(hwmgr,
				dpm_table->dpm_levels[j].value,
				&(pp_table->SocclkDid[i]),
				&(pp_table->SocDpmVoltageIndex[i]));
		if (result)
			return result;
		i++;
	}

	return result;
}

/* Fill the SMC SocVid table from the VDDC lookup table (ODN copy when
 * overdrive is enabled), padding unused slots with the last computed VID.
 */
static void vega10_populate_vddc_soc_levels(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct phm_ppt_v2_information *table_info = hwmgr->pptable;
	struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;

	uint8_t soc_vid = 0;
	uint32_t i, max_vddc_level;

	if (hwmgr->od_enabled)
		vddc_lookup_table = (struct phm_ppt_v1_voltage_lookup_table *)&data->odn_dpm_table.vddc_lookup_table;
	else
		vddc_lookup_table = table_info->vddc_lookup_table;

	max_vddc_level = vddc_lookup_table->count;
	for (i = 0; i < max_vddc_level; i++) {
		soc_vid = (uint8_t)convert_to_vid(vddc_lookup_table->entries[i].us_vdd);
		pp_table->SocVid[i] = soc_vid;
	}
	/* pad remaining slots with the highest-level VID */
	while (i < MAX_REGULAR_DPM_NUMBER) {
		pp_table->SocVid[i] = soc_vid;
		i++;
	}
}

/*
 * Populates single SMC MCLK (UCLK) structure using the provided clock.
 *
 * @hwmgr: the address of the hardware manager.
 * @mem_clock: the memory clock to use to populate the structure.
 * return: 0 on success.
 */
static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
		uint32_t mem_clock, uint8_t *current_mem_vid,
		PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk;
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
	uint32_t mem_max_clock =
			hwmgr->platform_descriptor.overdriveLimit.memoryClock;
	uint32_t i = 0;

	if (hwmgr->od_enabled)
		dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
					&data->odn_dpm_table.vdd_dep_on_mclk;
	else
		dep_on_mclk = table_info->vdd_dep_on_mclk;

	PP_ASSERT_WITH_CODE(dep_on_mclk,
			"Invalid SOC_VDD-UCLK Dependency Table!",
			return -EINVAL);

	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
		/* user-overclocked: clamp to the OD ceiling */
		mem_clock = mem_clock > mem_max_clock ? mem_max_clock : mem_clock;
	} else {
		/* otherwise the clock must match a dependency entry */
		for (i = 0; i < dep_on_mclk->count; i++) {
			if (dep_on_mclk->entries[i].clk == mem_clock)
				break;
		}
		PP_ASSERT_WITH_CODE(dep_on_mclk->count > i,
				"Cannot find UCLK in SOC_VDD-UCLK Dependency Table!",
				return -EINVAL);
	}

	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
			hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, &dividers),
			"Failed to get UCLK settings from VBIOS!",
			return -1);

	*current_mem_vid =
			(uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd));
	*current_mem_soc_vind =
			(uint8_t)(dep_on_mclk->entries[i].vddInd);
	current_memclk_level->FbMult = cpu_to_le32(dividers.ulPll_fb_mult);
	current_memclk_level->Did = (uint8_t)(dividers.ulDid);

	PP_ASSERT_WITH_CODE(current_memclk_level->Did >= 1,
			"Invalid Divider ID!",
			return -EINVAL);

	return 0;
}

/**
 * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
 *
 * @hwmgr: the address of the hardware manager.
 * return: PP_Result_OK on success.
 */
static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega10_single_dpm_table *dpm_table =
			&(data->dpm_table.mem_table);
	int result = 0;
	uint32_t i, j;

	/* One SMC UCLK slot per trimmed memory DPM state. */
	for (i = 0; i < dpm_table->count; i++) {
		result = vega10_populate_single_memory_level(hwmgr,
				dpm_table->dpm_levels[i].value,
				&(pp_table->MemVid[i]),
				&(pp_table->UclkLevel[i]),
				&(pp_table->MemSocVoltageIndex[i]));
		if (result)
			return result;
	}

	/*
	 * Pad the remaining firmware slots with a copy of the highest
	 * DPM level.
	 * NOTE(review): if dpm_table->count were 0, j would wrap to
	 * 0xffffffff; callers appear to guarantee at least one level —
	 * confirm.
	 */
	j = i - 1;
	while (i < NUM_UCLK_DPM_LEVELS) {
		result = vega10_populate_single_memory_level(hwmgr,
				dpm_table->dpm_levels[j].value,
				&(pp_table->MemVid[i]),
				&(pp_table->UclkLevel[i]),
				&(pp_table->MemSocVoltageIndex[i]));
		if (result)
			return result;
		i++;
	}

	pp_table->NumMemoryChannels = (uint16_t)(data->mem_channels);
	/* Total bus width = per-channel width * decoded channel count. */
	pp_table->MemoryChannelWidth =
			(uint16_t)(HBM_MEMORY_CHANNEL_WIDTH *
					channel_number[data->mem_channels]);

	pp_table->LowestUclkReservedForUlv =
			(uint8_t)(data->lowest_uclk_reserved_for_ulv);

	return result;
}

/*
 * Fill one row of the SMC DisplayClockTable (frequency in 10 kHz units
 * plus VID) for the given display clock domain, using the matching
 * vdd-dependency table from the powerplay tables.
 */
static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr,
		DSPCLK_e disp_clock)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)
			(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
	uint32_t i;
	uint16_t clk = 0, vddc = 0;
	uint8_t vid = 0;

	/* Select the dependency table for the requested display domain. */
	switch (disp_clock) {
	case DSPCLK_DCEFCLK:
		dep_table = table_info->vdd_dep_on_dcefclk;
		break;
	case DSPCLK_DISPCLK:
		dep_table = table_info->vdd_dep_on_dispclk;
		break;
	case DSPCLK_PIXCLK:
		dep_table = table_info->vdd_dep_on_pixclk;
		break;
	case DSPCLK_PHYCLK:
		dep_table = table_info->vdd_dep_on_phyclk;
		break;
	default:
		return -1;
	}

	PP_ASSERT_WITH_CODE(dep_table->count <= NUM_DSPCLK_LEVELS,
			"Number Of Entries Exceeded maximum!",
			return -1);

	for (i = 0; i < dep_table->count; i++) {
		/* clk is stored in 10 kHz units; table is in kHz. */
		clk = (uint16_t)(dep_table->entries[i].clk / 100);
		vddc = table_info->vddc_lookup_table->
				entries[dep_table->entries[i].vddInd].us_vdd;
		vid = (uint8_t)convert_to_vid(vddc);
		pp_table->DisplayClockTable[disp_clock][i].Freq =
				cpu_to_le16(clk);
		pp_table->DisplayClockTable[disp_clock][i].Vid =
				cpu_to_le16(vid);
	}

	/* Pad remaining slots with the last (highest) entry. */
	while (i < NUM_DSPCLK_LEVELS) {
		pp_table->DisplayClockTable[disp_clock][i].Freq =
				cpu_to_le16(clk);
		pp_table->DisplayClockTable[disp_clock][i].Vid =
				cpu_to_le16(vid);
		i++;
	}

	return 0;
}

/* Populate the SMC DisplayClockTable for every display clock domain. */
static int vega10_populate_all_display_clock_levels(struct pp_hwmgr *hwmgr)
{
	uint32_t i;

	for (i = 0; i < DSPCLK_COUNT; i++) {
		PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i),
				"Failed to populate Clock in DisplayClockTable!",
				return -1);
	}

	return 0;
}

/*
 * Look up the PLL divider for one ECLK value and the matching SOC
 * voltage index from the multimedia dependency table.
 */
static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr,
		uint32_t eclock, uint8_t *current_eclk_did,
		uint8_t *current_soc_vol)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
			table_info->mm_dep_table;
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
	uint32_t i;

	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
			eclock, &dividers),
			"Failed to get ECLK clock settings from VBIOS!",
			return -1);

	*current_eclk_did = (uint8_t)dividers.ulDid;

	/*
	 * NOTE(review): no break, so the last matching entry wins; if no
	 * entry matches, *current_soc_vol is left unmodified — confirm
	 * callers pre-initialize it.
	 */
	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].eclk == eclock)
			*current_soc_vol = dep_table->entries[i].vddcInd;
	}

	return 0;
}

/* Populate all SMC VCE (ECLK) levels from the trimmed eclk DPM table. */
static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table);
	int result = -EINVAL;
	uint32_t i, j;

	for (i = 0; i < dpm_table->count; i++) {
		result = vega10_populate_single_eclock_level(hwmgr,
				dpm_table->dpm_levels[i].value,
				&(pp_table->EclkDid[i]),
				&(pp_table->VceDpmVoltageIndex[i]));
		if (result)
			return result;
	}

	/* Pad remaining slots with the highest level (see NOTE above
	 * about j underflow when count == 0). */
	j = i - 1;
	while (i < NUM_VCE_DPM_LEVELS) {
		result = vega10_populate_single_eclock_level(hwmgr,
				dpm_table->dpm_levels[j].value,
				&(pp_table->EclkDid[i]),
				&(pp_table->VceDpmVoltageIndex[i]));
		if (result)
			return result;
		i++;
	}

	return result;
}

/* Get the PLL divider ID for one VCLK value from the VBIOS. */
static int vega10_populate_single_vclock_level(struct pp_hwmgr *hwmgr,
		uint32_t vclock, uint8_t *current_vclk_did)
{
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;

	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
			vclock, &dividers),
			"Failed to get VCLK clock settings from VBIOS!",
			return -EINVAL);

	*current_vclk_did = (uint8_t)dividers.ulDid;

	return 0;
}

/* Get the PLL divider ID for one DCLK value from the VBIOS. */
static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr,
		uint32_t dclock, uint8_t *current_dclk_did)
{
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;

	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
			dclock, &dividers),
			"Failed to get DCLK clock settings from VBIOS!",
			return -EINVAL);

	*current_dclk_did = (uint8_t)dividers.ulDid;

	return 0;
}

/*
 * Populate all SMC UVD levels: VCLK and DCLK divider IDs plus the UVD
 * voltage index taken from the multimedia dependency table.
 */
static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega10_single_dpm_table *vclk_dpm_table =
			&(data->dpm_table.vclk_table);
	struct vega10_single_dpm_table *dclk_dpm_table =
			&(data->dpm_table.dclk_table);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
			table_info->mm_dep_table;
	int result = -EINVAL;
	uint32_t i, j;

	for (i = 0; i < vclk_dpm_table->count; i++) {
		result = vega10_populate_single_vclock_level(hwmgr,
				vclk_dpm_table->dpm_levels[i].value,
				&(pp_table->VclkDid[i]));
		if (result)
			return result;
	}

	/* Pad remaining VCLK slots with the highest level. */
	j = i - 1;
	while (i < NUM_UVD_DPM_LEVELS) {
		result = vega10_populate_single_vclock_level(hwmgr,
				vclk_dpm_table->dpm_levels[j].value,
				&(pp_table->VclkDid[i]));
		if (result)
			return result;
		i++;
	}

	for (i = 0; i < dclk_dpm_table->count; i++) {
		result = vega10_populate_single_dclock_level(hwmgr,
				dclk_dpm_table->dpm_levels[i].value,
				&(pp_table->DclkDid[i]));
		if (result)
			return result;
	}

	/* Pad remaining DCLK slots with the highest level. */
	j = i - 1;
	while (i < NUM_UVD_DPM_LEVELS) {
		result = vega10_populate_single_dclock_level(hwmgr,
				dclk_dpm_table->dpm_levels[j].value,
				&(pp_table->DclkDid[i]));
		if (result)
			return result;
		i++;
	}

	/*
	 * The mm dependency table must line up entry-for-entry with both
	 * DPM tables; any mismatch aborts the whole populate.
	 */
	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vclk ==
				vclk_dpm_table->dpm_levels[i].value &&
			dep_table->entries[i].dclk ==
				dclk_dpm_table->dpm_levels[i].value)
			pp_table->UvdDpmVoltageIndex[i] =
					dep_table->entries[i].vddcInd;
		else
			return -1;
	}

	j = i - 1;
	while (i < NUM_UVD_DPM_LEVELS) {
		pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd;
		i++;
	}

	return 0;
}

/*
 * Copy clock-stretcher enables and VID offsets for each SCLK level
 * from the vdd-on-sclk dependency table into the SMC table.
 */
static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
			table_info->vdd_dep_on_sclk;
	uint32_t i;

	for (i = 0; i < dep_table->count; i++) {
		pp_table->CksEnable[i] = dep_table->entries[i].cks_enable;
		/* Rescale the stored offset between the two VID scales. */
		pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset
				* VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
	}

	return 0;
}

/*
 * Fill the AVFS (adaptive voltage/frequency scaling) parameter section
 * of the SMC table from VBIOS-provided fuse data, with registry-key
 * overrides for the display-clock-to-gfxclk quadratic equations.
 * If the VBIOS query fails, AVFS support is turned off.
 */
static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
			table_info->vdd_dep_on_sclk;
	struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
	int result = 0;
	uint32_t i;

	/* Defaults that disable the voltage window until real data lands. */
	pp_table->MinVoltageVid = (uint8_t)0xff;
	pp_table->MaxVoltageVid = (uint8_t)0;

	if (data->smu_features[GNLD_AVFS].supported) {
		result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
		if (!result) {
			pp_table->MinVoltageVid = (uint8_t)
					convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
			pp_table->MaxVoltageVid = (uint8_t)
					convert_to_vid((uint16_t)(avfs_params.ulMaxVddc));

			pp_table->AConstant[0] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0);
			pp_table->AConstant[1] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1);
			pp_table->AConstant[2] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2);
			pp_table->DC_tol_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
			pp_table->Platform_mean = cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean);
			/*
			 * NOTE(review): Platform_sigma is sourced from
			 * usMeanNsigmaDcTolSigma, same field as DC_tol_sigma
			 * above — looks like it should be a PlatformSigma
			 * field; confirm against the atomfirmware struct.
			 */
			pp_table->Platform_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
			pp_table->PSM_Age_CompFactor = cpu_to_le16(avfs_params.usPsmAgeComfactor);

			/* BTC voltage-droop tables, CKS off and on.
			 * All coefficients use a fixed 20-bit fractional shift. */
			pp_table->BtcGbVdroopTableCksOff.a0 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0);
			pp_table->BtcGbVdroopTableCksOff.a0_shift = 20;
			pp_table->BtcGbVdroopTableCksOff.a1 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1);
			pp_table->BtcGbVdroopTableCksOff.a1_shift = 20;
			pp_table->BtcGbVdroopTableCksOff.a2 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2);
			pp_table->BtcGbVdroopTableCksOff.a2_shift = 20;

			pp_table->OverrideBtcGbCksOn = avfs_params.ucEnableGbVdroopTableCkson;
			pp_table->BtcGbVdroopTableCksOn.a0 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0);
			pp_table->BtcGbVdroopTableCksOn.a0_shift = 20;
			pp_table->BtcGbVdroopTableCksOn.a1 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1);
			pp_table->BtcGbVdroopTableCksOn.a1_shift = 20;
			pp_table->BtcGbVdroopTableCksOn.a2 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2);
			pp_table->BtcGbVdroopTableCksOn.a2_shift = 20;

			/* AVFS fuse gain curves (m1/m2/b quadratic form). */
			pp_table->AvfsGbCksOn.m1 =
					cpu_to_le32(avfs_params.ulGbFuseTableCksonM1);
			pp_table->AvfsGbCksOn.m2 =
					cpu_to_le32(avfs_params.ulGbFuseTableCksonM2);
			pp_table->AvfsGbCksOn.b =
					cpu_to_le32(avfs_params.ulGbFuseTableCksonB);
			pp_table->AvfsGbCksOn.m1_shift = 24;
			pp_table->AvfsGbCksOn.m2_shift = 12;
			pp_table->AvfsGbCksOn.b_shift = 0;

			pp_table->OverrideAvfsGbCksOn =
					avfs_params.ucEnableGbFuseTableCkson;
			pp_table->AvfsGbCksOff.m1 =
					cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1);
			pp_table->AvfsGbCksOff.m2 =
					cpu_to_le32(avfs_params.ulGbFuseTableCksoffM2);
			pp_table->AvfsGbCksOff.b =
					cpu_to_le32(avfs_params.ulGbFuseTableCksoffB);
			pp_table->AvfsGbCksOff.m1_shift = 24;
			pp_table->AvfsGbCksOff.m2_shift = 12;
			pp_table->AvfsGbCksOff.b_shift = 0;

			/* Per-SCLK-level static VID offsets. */
			for (i = 0; i < dep_table->count; i++)
				pp_table->StaticVoltageOffsetVid[i] =
						convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset));

			/*
			 * For each display clock domain: use the registry
			 * quadratic coefficients when both a and b were
			 * overridden, otherwise fall back to the VBIOS
			 * fuse values.
			 */
			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->disp_clk_quad_eqn_a) &&
				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->disp_clk_quad_eqn_b)) {
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
						(int32_t)data->disp_clk_quad_eqn_a;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
						(int32_t)data->disp_clk_quad_eqn_b;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
						(int32_t)data->disp_clk_quad_eqn_c;
			} else {
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
						(int32_t)avfs_params.ulDispclk2GfxclkM1;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
						(int32_t)avfs_params.ulDispclk2GfxclkM2;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
						(int32_t)avfs_params.ulDispclk2GfxclkB;
			}

			pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24;
			pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12;
			pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b_shift = 12;

			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->dcef_clk_quad_eqn_a) &&
				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->dcef_clk_quad_eqn_b)) {
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
						(int32_t)data->dcef_clk_quad_eqn_a;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
						(int32_t)data->dcef_clk_quad_eqn_b;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
						(int32_t)data->dcef_clk_quad_eqn_c;
			} else {
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
						(int32_t)avfs_params.ulDcefclk2GfxclkM1;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
						(int32_t)avfs_params.ulDcefclk2GfxclkM2;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
						(int32_t)avfs_params.ulDcefclk2GfxclkB;
			}

			pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24;
			pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12;
			pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b_shift = 12;

			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->pixel_clk_quad_eqn_a) &&
				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->pixel_clk_quad_eqn_b)) {
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
						(int32_t)data->pixel_clk_quad_eqn_a;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
						(int32_t)data->pixel_clk_quad_eqn_b;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
						(int32_t)data->pixel_clk_quad_eqn_c;
			} else {
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
						(int32_t)avfs_params.ulPixelclk2GfxclkM1;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
						(int32_t)avfs_params.ulPixelclk2GfxclkM2;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
						(int32_t)avfs_params.ulPixelclk2GfxclkB;
			}

			pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24;
			pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12;
			pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b_shift = 12;
			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->phy_clk_quad_eqn_a) &&
				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->phy_clk_quad_eqn_b)) {
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
						(int32_t)data->phy_clk_quad_eqn_a;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
						(int32_t)data->phy_clk_quad_eqn_b;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
						(int32_t)data->phy_clk_quad_eqn_c;
			} else {
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
						(int32_t)avfs_params.ulPhyclk2GfxclkM1;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
						(int32_t)avfs_params.ulPhyclk2GfxclkM2;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
						(int32_t)avfs_params.ulPhyclk2GfxclkB;
			}

			pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
			pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
			pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;

			/*
			 * NOTE(review): the ACG coefficients below are not
			 * passed through cpu_to_le32 like the others —
			 * confirm whether that is intentional.
			 */
			pp_table->AcgBtcGbVdroopTable.a0       = avfs_params.ulAcgGbVdroopTableA0;
			pp_table->AcgBtcGbVdroopTable.a0_shift = 20;
			pp_table->AcgBtcGbVdroopTable.a1       = avfs_params.ulAcgGbVdroopTableA1;
			pp_table->AcgBtcGbVdroopTable.a1_shift = 20;
			pp_table->AcgBtcGbVdroopTable.a2       = avfs_params.ulAcgGbVdroopTableA2;
			pp_table->AcgBtcGbVdroopTable.a2_shift = 20;

			pp_table->AcgAvfsGb.m1                   = avfs_params.ulAcgGbFuseTableM1;
			pp_table->AcgAvfsGb.m2                   = avfs_params.ulAcgGbFuseTableM2;
			pp_table->AcgAvfsGb.b                    = avfs_params.ulAcgGbFuseTableB;
			pp_table->AcgAvfsGb.m1_shift             = 24;
			pp_table->AcgAvfsGb.m2_shift             = 12;
			pp_table->AcgAvfsGb.b_shift              = 0;

		} else {
			/* No fuse data from VBIOS: AVFS cannot run. */
			data->smu_features[GNLD_AVFS].supported = false;
		}
	}

	return 0;
}

/*
 * Run the ACG (adaptive clock generator) bring-up sequence: enable the
 * DPM prefetcher, initialize ACG, run the BTC calibration, then start
 * closed- or open-loop mode per acg_loop_state and enable the feature.
 */
static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint32_t agc_btc_response;

	if (data->smu_features[GNLD_ACG].supported) {
		if (0 == vega10_enable_smc_features(hwmgr, true,
					data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
			data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;

		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL);

		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response);

		/* BTC response of 1 means calibration passed. */
		if (1 == agc_btc_response) {
			if (1 == data->acg_loop_state)
				smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop, NULL);
			else if (2 == data->acg_loop_state)
				smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop, NULL);
			if (0 == vega10_enable_smc_features(hwmgr, true,
				data->smu_features[GNLD_ACG].smu_feature_bitmap))
					data->smu_features[GNLD_ACG].enabled = true;
		} else {
			pr_info("[ACG_Enable] ACG BTC Returned Failed Status!\n");
			data->smu_features[GNLD_ACG].enabled = false;
		}
	}

	return 0;
}

/* Disable the ACG feature in firmware if it is currently enabled. */
static int vega10_acg_disable(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->smu_features[GNLD_ACG].supported &&
	    data->smu_features[GNLD_ACG].enabled)
		if (!vega10_enable_smc_features(hwmgr, false,
			data->smu_features[GNLD_ACG].smu_feature_bitmap))
			data->smu_features[GNLD_ACG].enabled = false;

	return 0;
}

/*
 * Copy VR-hot and AC/DC-switch GPIO pin/polarity settings from the
 * VBIOS into the SMC table, zeroing them when the platform cap or
 * registry key disables the feature.
 */
static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct pp_atomfwctrl_gpio_parameters gpio_params = {0};
	int result;

	result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params);
	if (!result) {
		if (PP_CAP(PHM_PlatformCaps_RegulatorHot) &&
				data->registry_data.regulator_hot_gpio_support) {
			pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio;
			pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity;
			pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio;
			pp_table->VR1HotPolarity = gpio_params.ucVR1HotPolarity;
		} else {
			pp_table->VR0HotGpio = 0;
			pp_table->VR0HotPolarity = 0;
			pp_table->VR1HotGpio = 0;
			pp_table->VR1HotPolarity = 0;
		}

		if (PP_CAP(PHM_PlatformCaps_AutomaticDCTransition) &&
				data->registry_data.ac_dc_switch_gpio_support) {
			pp_table->AcDcGpio = gpio_params.ucAcDcGpio;
			pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity;
		} else {
			pp_table->AcDcGpio = 0;
			pp_table->AcDcPolarity = 0;
		}
	}

	return result;
}

/*
 * Enable or disable the AVFS firmware feature, tracking the result in
 * the driver-side enabled flag. No-op when already in the target state.
 */
static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->smu_features[GNLD_AVFS].supported) {
		/* Already enabled or disabled */
		if (!(enable ^ data->smu_features[GNLD_AVFS].enabled))
			return 0;

		if (enable) {
			PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
					true,
					data->smu_features[GNLD_AVFS].smu_feature_bitmap),
					"[avfs_control] Attempt to Enable AVFS feature Failed!",
					return -1);
			data->smu_features[GNLD_AVFS].enabled = true;
		} else {
			PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
					false,
					data->smu_features[GNLD_AVFS].smu_feature_bitmap),
					"[avfs_control] Attempt to Disable AVFS feature Failed!",
					return -1);
			data->smu_features[GNLD_AVFS].enabled = false;
		}
	}

	return 0;
}

/*
 * React to pending DPM table changes: a VDDC override disables AVFS,
 * any other pending change cycles AVFS off/on, and no pending change
 * (re-)enables it.
 */
static int vega10_update_avfs(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
		vega10_avfs_enable(hwmgr, false);
	} else if (data->need_update_dpm_table) {
		vega10_avfs_enable(hwmgr, false);
		vega10_avfs_enable(hwmgr, true);
	} else {
		vega10_avfs_enable(hwmgr, true);
	}

	return 0;
}

/*
 * Look up per-chip AVFS fuse overrides keyed by the chip serial number
 * and, if found, upload the override table to the SMC.
 */
static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
{
	int result = 0;

	uint64_t serial_number = 0;
	uint32_t top32, bottom32;
	struct phm_fuses_default fuse;

	struct vega10_hwmgr *data = hwmgr->backend;
	AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);

	/*
	 * NOTE(review): bottom32 occupies the high word here, which looks
	 * inverted, but the override table must be keyed the same way —
	 * confirm against pp_override_get_default_fuse_value()'s table.
	 */
	serial_number = ((uint64_t)bottom32 << 32) | top32;

	if (pp_override_get_default_fuse_value(serial_number, &fuse) == 0) {
		avfs_fuse_table->VFT0_b  = fuse.VFT0_b;
		avfs_fuse_table->VFT0_m1 = fuse.VFT0_m1;
		avfs_fuse_table->VFT0_m2 = fuse.VFT0_m2;
		avfs_fuse_table->VFT1_b  = fuse.VFT1_b;
		avfs_fuse_table->VFT1_m1 = fuse.VFT1_m1;
		avfs_fuse_table->VFT1_m2 = fuse.VFT1_m2;
		avfs_fuse_table->VFT2_b  = fuse.VFT2_b;
		avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1;
		avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2;
		result = smum_smc_table_manager(hwmgr, (uint8_t *)avfs_fuse_table,
						AVFSFUSETABLE, false);
		PP_ASSERT_WITH_CODE(!result,
			"Failed to upload FuseOVerride!",
			);
	}

	return result;
}

/*
 * Compare the overdrive (ODN) dependency tables against the stock ones
 * and flag which DPM tables (plus VDDC) need to be re-uploaded.
 */
static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_ppt_v2_information *table_info = hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
	uint32_t i;

	dep_table = table_info->vdd_dep_on_mclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);

	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
			return;
		}
	}

	dep_table = table_info->vdd_dep_on_sclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
			return;
		}
	}
}

/**
 * Initializes the SMC table and uploads it
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * return: always 0
 */
static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct pp_atomfwctrl_voltage_table voltage_table;
	struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
	struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);

	result = vega10_setup_default_dpm_tables(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to setup default DPM tables!",
			return result);

	/* VF (virtual function) path stops after the default tables. */
	if (!hwmgr->not_vf)
		return 0;

	/* initialize ODN table */
	if (hwmgr->od_enabled) {
		if (odn_table->max_vddc) {
			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
			vega10_check_dpm_table_updated(hwmgr);
		} else {
			vega10_odn_initial_default_setting(hwmgr);
		}
	}

	pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
			VOLTAGE_OBJ_SVID2, &voltage_table);
	pp_table->MaxVidStep = voltage_table.max_vid_step;

	/* Per-domain voltage control modes from the powerplay table. */
	pp_table->GfxDpmVoltageMode =
			(uint8_t)(table_info->uc_gfx_dpm_voltage_mode);
	pp_table->SocDpmVoltageMode =
			(uint8_t)(table_info->uc_soc_dpm_voltage_mode);
	pp_table->UclkDpmVoltageMode =
			(uint8_t)(table_info->uc_uclk_dpm_voltage_mode);
	pp_table->UvdDpmVoltageMode =
			(uint8_t)(table_info->uc_uvd_dpm_voltage_mode);
	pp_table->VceDpmVoltageMode =
			(uint8_t)(table_info->uc_vce_dpm_voltage_mode);
	pp_table->Mp0DpmVoltageMode =
			(uint8_t)(table_info->uc_mp0_dpm_voltage_mode);

	pp_table->DisplayDpmVoltageMode =
			(uint8_t)(table_info->uc_dcef_dpm_voltage_mode);

	data->vddc_voltage_table.psi0_enable = voltage_table.psi0_enable;
	data->vddc_voltage_table.psi1_enable = voltage_table.psi1_enable;

	if (data->registry_data.ulv_support &&
			table_info->us_ulv_voltage_offset) {
		result = vega10_populate_ulv_state(hwmgr);
		PP_ASSERT_WITH_CODE(!result,
				"Failed to initialize ULV state!",
				return result);
	}

	/* Populate each SMC table section; abort on the first failure. */
	result = vega10_populate_smc_link_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize Link Level!",
			return result);

	result = vega10_populate_all_graphic_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize Graphics Level!",
			return result);

	result = vega10_populate_all_memory_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize Memory Level!",
			return result);

	vega10_populate_vddc_soc_levels(hwmgr);

	result = vega10_populate_all_display_clock_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize Display Level!",
			return result);

	result = vega10_populate_smc_vce_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize VCE Level!",
			return result);

	result = vega10_populate_smc_uvd_levels(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize UVD Level!",
			return result);

	if (data->registry_data.clock_stretcher_support) {
		result = vega10_populate_clock_stretcher_table(hwmgr);
		PP_ASSERT_WITH_CODE(!result,
				"Failed to populate Clock Stretcher Table!",
				return result);
	}

	/* Record VBIOS boot-up voltages/clocks and push the SOC floor. */
	result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
	if (!result) {
		data->vbios_boot_state.vddc     = boot_up_values.usVddc;
		data->vbios_boot_state.vddci    = boot_up_values.usVddci;
		data->vbios_boot_state.mvddc    = boot_up_values.usMvddc;
		data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
		data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
		pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
				SMU9_SYSPLL0_SOCCLK_ID, 0, &boot_up_values.ulSocClk);

		pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
				SMU9_SYSPLL0_DCEFCLK_ID, 0, &boot_up_values.ulDCEFClk);

		data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
		data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
		if (0 != boot_up_values.usVddc) {
			/* Voltage parameter is in units of 0.25 mV steps
			 * (usVddc * 4) per the SMC message contract here. */
			smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetFloorSocVoltage,
					(boot_up_values.usVddc * 4),
					NULL);
			data->vbios_boot_state.bsoc_vddc_lock = true;
		} else {
			data->vbios_boot_state.bsoc_vddc_lock = false;
		}
		/* Deep-sleep DCEF floor is given in 100 kHz units. */
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetMinDeepSleepDcefclk,
			(uint32_t)(data->vbios_boot_state.dcef_clock / 100),
				NULL);
	}

	result = vega10_populate_avfs_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize AVFS Parameters!",
			return result);

	result = vega10_populate_gpio_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to initialize GPIO Parameters!",
			return result);

	pp_table->GfxclkAverageAlpha = (uint8_t)
			(data->gfxclk_average_alpha);
	pp_table->SocclkAverageAlpha = (uint8_t)
			(data->socclk_average_alpha);
	pp_table->UclkAverageAlpha = (uint8_t)
			(data->uclk_average_alpha);
	pp_table->GfxActivityAverageAlpha = (uint8_t)
			(data->gfx_activity_average_alpha);

	vega10_populate_and_upload_avfs_fuse_override(hwmgr);

	/* Upload the fully-populated PPTable to the SMC. */
	result = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table, PPTABLE, false);

	PP_ASSERT_WITH_CODE(!result,
			"Failed to upload PPtable!", return result);

	result = vega10_avfs_enable(hwmgr, true);
	PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
					return result);
	vega10_acg_enable(hwmgr);

	return 0;
}

/* Enable the THERMAL firmware feature (logs if already enabled). */
static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->smu_features[GNLD_THERMAL].supported) {
		if (data->smu_features[GNLD_THERMAL].enabled)
			pr_info("THERMAL Feature Already enabled!");

		PP_ASSERT_WITH_CODE(
				!vega10_enable_smc_features(hwmgr,
				true,
				data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
				"Enable THERMAL Feature Failed!",
				return -1);
		data->smu_features[GNLD_THERMAL].enabled = true;
	}

	return 0;
}

/* Disable the THERMAL firmware feature (logs if already disabled). */
static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->smu_features[GNLD_THERMAL].supported) {
		if (!data->smu_features[GNLD_THERMAL].enabled)
			pr_info("THERMAL Feature Already disabled!");

		PP_ASSERT_WITH_CODE(
				!vega10_enable_smc_features(hwmgr,
				false,
				data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
				"disable THERMAL Feature Failed!",
				return -1);
		data->smu_features[GNLD_THERMAL].enabled = false;
	}

	return 0;
}

/*
 * Enable the VR-hot feature: prefer VR0HOT when supported, otherwise
 * fall back to VR1HOT.
 * NOTE(review): the VR1HOT branch's error string still says "VR0" —
 * looks like a copy-paste in the log text; the logic itself uses VR1.
 */
static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (PP_CAP(PHM_PlatformCaps_RegulatorHot)) {
		if (data->smu_features[GNLD_VR0HOT].supported) {
			PP_ASSERT_WITH_CODE(
					!vega10_enable_smc_features(hwmgr,
					true,
					data->smu_features[GNLD_VR0HOT].smu_feature_bitmap),
					"Attempt to Enable VR0 Hot feature Failed!",
					return -1);
			data->smu_features[GNLD_VR0HOT].enabled = true;
		} else {
			if (data->smu_features[GNLD_VR1HOT].supported) {
				PP_ASSERT_WITH_CODE(
						!vega10_enable_smc_features(hwmgr,
						true,
						data->smu_features[GNLD_VR1HOT].smu_feature_bitmap),
						"Attempt to Enable VR0 Hot feature Failed!",
						return -1);
				data->smu_features[GNLD_VR1HOT].enabled = true;
			}
		}
	}
	return 0;
}

/* Enable the ULV (ultra-low voltage) firmware feature if supported. */
static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->registry_data.ulv_support) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				true, data->smu_features[GNLD_ULV].smu_feature_bitmap),
				"Enable ULV Feature Failed!",
				return -1);
		data->smu_features[GNLD_ULV].enabled = true;
	}

	return 0;
}

/* Disable the ULV firmware feature if supported. */
static int vega10_disable_ulv(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->registry_data.ulv_support) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				false, data->smu_features[GNLD_ULV].smu_feature_bitmap),
				"disable ULV Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_ULV].enabled = false;
	}

	return 0;
}

/*
 * Enable all supported deep-sleep clock features (GFXCLK, SOCCLK,
 * LCLK, DCEFCLK), stopping at the first failure.
 */
static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->smu_features[GNLD_DS_GFXCLK].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
				"Attempt to Enable DS_GFXCLK Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_DS_GFXCLK].enabled = true;
	}

	if (data->smu_features[GNLD_DS_SOCCLK].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
				"Attempt to Enable DS_SOCCLK Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_DS_SOCCLK].enabled = true;
	}

	if (data->smu_features[GNLD_DS_LCLK].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
				"Attempt to Enable DS_LCLK Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_DS_LCLK].enabled = true;
	}

	if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				true, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
				"Attempt to Enable DS_DCEFCLK Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_DS_DCEFCLK].enabled = true;
	}

	return 0;
}

/*
 * Disable all supported deep-sleep clock features.
 * NOTE(review): the SOCCLK error string reads "DS_ Feature" — looks
 * like a truncated copy-paste in the log text only.
 */
static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->smu_features[GNLD_DS_GFXCLK].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				false, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
				"Attempt to disable DS_GFXCLK Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_DS_GFXCLK].enabled = false;
	}

	if (data->smu_features[GNLD_DS_SOCCLK].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				false, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
				"Attempt to disable DS_ Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_DS_SOCCLK].enabled = false;
	}

	if (data->smu_features[GNLD_DS_LCLK].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				false, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
				"Attempt to disable DS_LCLK Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_DS_LCLK].enabled = false;
	}

	if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				false, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
				"Attempt to disable DS_DCEFCLK Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_DS_DCEFCLK].enabled = false;
	}

	return 0;
}

/*
 * Disable the DPM features selected by @bitmap (plus the LED display
 * feature) in a single batched SMC request.
 */
static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint32_t i, feature_mask = 0;

	if (!hwmgr->not_vf)
		return 0;

	if(data->smu_features[GNLD_LED_DISPLAY].supported == true){
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				false, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
		"Attempt to disable LED DPM feature failed!", return -EINVAL);
		data->smu_features[GNLD_LED_DISPLAY].enabled = false;
	}

	/* Collect all currently-enabled selected features into one mask. */
	for (i = 0; i < GNLD_DPM_MAX; i++) {
		if (data->smu_features[i].smu_feature_bitmap & bitmap) {
			if (data->smu_features[i].supported) {
				if (data->smu_features[i].enabled) {
					feature_mask |= data->smu_features[i].
							smu_feature_bitmap;
					data->smu_features[i].enabled = false;
				}
			}
		}
	}

	vega10_enable_smc_features(hwmgr, false, feature_mask);

	return 0;
}

/**
 * Tell SMC to enable the supported DPMs.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @bitmap: bitmap of the features to enable.
 * return: 0 when at least one DPM is successfully enabled.
 */
static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint32_t i, feature_mask = 0;

	/* Batch all selected, supported, not-yet-enabled features. */
	for (i = 0; i < GNLD_DPM_MAX; i++) {
		if (data->smu_features[i].smu_feature_bitmap & bitmap) {
			if (data->smu_features[i].supported) {
				if (!data->smu_features[i].enabled) {
					feature_mask |= data->smu_features[i].
							smu_feature_bitmap;
					data->smu_features[i].enabled = true;
				}
			}
		}
	}

	/* On failure, roll back the driver-side enabled flags. */
	if (vega10_enable_smc_features(hwmgr,
			true, feature_mask)) {
		for (i = 0; i < GNLD_DPM_MAX; i++) {
			if (data->smu_features[i].smu_feature_bitmap &
					feature_mask)
				data->smu_features[i].enabled = false;
		}
	}

	if(data->smu_features[GNLD_LED_DISPLAY].supported == true){
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
		"Attempt to Enable LED DPM feature Failed!", return -EINVAL);
		data->smu_features[GNLD_LED_DISPLAY].enabled = true;
	}

	/* Release the boot-time SOC voltage floor once DPM is running. */
	if (data->vbios_boot_state.bsoc_vddc_lock) {
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetFloorSocVoltage, 0,
				NULL);
		data->vbios_boot_state.bsoc_vddc_lock = false;
	}

	if (PP_CAP(PHM_PlatformCaps_Falcon_QuickTransition)) {
		if (data->smu_features[GNLD_ACDC].supported) {
			/* NOTE(review): error string says DS_GFXCLK but this
			 * enables the ACDC feature — log-text copy-paste. */
			PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
					true, data->smu_features[GNLD_ACDC].smu_feature_bitmap),
					"Attempt to Enable DS_GFXCLK Feature Failed!",
					return -1);
			data->smu_features[GNLD_ACDC].enabled = true;
		}
	}

	return 0;
}

/*
 * NOTE(review): this definition continues beyond the end of this
 * source chunk; the remainder is not visible here.
 */
static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool enable)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->smu_features[GNLD_PCC_LIMIT].supported) {
		if (enable == data->smu_features[GNLD_PCC_LIMIT].enabled)
			pr_info("GNLD_PCC_LIMIT has been %s \n", enable ?
"enabled" : "disabled"); 2932 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, 2933 enable, data->smu_features[GNLD_PCC_LIMIT].smu_feature_bitmap), 2934 "Attempt to Enable PCC Limit feature Failed!", 2935 return -EINVAL); 2936 data->smu_features[GNLD_PCC_LIMIT].enabled = enable; 2937 } 2938 2939 return 0; 2940 } 2941 2942 static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) 2943 { 2944 struct vega10_hwmgr *data = hwmgr->backend; 2945 int tmp_result, result = 0; 2946 2947 if (hwmgr->not_vf) { 2948 vega10_enable_disable_PCC_limit_feature(hwmgr, true); 2949 2950 smum_send_msg_to_smc_with_parameter(hwmgr, 2951 PPSMC_MSG_ConfigureTelemetry, data->config_telemetry, 2952 NULL); 2953 2954 tmp_result = vega10_construct_voltage_tables(hwmgr); 2955 PP_ASSERT_WITH_CODE(!tmp_result, 2956 "Failed to construct voltage tables!", 2957 result = tmp_result); 2958 } 2959 2960 if (hwmgr->not_vf || hwmgr->pp_one_vf) { 2961 tmp_result = vega10_init_smc_table(hwmgr); 2962 PP_ASSERT_WITH_CODE(!tmp_result, 2963 "Failed to initialize SMC table!", 2964 result = tmp_result); 2965 } 2966 2967 if (hwmgr->not_vf) { 2968 if (PP_CAP(PHM_PlatformCaps_ThermalController)) { 2969 tmp_result = vega10_enable_thermal_protection(hwmgr); 2970 PP_ASSERT_WITH_CODE(!tmp_result, 2971 "Failed to enable thermal protection!", 2972 result = tmp_result); 2973 } 2974 2975 tmp_result = vega10_enable_vrhot_feature(hwmgr); 2976 PP_ASSERT_WITH_CODE(!tmp_result, 2977 "Failed to enable VR hot feature!", 2978 result = tmp_result); 2979 2980 tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr); 2981 PP_ASSERT_WITH_CODE(!tmp_result, 2982 "Failed to enable deep sleep master switch!", 2983 result = tmp_result); 2984 } 2985 2986 if (hwmgr->not_vf) { 2987 tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES); 2988 PP_ASSERT_WITH_CODE(!tmp_result, 2989 "Failed to start DPM!", result = tmp_result); 2990 } 2991 2992 if (hwmgr->not_vf) { 2993 /* enable didt, do not abort if failed didt */ 2994 tmp_result = 
vega10_enable_didt_config(hwmgr);
		PP_ASSERT(!tmp_result,
				"Failed to enable didt config!");
	}

	tmp_result = vega10_enable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to enable power containment!",
			result = tmp_result);

	if (hwmgr->not_vf) {
		tmp_result = vega10_power_control_set_level(hwmgr);
		PP_ASSERT_WITH_CODE(!tmp_result,
				"Failed to power control set level!",
				result = tmp_result);

		tmp_result = vega10_enable_ulv(hwmgr);
		PP_ASSERT_WITH_CODE(!tmp_result,
				"Failed to enable ULV!",
				result = tmp_result);
	}

	return result;
}

/* Size in bytes of the vega10 hardware power-state structure. */
static int vega10_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct vega10_power_state);
}

/*
 * Callback for vega10_get_powerplay_table_entry(): translate one
 * ATOM_Vega10_State entry of the VBIOS powerplay table into a
 * struct vega10_power_state with two performance levels (low, high).
 * Offsets inside @pp_table are little-endian per the ATOM format.
 */
static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
		void *state, struct pp_power_state *power_state,
		void *pp_table, uint32_t classification_flag)
{
	ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_V2;
	struct vega10_power_state *vega10_power_state =
			cast_phw_vega10_power_state(&(power_state->hardware));
	struct vega10_performance_level *performance_level;
	ATOM_Vega10_State *state_entry = (ATOM_Vega10_State *)state;
	ATOM_Vega10_POWERPLAYTABLE *powerplay_table =
			(ATOM_Vega10_POWERPLAYTABLE *)pp_table;
	ATOM_Vega10_SOCCLK_Dependency_Table *socclk_dep_table =
			(ATOM_Vega10_SOCCLK_Dependency_Table *)
			(((unsigned long)powerplay_table) +
			le16_to_cpu(powerplay_table->usSocclkDependencyTableOffset));
	ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
			(ATOM_Vega10_GFXCLK_Dependency_Table *)
			(((unsigned long)powerplay_table) +
			le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
	ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table =
			(ATOM_Vega10_MCLK_Dependency_Table *)
			(((unsigned long)powerplay_table) +
			le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));


	/* The following fields are not initialized here:
	 * id orderedList allStatesList
	 */
	power_state->classification.ui_label =
			(le16_to_cpu(state_entry->usClassification) &
			ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
			ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
	power_state->classification.flags = classification_flag;
	/* NOTE: There is a classification2 flag in BIOS
	 * that is not being used right now
	 */
	power_state->classification.temporary_state = false;
	power_state->classification.to_be_deleted = false;

	power_state->validation.disallowOnDC =
			((le32_to_cpu(state_entry->ulCapsAndSettings) &
					ATOM_Vega10_DISALLOW_ON_DC) != 0);

	power_state->display.disableFrameModulation = false;
	power_state->display.limitRefreshrate = false;
	power_state->display.enableVariBright =
			((le32_to_cpu(state_entry->ulCapsAndSettings) &
					ATOM_Vega10_ENABLE_VARIBRIGHT) != 0);

	power_state->validation.supportedPowerLevels = 0;
	power_state->uvd_clocks.VCLK = 0;
	power_state->uvd_clocks.DCLK = 0;
	power_state->temperatures.min = 0;
	power_state->temperatures.max = 0;

	/* First (low) performance level. */
	performance_level = &(vega10_power_state->performance_levels
			[vega10_power_state->performance_level_count++]);

	/* Checked after the first increment: verifies there is still room
	 * for the second (high) level appended below.
	 */
	PP_ASSERT_WITH_CODE(
			(vega10_power_state->performance_level_count <
					NUM_GFXCLK_DPM_LEVELS),
			"Performance levels exceeds SMC limit!",
			return -1);

	PP_ASSERT_WITH_CODE(
			(vega10_power_state->performance_level_count <=
					hwmgr->platform_descriptor.
					hardwareActivityPerformanceLevels),
			"Performance levels exceeds Driver limit!",
			return -1);

	/* Performance levels are arranged from low to high. */
	performance_level->soc_clock = socclk_dep_table->entries
			[state_entry->ucSocClockIndexLow].ulClk;
	performance_level->gfx_clock = gfxclk_dep_table->entries
			[state_entry->ucGfxClockIndexLow].ulClk;
	performance_level->mem_clock = mclk_dep_table->entries
			[state_entry->ucMemClockIndexLow].ulMemClk;

	/* Second (high) performance level. */
	performance_level = &(vega10_power_state->performance_levels
			[vega10_power_state->performance_level_count++]);
	performance_level->soc_clock = socclk_dep_table->entries
			[state_entry->ucSocClockIndexHigh].ulClk;
	if (gfxclk_dep_table->ucRevId == 0) {
		/* under vega10 pp one vf mode, the gfx clk dpm need be lower
		 * to level-4 due to the limited 110w-power
		 */
		if (hwmgr->pp_one_vf && (state_entry->ucGfxClockIndexHigh > 0))
			performance_level->gfx_clock =
					gfxclk_dep_table->entries[4].ulClk;
		else
			performance_level->gfx_clock = gfxclk_dep_table->entries
					[state_entry->ucGfxClockIndexHigh].ulClk;
	} else if (gfxclk_dep_table->ucRevId == 1) {
		/* Rev 1 tables use the V2 record layout. */
		patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
		if (hwmgr->pp_one_vf && (state_entry->ucGfxClockIndexHigh > 0))
			performance_level->gfx_clock = patom_record_V2[4].ulClk;
		else
			performance_level->gfx_clock =
					patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk;
	}

	performance_level->mem_clock = mclk_dep_table->entries
			[state_entry->ucMemClockIndexHigh].ulMemClk;
	return 0;
}

/*
 * Fetch powerplay-table entry @entry_index from the VBIOS into @state,
 * using the callback above for the hardware-specific part.
 */
static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
		unsigned long entry_index, struct pp_power_state *state)
{
	int result;
	struct vega10_power_state *ps;

	state->hardware.magic = PhwVega10_Magic;

	ps = cast_phw_vega10_power_state(&state->hardware);

	result = vega10_get_powerplay_table_entry(hwmgr, entry_index, state,
			vega10_get_pp_table_entry_callback_func);
	if (result)
		return result;

	/*
	 * This is the earliest time we have all the dependency table
	 * and the VBIOS boot state
	 */
	/* set DC compatible flag if this state supports DC */
	if (!state->validation.disallowOnDC)
		ps->dc_compatible = true;

	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;

	return 0;
}

/* Nothing to patch for vega10; the boot state is used as-is. */
static int vega10_patch_boot_state(struct pp_hwmgr *hwmgr,
		struct pp_hw_power_state *hw_ps)
{
	return 0;
}

/*
 * Clamp and adjust the requested power state against platform limits:
 * DC power caps, display minimum clocks, stable-pstate constraints and
 * the various mclk-switching restrictions.
 */
static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
			struct pp_power_state *request_ps,
			const struct pp_power_state *current_ps)
{
	struct amdgpu_device *adev = hwmgr->adev;
	struct vega10_power_state *vega10_ps =
			cast_phw_vega10_power_state(&request_ps->hardware);
	uint32_t sclk;
	uint32_t mclk;
	struct PP_Clocks minimum_clocks = {0};
	bool disable_mclk_switching;
	bool disable_mclk_switching_for_frame_lock;
	bool disable_mclk_switching_for_vr;
	bool force_mclk_high;
	const struct phm_clock_and_voltage_limits *max_limits;
	uint32_t i;
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	int32_t count;
	uint32_t stable_pstate_sclk_dpm_percentage;
	uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
	uint32_t latency;

	data->battery_state = (PP_StateUILabel_Battery ==
			request_ps->classification.ui_label);

	/* NOTE(review): "VI" looks like a copy-paste from the VI hwmgr —
	 * presumably should read "Vega10"; message text left unchanged here.
	 */
	if (vega10_ps->performance_level_count != 2)
		pr_info("VI should always have 2 performance levels");

	max_limits = adev->pm.ac_power ?
			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
			&(hwmgr->dyn_state.max_clock_voltage_on_dc);

	/* Cap clock DPM tables at DC MAX if it is in DC. */
	if (!adev->pm.ac_power) {
		for (i = 0; i < vega10_ps->performance_level_count; i++) {
			if (vega10_ps->performance_levels[i].mem_clock >
				max_limits->mclk)
				vega10_ps->performance_levels[i].mem_clock =
						max_limits->mclk;
			if (vega10_ps->performance_levels[i].gfx_clock >
				max_limits->sclk)
				vega10_ps->performance_levels[i].gfx_clock =
						max_limits->sclk;
		}
	}

	/* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
	minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
	minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;

	if (PP_CAP(PHM_PlatformCaps_StablePState)) {
		stable_pstate_sclk_dpm_percentage =
			data->registry_data.stable_pstate_sclk_dpm_percentage;
		PP_ASSERT_WITH_CODE(
			data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
			data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
			"percent sclk value must range from 1% to 100%, setting default value",
			stable_pstate_sclk_dpm_percentage = 75);

		max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
		stable_pstate_sclk = (max_limits->sclk *
				stable_pstate_sclk_dpm_percentage) / 100;

		/* Snap down to the closest sclk DPM entry <= the target. */
		for (count = table_info->vdd_dep_on_sclk->count - 1;
				count >= 0; count--) {
			if (stable_pstate_sclk >=
					table_info->vdd_dep_on_sclk->entries[count].clk) {
				stable_pstate_sclk =
						table_info->vdd_dep_on_sclk->entries[count].clk;
				break;
			}
		}

		/* Target below the lowest entry: fall back to entry 0. */
		if (count < 0)
			stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;

		stable_pstate_mclk = max_limits->mclk;

		minimum_clocks.engineClock = stable_pstate_sclk;
		minimum_clocks.memoryClock = stable_pstate_mclk;
	}

	disable_mclk_switching_for_frame_lock =
		PP_CAP(PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
	disable_mclk_switching_for_vr =
		PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
	force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);

	/* Mclk switching is only blocked when displays are active and either
	 * multiple unsynchronized monitors or an explicit platform cap asks
	 * for it.
	 */
	if (hwmgr->display_config->num_display == 0)
		disable_mclk_switching = false;
	else
		disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
					!hwmgr->display_config->multi_monitor_in_sync) ||
			disable_mclk_switching_for_frame_lock ||
			disable_mclk_switching_for_vr ||
			force_mclk_high;

	sclk = vega10_ps->performance_levels[0].gfx_clock;
	mclk = vega10_ps->performance_levels[0].mem_clock;

	if (sclk < minimum_clocks.engineClock)
		sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
				max_limits->sclk : minimum_clocks.engineClock;

	if (mclk < minimum_clocks.memoryClock)
		mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
				max_limits->mclk : minimum_clocks.memoryClock;

	vega10_ps->performance_levels[0].gfx_clock = sclk;
	vega10_ps->performance_levels[0].mem_clock = mclk;

	/* Keep level 0 <= level 1 after the clamping above. */
	if (vega10_ps->performance_levels[1].gfx_clock <
			vega10_ps->performance_levels[0].gfx_clock)
		vega10_ps->performance_levels[0].gfx_clock =
				vega10_ps->performance_levels[1].gfx_clock;

	if (disable_mclk_switching) {
		/* Set Mclk the max of level 0 and level 1 */
		if (mclk < vega10_ps->performance_levels[1].mem_clock)
			mclk = vega10_ps->performance_levels[1].mem_clock;

		/* Find the lowest MCLK frequency that is within
		 * the tolerable latency defined in DAL
		 */
		latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
		for (i = 0; i < data->mclk_latency_table.count; i++) {
			if ((data->mclk_latency_table.entries[i].latency <= latency) &&
				(data->mclk_latency_table.entries[i].frequency >=
						vega10_ps->performance_levels[0].mem_clock) &&
				(data->mclk_latency_table.entries[i].frequency <=
						vega10_ps->performance_levels[1].mem_clock))
				mclk = data->mclk_latency_table.entries[i].frequency;
		}
		vega10_ps->performance_levels[0].mem_clock = mclk;
	} else {
		if (vega10_ps->performance_levels[1].mem_clock <
				vega10_ps->performance_levels[0].mem_clock)
			vega10_ps->performance_levels[0].mem_clock =
					vega10_ps->performance_levels[1].mem_clock;
	}

	/* Stable pstate pins every level to the computed clocks. */
	if (PP_CAP(PHM_PlatformCaps_StablePState)) {
		for (i = 0; i < vega10_ps->performance_level_count; i++) {
			vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
			vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
		}
	}

	return 0;
}

/*
 * Compare the new power state's top sclk/mclk against the cached DPM
 * tables and set the need_update_dpm_table flags accordingly.
 */
static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct vega10_power_state *vega10_ps =
			cast_const_phw_vega10_power_state(states->pnew_state);
	struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
	uint32_t sclk = vega10_ps->performance_levels
			[vega10_ps->performance_level_count - 1].gfx_clock;
	struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
	uint32_t mclk = vega10_ps->performance_levels
			[vega10_ps->performance_level_count - 1].mem_clock;
	uint32_t i;

	for (i = 0; i < sclk_table->count; i++) {
		if (sclk == sclk_table->dpm_levels[i].value)
			break;
	}

	/* No exact match: treat as overdrive and stretch the top level. */
	if (i >= sclk_table->count) {
		if (sclk > sclk_table->dpm_levels[i-1].value) {
			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
			sclk_table->dpm_levels[i-1].value = sclk;
		}
	}

	for (i = 0; i < mclk_table->count; i++) {
		if (mclk == mclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= mclk_table->count) {
		if (mclk > mclk_table->dpm_levels[i-1].value) {
			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
			mclk_table->dpm_levels[i-1].value =
					mclk;
		}
	}

	/* A display-count change forces an MCLK table re-upload too. */
	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
		data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;

	return 0;
}

/*
 * Re-populate and upload the SCLK/MCLK DPM levels when the OD/update
 * flags indicate the cached tables are stale.
 */
static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
		struct pp_hwmgr *hwmgr, const void *input)
{
	int result = 0;
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_dpm_table *dpm_table = &data->dpm_table;
	struct vega10_odn_dpm_table *odn_table = &data->odn_dpm_table;
	struct vega10_odn_clock_voltage_dependency_table *odn_clk_table = &odn_table->vdd_dep_on_sclk;
	int count;

	if (!data->need_update_dpm_table)
		return 0;

	/* Overdrive enabled: take clocks from the user-edited ODN tables. */
	if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
		for (count = 0; count < dpm_table->gfx_table.count; count++)
			dpm_table->gfx_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
	}

	odn_clk_table = &odn_table->vdd_dep_on_mclk;
	if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
		for (count = 0; count < dpm_table->mem_table.count; count++)
			dpm_table->mem_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
	}

	if (data->need_update_dpm_table &
			(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK | DPMTABLE_UPDATE_SOCCLK)) {
		result = vega10_populate_all_graphic_levels(hwmgr);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
				return result);
	}

	if (data->need_update_dpm_table &
			(DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
		result = vega10_populate_all_memory_levels(hwmgr);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
				return result);
	}

	vega10_populate_vddc_soc_levels(hwmgr);

	return result;
}

/* Enable only the DPM levels whose value lies in [low_limit, high_limit]. */
static int
vega10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
			struct vega10_single_dpm_table *dpm_table,
			uint32_t low_limit, uint32_t high_limit)
{
	uint32_t i;

	for (i = 0; i < dpm_table->count; i++) {
		if ((dpm_table->dpm_levels[i].value < low_limit) ||
			(dpm_table->dpm_levels[i].value > high_limit))
			dpm_table->dpm_levels[i].enabled = false;
		else
			dpm_table->dpm_levels[i].enabled = true;
	}
	return 0;
}

/*
 * As above, but additionally gated by a per-level bitmask.
 * NOTE(review): despite the name, a SET bit in @disable_dpm_mask KEEPS the
 * level enabled; a clear bit disables it — confirm against callers.
 */
static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr,
		struct vega10_single_dpm_table *dpm_table,
		uint32_t low_limit, uint32_t high_limit,
		uint32_t disable_dpm_mask)
{
	uint32_t i;

	for (i = 0; i < dpm_table->count; i++) {
		if ((dpm_table->dpm_levels[i].value < low_limit) ||
			(dpm_table->dpm_levels[i].value > high_limit))
			dpm_table->dpm_levels[i].enabled = false;
		else if (!((1 << i) & disable_dpm_mask))
			dpm_table->dpm_levels[i].enabled = false;
		else
			dpm_table->dpm_levels[i].enabled = true;
	}
	return 0;
}

/*
 * Trim the soc/gfx/mem DPM tables to the clock window spanned by the
 * power state's lowest and highest performance levels.
 */
static int vega10_trim_dpm_states(struct pp_hwmgr *hwmgr,
		const struct vega10_power_state *vega10_ps)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint32_t high_limit_count;

	PP_ASSERT_WITH_CODE((vega10_ps->performance_level_count >= 1),
			"power state did not have any performance level",
			return -1);

	/* Single-level states use level 0 as both low and high limit. */
	high_limit_count = (vega10_ps->performance_level_count == 1) ?
			0 : 1;

	vega10_trim_single_dpm_states(hwmgr,
			&(data->dpm_table.soc_table),
			vega10_ps->performance_levels[0].soc_clock,
			vega10_ps->performance_levels[high_limit_count].soc_clock);

	vega10_trim_single_dpm_states_with_mask(hwmgr,
			&(data->dpm_table.gfx_table),
			vega10_ps->performance_levels[0].gfx_clock,
			vega10_ps->performance_levels[high_limit_count].gfx_clock,
			data->disable_dpm_mask);

	vega10_trim_single_dpm_states(hwmgr,
			&(data->dpm_table.mem_table),
			vega10_ps->performance_levels[0].mem_clock,
			vega10_ps->performance_levels[high_limit_count].mem_clock);

	return 0;
}

/*
 * Index of the lowest enabled DPM level.
 * NOTE(review): returns table->count when no level is enabled — callers
 * appear to rely on at least one level being enabled; verify.
 */
static uint32_t vega10_find_lowest_dpm_level(
		struct vega10_single_dpm_table *table)
{
	uint32_t i;

	for (i = 0; i < table->count; i++) {
		if (table->dpm_levels[i].enabled)
			break;
	}

	return i;
}

/* Index of the highest enabled DPM level (0 if none enabled). */
static uint32_t vega10_find_highest_dpm_level(
		struct vega10_single_dpm_table *table)
{
	uint32_t i = 0;

	if (table->count <= MAX_REGULAR_DPM_NUMBER) {
		for (i = table->count; i > 0; i--) {
			if (table->dpm_levels[i - 1].enabled)
				return i - 1;
		}
	} else {
		pr_info("DPM Table Has Too Many Entries!");
		return MAX_REGULAR_DPM_NUMBER - 1;
	}

	return i;
}

/* Intentionally a no-op on vega10. */
static void vega10_apply_dal_minimum_voltage_request(
		struct pp_hwmgr *hwmgr)
{
	return;
}

/*
 * Soc voltage index to pair with the top uclk level, taken from the
 * mclk dependency table's last entry.
 */
static int vega10_get_soc_index_for_max_uclk(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table_on_mclk;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);

	vdd_dep_table_on_mclk = table_info->vdd_dep_on_mclk;

	return vdd_dep_table_on_mclk->entries[NUM_UCLK_DPM_LEVELS - 1].vddInd + 1;
}

/* Push the boot (soft-min) gfx/mem/soc DPM levels down to the SMC. */
static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
{
	struct
vega10_hwmgr *data = hwmgr->backend; 3526 uint32_t socclk_idx; 3527 3528 vega10_apply_dal_minimum_voltage_request(hwmgr); 3529 3530 if (!data->registry_data.sclk_dpm_key_disabled) { 3531 if (data->smc_state_table.gfx_boot_level != 3532 data->dpm_table.gfx_table.dpm_state.soft_min_level) { 3533 smum_send_msg_to_smc_with_parameter(hwmgr, 3534 PPSMC_MSG_SetSoftMinGfxclkByIndex, 3535 data->smc_state_table.gfx_boot_level, 3536 NULL); 3537 3538 data->dpm_table.gfx_table.dpm_state.soft_min_level = 3539 data->smc_state_table.gfx_boot_level; 3540 } 3541 } 3542 3543 if (!data->registry_data.mclk_dpm_key_disabled) { 3544 if (data->smc_state_table.mem_boot_level != 3545 data->dpm_table.mem_table.dpm_state.soft_min_level) { 3546 if ((data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) 3547 && hwmgr->not_vf) { 3548 socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr); 3549 smum_send_msg_to_smc_with_parameter(hwmgr, 3550 PPSMC_MSG_SetSoftMinSocclkByIndex, 3551 socclk_idx, 3552 NULL); 3553 } else { 3554 smum_send_msg_to_smc_with_parameter(hwmgr, 3555 PPSMC_MSG_SetSoftMinUclkByIndex, 3556 data->smc_state_table.mem_boot_level, 3557 NULL); 3558 } 3559 data->dpm_table.mem_table.dpm_state.soft_min_level = 3560 data->smc_state_table.mem_boot_level; 3561 } 3562 } 3563 3564 if (!hwmgr->not_vf) 3565 return 0; 3566 3567 if (!data->registry_data.socclk_dpm_key_disabled) { 3568 if (data->smc_state_table.soc_boot_level != 3569 data->dpm_table.soc_table.dpm_state.soft_min_level) { 3570 smum_send_msg_to_smc_with_parameter(hwmgr, 3571 PPSMC_MSG_SetSoftMinSocclkByIndex, 3572 data->smc_state_table.soc_boot_level, 3573 NULL); 3574 data->dpm_table.soc_table.dpm_state.soft_min_level = 3575 data->smc_state_table.soc_boot_level; 3576 } 3577 } 3578 3579 return 0; 3580 } 3581 3582 static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr) 3583 { 3584 struct vega10_hwmgr *data = hwmgr->backend; 3585 3586 vega10_apply_dal_minimum_voltage_request(hwmgr); 3587 3588 if 
(!data->registry_data.sclk_dpm_key_disabled) { 3589 if (data->smc_state_table.gfx_max_level != 3590 data->dpm_table.gfx_table.dpm_state.soft_max_level) { 3591 smum_send_msg_to_smc_with_parameter(hwmgr, 3592 PPSMC_MSG_SetSoftMaxGfxclkByIndex, 3593 data->smc_state_table.gfx_max_level, 3594 NULL); 3595 data->dpm_table.gfx_table.dpm_state.soft_max_level = 3596 data->smc_state_table.gfx_max_level; 3597 } 3598 } 3599 3600 if (!data->registry_data.mclk_dpm_key_disabled) { 3601 if (data->smc_state_table.mem_max_level != 3602 data->dpm_table.mem_table.dpm_state.soft_max_level) { 3603 smum_send_msg_to_smc_with_parameter(hwmgr, 3604 PPSMC_MSG_SetSoftMaxUclkByIndex, 3605 data->smc_state_table.mem_max_level, 3606 NULL); 3607 data->dpm_table.mem_table.dpm_state.soft_max_level = 3608 data->smc_state_table.mem_max_level; 3609 } 3610 } 3611 3612 if (!hwmgr->not_vf) 3613 return 0; 3614 3615 if (!data->registry_data.socclk_dpm_key_disabled) { 3616 if (data->smc_state_table.soc_max_level != 3617 data->dpm_table.soc_table.dpm_state.soft_max_level) { 3618 smum_send_msg_to_smc_with_parameter(hwmgr, 3619 PPSMC_MSG_SetSoftMaxSocclkByIndex, 3620 data->smc_state_table.soc_max_level, 3621 NULL); 3622 data->dpm_table.soc_table.dpm_state.soft_max_level = 3623 data->smc_state_table.soc_max_level; 3624 } 3625 } 3626 3627 return 0; 3628 } 3629 3630 static int vega10_generate_dpm_level_enable_mask( 3631 struct pp_hwmgr *hwmgr, const void *input) 3632 { 3633 struct vega10_hwmgr *data = hwmgr->backend; 3634 const struct phm_set_power_state_input *states = 3635 (const struct phm_set_power_state_input *)input; 3636 const struct vega10_power_state *vega10_ps = 3637 cast_const_phw_vega10_power_state(states->pnew_state); 3638 int i; 3639 3640 PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps), 3641 "Attempt to Trim DPM States Failed!", 3642 return -1); 3643 3644 data->smc_state_table.gfx_boot_level = 3645 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); 3646 
data->smc_state_table.gfx_max_level = 3647 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table)); 3648 data->smc_state_table.mem_boot_level = 3649 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table)); 3650 data->smc_state_table.mem_max_level = 3651 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table)); 3652 data->smc_state_table.soc_boot_level = 3653 vega10_find_lowest_dpm_level(&(data->dpm_table.soc_table)); 3654 data->smc_state_table.soc_max_level = 3655 vega10_find_highest_dpm_level(&(data->dpm_table.soc_table)); 3656 3657 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr), 3658 "Attempt to upload DPM Bootup Levels Failed!", 3659 return -1); 3660 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr), 3661 "Attempt to upload DPM Max Levels Failed!", 3662 return -1); 3663 for(i = data->smc_state_table.gfx_boot_level; i < data->smc_state_table.gfx_max_level; i++) 3664 data->dpm_table.gfx_table.dpm_levels[i].enabled = true; 3665 3666 3667 for(i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++) 3668 data->dpm_table.mem_table.dpm_levels[i].enabled = true; 3669 3670 for (i = data->smc_state_table.soc_boot_level; i < data->smc_state_table.soc_max_level; i++) 3671 data->dpm_table.soc_table.dpm_levels[i].enabled = true; 3672 3673 return 0; 3674 } 3675 3676 int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) 3677 { 3678 struct vega10_hwmgr *data = hwmgr->backend; 3679 3680 if (data->smu_features[GNLD_DPM_VCE].supported) { 3681 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, 3682 enable, 3683 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap), 3684 "Attempt to Enable/Disable DPM VCE Failed!", 3685 return -1); 3686 data->smu_features[GNLD_DPM_VCE].enabled = enable; 3687 } 3688 3689 return 0; 3690 } 3691 3692 static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr) 3693 { 3694 struct vega10_hwmgr *data = hwmgr->backend; 3695 uint32_t low_sclk_interrupt_threshold = 0; 3696 3697 
if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) && 3698 (data->low_sclk_interrupt_threshold != 0)) { 3699 low_sclk_interrupt_threshold = 3700 data->low_sclk_interrupt_threshold; 3701 3702 data->smc_state_table.pp_table.LowGfxclkInterruptThreshold = 3703 cpu_to_le32(low_sclk_interrupt_threshold); 3704 3705 /* This message will also enable SmcToHost Interrupt */ 3706 smum_send_msg_to_smc_with_parameter(hwmgr, 3707 PPSMC_MSG_SetLowGfxclkInterruptThreshold, 3708 (uint32_t)low_sclk_interrupt_threshold, 3709 NULL); 3710 } 3711 3712 return 0; 3713 } 3714 3715 static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr, 3716 const void *input) 3717 { 3718 int tmp_result, result = 0; 3719 struct vega10_hwmgr *data = hwmgr->backend; 3720 PPTable_t *pp_table = &(data->smc_state_table.pp_table); 3721 3722 tmp_result = vega10_find_dpm_states_clocks_in_dpm_table(hwmgr, input); 3723 PP_ASSERT_WITH_CODE(!tmp_result, 3724 "Failed to find DPM states clocks in DPM table!", 3725 result = tmp_result); 3726 3727 tmp_result = vega10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); 3728 PP_ASSERT_WITH_CODE(!tmp_result, 3729 "Failed to populate and upload SCLK MCLK DPM levels!", 3730 result = tmp_result); 3731 3732 tmp_result = vega10_generate_dpm_level_enable_mask(hwmgr, input); 3733 PP_ASSERT_WITH_CODE(!tmp_result, 3734 "Failed to generate DPM level enabled mask!", 3735 result = tmp_result); 3736 3737 tmp_result = vega10_update_sclk_threshold(hwmgr); 3738 PP_ASSERT_WITH_CODE(!tmp_result, 3739 "Failed to update SCLK threshold!", 3740 result = tmp_result); 3741 3742 result = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table, PPTABLE, false); 3743 PP_ASSERT_WITH_CODE(!result, 3744 "Failed to upload PPtable!", return result); 3745 3746 /* 3747 * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag. 3748 * That effectively disables AVFS feature. 
3749 */ 3750 if(hwmgr->hardcode_pp_table != NULL) 3751 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; 3752 3753 vega10_update_avfs(hwmgr); 3754 3755 /* 3756 * Clear all OD flags except DPMTABLE_OD_UPDATE_VDDC. 3757 * That will help to keep AVFS disabled. 3758 */ 3759 data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC; 3760 3761 return 0; 3762 } 3763 3764 static uint32_t vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) 3765 { 3766 struct pp_power_state *ps; 3767 struct vega10_power_state *vega10_ps; 3768 3769 if (hwmgr == NULL) 3770 return -EINVAL; 3771 3772 ps = hwmgr->request_ps; 3773 3774 if (ps == NULL) 3775 return -EINVAL; 3776 3777 vega10_ps = cast_phw_vega10_power_state(&ps->hardware); 3778 3779 if (low) 3780 return vega10_ps->performance_levels[0].gfx_clock; 3781 else 3782 return vega10_ps->performance_levels 3783 [vega10_ps->performance_level_count - 1].gfx_clock; 3784 } 3785 3786 static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) 3787 { 3788 struct pp_power_state *ps; 3789 struct vega10_power_state *vega10_ps; 3790 3791 if (hwmgr == NULL) 3792 return -EINVAL; 3793 3794 ps = hwmgr->request_ps; 3795 3796 if (ps == NULL) 3797 return -EINVAL; 3798 3799 vega10_ps = cast_phw_vega10_power_state(&ps->hardware); 3800 3801 if (low) 3802 return vega10_ps->performance_levels[0].mem_clock; 3803 else 3804 return vega10_ps->performance_levels 3805 [vega10_ps->performance_level_count-1].mem_clock; 3806 } 3807 3808 static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr, 3809 uint32_t *query) 3810 { 3811 uint32_t value; 3812 3813 if (!query) 3814 return -EINVAL; 3815 3816 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value); 3817 3818 /* SMC returning actual watts, keep consistent with legacy asics, low 8 bit as 8 fractional bits */ 3819 *query = value << 8; 3820 3821 return 0; 3822 } 3823 3824 static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx, 3825 void *value, int *size) 3826 { 3827 struct amdgpu_device *adev = 
		hwmgr->adev;
	uint32_t sclk_mhz, mclk_idx, activity_percent = 0;
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_dpm_table *dpm_table = &data->dpm_table;
	int ret = 0;
	uint32_t val_vid;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		/* SMC reports MHz; callers expect 10 kHz units, hence * 100 */
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency, &sclk_mhz);
		*((uint32_t *)value) = sclk_mhz * 100;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		/* SMC returns a DPM level index; translate via mem_table */
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &mclk_idx);
		if (mclk_idx < dpm_table->mem_table.count) {
			*((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
			*size = 4;
		} else {
			ret = -EINVAL;
		}
		break;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		/* clamp to 100% in case the SMC reports a transient overshoot */
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0,
						&activity_percent);
		*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		/* SMC reports degrees C; convert to millidegrees */
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHotspot, (uint32_t *)value);
		*((uint32_t *)value) = *((uint32_t *)value) *
			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHBM, (uint32_t *)value);
		*((uint32_t *)value) = *((uint32_t *)value) *
			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		/* 1 = powered, 0 = gated */
		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		ret = vega10_get_gpu_power(hwmgr, (uint32_t *)value);
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		/* read the SVI0 plane0 VID from the SMUIO register and decode it */
		val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_PLANE0_CURRENTVID) &
			SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK) >>
			SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT;
		*((uint32_t *)value) = (uint32_t)convert_to_vddc((uint8_t)val_vid);
		return 0;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = vega10_get_enabled_smc_features(hwmgr, (uint64_t *)value);
		if (!ret)
			*size = 8;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * Tell the SMC whether fast UCLK switching is allowed (no display, or a
 * single display / synced displays).
 */
static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
		bool has_disp)
{
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetUclkFastSwitch,
			has_disp ? 1 : 0,
			NULL);
}

/*
 * Request a hard-min display-related clock from the SMC.  The message
 * payload packs the frequency (MHz) in the upper 16 bits and the DSPCLK_e
 * selector in the lower bits.  Returns 0 on success, -1 for an unknown
 * clock type.
 */
static int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
		struct pp_display_clock_request *clock_req)
{
	int result = 0;
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
	DSPCLK_e clk_select = 0;
	uint32_t clk_request = 0;

	switch (clk_type) {
	case amd_pp_dcef_clock:
		clk_select = DSPCLK_DCEFCLK;
		break;
	case amd_pp_disp_clock:
		clk_select = DSPCLK_DISPCLK;
		break;
	case amd_pp_pixel_clock:
		clk_select = DSPCLK_PIXCLK;
		break;
	case amd_pp_phy_clock:
		clk_select = DSPCLK_PHYCLK;
		break;
	default:
		pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
		result = -1;
		break;
	}

	if (!result) {
		clk_request = (clk_freq << 16) | clk_select;
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_RequestDisplayClockByFreq,
				clk_request,
				NULL);
	}

	return result;
}

static uint8_t
vega10_get_uclk_index(struct pp_hwmgr *hwmgr,
		struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table,
		uint32_t frequency)
{
	/*
	 * Return the index of the first mclk dependency entry whose clock is
	 * >= @frequency; falls back to the last entry when all are lower,
	 * and to index 0 when the table is missing/empty.
	 */
	uint8_t count;
	uint8_t i;

	if (mclk_table == NULL || mclk_table->count == 0)
		return 0;

	count = (uint8_t)(mclk_table->count);

	for(i = 0; i < count; i++) {
		if(mclk_table->entries[i].clk >= frequency)
			return i;
	}

	/* no entry satisfied the request - use the highest level */
	return i-1;
}

/*
 * After a power-state adjustment, re-inform the SMC about the display
 * configuration: fast-UCLK-switch capability, the DCEFCLK hard minimum,
 * the deep-sleep DCEFCLK floor and the UCLK soft minimum.
 */
static int vega10_notify_smc_display_config_after_ps_adjustment(
		struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_single_dpm_table *dpm_table =
			&data->dpm_table.dcef_table;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = table_info->vdd_dep_on_mclk;
	uint32_t idx;
	struct PP_Clocks min_clocks = {0};
	uint32_t i;
	struct pp_display_clock_request clock_req;

	/* multiple unsynced displays cannot tolerate fast UCLK switches */
	if ((hwmgr->display_config->num_display > 1) &&
	     !hwmgr->display_config->multi_monitor_in_sync &&
	     !hwmgr->display_config->nb_pstate_switch_disable)
		vega10_notify_smc_display_change(hwmgr, false);
	else
		vega10_notify_smc_display_change(hwmgr, true);

	min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
	min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
	min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;

	/* find the DCEF DPM level that exactly matches the requested clock */
	for (i = 0; i < dpm_table->count; i++) {
		if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock)
			break;
	}

	if (i < dpm_table->count) {
		clock_req.clock_type = amd_pp_dcef_clock;
		clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value * 10;
		if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
			smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
					min_clocks.dcefClockInSR / 100,
					NULL);
		} else {
			pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
		}
	} else {
		pr_debug("Cannot find requested DCEFCLK!");
	}

	if (min_clocks.memoryClock != 0) {
		idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx,
						NULL);
		data->dpm_table.mem_table.dpm_state.soft_min_level= idx;
	}

	return 0;
}

/*
 * Pin both gfx and mem DPM to their highest enabled level by setting the
 * boot (min) and max levels to the same index, then uploading both.
 */
static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	data->smc_state_table.gfx_boot_level =
	data->smc_state_table.gfx_max_level =
		vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
	data->smc_state_table.mem_boot_level =
	data->smc_state_table.mem_max_level =
		vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));

	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload boot level to highest!",
			return -1);

	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload dpm max level to highest!",
			return -1);

	return 0;
}

/*
 * Pin both gfx and mem DPM to their lowest enabled level.
 * NOTE(review): the error strings below say "highest" - copy-pasted from
 * vega10_force_dpm_highest(); only the log text is affected.
 */
static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	data->smc_state_table.gfx_boot_level =
	data->smc_state_table.gfx_max_level =
		vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
	data->smc_state_table.mem_boot_level =
	data->smc_state_table.mem_max_level =
		vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));

	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload boot level to highest!",
			return -1);

	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload dpm max level to highest!",
			return -1);

	return 0;

}

static
int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
	/*
	 * Restore automatic DPM: boot (min) level back to the lowest enabled
	 * entry, max level back to the highest, for both gfx and mem tables.
	 */
	struct vega10_hwmgr *data = hwmgr->backend;

	data->smc_state_table.gfx_boot_level =
			vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
	data->smc_state_table.gfx_max_level =
			vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
	data->smc_state_table.mem_boot_level =
			vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
	data->smc_state_table.mem_max_level =
			vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));

	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload DPM Bootup Levels!",
			return -1);

	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload DPM Max Levels!",
			return -1);
	return 0;
}

/*
 * Compute the sclk/mclk/soc level masks used by the UMD-pstate profiling
 * modes, and cache the standard-profile clocks in hwmgr->pstate_sclk/mclk.
 */
static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
				uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);

	if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
		table_info->vdd_dep_on_socclk->count > VEGA10_UMD_PSTATE_SOCCLK_LEVEL &&
		table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
		*sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
		*soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
		*mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
		hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
		hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
	}

	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
		*sclk_mask = 0;
	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
		*mclk_mask = 0;
	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
		/* under vega10 pp one vf mode, the gfx clk dpm need be lower
		 * to level-4 due to the limited power
		 */
		if (hwmgr->pp_one_vf)
			*sclk_mask = 4;
		else
			*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
		*soc_mask = table_info->vdd_dep_on_socclk->count - 1;
		*mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
	}

	return 0;
}

/*
 * Switch fan control between none (100% fixed), manual and automatic
 * (SMC microcode controlled).  No-op on virtual functions.
 */
static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
	if (!hwmgr->not_vf)
		return;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
		break;
	case AMD_FAN_CTRL_MANUAL:
		if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
			vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
		break;
	case AMD_FAN_CTRL_AUTO:
		if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
			vega10_fan_ctrl_start_smc_fan_control(hwmgr);
		break;
	default:
		break;
	}
}

/*
 * Restrict a clock domain to the DPM levels selected in @mask: the lowest
 * set bit becomes the boot (min) level, the highest set bit the max level.
 * An empty mask selects level 0 for both.  Continues in the next chunk.
 */
static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	switch (type) {
	case PP_SCLK:
		data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
		data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload boot level to lowest!",
			return -EINVAL);

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload dpm max level to highest!",
			return -EINVAL);
		break;

	case PP_MCLK:
		data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
		data->smc_state_table.mem_max_level = mask ?
							(fls(mask) - 1) : 0;

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload boot level to lowest!",
			return -EINVAL);

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload dpm max level to highest!",
			return -EINVAL);

		break;

	case PP_SOCCLK:
		data->smc_state_table.soc_boot_level = mask ? (ffs(mask) - 1) : 0;
		data->smc_state_table.soc_max_level = mask ? (fls(mask) - 1) : 0;

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload boot level to lowest!",
			return -EINVAL);

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload dpm max level to highest!",
			return -EINVAL);

		break;

	case PP_DCEFCLK:
		pr_info("Setting DCEFCLK min/max dpm level is not supported!\n");
		break;

	case PP_PCIE:
	default:
		break;
	}

	return 0;
}

/*
 * Top-level handler for the amd_dpm_forced_level interface: force highest /
 * lowest / auto, or apply a profiling mask; then, on bare metal, switch the
 * fan mode when entering or leaving PROFILE_PEAK.
 */
static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
				enum amd_dpm_forced_level level)
{
	int ret = 0;
	uint32_t sclk_mask = 0;
	uint32_t mclk_mask = 0;
	uint32_t soc_mask = 0;

	/* first call also populates hwmgr->pstate_sclk/pstate_mclk */
	if (hwmgr->pstate_sclk == 0)
		vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		ret = vega10_force_dpm_highest(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		ret = vega10_force_dpm_lowest(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		ret = vega10_unforce_dpm_levels(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
		if (ret)
			return ret;
		vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
		vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}

	/* fan-mode juggling below is host-only */
	if (!hwmgr->not_vf)
		return ret;

	if (!ret) {
		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
		else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
	}

	return ret;
}

/* Report MANUAL when the SMC fan-control feature is off, AUTO otherwise. */
static uint32_t vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->smu_features[GNLD_FAN_CONTROL].enabled == false)
		return AMD_FAN_CTRL_MANUAL;
	else
		return AMD_FAN_CTRL_AUTO;
}

/* Hand DAL the AC max engine/memory clocks from the pptable limits. */
static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr,
		struct amd_pp_simple_clock_info *info)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_clock_and_voltage_limits *max_limits =
			&table_info->max_clock_voltage_on_ac;

	info->engine_max_clock = max_limits->sclk;
	info->memory_max_clock = max_limits->mclk;

	return 0;
}

/*
 * Fill @clocks with the non-zero sclk dependency entries (clk is stored in
 * 10 kHz units, reported in kHz, hence * 10).  No latency data for sclk.
 */
static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
		struct pp_clock_levels_with_latency *clocks)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
			table_info->vdd_dep_on_sclk;
	uint32_t i;

	clocks->num_levels = 0;
	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].clk) {
			clocks->data[clocks->num_levels].clocks_in_khz =
					dep_table->entries[i].clk * 10;
			clocks->num_levels++;
		}
	}

}

/*
 * Fill @clocks with the non-zero mclk dependency entries and mirror them
 * into the driver's mclk latency table (fixed 25 us latency per level).
 */
static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
		struct pp_clock_levels_with_latency *clocks)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
			table_info->vdd_dep_on_mclk;
	struct vega10_hwmgr *data = hwmgr->backend;
	uint32_t j = 0;
	uint32_t i;

	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].clk) {

			clocks->data[j].clocks_in_khz =
						dep_table->entries[i].clk * 10;
			data->mclk_latency_table.entries[j].frequency =
						dep_table->entries[i].clk;
			clocks->data[j].latency_in_us =
				data->mclk_latency_table.entries[j].latency = 25;
			j++;
		}
	}
	clocks->num_levels = data->mclk_latency_table.count = j;
}

/*
 * Fill @clocks from the dcefclk dependency table.
 * NOTE(review): num_levels is incremented without being zeroed first -
 * relies on the caller passing a zero-initialized struct; confirm callers.
 */
static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
		struct pp_clock_levels_with_latency *clocks)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
			table_info->vdd_dep_on_dcefclk;
	uint32_t i;

	for (i = 0; i < dep_table->count; i++) {
		clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
		clocks->data[i].latency_in_us = 0;
		clocks->num_levels++;
	}
}

/*
 * Fill @clocks from the socclk dependency table; same num_levels caveat
 * as vega10_get_dcefclocks() above.
 */
static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
		struct pp_clock_levels_with_latency *clocks)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
			table_info->vdd_dep_on_socclk;
	uint32_t i;

	for (i = 0; i < dep_table->count; i++) {
		clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
		clocks->data[i].latency_in_us = 0;
		clocks->num_levels++;
	}
}

/*
 * Dispatch a clocks-with-latency query to the per-domain helper.
 * Returns 0 on success, -1 for an unsupported clock type.
 */
static int vega10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_latency *clocks)
{
	switch (type) {
	case amd_pp_sys_clock:
		vega10_get_sclks(hwmgr, clocks);
		break;
	case amd_pp_mem_clock:
		vega10_get_memclocks(hwmgr, clocks);
		break;
	case amd_pp_dcef_clock:
		vega10_get_dcefclocks(hwmgr, clocks);
		break;
	case amd_pp_soc_clock:
		vega10_get_socclocks(hwmgr, clocks);
		break;
	default:
		return -1;
	}

	return 0;
}

/*
 * Report clock levels together with their voltage (mV, looked up through
 * the vddc lookup table) for the display-related clock domains.
 * Returns 0 on success, -1 for an unsupported clock type.
 */
static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_voltage *clocks)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
	uint32_t i;

	switch (type) {
	case amd_pp_mem_clock:
		dep_table = table_info->vdd_dep_on_mclk;
		break;
	case amd_pp_dcef_clock:
		dep_table = table_info->vdd_dep_on_dcefclk;
		break;
	case amd_pp_disp_clock:
		dep_table = table_info->vdd_dep_on_dispclk;
		break;
	case amd_pp_pixel_clock:
		dep_table = table_info->vdd_dep_on_pixclk;
		break;
	case amd_pp_phy_clock:
		dep_table = table_info->vdd_dep_on_phyclk;
		break;
	default:
		return -1;
	}

	for (i = 0; i < dep_table->count; i++) {
		clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
		clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
				entries[dep_table->entries[i].vddInd].us_vdd);
		clocks->num_levels++;
	}

	/* NOTE(review): unreachable - the loop always runs to completion */
	if (i < dep_table->count)
		return -1;

	return 0;
}

/*
 * Cache the display watermark ranges into the SMC watermarks table and
 * flag them as pending upload (head; body continues in the next chunk).
 */
static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
							void *clock_range)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct
			dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range;
	Watermarks_t *table = &(data->smc_state_table.water_marks_table);

	if (!data->registry_data.disable_water_mark) {
		smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
		/* mark as present; actual upload happens on the next
		 * display-configuration-changed task */
		data->water_marks_bitmap = WaterMarksExist;
	}

	return 0;
}

/*
 * Print the enabled-SMC-feature bitmask and a per-feature Y/N table into
 * @buf; returns the number of bytes written (or a negative errno).
 */
static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
{
	/* index order matches the GNLD_* feature enumeration */
	static const char *ppfeature_name[] = {
				"DPM_PREFETCHER",
				"GFXCLK_DPM",
				"UCLK_DPM",
				"SOCCLK_DPM",
				"UVD_DPM",
				"VCE_DPM",
				"ULV",
				"MP0CLK_DPM",
				"LINK_DPM",
				"DCEFCLK_DPM",
				"AVFS",
				"GFXCLK_DS",
				"SOCCLK_DS",
				"LCLK_DS",
				"PPT",
				"TDC",
				"THERMAL",
				"GFX_PER_CU_CG",
				"RM",
				"DCEFCLK_DS",
				"ACDC",
				"VR0HOT",
				"VR1HOT",
				"FW_CTF",
				"LED_DISPLAY",
				"FAN_CONTROL",
				"FAST_PPT",
				"DIDT",
				"ACG",
				"PCC_LIMIT"};
	static const char *output_title[] = {
				"FEATURES",
				"BITMASK",
				"ENABLEMENT"};
	uint64_t features_enabled;
	int i;
	int ret = 0;
	int size = 0;

	ret = vega10_get_enabled_smc_features(hwmgr, &features_enabled);
	PP_ASSERT_WITH_CODE(!ret,
		"[EnableAllSmuFeatures] Failed to get enabled smc features!",
		return ret);

	size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
	size += sprintf(buf + size, "%-19s %-22s %s\n",
				output_title[0],
				output_title[1],
				output_title[2]);
	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
					ppfeature_name[i],
					1ULL << i,
					(features_enabled & (1ULL << i)) ? "Y" : "N");
	}

	return size;
}

/*
 * Reconcile the currently enabled SMC features with the requested
 * @new_ppfeature_masks: disable the cleared bits, enable the newly set
 * ones.  Returns 0 on success or a negative errno.
 */
static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
{
	uint64_t features_enabled;
	uint64_t features_to_enable;
	uint64_t features_to_disable;
	int ret = 0;

	if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
		return -EINVAL;

	ret = vega10_get_enabled_smc_features(hwmgr, &features_enabled);
	if (ret)
		return ret;

	features_to_disable =
		features_enabled & ~new_ppfeature_masks;
	features_to_enable =
		~features_enabled & new_ppfeature_masks;

	pr_debug("features_to_disable 0x%llx\n", features_to_disable);
	pr_debug("features_to_enable 0x%llx\n", features_to_enable);

	if (features_to_disable) {
		ret = vega10_enable_smc_features(hwmgr, false, features_to_disable);
		if (ret)
			return ret;
	}

	if (features_to_enable) {
		ret = vega10_enable_smc_features(hwmgr, true, features_to_enable);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * sysfs pp_dpm_* / pp_od_clk_voltage printer: list the levels of the
 * requested clock domain into @buf, marking the current level with '*'.
 * Head only - the switch continues in the following chunks.
 */
static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
	struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
	struct vega10_single_dpm_table *soc_table = &(data->dpm_table.soc_table);
	struct vega10_single_dpm_table *dcef_table = &(data->dpm_table.dcef_table);
	struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;

	int i, now, size = 0, count = 0;

	switch (type) {
	case PP_SCLK:
		if (data->registry_data.sclk_dpm_key_disabled)
			break;

		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);

		if (hwmgr->pp_one_vf &&
		    (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
			/* one-VF peak mode caps gfx DPM at level 4 */
			count = 5;
		else
			count = sclk_table->count;
		for (i = 0; i < count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		if (data->registry_data.mclk_dpm_key_disabled)
			break;

		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_SOCCLK:
		if (data->registry_data.socclk_dpm_key_disabled)
			break;

		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);

		for (i = 0; i < soc_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, soc_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_DCEFCLK:
		if (data->registry_data.dcefclk_dpm_key_disabled)
			break;

		/* DCEFCLK query returns a frequency in MHz, not a level index */
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);

		for (i = 0; i < dcef_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, dcef_table->dpm_levels[i].value / 100,
					(dcef_table->dpm_levels[i].value / 100 == now) ?
					"*" : "");
		break;
	case PP_PCIE:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex, &now);

		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" :
					(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" :
					(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
					(i == now) ?
"*" : ""); 4613 break; 4614 case OD_SCLK: 4615 if (hwmgr->od_enabled) { 4616 size = sprintf(buf, "%s:\n", "OD_SCLK"); 4617 podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk; 4618 for (i = 0; i < podn_vdd_dep->count; i++) 4619 size += sprintf(buf + size, "%d: %10uMhz %10umV\n", 4620 i, podn_vdd_dep->entries[i].clk / 100, 4621 podn_vdd_dep->entries[i].vddc); 4622 } 4623 break; 4624 case OD_MCLK: 4625 if (hwmgr->od_enabled) { 4626 size = sprintf(buf, "%s:\n", "OD_MCLK"); 4627 podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk; 4628 for (i = 0; i < podn_vdd_dep->count; i++) 4629 size += sprintf(buf + size, "%d: %10uMhz %10umV\n", 4630 i, podn_vdd_dep->entries[i].clk/100, 4631 podn_vdd_dep->entries[i].vddc); 4632 } 4633 break; 4634 case OD_RANGE: 4635 if (hwmgr->od_enabled) { 4636 size = sprintf(buf, "%s:\n", "OD_RANGE"); 4637 size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", 4638 data->golden_dpm_table.gfx_table.dpm_levels[0].value/100, 4639 hwmgr->platform_descriptor.overdriveLimit.engineClock/100); 4640 size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n", 4641 data->golden_dpm_table.mem_table.dpm_levels[0].value/100, 4642 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); 4643 size += sprintf(buf + size, "VDDC: %7umV %11umV\n", 4644 data->odn_dpm_table.min_vddc, 4645 data->odn_dpm_table.max_vddc); 4646 } 4647 break; 4648 default: 4649 break; 4650 } 4651 return size; 4652 } 4653 4654 static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr) 4655 { 4656 struct vega10_hwmgr *data = hwmgr->backend; 4657 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table); 4658 int result = 0; 4659 4660 if ((data->water_marks_bitmap & WaterMarksExist) && 4661 !(data->water_marks_bitmap & WaterMarksLoaded)) { 4662 result = smum_smc_table_manager(hwmgr, (uint8_t *)wm_table, WMTABLE, false); 4663 PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return -EINVAL); 4664 data->water_marks_bitmap |= WaterMarksLoaded; 4665 } 4666 4667 if 
(data->water_marks_bitmap & WaterMarksLoaded) { 4668 smum_send_msg_to_smc_with_parameter(hwmgr, 4669 PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display, 4670 NULL); 4671 } 4672 4673 return result; 4674 } 4675 4676 static int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) 4677 { 4678 struct vega10_hwmgr *data = hwmgr->backend; 4679 4680 if (data->smu_features[GNLD_DPM_UVD].supported) { 4681 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, 4682 enable, 4683 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap), 4684 "Attempt to Enable/Disable DPM UVD Failed!", 4685 return -1); 4686 data->smu_features[GNLD_DPM_UVD].enabled = enable; 4687 } 4688 return 0; 4689 } 4690 4691 static void vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate) 4692 { 4693 struct vega10_hwmgr *data = hwmgr->backend; 4694 4695 data->vce_power_gated = bgate; 4696 vega10_enable_disable_vce_dpm(hwmgr, !bgate); 4697 } 4698 4699 static void vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate) 4700 { 4701 struct vega10_hwmgr *data = hwmgr->backend; 4702 4703 data->uvd_power_gated = bgate; 4704 vega10_enable_disable_uvd_dpm(hwmgr, !bgate); 4705 } 4706 4707 static inline bool vega10_are_power_levels_equal( 4708 const struct vega10_performance_level *pl1, 4709 const struct vega10_performance_level *pl2) 4710 { 4711 return ((pl1->soc_clock == pl2->soc_clock) && 4712 (pl1->gfx_clock == pl2->gfx_clock) && 4713 (pl1->mem_clock == pl2->mem_clock)); 4714 } 4715 4716 static int vega10_check_states_equal(struct pp_hwmgr *hwmgr, 4717 const struct pp_hw_power_state *pstate1, 4718 const struct pp_hw_power_state *pstate2, bool *equal) 4719 { 4720 const struct vega10_power_state *psa; 4721 const struct vega10_power_state *psb; 4722 int i; 4723 4724 if (pstate1 == NULL || pstate2 == NULL || equal == NULL) 4725 return -EINVAL; 4726 4727 psa = cast_const_phw_vega10_power_state(pstate1); 4728 psb = cast_const_phw_vega10_power_state(pstate2); 4729 /* If the two states don't even 
	   have the same number of performance levels they cannot be the same state. */
	if (psa->performance_level_count != psb->performance_level_count) {
		*equal = false;
		return 0;
	}

	for (i = 0; i < psa->performance_level_count; i++) {
		if (!vega10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
			/* If we have found even one performance level pair that is different the states are different. */
			*equal = false;
			return 0;
		}
	}

	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
	*equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
	*equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
	*equal &= (psa->sclk_threshold == psb->sclk_threshold);

	return 0;
}

/*
 * True when the cached display timing no longer matches the current
 * display config (display count, or deep-sleep min clock when SclkDeepSleep
 * is supported) and the SMC needs a refresh.
 */
static bool
vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	bool is_update_required = false;

	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
		is_update_required = true;

	if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) {
		if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr)
			is_update_required = true;
	}

	return is_update_required;
}

/*
 * Tear down all DPM-related features on the way down (suspend/unload).
 * Each step logs on failure but the teardown continues; the last failing
 * code is returned.  No-op on virtual functions.
 */
static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	if (!hwmgr->not_vf)
		return 0;

	if (PP_CAP(PHM_PlatformCaps_ThermalController))
		vega10_disable_thermal_protection(hwmgr);

	tmp_result = vega10_disable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable power containment!", result = tmp_result);

	tmp_result = vega10_disable_didt_config(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable didt config!", result = tmp_result);

	tmp_result = vega10_avfs_enable(hwmgr, false);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable AVFS!", result = tmp_result);

	tmp_result = vega10_stop_dpm(hwmgr, SMC_DPM_FEATURES);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to stop DPM!", result = tmp_result);

	tmp_result = vega10_disable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable deep sleep!", result = tmp_result);

	tmp_result = vega10_disable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable ulv!", result = tmp_result);

	tmp_result = vega10_acg_disable(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable acg!", result = tmp_result);

	vega10_enable_disable_PCC_limit_feature(hwmgr, false);
	return result;
}

/* Disable DPM and drop the watermarks-loaded flag before power-off. */
static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	int result;

	result = vega10_disable_dpm_tasks(hwmgr);
	PP_ASSERT_WITH_CODE((0 == result),
			"[disable_dpm_tasks] Failed to disable DPM!",
			);
	data->water_marks_bitmap &= ~(WaterMarksLoaded);

	return result;
}

/*
 * Current sclk overdrive in percent relative to the golden (default) top
 * DPM level: (current_top - golden_top) * 100 / golden_top, rounded up.
 */
static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
	struct vega10_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.gfx_table);
	int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
	int golden_value = golden_sclk_table->dpm_levels
			[golden_sclk_table->count - 1].value;

	value -= golden_value;
	value = DIV_ROUND_UP(value * 100, golden_value);

	return value;
}

/*
 * Apply an sclk overdrive of @value percent on top of the golden top DPM
 * level, clamped to the vbios engine clock limit.
 */
static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr,
uint32_t value)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.gfx_table);
	struct pp_power_state *ps;
	struct vega10_power_state *vega10_ps;

	ps = hwmgr->request_ps;

	if (ps == NULL)
		return -EINVAL;

	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);

	/* New top gfx clock = golden top value scaled up by 'value' percent. */
	vega10_ps->performance_levels
	[vega10_ps->performance_level_count - 1].gfx_clock =
			golden_sclk_table->dpm_levels
			[golden_sclk_table->count - 1].value *
			value / 100 +
			golden_sclk_table->dpm_levels
			[golden_sclk_table->count - 1].value;

	/* Never exceed the VBIOS overdrive engine clock limit. */
	if (vega10_ps->performance_levels
		[vega10_ps->performance_level_count - 1].gfx_clock >
			hwmgr->platform_descriptor.overdriveLimit.engineClock) {
		vega10_ps->performance_levels
		[vega10_ps->performance_level_count - 1].gfx_clock =
			hwmgr->platform_descriptor.overdriveLimit.engineClock;
		pr_warn("max sclk supported by vbios is %d\n",
				hwmgr->platform_descriptor.overdriveLimit.engineClock);
	}
	return 0;
}

/*
 * vega10_get_mclk_od - report the current mclk overdrive as a percentage
 * over the golden (default) top memory DPM level.
 */
static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
	struct vega10_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mem_table);
	int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
	int golden_value = golden_mclk_table->dpm_levels
			[golden_mclk_table->count - 1].value;

	/* Overdrive delta expressed as percent of the golden top level. */
	value -= golden_value;
	value = DIV_ROUND_UP(value * 100, golden_value);

	return value;
}

/*
 * vega10_set_mclk_od - raise the requested power state's top memory clock
 * by 'value' percent over the golden top level, clamped to the VBIOS
 * overdrive memory clock limit.
 */
static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mem_table);
	struct pp_power_state *ps;
	struct
vega10_power_state *vega10_ps;

	ps = hwmgr->request_ps;

	if (ps == NULL)
		return -EINVAL;

	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);

	/* New top mem clock = golden top value scaled up by 'value' percent. */
	vega10_ps->performance_levels
	[vega10_ps->performance_level_count - 1].mem_clock =
			golden_mclk_table->dpm_levels
			[golden_mclk_table->count - 1].value *
			value / 100 +
			golden_mclk_table->dpm_levels
			[golden_mclk_table->count - 1].value;

	/* Never exceed the VBIOS overdrive memory clock limit. */
	if (vega10_ps->performance_levels
		[vega10_ps->performance_level_count - 1].mem_clock >
			hwmgr->platform_descriptor.overdriveLimit.memoryClock) {
		vega10_ps->performance_levels
		[vega10_ps->performance_level_count - 1].mem_clock =
			hwmgr->platform_descriptor.overdriveLimit.memoryClock;
		pr_warn("max mclk supported by vbios is %d\n",
				hwmgr->platform_descriptor.overdriveLimit.memoryClock);
	}

	return 0;
}

/*
 * vega10_notify_cac_buffer_info - hand the driver-allocated CAC/DRAM-log
 * buffer to the SMC: system virtual address, MC address and size, each
 * address split into high/low 32-bit halves.  Always returns 0; the
 * individual SMC message results are not checked.
 */
static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
					uint32_t virtual_addr_low,
					uint32_t virtual_addr_hi,
					uint32_t mc_addr_low,
					uint32_t mc_addr_hi,
					uint32_t size)
{
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSystemVirtualDramAddrHigh,
					virtual_addr_hi,
					NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSystemVirtualDramAddrLow,
					virtual_addr_low,
					NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramAddrHigh,
					mc_addr_hi,
					NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramAddrLow,
					mc_addr_low,
					NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramSize,
					size,
					NULL);
	return 0;
}

/*
 * vega10_get_thermal_temperature_range - fill 'thermal_data' with the
 * temperature trip points from the SMC pptable (edge, hotspot, HBM),
 * converted to powerplay temperature units.
 */
static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
		struct PP_TemperatureRange *thermal_data)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table =
&(data->smc_state_table.pp_table);

	/* Start from the default SMU7 thermal-with-delay policy, then
	 * override the limits with values from the SMC pptable. */
	memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));

	thermal_data->max = pp_table->TedgeLimit *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->hotspot_crit_max = pp_table->ThotspotLimit *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->mem_crit_max = pp_table->ThbmLimit *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return 0;
}

/*
 * vega10_get_power_profile_mode - print the power-profile table into
 * 'buf' (sysfs pp_power_profile_mode).  The active mode is marked with
 * '*'; the CUSTOM row shows the values cached in the hwmgr backend.
 * Returns the number of bytes written, or -EINVAL when buf is NULL.
 */
static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint32_t i, size = 0;
	/* Per-mode {busy_set_point, FPS, use_rlc_busy, min_active_level},
	 * matching the column order used by vega10_set_power_profile_mode. */
	static const uint8_t profile_mode_setting[6][4] = {{70, 60, 0, 0,},
						{70, 60, 1, 3,},
						{90, 60, 0, 0,},
						{70, 60, 0, 0,},
						{70, 90, 0, 0,},
						{30, 60, 0, 6,},
						};
	static const char *profile_name[7] = {"BOOTUP_DEFAULT",
					"3D_FULL_SCREEN",
					"POWER_SAVING",
					"VIDEO",
					"VR",
					"COMPUTE",
					"CUSTOM"};
	static const char *title[6] = {"NUM",
			"MODE_NAME",
			"BUSY_SET_POINT",
			"FPS",
			"USE_RLC_BUSY",
			"MIN_ACTIVE_LEVEL"};

	if (!buf)
		return -EINVAL;

	size += sprintf(buf + size, "%s %16s %s %s %s %s\n",title[0],
			title[1], title[2], title[3], title[4], title[5]);

	/* One row per fixed profile; CUSTOM is printed separately below. */
	for (i = 0; i < PP_SMC_POWER_PROFILE_CUSTOM; i++)
		size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n",
			i, profile_name[i], (i == hwmgr->power_profile_mode) ?
"*" : " ",
			profile_mode_setting[i][0], profile_mode_setting[i][1],
			profile_mode_setting[i][2], profile_mode_setting[i][3]);
	/* Final row: the CUSTOM profile, using the cached user values. */
	size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n", i,
			profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
			data->custom_profile_mode[0], data->custom_profile_mode[1],
			data->custom_profile_mode[2], data->custom_profile_mode[3]);
	return size;
}

/*
 * vega10_set_power_profile_mode - select a power profile (sysfs
 * pp_power_profile_mode).  'input' carries 'size' user parameters
 * followed by the requested mode at input[size].  For CUSTOM, the four
 * parameters (busy_set_point, FPS, use_rlc_busy, min_active_level) are
 * cached and sent to the SMC; size==0 re-applies a previously cached
 * CUSTOM profile.  Finally the workload mask for the chosen mode is set.
 */
static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint8_t busy_set_point;
	uint8_t FPS;
	uint8_t use_rlc_busy;
	uint8_t min_active_level;
	uint32_t power_profile_mode = input[size];

	if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		if (size != 0 && size != 4)
			return -EINVAL;

		/* If size = 0 and the CUSTOM profile has been set already
		 * then just apply the profile. The copy stored in the hwmgr
		 * is zeroed out on init
		 */
		if (size == 0) {
			if (data->custom_profile_mode[0] != 0)
				goto out;
			else
				return -EINVAL;
		}

		data->custom_profile_mode[0] = busy_set_point = input[0];
		data->custom_profile_mode[1] = FPS = input[1];
		data->custom_profile_mode[2] = use_rlc_busy = input[2];
		data->custom_profile_mode[3] = min_active_level = input[3];
		/* Pack the four byte-wide parameters into one dword. */
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetCustomGfxDpmParameters,
					busy_set_point | FPS<<8 |
					use_rlc_busy << 16 | min_active_level<<24,
					NULL);
	}

out:
	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
						1 << power_profile_mode,
						NULL);
	hwmgr->power_profile_mode = power_profile_mode;

	return 0;
}


/*
 * vega10_check_clk_voltage_valid - validate a user overdrive edit.
 * The voltage must lie within [min_vddc, max_vddc] and the clock must be
 * between the golden bottom DPM level and the VBIOS overdrive limit for
 * the table being edited.  Returns true when the pair is acceptable.
 */
static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
		enum PP_OD_DPM_TABLE_COMMAND type,
		uint32_t clk,
		uint32_t voltage)
{
	struct vega10_hwmgr
*data = hwmgr->backend;
	struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct vega10_single_dpm_table *golden_table;

	if (voltage < odn_table->min_vddc || voltage > odn_table->max_vddc) {
		pr_info("OD voltage is out of range [%d - %d] mV\n", odn_table->min_vddc, odn_table->max_vddc);
		return false;
	}

	if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
		golden_table = &(data->golden_dpm_table.gfx_table);
		/* Engine clock must stay between the golden bottom level and
		 * the VBIOS overdrive limit (values in 10 kHz, printed as MHz). */
		if (golden_table->dpm_levels[0].value > clk ||
			hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
			pr_info("OD engine clock is out of range [%d - %d] MHz\n",
				golden_table->dpm_levels[0].value/100,
				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
			return false;
		}
	} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
		golden_table = &(data->golden_dpm_table.mem_table);
		if (golden_table->dpm_levels[0].value > clk ||
			hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
			pr_info("OD memory clock is out of range [%d - %d] MHz\n",
				golden_table->dpm_levels[0].value/100,
				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
			return false;
		}
	} else {
		/* Only SCLK/MCLK table edits carry clock values to validate. */
		return false;
	}

	return true;
}

/*
 * vega10_odn_update_power_state - propagate overdrive edits of the top
 * gfx/soc/mem DPM levels into the requested power state and into the
 * last (highest) entry of the power-state array.
 */
static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct pp_power_state *ps = hwmgr->request_ps;
	struct vega10_power_state *vega10_ps;
	struct vega10_single_dpm_table *gfx_dpm_table =
		&data->dpm_table.gfx_table;
	struct vega10_single_dpm_table *soc_dpm_table =
		&data->dpm_table.soc_table;
	struct vega10_single_dpm_table *mem_dpm_table =
		&data->dpm_table.mem_table;
	int max_level;

	if (!ps)
		return;

	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
	max_level = vega10_ps->performance_level_count - 1;

	/* Sync the requested state's top level with the edited DPM tables. */
	if
(vega10_ps->performance_levels[max_level].gfx_clock !=
	    gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value)
		vega10_ps->performance_levels[max_level].gfx_clock =
			gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value;

	if (vega10_ps->performance_levels[max_level].soc_clock !=
	    soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value)
		vega10_ps->performance_levels[max_level].soc_clock =
			soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value;

	if (vega10_ps->performance_levels[max_level].mem_clock !=
	    mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value)
		vega10_ps->performance_levels[max_level].mem_clock =
			mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value;

	if (!hwmgr->ps)
		return;

	/* Also refresh the last (highest) entry of the power-state array;
	 * entries are hwmgr->ps_size bytes apart. */
	ps = (struct pp_power_state *)((unsigned long)(hwmgr->ps) + hwmgr->ps_size * (hwmgr->num_ps - 1));
	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
	max_level = vega10_ps->performance_level_count - 1;

	if (vega10_ps->performance_levels[max_level].gfx_clock !=
	    gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value)
		vega10_ps->performance_levels[max_level].gfx_clock =
			gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value;

	if (vega10_ps->performance_levels[max_level].soc_clock !=
	    soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value)
		vega10_ps->performance_levels[max_level].soc_clock =
			soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value;

	if (vega10_ps->performance_levels[max_level].mem_clock !=
	    mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value)
		vega10_ps->performance_levels[max_level].mem_clock =
			mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value;
}

/*
 * vega10_odn_update_soc_table - after an overdrive SCLK/MCLK table edit,
 * keep the voltage lookup table and the socclk dependency table
 * consistent with the edited values.
 */
static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
					enum PP_OD_DPM_TABLE_COMMAND type)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information
*table_info = hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = table_info->vdd_dep_on_socclk;
	/* Starts as the golden mem table (for iteration bounds below),
	 * later re-pointed at the live soc table. */
	struct vega10_single_dpm_table *dpm_table = &data->golden_dpm_table.mem_table;

	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_on_socclk =
			&data->odn_dpm_table.vdd_dep_on_socclk;
	struct vega10_odn_vddc_lookup_table *od_vddc_lookup_table = &data->odn_dpm_table.vddc_lookup_table;

	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep;
	uint8_t i, j;

	if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
		/* SCLK edit: mirror edited vddc values into the lookup table. */
		podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
		for (i = 0; i < podn_vdd_dep->count; i++)
			od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
	} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
		podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
		/* Map each edited mclk vddc to a lookup-table index; if the
		 * voltage exceeds every entry, raise the last entry instead. */
		for (i = 0; i < dpm_table->count; i++) {
			for (j = 0; j < od_vddc_lookup_table->count; j++) {
				if (od_vddc_lookup_table->entries[j].us_vdd >
					podn_vdd_dep->entries[i].vddc)
					break;
			}
			if (j == od_vddc_lookup_table->count) {
				j = od_vddc_lookup_table->count - 1;
				od_vddc_lookup_table->entries[j].us_vdd =
					podn_vdd_dep->entries[i].vddc;
				data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
			}
			podn_vdd_dep->entries[i].vddInd = j;
		}
		dpm_table = &data->dpm_table.soc_table;
		/* Rebuild the socclk OD table: levels sharing the top mclk
		 * voltage but running slower are pulled up to the top mclk. */
		for (i = 0; i < dep_table->count; i++) {
			if (dep_table->entries[i].vddInd == podn_vdd_dep->entries[podn_vdd_dep->count-1].vddInd &&
			    dep_table->entries[i].clk < podn_vdd_dep->entries[podn_vdd_dep->count-1].clk) {
				data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
				for (; (i < dep_table->count) &&
				       (dep_table->entries[i].clk < podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk); i++) {
					podn_vdd_dep_on_socclk->entries[i].clk = podn_vdd_dep->entries[podn_vdd_dep->count-1].clk;
					dpm_table->dpm_levels[i].value =
podn_vdd_dep_on_socclk->entries[i].clk;
				}
				break;
			} else {
				/* Unaffected level: copy straight from the
				 * pptable dependency entry. */
				dpm_table->dpm_levels[i].value = dep_table->entries[i].clk;
				podn_vdd_dep_on_socclk->entries[i].vddc = dep_table->entries[i].vddc;
				podn_vdd_dep_on_socclk->entries[i].vddInd = dep_table->entries[i].vddInd;
				podn_vdd_dep_on_socclk->entries[i].clk = dep_table->entries[i].clk;
			}
		}
		/* Top soc level must be at least as fast as the edited top
		 * mclk level ... */
		if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk <
		    podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk) {
			data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
			podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk =
				podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk;
			dpm_table->dpm_levels[podn_vdd_dep_on_socclk->count - 1].value =
				podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk;
		}
		/* ... and use at least as high a voltage index. */
		if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd <
		    podn_vdd_dep->entries[podn_vdd_dep->count - 1].vddInd) {
			data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
			podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd =
				podn_vdd_dep->entries[podn_vdd_dep->count - 1].vddInd;
		}
	}
	vega10_odn_update_power_state(hwmgr);
}

/*
 * vega10_odn_edit_dpm_table - sysfs pp_od_clk_voltage backend: edit,
 * restore or commit the overdrive clock/voltage tables.  For edits,
 * 'input' holds 'size' longs as (level, clock MHz, voltage mV) triplets.
 * Returns 0 on success or a negative error code.
 */
static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
					enum PP_OD_DPM_TABLE_COMMAND type,
					long *input, uint32_t size)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_table;
	struct vega10_single_dpm_table *dpm_table;

	uint32_t input_clk;
	uint32_t input_vol;
	uint32_t input_level;
	uint32_t i;

	PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
				return -EINVAL);

	if (!hwmgr->od_enabled) {
		pr_info("OverDrive feature not enabled\n");
		return -EINVAL;
	}

	if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
		dpm_table = &data->dpm_table.gfx_table;
podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_sclk;
		data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
	} else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
		dpm_table = &data->dpm_table.mem_table;
		podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_mclk;
		data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
	} else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
		memcpy(&(data->dpm_table), &(data->golden_dpm_table), sizeof(struct vega10_dpm_table));
		vega10_odn_initial_default_setting(hwmgr);
		vega10_odn_update_power_state(hwmgr);
		/* force to update all clock tables */
		data->need_update_dpm_table = DPMTABLE_UPDATE_SCLK |
					      DPMTABLE_UPDATE_MCLK |
					      DPMTABLE_UPDATE_SOCCLK;
		return 0;
	} else if (PP_OD_COMMIT_DPM_TABLE == type) {
		vega10_check_dpm_table_updated(hwmgr);
		return 0;
	} else {
		return -EINVAL;
	}

	/* Apply each (level, clk, mV) triplet after range-checking it.
	 * A malformed triplet or out-of-range level aborts quietly (0). */
	for (i = 0; i < size; i += 3) {
		if (i + 3 > size || input[i] >= podn_vdd_dep_table->count) {
			pr_info("invalid clock voltage input\n");
			return 0;
		}
		input_level = input[i];
		input_clk = input[i+1] * 100;	/* MHz -> 10 kHz units */
		input_vol = input[i+2];

		if (vega10_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
			dpm_table->dpm_levels[input_level].value = input_clk;
			podn_vdd_dep_table->entries[input_level].clk = input_clk;
			podn_vdd_dep_table->entries[input_level].vddc = input_vol;
		} else {
			return -EINVAL;
		}
	}
	vega10_odn_update_soc_table(hwmgr, type);
	return 0;
}

/*
 * vega10_set_mp1_state - notify the SMC of an MP1 state transition.
 * Only UNLOAD requires a message (PrepareMp1ForUnload); the other
 * states are a no-op here.
 */
static int vega10_set_mp1_state(struct pp_hwmgr *hwmgr,
				enum pp_mp1_state mp1_state)
{
	uint16_t msg;
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_UNLOAD:
		msg = PPSMC_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_SHUTDOWN:
	case PP_MP1_STATE_RESET:
	case PP_MP1_STATE_NONE:
	default:
		return 0;
	}

	PP_ASSERT_WITH_CODE((ret =
smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
			   "[PrepareMp1] Failed!",
			   return ret);

	return 0;
}

/*
 * vega10_get_performance_level - report the core/memory clock of
 * performance level 'index' of the given power state.  'designation' is
 * accepted but not used here.  Returns -EINVAL on NULL arguments.
 */
static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
				PHM_PerformanceLevelDesignation designation, uint32_t index,
				PHM_PerformanceLevel *level)
{
	const struct vega10_power_state *ps;
	uint32_t i;

	if (level == NULL || hwmgr == NULL || state == NULL)
		return -EINVAL;

	ps = cast_const_phw_vega10_power_state(state);

	/* Clamp the requested index to the last performance level. */
	i = index > ps->performance_level_count - 1 ?
	    ps->performance_level_count - 1 : index;

	level->coreClock = ps->performance_levels[i].gfx_clock;
	level->memory_clock = ps->performance_levels[i].mem_clock;

	return 0;
}

/*
 * vega10_disable_power_features_for_compute_performance - toggle the
 * power-saving features (ULV and the gfx/soc/lclk/dcef deep-sleep
 * clocks) off for compute workloads (disable=true) or back on
 * (disable=false), then mirror the new state in the cached flags.
 */
static int vega10_disable_power_features_for_compute_performance(struct pp_hwmgr *hwmgr, bool disable)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint32_t feature_mask = 0;

	if (disable) {
		/* Collect only the features that are currently enabled. */
		feature_mask |= data->smu_features[GNLD_ULV].enabled ?
			data->smu_features[GNLD_ULV].smu_feature_bitmap : 0;
		feature_mask |= data->smu_features[GNLD_DS_GFXCLK].enabled ?
			data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap : 0;
		feature_mask |= data->smu_features[GNLD_DS_SOCCLK].enabled ?
			data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap : 0;
		feature_mask |= data->smu_features[GNLD_DS_LCLK].enabled ?
			data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap : 0;
		feature_mask |= data->smu_features[GNLD_DS_DCEFCLK].enabled ?
			data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap : 0;
	} else {
		/* Collect only the features that are currently disabled. */
		feature_mask |= (!data->smu_features[GNLD_ULV].enabled) ?
			data->smu_features[GNLD_ULV].smu_feature_bitmap : 0;
		feature_mask |= (!data->smu_features[GNLD_DS_GFXCLK].enabled) ?
			data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap : 0;
		feature_mask |= (!data->smu_features[GNLD_DS_SOCCLK].enabled) ?
data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap : 0;
		feature_mask |= (!data->smu_features[GNLD_DS_LCLK].enabled) ?
			data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap : 0;
		feature_mask |= (!data->smu_features[GNLD_DS_DCEFCLK].enabled) ?
			data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap : 0;
	}

	if (feature_mask)
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				!disable, feature_mask),
				"enable/disable power features for compute performance Failed!",
				return -EINVAL);

	/* Mirror the new hardware state in the cached feature flags. */
	if (disable) {
		data->smu_features[GNLD_ULV].enabled = false;
		data->smu_features[GNLD_DS_GFXCLK].enabled = false;
		data->smu_features[GNLD_DS_SOCCLK].enabled = false;
		data->smu_features[GNLD_DS_LCLK].enabled = false;
		data->smu_features[GNLD_DS_DCEFCLK].enabled = false;
	} else {
		data->smu_features[GNLD_ULV].enabled = true;
		data->smu_features[GNLD_DS_GFXCLK].enabled = true;
		data->smu_features[GNLD_DS_SOCCLK].enabled = true;
		data->smu_features[GNLD_DS_LCLK].enabled = true;
		data->smu_features[GNLD_DS_DCEFCLK].enabled = true;
	}

	return 0;

}

/* Vega10 implementation of the pp_hwmgr_func dispatch table. */
static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
	.backend_init = vega10_hwmgr_backend_init,
	.backend_fini = vega10_hwmgr_backend_fini,
	.asic_setup = vega10_setup_asic_task,
	.dynamic_state_management_enable = vega10_enable_dpm_tasks,
	.dynamic_state_management_disable = vega10_disable_dpm_tasks,
	.get_num_of_pp_table_entries =
			vega10_get_number_of_powerplay_table_entries,
	.get_power_state_size = vega10_get_power_state_size,
	.get_pp_table_entry = vega10_get_pp_table_entry,
	.patch_boot_state = vega10_patch_boot_state,
	.apply_state_adjust_rules = vega10_apply_state_adjust_rules,
	.power_state_set = vega10_set_power_state_tasks,
	.get_sclk = vega10_dpm_get_sclk,
	.get_mclk = vega10_dpm_get_mclk,
.notify_smc_display_config_after_ps_adjustment =
			vega10_notify_smc_display_config_after_ps_adjustment,
	.force_dpm_level = vega10_dpm_force_dpm_level,
	.stop_thermal_controller = vega10_thermal_stop_thermal_controller,
	.get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
	.set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent,
	.reset_fan_speed_to_default =
			vega10_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm = vega10_fan_ctrl_set_fan_speed_rpm,
	.uninitialize_thermal_controller =
			vega10_thermal_ctrl_uninitialize_thermal_controller,
	.set_fan_control_mode = vega10_set_fan_control_mode,
	.get_fan_control_mode = vega10_get_fan_control_mode,
	.read_sensor = vega10_read_sensor,
	.get_dal_power_level = vega10_get_dal_power_level,
	.get_clock_by_type_with_latency = vega10_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = vega10_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = vega10_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = vega10_display_clock_voltage_request,
	.force_clock_level = vega10_force_clock_level,
	.print_clock_levels = vega10_print_clock_levels,
	.display_config_changed = vega10_display_configuration_changed_task,
	.powergate_uvd = vega10_power_gate_uvd,
	.powergate_vce = vega10_power_gate_vce,
	.check_states_equal = vega10_check_states_equal,
	.check_smc_update_required_for_display_configuration =
			vega10_check_smc_update_required_for_display_configuration,
	.power_off_asic = vega10_power_off_asic,
	.disable_smc_firmware_ctf = vega10_thermal_disable_alert,
	.get_sclk_od = vega10_get_sclk_od,
	.set_sclk_od = vega10_set_sclk_od,
	.get_mclk_od = vega10_get_mclk_od,
	.set_mclk_od = vega10_set_mclk_od,
	.avfs_control
= vega10_avfs_enable,
	.notify_cac_buffer_info = vega10_notify_cac_buffer_info,
	.get_thermal_temperature_range = vega10_get_thermal_temperature_range,
	.register_irq_handlers = smu9_register_irq_handlers,
	.start_thermal_controller = vega10_start_thermal_controller,
	.get_power_profile_mode = vega10_get_power_profile_mode,
	.set_power_profile_mode = vega10_set_power_profile_mode,
	.set_power_limit = vega10_set_power_limit,
	.odn_edit_dpm_table = vega10_odn_edit_dpm_table,
	.get_performance_level = vega10_get_performance_level,
	.get_asic_baco_capability = smu9_baco_get_capability,
	.get_asic_baco_state = smu9_baco_get_state,
	.set_asic_baco_state = vega10_baco_set_state,
	.enable_mgpu_fan_boost = vega10_enable_mgpu_fan_boost,
	.get_ppfeature_status = vega10_get_ppfeature_status,
	.set_ppfeature_status = vega10_set_ppfeature_status,
	.set_mp1_state = vega10_set_mp1_state,
	.disable_power_features_for_compute_performance =
			vega10_disable_power_features_for_compute_performance,
};

/*
 * vega10_hwmgr_init - wire up the Vega10 hwmgr and pptable function
 * tables.  When running as a passthrough (virtualized) device it also
 * calls vega10_baco_set_cap() — presumably to set the BACO capability
 * for that configuration; confirm against vega10_baco.c.
 */
int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
	hwmgr->pptable_func = &vega10_pptable_funcs;
	if (amdgpu_passthrough(adev))
		return vega10_baco_set_cap(hwmgr);

	return 0;
}