1 /* 2 * Copyright 2021 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #define SWSMU_CODE_LAYER_L2 25 26 #include <linux/firmware.h> 27 #include <linux/pci.h> 28 #include <linux/i2c.h> 29 #include "amdgpu.h" 30 #include "amdgpu_smu.h" 31 #include "atomfirmware.h" 32 #include "amdgpu_atomfirmware.h" 33 #include "amdgpu_atombios.h" 34 #include "smu_v13_0.h" 35 #include "smu13_driver_if_v13_0_7.h" 36 #include "soc15_common.h" 37 #include "atom.h" 38 #include "smu_v13_0_7_ppt.h" 39 #include "smu_v13_0_7_pptable.h" 40 #include "smu_v13_0_7_ppsmc.h" 41 #include "nbio/nbio_4_3_0_offset.h" 42 #include "nbio/nbio_4_3_0_sh_mask.h" 43 #include "mp/mp_13_0_0_offset.h" 44 #include "mp/mp_13_0_0_sh_mask.h" 45 46 #include "asic_reg/mp/mp_13_0_0_sh_mask.h" 47 #include "smu_cmn.h" 48 #include "amdgpu_ras.h" 49 50 /* 51 * DO NOT use these for err/warn/info/debug messages. 
52 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 53 * They are more MGPU friendly. 54 */ 55 #undef pr_err 56 #undef pr_warn 57 #undef pr_info 58 #undef pr_debug 59 60 #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) 61 62 #define FEATURE_MASK(feature) (1ULL << feature) 63 #define SMC_DPM_FEATURE ( \ 64 FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \ 65 FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \ 66 FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \ 67 FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ 68 FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \ 69 FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)) 70 71 #define smnMP1_FIRMWARE_FLAGS_SMU_13_0_7 0x3b10028 72 73 #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000 74 75 static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] = { 76 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), 77 MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), 78 MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1), 79 MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0), 80 MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0), 81 MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0), 82 MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0), 83 MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1), 84 MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1), 85 MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1), 86 MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1), 87 MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1), 88 MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1), 89 MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1), 90 MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), 91 MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), 92 MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), 93 MSG_MAP(SetToolsDramAddrHigh, 
PPSMC_MSG_SetToolsDramAddrHigh, 0), 94 MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), 95 MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1), 96 MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), 97 MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0), 98 MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0), 99 MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0), 100 MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0), 101 MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1), 102 MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), 103 MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1), 104 MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0), 105 MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), 106 MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), 107 MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), 108 MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0), 109 MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0), 110 MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0), 111 MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0), 112 MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1), 113 MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0), 114 MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0), 115 MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0), 116 MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), 117 MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0), 118 MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0), 119 MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0), 120 MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0), 121 MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0), 122 MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0), 123 MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0), 124 MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0), 125 MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0), 
126 MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0), 127 MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), 128 }; 129 130 static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = { 131 CLK_MAP(GFXCLK, PPCLK_GFXCLK), 132 CLK_MAP(SCLK, PPCLK_GFXCLK), 133 CLK_MAP(SOCCLK, PPCLK_SOCCLK), 134 CLK_MAP(FCLK, PPCLK_FCLK), 135 CLK_MAP(UCLK, PPCLK_UCLK), 136 CLK_MAP(MCLK, PPCLK_UCLK), 137 CLK_MAP(VCLK, PPCLK_VCLK_0), 138 CLK_MAP(VCLK1, PPCLK_VCLK_1), 139 CLK_MAP(DCLK, PPCLK_DCLK_0), 140 CLK_MAP(DCLK1, PPCLK_DCLK_1), 141 }; 142 143 static struct cmn2asic_mapping smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] = { 144 FEA_MAP(FW_DATA_READ), 145 FEA_MAP(DPM_GFXCLK), 146 FEA_MAP(DPM_GFX_POWER_OPTIMIZER), 147 FEA_MAP(DPM_UCLK), 148 FEA_MAP(DPM_FCLK), 149 FEA_MAP(DPM_SOCCLK), 150 FEA_MAP(DPM_MP0CLK), 151 FEA_MAP(DPM_LINK), 152 FEA_MAP(DPM_DCN), 153 FEA_MAP(VMEMP_SCALING), 154 FEA_MAP(VDDIO_MEM_SCALING), 155 FEA_MAP(DS_GFXCLK), 156 FEA_MAP(DS_SOCCLK), 157 FEA_MAP(DS_FCLK), 158 FEA_MAP(DS_LCLK), 159 FEA_MAP(DS_DCFCLK), 160 FEA_MAP(DS_UCLK), 161 FEA_MAP(GFX_ULV), 162 FEA_MAP(FW_DSTATE), 163 FEA_MAP(GFXOFF), 164 FEA_MAP(BACO), 165 FEA_MAP(MM_DPM), 166 FEA_MAP(SOC_MPCLK_DS), 167 FEA_MAP(BACO_MPCLK_DS), 168 FEA_MAP(THROTTLERS), 169 FEA_MAP(SMARTSHIFT), 170 FEA_MAP(GTHR), 171 FEA_MAP(ACDC), 172 FEA_MAP(VR0HOT), 173 FEA_MAP(FW_CTF), 174 FEA_MAP(FAN_CONTROL), 175 FEA_MAP(GFX_DCS), 176 FEA_MAP(GFX_READ_MARGIN), 177 FEA_MAP(LED_DISPLAY), 178 FEA_MAP(GFXCLK_SPREAD_SPECTRUM), 179 FEA_MAP(OUT_OF_BAND_MONITOR), 180 FEA_MAP(OPTIMIZED_VMIN), 181 FEA_MAP(GFX_IMU), 182 FEA_MAP(BOOT_TIME_CAL), 183 FEA_MAP(GFX_PCC_DFLL), 184 FEA_MAP(SOC_CG), 185 FEA_MAP(DF_CSTATE), 186 FEA_MAP(GFX_EDC), 187 FEA_MAP(BOOT_POWER_OPT), 188 FEA_MAP(CLOCK_POWER_DOWN_BYPASS), 189 FEA_MAP(DS_VCN), 190 FEA_MAP(BACO_CG), 191 FEA_MAP(MEM_TEMP_READ), 192 FEA_MAP(ATHUB_MMHUB_PG), 193 FEA_MAP(SOC_PCC), 194 [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, 195 [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, 196 
[SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT}, 197 }; 198 199 static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = { 200 TAB_MAP(PPTABLE), 201 TAB_MAP(WATERMARKS), 202 TAB_MAP(AVFS_PSM_DEBUG), 203 TAB_MAP(PMSTATUSLOG), 204 TAB_MAP(SMU_METRICS), 205 TAB_MAP(DRIVER_SMU_CONFIG), 206 TAB_MAP(ACTIVITY_MONITOR_COEFF), 207 [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE}, 208 }; 209 210 static struct cmn2asic_mapping smu_v13_0_7_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { 211 PWR_MAP(AC), 212 PWR_MAP(DC), 213 }; 214 215 static struct cmn2asic_mapping smu_v13_0_7_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { 216 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT), 217 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), 218 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT), 219 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), 220 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), 221 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), 222 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), 223 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT), 224 }; 225 226 static const uint8_t smu_v13_0_7_throttler_map[] = { 227 [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT), 228 [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT), 229 [THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT), 230 [THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT), 231 [THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT), 232 [THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT), 233 [THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT), 234 [THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT), 235 [THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT), 236 [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT), 237 [THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT), 238 
[THROTTLER_TEMP_VR_MEM0_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT), 239 [THROTTLER_TEMP_VR_MEM1_BIT] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT), 240 [THROTTLER_TEMP_LIQUID0_BIT] = (SMU_THROTTLER_TEMP_LIQUID0_BIT), 241 [THROTTLER_TEMP_LIQUID1_BIT] = (SMU_THROTTLER_TEMP_LIQUID1_BIT), 242 [THROTTLER_GFX_APCC_PLUS_BIT] = (SMU_THROTTLER_APCC_BIT), 243 [THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT), 244 }; 245 246 static int 247 smu_v13_0_7_get_allowed_feature_mask(struct smu_context *smu, 248 uint32_t *feature_mask, uint32_t num) 249 { 250 struct amdgpu_device *adev = smu->adev; 251 252 if (num > 2) 253 return -EINVAL; 254 255 memset(feature_mask, 0, sizeof(uint32_t) * num); 256 257 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DATA_READ_BIT); 258 259 if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) { 260 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT); 261 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT); 262 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT); 263 } 264 265 if (adev->pm.pp_feature & PP_GFXOFF_MASK) 266 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT); 267 268 if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) { 269 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT); 270 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_FCLK_BIT); 271 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT); 272 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT); 273 } 274 275 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT); 276 277 if (adev->pm.pp_feature & PP_PCIE_DPM_MASK) 278 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT); 279 280 if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK) 281 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT); 282 283 if (adev->pm.pp_feature & PP_ULV_MASK) 284 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT); 285 286 *(uint64_t *)feature_mask |= 
FEATURE_MASK(FEATURE_DS_LCLK_BIT); 287 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT); 288 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MM_DPM_BIT); 289 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_VCN_BIT); 290 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_FCLK_BIT); 291 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DF_CSTATE_BIT); 292 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_THROTTLERS_BIT); 293 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VR0HOT_BIT); 294 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_CTF_BIT); 295 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FAN_CONTROL_BIT); 296 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_SOCCLK_BIT); 297 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXCLK_SPREAD_SPECTRUM_BIT); 298 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MEM_TEMP_READ_BIT); 299 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DSTATE_BIT); 300 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_MPCLK_DS_BIT); 301 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_MPCLK_DS_BIT); 302 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_PCC_DFLL_BIT); 303 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT); 304 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_BIT); 305 306 if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK) 307 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCN_BIT); 308 309 if ((adev->pg_flags & AMD_PG_SUPPORT_ATHUB) && 310 (adev->pg_flags & AMD_PG_SUPPORT_MMHUB)) 311 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT); 312 313 return 0; 314 } 315 316 static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu) 317 { 318 struct smu_table_context *table_context = &smu->smu_table; 319 struct smu_13_0_7_powerplay_table *powerplay_table = 320 table_context->power_play_table; 321 struct smu_baco_context *smu_baco = &smu->smu_baco; 322 PPTable_t *smc_pptable = table_context->driver_pptable; 323 BoardTable_t 
*BoardTable = &smc_pptable->BoardTable; 324 325 if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC) 326 smu->dc_controlled_by_gpio = true; 327 328 if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO || 329 powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO) 330 smu_baco->platform_support = true; 331 332 if (smu_baco->platform_support && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled)) 333 smu_baco->maco_support = true; 334 335 table_context->thermal_controller_type = 336 powerplay_table->thermal_controller_type; 337 338 /* 339 * Instead of having its own buffer space and get overdrive_table copied, 340 * smu->od_settings just points to the actual overdrive_table 341 */ 342 smu->od_settings = &powerplay_table->overdrive_table; 343 344 return 0; 345 } 346 347 static int smu_v13_0_7_store_powerplay_table(struct smu_context *smu) 348 { 349 struct smu_table_context *table_context = &smu->smu_table; 350 struct smu_13_0_7_powerplay_table *powerplay_table = 351 table_context->power_play_table; 352 struct amdgpu_device *adev = smu->adev; 353 354 if (adev->pdev->device == 0x51) 355 powerplay_table->smc_pptable.SkuTable.DebugOverrides |= 0x00000080; 356 357 memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable, 358 sizeof(PPTable_t)); 359 360 return 0; 361 } 362 363 static int smu_v13_0_7_check_fw_status(struct smu_context *smu) 364 { 365 struct amdgpu_device *adev = smu->adev; 366 uint32_t mp1_fw_flags; 367 368 mp1_fw_flags = RREG32_PCIE(MP1_Public | 369 (smnMP1_FIRMWARE_FLAGS_SMU_13_0_7 & 0xffffffff)); 370 371 if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> 372 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) 373 return 0; 374 375 return -EIO; 376 } 377 378 #ifndef atom_smc_dpm_info_table_13_0_7 379 struct atom_smc_dpm_info_table_13_0_7 380 { 381 struct atom_common_table_header table_header; 382 BoardTable_t BoardTable; 383 }; 384 #endif 385 386 static int 
smu_v13_0_7_append_powerplay_table(struct smu_context *smu) 387 { 388 struct smu_table_context *table_context = &smu->smu_table; 389 390 PPTable_t *smc_pptable = table_context->driver_pptable; 391 392 struct atom_smc_dpm_info_table_13_0_7 *smc_dpm_table; 393 394 BoardTable_t *BoardTable = &smc_pptable->BoardTable; 395 396 int index, ret; 397 398 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 399 smc_dpm_info); 400 401 ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL, 402 (uint8_t **)&smc_dpm_table); 403 if (ret) 404 return ret; 405 406 memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t)); 407 408 return 0; 409 } 410 411 static int smu_v13_0_7_get_pptable_from_pmfw(struct smu_context *smu, 412 void **table, 413 uint32_t *size) 414 { 415 struct smu_table_context *smu_table = &smu->smu_table; 416 void *combo_pptable = smu_table->combo_pptable; 417 int ret = 0; 418 419 ret = smu_cmn_get_combo_pptable(smu); 420 if (ret) 421 return ret; 422 423 *table = combo_pptable; 424 *size = sizeof(struct smu_13_0_7_powerplay_table); 425 426 return 0; 427 } 428 429 static int smu_v13_0_7_setup_pptable(struct smu_context *smu) 430 { 431 struct smu_table_context *smu_table = &smu->smu_table; 432 struct amdgpu_device *adev = smu->adev; 433 int ret = 0; 434 435 /* 436 * With SCPM enabled, the pptable used will be signed. It cannot 437 * be used directly by driver. To get the raw pptable, we need to 438 * rely on the combo pptable(and its revelant SMU message). 439 */ 440 ret = smu_v13_0_7_get_pptable_from_pmfw(smu, 441 &smu_table->power_play_table, 442 &smu_table->power_play_table_size); 443 if (ret) 444 return ret; 445 446 ret = smu_v13_0_7_store_powerplay_table(smu); 447 if (ret) 448 return ret; 449 450 /* 451 * With SCPM enabled, the operation below will be handled 452 * by PSP. Driver involvment is unnecessary and useless. 
453 */ 454 if (!adev->scpm_enabled) { 455 ret = smu_v13_0_7_append_powerplay_table(smu); 456 if (ret) 457 return ret; 458 } 459 460 ret = smu_v13_0_7_check_powerplay_table(smu); 461 if (ret) 462 return ret; 463 464 return ret; 465 } 466 467 static int smu_v13_0_7_tables_init(struct smu_context *smu) 468 { 469 struct smu_table_context *smu_table = &smu->smu_table; 470 struct smu_table *tables = smu_table->tables; 471 472 SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t), 473 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 474 475 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), 476 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 477 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t), 478 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 479 SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t), 480 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 481 SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t), 482 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 483 SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE, 484 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 485 SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, 486 sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE, 487 AMDGPU_GEM_DOMAIN_VRAM); 488 SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE, 489 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 490 491 smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL); 492 if (!smu_table->metrics_table) 493 goto err0_out; 494 smu_table->metrics_time = 0; 495 496 smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3); 497 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 498 if (!smu_table->gpu_metrics_table) 499 goto err1_out; 500 501 smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); 502 if (!smu_table->watermarks_table) 503 goto err2_out; 504 505 return 0; 506 507 err2_out: 508 kfree(smu_table->gpu_metrics_table); 509 
err1_out: 510 kfree(smu_table->metrics_table); 511 err0_out: 512 return -ENOMEM; 513 } 514 515 static int smu_v13_0_7_allocate_dpm_context(struct smu_context *smu) 516 { 517 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 518 519 smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context), 520 GFP_KERNEL); 521 if (!smu_dpm->dpm_context) 522 return -ENOMEM; 523 524 smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context); 525 526 return 0; 527 } 528 529 static int smu_v13_0_7_init_smc_tables(struct smu_context *smu) 530 { 531 int ret = 0; 532 533 ret = smu_v13_0_7_tables_init(smu); 534 if (ret) 535 return ret; 536 537 ret = smu_v13_0_7_allocate_dpm_context(smu); 538 if (ret) 539 return ret; 540 541 return smu_v13_0_init_smc_tables(smu); 542 } 543 544 static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu) 545 { 546 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; 547 PPTable_t *driver_ppt = smu->smu_table.driver_pptable; 548 SkuTable_t *skutable = &driver_ppt->SkuTable; 549 struct smu_13_0_dpm_table *dpm_table; 550 struct smu_13_0_pcie_table *pcie_table; 551 uint32_t link_level; 552 int ret = 0; 553 554 /* socclk dpm table setup */ 555 dpm_table = &dpm_context->dpm_tables.soc_table; 556 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { 557 ret = smu_v13_0_set_single_dpm_table(smu, 558 SMU_SOCCLK, 559 dpm_table); 560 if (ret) 561 return ret; 562 } else { 563 dpm_table->count = 1; 564 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100; 565 dpm_table->dpm_levels[0].enabled = true; 566 dpm_table->min = dpm_table->dpm_levels[0].value; 567 dpm_table->max = dpm_table->dpm_levels[0].value; 568 } 569 570 /* gfxclk dpm table setup */ 571 dpm_table = &dpm_context->dpm_tables.gfx_table; 572 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) { 573 ret = smu_v13_0_set_single_dpm_table(smu, 574 SMU_GFXCLK, 575 dpm_table); 576 if (ret) 577 return ret; 578 579 if 
(skutable->DriverReportedClocks.GameClockAc && 580 (dpm_table->dpm_levels[dpm_table->count - 1].value > 581 skutable->DriverReportedClocks.GameClockAc)) { 582 dpm_table->dpm_levels[dpm_table->count - 1].value = 583 skutable->DriverReportedClocks.GameClockAc; 584 dpm_table->max = skutable->DriverReportedClocks.GameClockAc; 585 } 586 } else { 587 dpm_table->count = 1; 588 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100; 589 dpm_table->dpm_levels[0].enabled = true; 590 dpm_table->min = dpm_table->dpm_levels[0].value; 591 dpm_table->max = dpm_table->dpm_levels[0].value; 592 } 593 594 /* uclk dpm table setup */ 595 dpm_table = &dpm_context->dpm_tables.uclk_table; 596 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { 597 ret = smu_v13_0_set_single_dpm_table(smu, 598 SMU_UCLK, 599 dpm_table); 600 if (ret) 601 return ret; 602 } else { 603 dpm_table->count = 1; 604 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100; 605 dpm_table->dpm_levels[0].enabled = true; 606 dpm_table->min = dpm_table->dpm_levels[0].value; 607 dpm_table->max = dpm_table->dpm_levels[0].value; 608 } 609 610 /* fclk dpm table setup */ 611 dpm_table = &dpm_context->dpm_tables.fclk_table; 612 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) { 613 ret = smu_v13_0_set_single_dpm_table(smu, 614 SMU_FCLK, 615 dpm_table); 616 if (ret) 617 return ret; 618 } else { 619 dpm_table->count = 1; 620 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100; 621 dpm_table->dpm_levels[0].enabled = true; 622 dpm_table->min = dpm_table->dpm_levels[0].value; 623 dpm_table->max = dpm_table->dpm_levels[0].value; 624 } 625 626 /* vclk dpm table setup */ 627 dpm_table = &dpm_context->dpm_tables.vclk_table; 628 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) { 629 ret = smu_v13_0_set_single_dpm_table(smu, 630 SMU_VCLK, 631 dpm_table); 632 if (ret) 633 return ret; 634 } else { 635 dpm_table->count = 1; 636 
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100; 637 dpm_table->dpm_levels[0].enabled = true; 638 dpm_table->min = dpm_table->dpm_levels[0].value; 639 dpm_table->max = dpm_table->dpm_levels[0].value; 640 } 641 642 /* dclk dpm table setup */ 643 dpm_table = &dpm_context->dpm_tables.dclk_table; 644 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) { 645 ret = smu_v13_0_set_single_dpm_table(smu, 646 SMU_DCLK, 647 dpm_table); 648 if (ret) 649 return ret; 650 } else { 651 dpm_table->count = 1; 652 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100; 653 dpm_table->dpm_levels[0].enabled = true; 654 dpm_table->min = dpm_table->dpm_levels[0].value; 655 dpm_table->max = dpm_table->dpm_levels[0].value; 656 } 657 658 /* lclk dpm table setup */ 659 pcie_table = &dpm_context->dpm_tables.pcie_table; 660 pcie_table->num_of_link_levels = 0; 661 for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { 662 if (!skutable->PcieGenSpeed[link_level] && 663 !skutable->PcieLaneCount[link_level] && 664 !skutable->LclkFreq[link_level]) 665 continue; 666 667 pcie_table->pcie_gen[pcie_table->num_of_link_levels] = 668 skutable->PcieGenSpeed[link_level]; 669 pcie_table->pcie_lane[pcie_table->num_of_link_levels] = 670 skutable->PcieLaneCount[link_level]; 671 pcie_table->clk_freq[pcie_table->num_of_link_levels] = 672 skutable->LclkFreq[link_level]; 673 pcie_table->num_of_link_levels++; 674 } 675 676 return 0; 677 } 678 679 static bool smu_v13_0_7_is_dpm_running(struct smu_context *smu) 680 { 681 int ret = 0; 682 uint64_t feature_enabled; 683 684 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 685 if (ret) 686 return false; 687 688 return !!(feature_enabled & SMC_DPM_FEATURE); 689 } 690 691 static void smu_v13_0_7_dump_pptable(struct smu_context *smu) 692 { 693 struct smu_table_context *table_context = &smu->smu_table; 694 PPTable_t *pptable = table_context->driver_pptable; 695 SkuTable_t *skutable = &pptable->SkuTable; 696 
697 dev_info(smu->adev->dev, "Dumped PPTable:\n"); 698 699 dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version); 700 dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]); 701 dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]); 702 } 703 704 static uint32_t smu_v13_0_7_get_throttler_status(SmuMetrics_t *metrics) 705 { 706 uint32_t throttler_status = 0; 707 int i; 708 709 for (i = 0; i < THROTTLER_COUNT; i++) 710 throttler_status |= 711 (metrics->ThrottlingPercentage[i] ? 1U << i : 0); 712 713 return throttler_status; 714 } 715 716 #define SMU_13_0_7_BUSY_THRESHOLD 15 717 static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu, 718 MetricsMember_t member, 719 uint32_t *value) 720 { 721 struct smu_table_context *smu_table= &smu->smu_table; 722 SmuMetrics_t *metrics = 723 &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics); 724 int ret = 0; 725 726 ret = smu_cmn_get_metrics_table(smu, 727 NULL, 728 false); 729 if (ret) 730 return ret; 731 732 switch (member) { 733 case METRICS_CURR_GFXCLK: 734 *value = metrics->CurrClock[PPCLK_GFXCLK]; 735 break; 736 case METRICS_CURR_SOCCLK: 737 *value = metrics->CurrClock[PPCLK_SOCCLK]; 738 break; 739 case METRICS_CURR_UCLK: 740 *value = metrics->CurrClock[PPCLK_UCLK]; 741 break; 742 case METRICS_CURR_VCLK: 743 *value = metrics->CurrClock[PPCLK_VCLK_0]; 744 break; 745 case METRICS_CURR_VCLK1: 746 *value = metrics->CurrClock[PPCLK_VCLK_1]; 747 break; 748 case METRICS_CURR_DCLK: 749 *value = metrics->CurrClock[PPCLK_DCLK_0]; 750 break; 751 case METRICS_CURR_DCLK1: 752 *value = metrics->CurrClock[PPCLK_DCLK_1]; 753 break; 754 case METRICS_CURR_FCLK: 755 *value = metrics->CurrClock[PPCLK_FCLK]; 756 break; 757 case METRICS_AVERAGE_GFXCLK: 758 *value = metrics->AverageGfxclkFrequencyPreDs; 759 break; 760 case METRICS_AVERAGE_FCLK: 761 if (metrics->AverageUclkActivity <= SMU_13_0_7_BUSY_THRESHOLD) 762 *value = 
metrics->AverageFclkFrequencyPostDs; 763 else 764 *value = metrics->AverageFclkFrequencyPreDs; 765 break; 766 case METRICS_AVERAGE_UCLK: 767 if (metrics->AverageUclkActivity <= SMU_13_0_7_BUSY_THRESHOLD) 768 *value = metrics->AverageMemclkFrequencyPostDs; 769 else 770 *value = metrics->AverageMemclkFrequencyPreDs; 771 break; 772 case METRICS_AVERAGE_VCLK: 773 *value = metrics->AverageVclk0Frequency; 774 break; 775 case METRICS_AVERAGE_DCLK: 776 *value = metrics->AverageDclk0Frequency; 777 break; 778 case METRICS_AVERAGE_VCLK1: 779 *value = metrics->AverageVclk1Frequency; 780 break; 781 case METRICS_AVERAGE_DCLK1: 782 *value = metrics->AverageDclk1Frequency; 783 break; 784 case METRICS_AVERAGE_GFXACTIVITY: 785 *value = metrics->AverageGfxActivity; 786 break; 787 case METRICS_AVERAGE_MEMACTIVITY: 788 *value = metrics->AverageUclkActivity; 789 break; 790 case METRICS_AVERAGE_SOCKETPOWER: 791 *value = metrics->AverageSocketPower << 8; 792 break; 793 case METRICS_TEMPERATURE_EDGE: 794 *value = metrics->AvgTemperature[TEMP_EDGE] * 795 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 796 break; 797 case METRICS_TEMPERATURE_HOTSPOT: 798 *value = metrics->AvgTemperature[TEMP_HOTSPOT] * 799 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 800 break; 801 case METRICS_TEMPERATURE_MEM: 802 *value = metrics->AvgTemperature[TEMP_MEM] * 803 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 804 break; 805 case METRICS_TEMPERATURE_VRGFX: 806 *value = metrics->AvgTemperature[TEMP_VR_GFX] * 807 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 808 break; 809 case METRICS_TEMPERATURE_VRSOC: 810 *value = metrics->AvgTemperature[TEMP_VR_SOC] * 811 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 812 break; 813 case METRICS_THROTTLER_STATUS: 814 *value = smu_v13_0_7_get_throttler_status(metrics); 815 break; 816 case METRICS_CURR_FANSPEED: 817 *value = metrics->AvgFanRpm; 818 break; 819 case METRICS_CURR_FANPWM: 820 *value = metrics->AvgFanPwm; 821 break; 822 case METRICS_VOLTAGE_VDDGFX: 823 *value = metrics->AvgVoltage[SVI_PLANE_GFX]; 824 
break; 825 case METRICS_PCIE_RATE: 826 *value = metrics->PcieRate; 827 break; 828 case METRICS_PCIE_WIDTH: 829 *value = metrics->PcieWidth; 830 break; 831 default: 832 *value = UINT_MAX; 833 break; 834 } 835 836 return ret; 837 } 838 839 static int smu_v13_0_7_get_dpm_ultimate_freq(struct smu_context *smu, 840 enum smu_clk_type clk_type, 841 uint32_t *min, 842 uint32_t *max) 843 { 844 struct smu_13_0_dpm_context *dpm_context = 845 smu->smu_dpm.dpm_context; 846 struct smu_13_0_dpm_table *dpm_table; 847 848 switch (clk_type) { 849 case SMU_MCLK: 850 case SMU_UCLK: 851 /* uclk dpm table */ 852 dpm_table = &dpm_context->dpm_tables.uclk_table; 853 break; 854 case SMU_GFXCLK: 855 case SMU_SCLK: 856 /* gfxclk dpm table */ 857 dpm_table = &dpm_context->dpm_tables.gfx_table; 858 break; 859 case SMU_SOCCLK: 860 /* socclk dpm table */ 861 dpm_table = &dpm_context->dpm_tables.soc_table; 862 break; 863 case SMU_FCLK: 864 /* fclk dpm table */ 865 dpm_table = &dpm_context->dpm_tables.fclk_table; 866 break; 867 case SMU_VCLK: 868 case SMU_VCLK1: 869 /* vclk dpm table */ 870 dpm_table = &dpm_context->dpm_tables.vclk_table; 871 break; 872 case SMU_DCLK: 873 case SMU_DCLK1: 874 /* dclk dpm table */ 875 dpm_table = &dpm_context->dpm_tables.dclk_table; 876 break; 877 default: 878 dev_err(smu->adev->dev, "Unsupported clock type!\n"); 879 return -EINVAL; 880 } 881 882 if (min) 883 *min = dpm_table->min; 884 if (max) 885 *max = dpm_table->max; 886 887 return 0; 888 } 889 890 static int smu_v13_0_7_read_sensor(struct smu_context *smu, 891 enum amd_pp_sensors sensor, 892 void *data, 893 uint32_t *size) 894 { 895 struct smu_table_context *table_context = &smu->smu_table; 896 PPTable_t *smc_pptable = table_context->driver_pptable; 897 int ret = 0; 898 899 switch (sensor) { 900 case AMDGPU_PP_SENSOR_MAX_FAN_RPM: 901 *(uint16_t *)data = smc_pptable->SkuTable.FanMaximumRpm; 902 *size = 4; 903 break; 904 case AMDGPU_PP_SENSOR_MEM_LOAD: 905 ret = smu_v13_0_7_get_smu_metrics_data(smu, 906 
METRICS_AVERAGE_MEMACTIVITY, 907 (uint32_t *)data); 908 *size = 4; 909 break; 910 case AMDGPU_PP_SENSOR_GPU_LOAD: 911 ret = smu_v13_0_7_get_smu_metrics_data(smu, 912 METRICS_AVERAGE_GFXACTIVITY, 913 (uint32_t *)data); 914 *size = 4; 915 break; 916 case AMDGPU_PP_SENSOR_GPU_POWER: 917 ret = smu_v13_0_7_get_smu_metrics_data(smu, 918 METRICS_AVERAGE_SOCKETPOWER, 919 (uint32_t *)data); 920 *size = 4; 921 break; 922 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 923 ret = smu_v13_0_7_get_smu_metrics_data(smu, 924 METRICS_TEMPERATURE_HOTSPOT, 925 (uint32_t *)data); 926 *size = 4; 927 break; 928 case AMDGPU_PP_SENSOR_EDGE_TEMP: 929 ret = smu_v13_0_7_get_smu_metrics_data(smu, 930 METRICS_TEMPERATURE_EDGE, 931 (uint32_t *)data); 932 *size = 4; 933 break; 934 case AMDGPU_PP_SENSOR_MEM_TEMP: 935 ret = smu_v13_0_7_get_smu_metrics_data(smu, 936 METRICS_TEMPERATURE_MEM, 937 (uint32_t *)data); 938 *size = 4; 939 break; 940 case AMDGPU_PP_SENSOR_GFX_MCLK: 941 ret = smu_v13_0_7_get_smu_metrics_data(smu, 942 METRICS_AVERAGE_UCLK, 943 (uint32_t *)data); 944 *(uint32_t *)data *= 100; 945 *size = 4; 946 break; 947 case AMDGPU_PP_SENSOR_GFX_SCLK: 948 ret = smu_v13_0_7_get_smu_metrics_data(smu, 949 METRICS_AVERAGE_GFXCLK, 950 (uint32_t *)data); 951 *(uint32_t *)data *= 100; 952 *size = 4; 953 break; 954 case AMDGPU_PP_SENSOR_VDDGFX: 955 ret = smu_v13_0_7_get_smu_metrics_data(smu, 956 METRICS_VOLTAGE_VDDGFX, 957 (uint32_t *)data); 958 *size = 4; 959 break; 960 default: 961 ret = -EOPNOTSUPP; 962 break; 963 } 964 965 return ret; 966 } 967 968 static int smu_v13_0_7_get_current_clk_freq_by_table(struct smu_context *smu, 969 enum smu_clk_type clk_type, 970 uint32_t *value) 971 { 972 MetricsMember_t member_type; 973 int clk_id = 0; 974 975 clk_id = smu_cmn_to_asic_specific_index(smu, 976 CMN2ASIC_MAPPING_CLK, 977 clk_type); 978 if (clk_id < 0) 979 return -EINVAL; 980 981 switch (clk_id) { 982 case PPCLK_GFXCLK: 983 member_type = METRICS_AVERAGE_GFXCLK; 984 break; 985 case PPCLK_UCLK: 986 member_type = 
 METRICS_CURR_UCLK;
		break;
	case PPCLK_FCLK:
		member_type = METRICS_CURR_FCLK;
		break;
	case PPCLK_SOCCLK:
		member_type = METRICS_CURR_SOCCLK;
		break;
	case PPCLK_VCLK_0:
		member_type = METRICS_CURR_VCLK;
		break;
	case PPCLK_DCLK_0:
		member_type = METRICS_CURR_DCLK;
		break;
	case PPCLK_VCLK_1:
		member_type = METRICS_CURR_VCLK1;
		break;
	case PPCLK_DCLK_1:
		member_type = METRICS_CURR_DCLK1;
		break;
	default:
		return -EINVAL;
	}

	return smu_v13_0_7_get_smu_metrics_data(smu,
						member_type,
						value);
}

/*
 * sysfs pp_dpm_* backend: emit the DPM levels of @clk_type into @buf,
 * marking the currently active level with '*'. Returns bytes written,
 * or a negative errno.
 */
static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					char *buf)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_13_0_dpm_table *single_dpm_table;
	struct smu_13_0_pcie_table *pcie_table;
	uint32_t gen_speed, lane_width;
	int i, curr_freq, size = 0;
	int ret = 0;

	smu_cmn_get_sysfs_buf(&buf, &size);

	/* After a fatal RAS interrupt the metrics are not trustworthy */
	if (amdgpu_ras_intr_triggered()) {
		size += sysfs_emit_at(buf, size, "unavailable\n");
		return size;
	}

	switch (clk_type) {
	case SMU_SCLK:
		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		break;
	case SMU_MCLK:
		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
		break;
	case SMU_SOCCLK:
		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
		break;
	case SMU_FCLK:
		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
		break;
	default:
		break;
	}

	switch (clk_type) {
	case SMU_SCLK:
	case SMU_MCLK:
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_VCLK1:
	case SMU_DCLK:
	case SMU_DCLK1:
		ret = smu_v13_0_7_get_current_clk_freq_by_table(smu, clk_type, &curr_freq);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to get current clock freq!");
			return ret;
		}

		if (single_dpm_table->is_fine_grained) {
			/*
			 * For fine grained dpms, there are only two dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> max clock freq
			 * And the current clock frequency can be any value between them.
			 * So, if the current clock frequency is not at level 0 or level 1,
			 * we will fake it as three dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> current actual clock freq
			 *   - level 2 -> max clock freq
			 */
			if ((single_dpm_table->dpm_levels[0].value != curr_freq) &&
			    (single_dpm_table->dpm_levels[1].value != curr_freq)) {
				size += sysfs_emit_at(buf, size, "0: %uMhz\n",
						single_dpm_table->dpm_levels[0].value);
				size += sysfs_emit_at(buf, size, "1: %uMhz *\n",
						curr_freq);
				size += sysfs_emit_at(buf, size, "2: %uMhz\n",
						single_dpm_table->dpm_levels[1].value);
			} else {
				size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
						single_dpm_table->dpm_levels[0].value,
						single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : "");
				size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
						single_dpm_table->dpm_levels[1].value,
						single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : "");
			}
		} else {
			for (i = 0; i < single_dpm_table->count; i++)
				size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
						i, single_dpm_table->dpm_levels[i].value,
						single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : "");
		}
		break;
	case SMU_PCIE:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_PCIE_RATE,
						       &gen_speed);
		if (ret)
			return ret;

		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_PCIE_WIDTH,
						       &lane_width);
		if (ret)
			return ret;

		pcie_table = &(dpm_context->dpm_tables.pcie_table);
		for (i = 0; i < pcie_table->num_of_link_levels; i++)
			size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
					(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," :
					(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," :
					(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," :
					(pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," : "",
					(pcie_table->pcie_lane[i] == 1) ? "x1" :
					(pcie_table->pcie_lane[i] == 2) ? "x2" :
					(pcie_table->pcie_lane[i] == 3) ? "x4" :
					(pcie_table->pcie_lane[i] == 4) ? "x8" :
					(pcie_table->pcie_lane[i] == 5) ? "x12" :
					(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
					pcie_table->clk_freq[i],
					(gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
					(lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
					"*" : "");
		break;

	default:
		break;
	}

	return size;
}

/*
 * sysfs pp_dpm_* store path: clamp the DPM range of @clk_type to the
 * [lowest, highest] levels set in @mask.
 */
static int smu_v13_0_7_force_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					uint32_t mask)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_13_0_dpm_table *single_dpm_table;
	uint32_t soft_min_level, soft_max_level;
	uint32_t min_freq, max_freq;
	int ret = 0;

	/* lowest/highest set bits of the mask select the level indices */
	soft_min_level = mask ? (ffs(mask) - 1) : 0;
	soft_max_level = mask ?
 (fls(mask) - 1) : 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		break;
	case SMU_MCLK:
	case SMU_UCLK:
		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
		break;
	case SMU_SOCCLK:
		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
		break;
	case SMU_FCLK:
		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
		break;
	default:
		break;
	}

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
	case SMU_MCLK:
	case SMU_UCLK:
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_VCLK1:
	case SMU_DCLK:
	case SMU_DCLK1:
		if (single_dpm_table->is_fine_grained) {
			/* There is only 2 levels for fine grained DPM */
			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
			soft_min_level = (soft_min_level >= 1 ? 1 : 0);
		} else {
			if ((soft_max_level >= single_dpm_table->count) ||
			    (soft_min_level >= single_dpm_table->count))
				return -EINVAL;
		}

		min_freq = single_dpm_table->dpm_levels[soft_min_level].value;
		max_freq = single_dpm_table->dpm_levels[soft_max_level].value;

		ret = smu_v13_0_set_soft_freq_limited_range(smu,
							    clk_type,
							    min_freq,
							    max_freq);
		break;
	case SMU_DCEFCLK:
	case SMU_PCIE:
	default:
		break;
	}

	return ret;
}

/*
 * Cap the SW pcie table to the platform gen/width capabilities and push
 * each link level to PMFW via OverridePcieParameters
 * (arg layout: [level:31..16][gen:15..8][lanes:7..0]).
 */
static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu,
					      uint32_t pcie_gen_cap,
					      uint32_t pcie_width_cap)
{
	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	struct smu_13_0_pcie_table *pcie_table =
		&dpm_context->dpm_tables.pcie_table;
	uint32_t smu_pcie_arg;
	int ret, i;

	for (i = 0; i < pcie_table->num_of_link_levels; i++) {
		if (pcie_table->pcie_gen[i] > pcie_gen_cap)
			pcie_table->pcie_gen[i] = pcie_gen_cap;
		if (pcie_table->pcie_lane[i] > pcie_width_cap)
			pcie_table->pcie_lane[i] = pcie_width_cap;

		smu_pcie_arg = i << 16;
		smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
		smu_pcie_arg |= pcie_table->pcie_lane[i];

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_OverridePcieParameters,
						      smu_pcie_arg,
						      NULL);
		if (ret)
			return ret;
	}

	return 0;
}

/* Default thermal ranges in millicelsius; entry 0 is the baseline policy */
static const struct smu_temperature_range smu13_thermal_policy[] =
{
	{-273150,  99000,  99000, -273150,  99000,  99000, -273150,  99000,  99000},
	{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};

/*
 * Fill @range from the SKU temperature limits in the driver pptable,
 * scaled by SMU_TEMPERATURE_UNITS_PER_CENTIGRADES, on top of the
 * baseline smu13_thermal_policy[0].
 */
static int smu_v13_0_7_get_thermal_temperature_range(struct smu_context *smu,
						     struct smu_temperature_range *range)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_13_0_7_powerplay_table *powerplay_table =
		table_context->power_play_table;
	PPTable_t *pptable = smu->smu_table.driver_pptable;

	if (!range)
		return -EINVAL;

	memcpy(range, &smu13_thermal_policy[0], sizeof(struct smu_temperature_range));

	range->max = pptable->SkuTable.TemperatureLimit[TEMP_EDGE] *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->edge_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE) *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->hotspot_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->hotspot_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT) *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->mem_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_MEM] *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)*
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
	range->software_shutdown_temp_offset = pptable->SkuTable.FanAbnormalTempLimitOffset;

	return 0;
}

#define MAX(a, b)	((a) > (b) ? (a) : (b))
/*
 * Fill a v1.3 gpu_metrics table from a fresh (bypass-cache) PMFW metrics
 * read, for the gpu_metrics sysfs interface.
 */
static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu,
					   void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_3 *gpu_metrics =
		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
	SmuMetricsExternal_t metrics_ext;
	SmuMetrics_t *metrics = &metrics_ext.SmuMetrics;
	int ret = 0;

	/* bypass_cache = true: always fetch a fresh sample from PMFW */
	ret = smu_cmn_get_metrics_table(smu,
					&metrics_ext,
					true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

	gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE];
	gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT];
	gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM];
	gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX];
	gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC];
	/* single v1.3 field per metric: report the hotter/busier of the two */
	gpu_metrics->temperature_vrmem = MAX(metrics->AvgTemperature[TEMP_VR_MEM0],
					     metrics->AvgTemperature[TEMP_VR_MEM1]);

	gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
	gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
	gpu_metrics->average_mm_activity = MAX(metrics->Vcn0ActivityPercentage,
					       metrics->Vcn1ActivityPercentage);

	gpu_metrics->average_socket_power = metrics->AverageSocketPower;
	gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;

	/* below the busy threshold, the post-deep-sleep average is used */
	if (metrics->AverageGfxActivity <= SMU_13_0_7_BUSY_THRESHOLD)
		gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs;
	else
		gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;

	if (metrics->AverageUclkActivity <= SMU_13_0_7_BUSY_THRESHOLD)
		gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs;
	else
		gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs;

	gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency;
	gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency;
	gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
	gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;

	gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK];
	gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
	gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
	gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1];
	gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_1];

	gpu_metrics->throttle_status =
			smu_v13_0_7_get_throttler_status(metrics);
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
							   smu_v13_0_7_throttler_map);

	gpu_metrics->current_fan_speed = metrics->AvgFanRpm;

	gpu_metrics->pcie_link_width = metrics->PcieWidth;
	gpu_metrics->pcie_link_speed = metrics->PcieRate;

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_GFX];
	gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_SOC];
	gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VMEMP];

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_3);
}

/*
 * Derive the UMD pstate (min/standard/peak) clocks from the DPM tables,
 * honoring the SKU DriverReportedClocks caps for gfxclk when set.
 */
static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
{
	struct smu_13_0_dpm_context *dpm_context =
		smu->smu_dpm.dpm_context;
	struct smu_13_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;
	struct smu_13_0_dpm_table *mem_table =
		&dpm_context->dpm_tables.uclk_table;
	struct smu_13_0_dpm_table *soc_table =
		&dpm_context->dpm_tables.soc_table;
	struct smu_13_0_dpm_table *vclk_table =
		&dpm_context->dpm_tables.vclk_table;
	struct smu_13_0_dpm_table *dclk_table =
		&dpm_context->dpm_tables.dclk_table;
	struct smu_13_0_dpm_table *fclk_table =
		&dpm_context->dpm_tables.fclk_table;
	struct smu_umd_pstate_table *pstate_table =
		&smu->pstate_table;
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;
	DriverReportedClocks_t driver_clocks =
		pptable->SkuTable.DriverReportedClocks;

	pstate_table->gfxclk_pstate.min = gfx_table->min;
	/* peak gfxclk is capped by GameClockAc when it is set and lower */
	if (driver_clocks.GameClockAc &&
	    (driver_clocks.GameClockAc < gfx_table->max))
		pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
	else
		pstate_table->gfxclk_pstate.peak = gfx_table->max;

	pstate_table->uclk_pstate.min = mem_table->min;
	pstate_table->uclk_pstate.peak = mem_table->max;

	pstate_table->socclk_pstate.min = soc_table->min;
	pstate_table->socclk_pstate.peak = soc_table->max;

	pstate_table->vclk_pstate.min = vclk_table->min;
	pstate_table->vclk_pstate.peak = vclk_table->max;

	pstate_table->dclk_pstate.min = dclk_table->min;
	pstate_table->dclk_pstate.peak = dclk_table->max;

	pstate_table->fclk_pstate.min = fclk_table->min;
	pstate_table->fclk_pstate.peak = fclk_table->max;

	/* standard gfxclk is capped by BaseClockAc when it is set and lower */
	if (driver_clocks.BaseClockAc &&
	    driver_clocks.BaseClockAc < gfx_table->max)
		pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
	else
		pstate_table->gfxclk_pstate.standard = gfx_table->max;
	pstate_table->uclk_pstate.standard = mem_table->max;
	pstate_table->socclk_pstate.standard = soc_table->min;
	pstate_table->vclk_pstate.standard = vclk_table->min;
	pstate_table->dclk_pstate.standard = dclk_table->min;
	pstate_table->fclk_pstate.standard = fclk_table->min;

	return 0;
}

/*
 * Report current fan speed as a 0-255 PWM value (PMFW reports percent).
 */
static int smu_v13_0_7_get_fan_speed_pwm(struct smu_context *smu,
					 uint32_t *speed)
{
	int ret;

	if (!speed)
		return -EINVAL;

	ret = smu_v13_0_7_get_smu_metrics_data(smu,
					       METRICS_CURR_FANPWM,
					       speed);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!");
		return ret;
	}

	/* Convert the PMFW output which is in percent to pwm(255) based */
	*speed = MIN(*speed * 255 / 100, 255);

	return 0;
}

/* Report current fan speed in RPM, straight from the PMFW metrics. */
static int smu_v13_0_7_get_fan_speed_rpm(struct smu_context *smu,
					 uint32_t *speed)
{
	if (!speed)
		return -EINVAL;

	return smu_v13_0_7_get_smu_metrics_data(smu,
						METRICS_CURR_FANSPEED,
						speed);
}

/*
 * Arm the multi-GPU acoustic fan boost limit in PMFW, when the SKU
 * advertises support for it.
 */
static int smu_v13_0_7_enable_mgpu_fan_boost(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;
	SkuTable_t *skutable = &pptable->SkuTable;

	/*
	 * Skip the MGpuFanBoost setting for those ASICs
	 * which do not support it
	 */
	if (skutable->MGpuAcousticLimitRpmThreshold == 0)
		return 0;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_SetMGpuFanBoostLimitRpm,
					       0,
					       NULL);
}

/*
 * Report current/default/max power limits. Falls back to the pptable
 * AC/DC PPT0 limit when PMFW cannot report one; max is scaled by the
 * overdrive power percentage when OD is enabled.
 */
static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
				       uint32_t *current_power_limit,
				       uint32_t *default_power_limit,
				       uint32_t *max_power_limit)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_13_0_7_powerplay_table *powerplay_table =
		(struct smu_13_0_7_powerplay_table *)table_context->power_play_table;
	PPTable_t *pptable = table_context->driver_pptable;
	SkuTable_t *skutable = &pptable->SkuTable;
	uint32_t power_limit, od_percent;

	if (smu_v13_0_get_current_power_limit(smu, &power_limit))
		power_limit = smu->adev->pm.ac_power ?
			      skutable->SocketPowerLimitAc[PPT_THROTTLER_PPT0] :
			      skutable->SocketPowerLimitDc[PPT_THROTTLER_PPT0];

	if (current_power_limit)
		*current_power_limit = power_limit;
	if (default_power_limit)
		*default_power_limit = power_limit;

	if (max_power_limit) {
		if (smu->od_enabled) {
			od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);

			dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);

			power_limit *= (100 + od_percent);
			power_limit /= 100;
		}
		*max_power_limit = power_limit;
	}

	return 0;
}

/*
 * sysfs pp_power_profile_mode show path: print the activity monitor
 * coefficients of every supported workload profile, marking the active
 * one with '*'. Returns bytes written or a negative errno.
 */
static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf)
{
	DpmActivityMonitorCoeffIntExternal_t *activity_monitor_external;
	uint32_t i, j, size = 0;
	int16_t workload_type = 0;
	int result = 0;

	if (!buf)
		return -EINVAL;

	activity_monitor_external = kcalloc(PP_SMC_POWER_PROFILE_COUNT,
					    sizeof(*activity_monitor_external),
					    GFP_KERNEL);
	if (!activity_monitor_external)
		return -ENOMEM;

	size += sysfs_emit_at(buf, size, " ");
	for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++)
		size += sysfs_emit_at(buf, size, "%-14s%s", amdgpu_pp_profile_name[i],
			(i == smu->power_profile_mode) ? "* " : " ");

	size += sysfs_emit_at(buf, size, "\n");

	for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++) {
		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
		workload_type = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_WORKLOAD,
							       i);
		/* -ENOTSUPP marks a profile this ASIC does not implement */
		if (workload_type == -ENOTSUPP)
			continue;
		else if (workload_type < 0) {
			result = -EINVAL;
			goto out;
		}

		result = smu_cmn_update_table(smu,
					      SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
					      (void *)(&activity_monitor_external[i]), false);
		if (result) {
			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
			goto out;
		}
	}

/* Emit one coefficient row across all profiles up to WINDOW3D */
#define PRINT_DPM_MONITOR(field)								\
do {												\
	size += sysfs_emit_at(buf, size, "%-30s", #field);					\
	for (j = 0; j <= PP_SMC_POWER_PROFILE_WINDOW3D; j++)					\
		size += sysfs_emit_at(buf, size, "%-16d", activity_monitor_external[j].DpmActivityMonitorCoeffInt.field);		\
	size += sysfs_emit_at(buf, size, "\n");							\
} while (0)

	PRINT_DPM_MONITOR(Gfx_ActiveHystLimit);
	PRINT_DPM_MONITOR(Gfx_IdleHystLimit);
	PRINT_DPM_MONITOR(Gfx_FPS);
	PRINT_DPM_MONITOR(Gfx_MinActiveFreqType);
	PRINT_DPM_MONITOR(Gfx_BoosterFreqType);
	PRINT_DPM_MONITOR(Gfx_MinActiveFreq);
	PRINT_DPM_MONITOR(Gfx_BoosterFreq);
	PRINT_DPM_MONITOR(Fclk_ActiveHystLimit);
	PRINT_DPM_MONITOR(Fclk_IdleHystLimit);
	PRINT_DPM_MONITOR(Fclk_FPS);
	PRINT_DPM_MONITOR(Fclk_MinActiveFreqType);
	PRINT_DPM_MONITOR(Fclk_BoosterFreqType);
	PRINT_DPM_MONITOR(Fclk_MinActiveFreq);
	PRINT_DPM_MONITOR(Fclk_BoosterFreq);
#undef PRINT_DPM_MONITOR

	result = size;
out:
	kfree(activity_monitor_external);
	return result;
}

/*
 * sysfs pp_power_profile_mode store path: select a workload profile;
 * for the CUSTOM profile, @input additionally carries activity monitor
 * coefficients (input[0] selects Gfx/Fclk, input[1..7] the values) and
 * input[size] carries the profile index.
 */
static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
{
	DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
DpmActivityMonitorCoeffInt_t *activity_monitor = 1600 &(activity_monitor_external.DpmActivityMonitorCoeffInt); 1601 int workload_type, ret = 0; 1602 1603 smu->power_profile_mode = input[size]; 1604 1605 if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_WINDOW3D) { 1606 dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); 1607 return -EINVAL; 1608 } 1609 1610 if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { 1611 1612 ret = smu_cmn_update_table(smu, 1613 SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, 1614 (void *)(&activity_monitor_external), false); 1615 if (ret) { 1616 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); 1617 return ret; 1618 } 1619 1620 switch (input[0]) { 1621 case 0: /* Gfxclk */ 1622 activity_monitor->Gfx_ActiveHystLimit = input[1]; 1623 activity_monitor->Gfx_IdleHystLimit = input[2]; 1624 activity_monitor->Gfx_FPS = input[3]; 1625 activity_monitor->Gfx_MinActiveFreqType = input[4]; 1626 activity_monitor->Gfx_BoosterFreqType = input[5]; 1627 activity_monitor->Gfx_MinActiveFreq = input[6]; 1628 activity_monitor->Gfx_BoosterFreq = input[7]; 1629 break; 1630 case 1: /* Fclk */ 1631 activity_monitor->Fclk_ActiveHystLimit = input[1]; 1632 activity_monitor->Fclk_IdleHystLimit = input[2]; 1633 activity_monitor->Fclk_FPS = input[3]; 1634 activity_monitor->Fclk_MinActiveFreqType = input[4]; 1635 activity_monitor->Fclk_BoosterFreqType = input[5]; 1636 activity_monitor->Fclk_MinActiveFreq = input[6]; 1637 activity_monitor->Fclk_BoosterFreq = input[7]; 1638 break; 1639 } 1640 1641 ret = smu_cmn_update_table(smu, 1642 SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, 1643 (void *)(&activity_monitor_external), true); 1644 if (ret) { 1645 dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); 1646 return ret; 1647 } 1648 } 1649 1650 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 1651 workload_type = smu_cmn_to_asic_specific_index(smu, 
1652 CMN2ASIC_MAPPING_WORKLOAD, 1653 smu->power_profile_mode); 1654 if (workload_type < 0) 1655 return -EINVAL; 1656 smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, 1657 1 << workload_type, NULL); 1658 1659 return ret; 1660 } 1661 1662 static int smu_v13_0_7_set_mp1_state(struct smu_context *smu, 1663 enum pp_mp1_state mp1_state) 1664 { 1665 int ret; 1666 1667 switch (mp1_state) { 1668 case PP_MP1_STATE_UNLOAD: 1669 ret = smu_cmn_set_mp1_state(smu, mp1_state); 1670 break; 1671 default: 1672 /* Ignore others */ 1673 ret = 0; 1674 } 1675 1676 return ret; 1677 } 1678 1679 static int smu_v13_0_7_baco_enter(struct smu_context *smu) 1680 { 1681 struct smu_baco_context *smu_baco = &smu->smu_baco; 1682 struct amdgpu_device *adev = smu->adev; 1683 1684 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) 1685 return smu_v13_0_baco_set_armd3_sequence(smu, 1686 smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO); 1687 else 1688 return smu_v13_0_baco_enter(smu); 1689 } 1690 1691 static int smu_v13_0_7_baco_exit(struct smu_context *smu) 1692 { 1693 struct amdgpu_device *adev = smu->adev; 1694 1695 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { 1696 /* Wait for PMFW handling for the Dstate change */ 1697 usleep_range(10000, 11000); 1698 return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); 1699 } else { 1700 return smu_v13_0_baco_exit(smu); 1701 } 1702 } 1703 1704 static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu) 1705 { 1706 struct amdgpu_device *adev = smu->adev; 1707 1708 /* SRIOV does not support SMU mode1 reset */ 1709 if (amdgpu_sriov_vf(adev)) 1710 return false; 1711 1712 return true; 1713 } 1714 1715 static int smu_v13_0_7_set_df_cstate(struct smu_context *smu, 1716 enum pp_df_cstate state) 1717 { 1718 return smu_cmn_send_smc_msg_with_param(smu, 1719 SMU_MSG_DFCstateControl, 1720 state, 1721 NULL); 1722 } 1723 1724 static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { 1725 
	.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
	.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
	.is_dpm_running = smu_v13_0_7_is_dpm_running,
	.dump_pptable = smu_v13_0_7_dump_pptable,
	.init_microcode = smu_v13_0_init_microcode,
	.load_microcode = smu_v13_0_load_microcode,
	.fini_microcode = smu_v13_0_fini_microcode,
	.init_smc_tables = smu_v13_0_7_init_smc_tables,
	.fini_smc_tables = smu_v13_0_fini_smc_tables,
	.init_power = smu_v13_0_init_power,
	.fini_power = smu_v13_0_fini_power,
	.check_fw_status = smu_v13_0_7_check_fw_status,
	.setup_pptable = smu_v13_0_7_setup_pptable,
	.check_fw_version = smu_v13_0_check_fw_version,
	.write_pptable = smu_cmn_write_pptable,
	.set_driver_table_location = smu_v13_0_set_driver_table_location,
	.system_features_control = smu_v13_0_system_features_control,
	.set_allowed_mask = smu_v13_0_set_allowed_mask,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.dpm_set_vcn_enable = smu_v13_0_set_vcn_enable,
	.dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
	.init_pptable_microcode = smu_v13_0_init_pptable_microcode,
	.populate_umd_state_clk = smu_v13_0_7_populate_umd_state_clk,
	.get_dpm_ultimate_freq = smu_v13_0_7_get_dpm_ultimate_freq,
	.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
	.read_sensor = smu_v13_0_7_read_sensor,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.print_clk_levels = smu_v13_0_7_print_clk_levels,
	.force_clk_levels = smu_v13_0_7_force_clk_levels,
	.update_pcie_parameters = smu_v13_0_7_update_pcie_parameters,
	.get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range,
	.register_irq_handler = smu_v13_0_register_irq_handler,
	.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
	.disable_thermal_alert = smu_v13_0_disable_thermal_alert,
	.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
	.get_gpu_metrics = smu_v13_0_7_get_gpu_metrics,
	.set_soft_freq_limited_range = smu_v13_0_set_soft_freq_limited_range,
	.set_performance_level = smu_v13_0_set_performance_level,
	.gfx_off_control = smu_v13_0_gfx_off_control,
	.get_fan_speed_pwm = smu_v13_0_7_get_fan_speed_pwm,
	.get_fan_speed_rpm = smu_v13_0_7_get_fan_speed_rpm,
	.set_fan_speed_pwm = smu_v13_0_set_fan_speed_pwm,
	.set_fan_speed_rpm = smu_v13_0_set_fan_speed_rpm,
	.get_fan_control_mode = smu_v13_0_get_fan_control_mode,
	.set_fan_control_mode = smu_v13_0_set_fan_control_mode,
	.enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost,
	.get_power_limit = smu_v13_0_7_get_power_limit,
	.set_power_limit = smu_v13_0_set_power_limit,
	.get_power_profile_mode = smu_v13_0_7_get_power_profile_mode,
	.set_power_profile_mode = smu_v13_0_7_set_power_profile_mode,
	.set_tool_table_location = smu_v13_0_set_tool_table_location,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
	.baco_is_support = smu_v13_0_baco_is_support,
	.baco_get_state = smu_v13_0_baco_get_state,
	.baco_set_state = smu_v13_0_baco_set_state,
	.baco_enter = smu_v13_0_7_baco_enter,
	.baco_exit = smu_v13_0_7_baco_exit,
	.mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
	.mode1_reset = smu_v13_0_mode1_reset,
	.set_mp1_state = smu_v13_0_7_set_mp1_state,
	.set_df_cstate = smu_v13_0_7_set_df_cstate,
	.gpo_control = smu_v13_0_gpo_control,
};

/*
 * Hook up the v13.0.7 function table and the cmn2asic mapping tables,
 * then program the SMU mailbox registers.
 */
void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &smu_v13_0_7_ppt_funcs;
	smu->message_map = smu_v13_0_7_message_map;
	smu->clock_map = smu_v13_0_7_clk_map;
	smu->feature_map = smu_v13_0_7_feature_mask_map;
	smu->table_map = smu_v13_0_7_table_map;
	smu->pwr_src_map = smu_v13_0_7_pwr_src_map;
	smu->workload_map = smu_v13_0_7_workload_map;
	smu_v13_0_set_smu_mailbox_registers(smu);
}