1 /* 2 * Copyright 2021 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #define SWSMU_CODE_LAYER_L2 25 26 #include <linux/firmware.h> 27 #include <linux/pci.h> 28 #include <linux/i2c.h> 29 #include "amdgpu.h" 30 #include "amdgpu_smu.h" 31 #include "atomfirmware.h" 32 #include "amdgpu_atomfirmware.h" 33 #include "amdgpu_atombios.h" 34 #include "smu_v13_0.h" 35 #include "smu13_driver_if_v13_0_7.h" 36 #include "soc15_common.h" 37 #include "atom.h" 38 #include "smu_v13_0_7_ppt.h" 39 #include "smu_v13_0_7_pptable.h" 40 #include "smu_v13_0_7_ppsmc.h" 41 #include "nbio/nbio_4_3_0_offset.h" 42 #include "nbio/nbio_4_3_0_sh_mask.h" 43 #include "mp/mp_13_0_0_offset.h" 44 #include "mp/mp_13_0_0_sh_mask.h" 45 46 #include "asic_reg/mp/mp_13_0_0_sh_mask.h" 47 #include "smu_cmn.h" 48 #include "amdgpu_ras.h" 49 50 /* 51 * DO NOT use these for err/warn/info/debug messages. 
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

/* Resolve the amdgpu_device embedding a given pm.smu_i2c adapter. */
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))

#define FEATURE_MASK(feature) (1ULL << feature)

/* Union of the clock-DPM feature bits; used to decide whether DPM is running. */
#define SMC_DPM_FEATURE ( \
	FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT))

/* SMN address of the MP1 firmware flags register on SMU v13.0.7. */
#define smnMP1_FIRMWARE_FLAGS_SMU_13_0_7 0x3b10028

/* Size of the MP0<->MP1 data region that holds the combo pptable. */
#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE	0x4000

/*
 * Driver message -> PMFW message ID mapping.
 * The third MSG_MAP() argument is a per-message flag consumed by smu_cmn
 * (presumably "valid while in a low-power state" -- TODO confirm against
 * smu_cmn.c); values are kept exactly as defined for this ASIC.
 */
static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,			PPSMC_MSG_TestMessage,                 1),
	MSG_MAP(GetSmuVersion,			PPSMC_MSG_GetSmuVersion,               1),
	MSG_MAP(GetDriverIfVersion,		PPSMC_MSG_GetDriverIfVersion,          1),
	MSG_MAP(SetAllowedFeaturesMaskLow,	PPSMC_MSG_SetAllowedFeaturesMaskLow,   0),
	MSG_MAP(SetAllowedFeaturesMaskHigh,	PPSMC_MSG_SetAllowedFeaturesMaskHigh,  0),
	MSG_MAP(EnableAllSmuFeatures,		PPSMC_MSG_EnableAllSmuFeatures,        0),
	MSG_MAP(DisableAllSmuFeatures,		PPSMC_MSG_DisableAllSmuFeatures,       0),
	MSG_MAP(EnableSmuFeaturesLow,		PPSMC_MSG_EnableSmuFeaturesLow,        1),
	MSG_MAP(EnableSmuFeaturesHigh,		PPSMC_MSG_EnableSmuFeaturesHigh,       1),
	MSG_MAP(DisableSmuFeaturesLow,		PPSMC_MSG_DisableSmuFeaturesLow,       1),
	MSG_MAP(DisableSmuFeaturesHigh,		PPSMC_MSG_DisableSmuFeaturesHigh,      1),
	MSG_MAP(GetEnabledSmuFeaturesLow,	PPSMC_MSG_GetRunningSmuFeaturesLow,    1),
	MSG_MAP(GetEnabledSmuFeaturesHigh,	PPSMC_MSG_GetRunningSmuFeaturesHigh,   1),
	MSG_MAP(SetWorkloadMask,		PPSMC_MSG_SetWorkloadMask,             1),
	MSG_MAP(SetPptLimit,			PPSMC_MSG_SetPptLimit,                 0),
	MSG_MAP(SetDriverDramAddrHigh,		PPSMC_MSG_SetDriverDramAddrHigh,       1),
	MSG_MAP(SetDriverDramAddrLow,		PPSMC_MSG_SetDriverDramAddrLow,        1),
	MSG_MAP(SetToolsDramAddrHigh,		PPSMC_MSG_SetToolsDramAddrHigh,        0),
	MSG_MAP(SetToolsDramAddrLow,		PPSMC_MSG_SetToolsDramAddrLow,         0),
	MSG_MAP(TransferTableSmu2Dram,		PPSMC_MSG_TransferTableSmu2Dram,       1),
	MSG_MAP(TransferTableDram2Smu,		PPSMC_MSG_TransferTableDram2Smu,       0),
	MSG_MAP(UseDefaultPPTable,		PPSMC_MSG_UseDefaultPPTable,           0),
	MSG_MAP(RunDcBtc,			PPSMC_MSG_RunDcBtc,                    0),
	MSG_MAP(EnterBaco,			PPSMC_MSG_EnterBaco,                   0),
	MSG_MAP(ExitBaco,			PPSMC_MSG_ExitBaco,                    0),
	MSG_MAP(SetSoftMinByFreq,		PPSMC_MSG_SetSoftMinByFreq,            1),
	MSG_MAP(SetSoftMaxByFreq,		PPSMC_MSG_SetSoftMaxByFreq,            1),
	MSG_MAP(SetHardMinByFreq,		PPSMC_MSG_SetHardMinByFreq,            1),
	MSG_MAP(SetHardMaxByFreq,		PPSMC_MSG_SetHardMaxByFreq,            0),
	MSG_MAP(GetMinDpmFreq,			PPSMC_MSG_GetMinDpmFreq,               1),
	MSG_MAP(GetMaxDpmFreq,			PPSMC_MSG_GetMaxDpmFreq,               1),
	MSG_MAP(GetDpmFreqByIndex,		PPSMC_MSG_GetDpmFreqByIndex,           1),
	MSG_MAP(PowerUpVcn,			PPSMC_MSG_PowerUpVcn,                  0),
	MSG_MAP(PowerDownVcn,			PPSMC_MSG_PowerDownVcn,                0),
	MSG_MAP(PowerUpJpeg,			PPSMC_MSG_PowerUpJpeg,                 0),
	MSG_MAP(PowerDownJpeg,			PPSMC_MSG_PowerDownJpeg,               0),
	MSG_MAP(GetDcModeMaxDpmFreq,		PPSMC_MSG_GetDcModeMaxDpmFreq,         1),
	MSG_MAP(OverridePcieParameters,		PPSMC_MSG_OverridePcieParameters,      0),
	MSG_MAP(ReenableAcDcInterrupt,		PPSMC_MSG_ReenableAcDcInterrupt,       0),
	MSG_MAP(AllowIHHostInterrupt,		PPSMC_MSG_AllowIHHostInterrupt,        0),
	MSG_MAP(DramLogSetDramAddrHigh,		PPSMC_MSG_DramLogSetDramAddrHigh,      0),
	MSG_MAP(DramLogSetDramAddrLow,		PPSMC_MSG_DramLogSetDramAddrLow,       0),
	MSG_MAP(DramLogSetDramSize,		PPSMC_MSG_DramLogSetDramSize,          0),
	MSG_MAP(AllowGfxOff,			PPSMC_MSG_AllowGfxOff,                 0),
	MSG_MAP(DisallowGfxOff,			PPSMC_MSG_DisallowGfxOff,              0),
	MSG_MAP(Mode1Reset,			PPSMC_MSG_Mode1Reset,                  0),
	MSG_MAP(PrepareMp1ForUnload,		PPSMC_MSG_PrepareMp1ForUnload,         0),
	MSG_MAP(SetMGpuFanBoostLimitRpm,	PPSMC_MSG_SetMGpuFanBoostLimitRpm,     0),
	MSG_MAP(DFCstateControl,		PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
	MSG_MAP(ArmD3,				PPSMC_MSG_ArmD3,                       0),
	MSG_MAP(AllowGpo,			PPSMC_MSG_SetGpoAllow,                 0),
};

/*
 * Generic smu clock id -> ASIC PPCLK mapping. Note the aliases:
 * SCLK shares PPCLK_GFXCLK and MCLK shares PPCLK_UCLK.
 */
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(GFXCLK,		PPCLK_GFXCLK),
	CLK_MAP(SCLK,		PPCLK_GFXCLK),
	CLK_MAP(SOCCLK,		PPCLK_SOCCLK),
	CLK_MAP(FCLK,		PPCLK_FCLK),
	CLK_MAP(UCLK,		PPCLK_UCLK),
	CLK_MAP(MCLK,		PPCLK_UCLK),
	CLK_MAP(VCLK,		PPCLK_VCLK_0),
	CLK_MAP(VCLK1,		PPCLK_VCLK_1),
	CLK_MAP(DCLK,		PPCLK_DCLK_0),
	CLK_MAP(DCLK1,		PPCLK_DCLK_1),
};

/*
 * Generic smu feature bit -> ASIC feature bit mapping. VCLK/DCLK DPM
 * have no dedicated firmware bits on this ASIC; both alias the shared
 * multimedia DPM bit (FEATURE_MM_DPM_BIT) via designated initializers.
 */
static struct cmn2asic_mapping smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(FW_DATA_READ),
	FEA_MAP(DPM_GFXCLK),
	FEA_MAP(DPM_GFX_POWER_OPTIMIZER),
	FEA_MAP(DPM_UCLK),
	FEA_MAP(DPM_FCLK),
	FEA_MAP(DPM_SOCCLK),
	FEA_MAP(DPM_MP0CLK),
	FEA_MAP(DPM_LINK),
	FEA_MAP(DPM_DCN),
	FEA_MAP(VMEMP_SCALING),
	FEA_MAP(VDDIO_MEM_SCALING),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_FCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_DCFCLK),
	FEA_MAP(DS_UCLK),
	FEA_MAP(GFX_ULV),
	FEA_MAP(FW_DSTATE),
	FEA_MAP(GFXOFF),
	FEA_MAP(BACO),
	FEA_MAP(MM_DPM),
	FEA_MAP(SOC_MPCLK_DS),
	FEA_MAP(BACO_MPCLK_DS),
	FEA_MAP(THROTTLERS),
	FEA_MAP(SMARTSHIFT),
	FEA_MAP(GTHR),
	FEA_MAP(ACDC),
	FEA_MAP(VR0HOT),
	FEA_MAP(FW_CTF),
	FEA_MAP(FAN_CONTROL),
	FEA_MAP(GFX_DCS),
	FEA_MAP(GFX_READ_MARGIN),
	FEA_MAP(LED_DISPLAY),
	FEA_MAP(GFXCLK_SPREAD_SPECTRUM),
	FEA_MAP(OUT_OF_BAND_MONITOR),
	FEA_MAP(OPTIMIZED_VMIN),
	FEA_MAP(GFX_IMU),
	FEA_MAP(BOOT_TIME_CAL),
	FEA_MAP(GFX_PCC_DFLL),
	FEA_MAP(SOC_CG),
	FEA_MAP(DF_CSTATE),
	FEA_MAP(GFX_EDC),
	FEA_MAP(BOOT_POWER_OPT),
	FEA_MAP(CLOCK_POWER_DOWN_BYPASS),
	FEA_MAP(DS_VCN),
	FEA_MAP(BACO_CG),
	FEA_MAP(MEM_TEMP_READ),
	FEA_MAP(ATHUB_MMHUB_PG),
	FEA_MAP(SOC_PCC),
	[SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
	[SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
};

/*
 * Generic smu table id -> ASIC table id mapping. The combo pptable has
 * no TAB_MAP() alias and is wired up with a designated initializer.
 */
static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(PPTABLE),
	TAB_MAP(WATERMARKS),
	TAB_MAP(AVFS_PSM_DEBUG),
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(DRIVER_SMU_CONFIG),
	TAB_MAP(ACTIVITY_MONITOR_COEFF),
	[SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
};

/* Power-source (AC/DC) mapping. */
static struct cmn2asic_mapping smu_v13_0_7_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
	PWR_MAP(AC),
	PWR_MAP(DC),
};

/* Driver power-profile -> PPLib workload bit mapping. */
static struct cmn2asic_mapping smu_v13_0_7_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT,	WORKLOAD_PPLIB_DEFAULT_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,		WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,		WORKLOAD_PPLIB_POWER_SAVING_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,		WORKLOAD_PPLIB_VIDEO_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,			WORKLOAD_PPLIB_VR_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,		WORKLOAD_PPLIB_COMPUTE_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,		WORKLOAD_PPLIB_CUSTOM_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D,		WORKLOAD_PPLIB_WINDOW_3D_BIT),
};

/*
 * PMFW throttler bit -> generic SMU throttler bit translation table,
 * consumed when building the throttler status reported to userspace.
 */
static const uint8_t smu_v13_0_7_throttler_map[] = {
	[THROTTLER_PPT0_BIT]		= (SMU_THROTTLER_PPT0_BIT),
	[THROTTLER_PPT1_BIT]		= (SMU_THROTTLER_PPT1_BIT),
	[THROTTLER_PPT2_BIT]		= (SMU_THROTTLER_PPT2_BIT),
	[THROTTLER_PPT3_BIT]		= (SMU_THROTTLER_PPT3_BIT),
	[THROTTLER_TDC_GFX_BIT]		= (SMU_THROTTLER_TDC_GFX_BIT),
	[THROTTLER_TDC_SOC_BIT]		= (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_TEMP_EDGE_BIT]	= (SMU_THROTTLER_TEMP_EDGE_BIT),
	[THROTTLER_TEMP_HOTSPOT_BIT]	= (SMU_THROTTLER_TEMP_HOTSPOT_BIT),
	[THROTTLER_TEMP_MEM_BIT]	= (SMU_THROTTLER_TEMP_MEM_BIT),
	[THROTTLER_TEMP_VR_GFX_BIT]	= (SMU_THROTTLER_TEMP_VR_GFX_BIT),
	[THROTTLER_TEMP_VR_SOC_BIT]	= (SMU_THROTTLER_TEMP_VR_SOC_BIT),
	[THROTTLER_TEMP_VR_MEM0_BIT]	= (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
	[THROTTLER_TEMP_VR_MEM1_BIT]	= (SMU_THROTTLER_TEMP_VR_MEM1_BIT),
	[THROTTLER_TEMP_LIQUID0_BIT]	= (SMU_THROTTLER_TEMP_LIQUID0_BIT),
	[THROTTLER_TEMP_LIQUID1_BIT]	= (SMU_THROTTLER_TEMP_LIQUID1_BIT),
	[THROTTLER_GFX_APCC_PLUS_BIT]	= (SMU_THROTTLER_APCC_BIT),
	[THROTTLER_FIT_BIT]		= (SMU_THROTTLER_FIT_BIT),
};
(SMU_THROTTLER_TEMP_VR_MEM1_BIT), 238 [THROTTLER_TEMP_LIQUID0_BIT] = (SMU_THROTTLER_TEMP_LIQUID0_BIT), 239 [THROTTLER_TEMP_LIQUID1_BIT] = (SMU_THROTTLER_TEMP_LIQUID1_BIT), 240 [THROTTLER_GFX_APCC_PLUS_BIT] = (SMU_THROTTLER_APCC_BIT), 241 [THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT), 242 }; 243 244 static int 245 smu_v13_0_7_get_allowed_feature_mask(struct smu_context *smu, 246 uint32_t *feature_mask, uint32_t num) 247 { 248 struct amdgpu_device *adev = smu->adev; 249 250 if (num > 2) 251 return -EINVAL; 252 253 memset(feature_mask, 0, sizeof(uint32_t) * num); 254 255 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DATA_READ_BIT); 256 257 if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) { 258 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT); 259 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT); 260 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT); 261 } 262 263 if (adev->pm.pp_feature & PP_GFXOFF_MASK) 264 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT); 265 266 if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) { 267 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT); 268 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_FCLK_BIT); 269 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT); 270 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT); 271 } 272 273 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT); 274 275 if (adev->pm.pp_feature & PP_PCIE_DPM_MASK) 276 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT); 277 278 if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK) 279 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT); 280 281 if (adev->pm.pp_feature & PP_ULV_MASK) 282 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT); 283 284 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT); 285 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT); 286 *(uint64_t 
*)feature_mask |= FEATURE_MASK(FEATURE_MM_DPM_BIT); 287 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_VCN_BIT); 288 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_FCLK_BIT); 289 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DF_CSTATE_BIT); 290 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_THROTTLERS_BIT); 291 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VR0HOT_BIT); 292 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_CTF_BIT); 293 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FAN_CONTROL_BIT); 294 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_SOCCLK_BIT); 295 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXCLK_SPREAD_SPECTRUM_BIT); 296 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MEM_TEMP_READ_BIT); 297 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DSTATE_BIT); 298 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_MPCLK_DS_BIT); 299 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_MPCLK_DS_BIT); 300 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_PCC_DFLL_BIT); 301 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT); 302 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_BIT); 303 304 if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK) 305 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCN_BIT); 306 307 if ((adev->pg_flags & AMD_PG_SUPPORT_ATHUB) && 308 (adev->pg_flags & AMD_PG_SUPPORT_MMHUB)) 309 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT); 310 311 return 0; 312 } 313 314 static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu) 315 { 316 struct smu_table_context *table_context = &smu->smu_table; 317 struct smu_13_0_7_powerplay_table *powerplay_table = 318 table_context->power_play_table; 319 struct smu_baco_context *smu_baco = &smu->smu_baco; 320 PPTable_t *smc_pptable = table_context->driver_pptable; 321 BoardTable_t *BoardTable = &smc_pptable->BoardTable; 322 323 if (powerplay_table->platform_caps & 
/*
 * Copy the raw pptable payload out of the powerplay table into the
 * driver pptable buffer, applying board-specific overrides first.
 */
static int smu_v13_0_7_store_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_13_0_7_powerplay_table *powerplay_table =
		table_context->power_play_table;
	struct amdgpu_device *adev = smu->adev;

	/*
	 * NOTE(review): device-specific quirk -- forces an extra
	 * DebugOverrides bit (0x80) for PCI device id 0x51. The meaning of
	 * the bit is not visible in this file; confirm against the PMFW
	 * interface headers before touching.
	 */
	if (adev->pdev->device == 0x51)
		powerplay_table->smc_pptable.SkuTable.DebugOverrides |= 0x00000080;

	memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
	       sizeof(PPTable_t));

	return 0;
}

/*
 * Probe MP1 firmware readiness by reading its FIRMWARE_FLAGS register;
 * the INTERRUPTS_ENABLED flag indicates the firmware is up.
 * Returns 0 when ready, -EIO otherwise.
 */
static int smu_v13_0_7_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
				   (smnMP1_FIRMWARE_FLAGS_SMU_13_0_7 & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

/* Local fallback layout of the atombios smc_dpm_info v13.0.7 data table. */
#ifndef atom_smc_dpm_info_table_13_0_7
struct atom_smc_dpm_info_table_13_0_7
{
	struct atom_common_table_header table_header;
	BoardTable_t BoardTable;
};
#endif

/*
 * Fetch the board table from the atombios smc_dpm_info data table and
 * splice it into the driver pptable. Only used when SCPM is disabled
 * (see smu_v13_0_7_setup_pptable), i.e. when the driver assembles the
 * pptable itself.
 */
static int smu_v13_0_7_append_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;

	PPTable_t *smc_pptable = table_context->driver_pptable;

	struct atom_smc_dpm_info_table_13_0_7 *smc_dpm_table;

	BoardTable_t *BoardTable = &smc_pptable->BoardTable;

	int index, ret;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smc_dpm_info);

	/* smc_dpm_table points into the cached atombios image; no copy needed */
	ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
					     (uint8_t **)&smc_dpm_table);
	if (ret)
		return ret;

	memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t));

	return 0;
}
/*
 * Retrieve the pptable via the PMFW combo-pptable mechanism and publish
 * it as the driver's power_play_table. The combo pptable buffer itself
 * is owned by smu_table; only the pointer and size are returned here.
 */
static int smu_v13_0_7_get_pptable_from_pmfw(struct smu_context *smu,
					     void **table,
					     uint32_t *size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	void *combo_pptable = smu_table->combo_pptable;
	int ret = 0;

	ret = smu_cmn_get_combo_pptable(smu);
	if (ret)
		return ret;

	*table = combo_pptable;
	*size = sizeof(struct smu_13_0_7_powerplay_table);

	return 0;
}

/*
 * Top-level pptable setup: fetch the table from PMFW, store the raw
 * payload, optionally append the atombios board table (non-SCPM only),
 * then validate/latch the platform capabilities.
 */
static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	/*
	 * With SCPM enabled, the pptable used will be signed. It cannot
	 * be used directly by driver. To get the raw pptable, we need to
	 * rely on the combo pptable (and its relevant SMU message).
	 */
	ret = smu_v13_0_7_get_pptable_from_pmfw(smu,
						&smu_table->power_play_table,
						&smu_table->power_play_table_size);
	if (ret)
		return ret;

	ret = smu_v13_0_7_store_powerplay_table(smu);
	if (ret)
		return ret;

	/*
	 * With SCPM enabled, the operation below will be handled
	 * by PSP. Driver involvement is unnecessary and useless.
	 */
	if (!adev->scpm_enabled) {
		ret = smu_v13_0_7_append_powerplay_table(smu);
		if (ret)
			return ret;
	}

	ret = smu_v13_0_7_check_powerplay_table(smu);
	if (ret)
		return ret;

	return ret;
}
/*
 * Declare the VRAM-backed SMU tables and allocate the host-side shadow
 * buffers (metrics, gpu_metrics, watermarks). On any allocation failure
 * the goto chain frees whatever was already allocated and returns
 * -ENOMEM; the VRAM tables themselves are created later by
 * smu_v13_0_init_smc_tables().
 */
static int smu_v13_0_7_tables_init(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
		       sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
		       AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	/* host shadow of the PMFW metrics table; metrics_time caches its age */
	smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
	if (!smu_table->metrics_table)
		goto err0_out;
	smu_table->metrics_time = 0;

	/* userspace-facing gpu_metrics buffer (v1.3 layout) */
	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table)
		goto err1_out;

	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
	if (!smu_table->watermarks_table)
		goto err2_out;

	return 0;

err2_out:
	kfree(smu_table->gpu_metrics_table);
err1_out:
	kfree(smu_table->metrics_table);
err0_out:
	return -ENOMEM;
}
err1_out: 508 kfree(smu_table->metrics_table); 509 err0_out: 510 return -ENOMEM; 511 } 512 513 static int smu_v13_0_7_allocate_dpm_context(struct smu_context *smu) 514 { 515 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 516 517 smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context), 518 GFP_KERNEL); 519 if (!smu_dpm->dpm_context) 520 return -ENOMEM; 521 522 smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context); 523 524 return 0; 525 } 526 527 static int smu_v13_0_7_init_smc_tables(struct smu_context *smu) 528 { 529 int ret = 0; 530 531 ret = smu_v13_0_7_tables_init(smu); 532 if (ret) 533 return ret; 534 535 ret = smu_v13_0_7_allocate_dpm_context(smu); 536 if (ret) 537 return ret; 538 539 return smu_v13_0_init_smc_tables(smu); 540 } 541 542 static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu) 543 { 544 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; 545 PPTable_t *driver_ppt = smu->smu_table.driver_pptable; 546 SkuTable_t *skutable = &driver_ppt->SkuTable; 547 struct smu_13_0_dpm_table *dpm_table; 548 struct smu_13_0_pcie_table *pcie_table; 549 uint32_t link_level; 550 int ret = 0; 551 552 /* socclk dpm table setup */ 553 dpm_table = &dpm_context->dpm_tables.soc_table; 554 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { 555 ret = smu_v13_0_set_single_dpm_table(smu, 556 SMU_SOCCLK, 557 dpm_table); 558 if (ret) 559 return ret; 560 } else { 561 dpm_table->count = 1; 562 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100; 563 dpm_table->dpm_levels[0].enabled = true; 564 dpm_table->min = dpm_table->dpm_levels[0].value; 565 dpm_table->max = dpm_table->dpm_levels[0].value; 566 } 567 568 /* gfxclk dpm table setup */ 569 dpm_table = &dpm_context->dpm_tables.gfx_table; 570 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) { 571 ret = smu_v13_0_set_single_dpm_table(smu, 572 SMU_GFXCLK, 573 dpm_table); 574 if (ret) 575 return ret; 576 } else { 577 
dpm_table->count = 1; 578 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100; 579 dpm_table->dpm_levels[0].enabled = true; 580 dpm_table->min = dpm_table->dpm_levels[0].value; 581 dpm_table->max = dpm_table->dpm_levels[0].value; 582 } 583 584 /* uclk dpm table setup */ 585 dpm_table = &dpm_context->dpm_tables.uclk_table; 586 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { 587 ret = smu_v13_0_set_single_dpm_table(smu, 588 SMU_UCLK, 589 dpm_table); 590 if (ret) 591 return ret; 592 } else { 593 dpm_table->count = 1; 594 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100; 595 dpm_table->dpm_levels[0].enabled = true; 596 dpm_table->min = dpm_table->dpm_levels[0].value; 597 dpm_table->max = dpm_table->dpm_levels[0].value; 598 } 599 600 /* fclk dpm table setup */ 601 dpm_table = &dpm_context->dpm_tables.fclk_table; 602 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) { 603 ret = smu_v13_0_set_single_dpm_table(smu, 604 SMU_FCLK, 605 dpm_table); 606 if (ret) 607 return ret; 608 } else { 609 dpm_table->count = 1; 610 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100; 611 dpm_table->dpm_levels[0].enabled = true; 612 dpm_table->min = dpm_table->dpm_levels[0].value; 613 dpm_table->max = dpm_table->dpm_levels[0].value; 614 } 615 616 /* vclk dpm table setup */ 617 dpm_table = &dpm_context->dpm_tables.vclk_table; 618 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) { 619 ret = smu_v13_0_set_single_dpm_table(smu, 620 SMU_VCLK, 621 dpm_table); 622 if (ret) 623 return ret; 624 } else { 625 dpm_table->count = 1; 626 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100; 627 dpm_table->dpm_levels[0].enabled = true; 628 dpm_table->min = dpm_table->dpm_levels[0].value; 629 dpm_table->max = dpm_table->dpm_levels[0].value; 630 } 631 632 /* dclk dpm table setup */ 633 dpm_table = &dpm_context->dpm_tables.dclk_table; 634 if (smu_cmn_feature_is_enabled(smu, 
SMU_FEATURE_DPM_DCLK_BIT)) { 635 ret = smu_v13_0_set_single_dpm_table(smu, 636 SMU_DCLK, 637 dpm_table); 638 if (ret) 639 return ret; 640 } else { 641 dpm_table->count = 1; 642 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100; 643 dpm_table->dpm_levels[0].enabled = true; 644 dpm_table->min = dpm_table->dpm_levels[0].value; 645 dpm_table->max = dpm_table->dpm_levels[0].value; 646 } 647 648 /* lclk dpm table setup */ 649 pcie_table = &dpm_context->dpm_tables.pcie_table; 650 pcie_table->num_of_link_levels = 0; 651 for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { 652 if (!skutable->PcieGenSpeed[link_level] && 653 !skutable->PcieLaneCount[link_level] && 654 !skutable->LclkFreq[link_level]) 655 continue; 656 657 pcie_table->pcie_gen[pcie_table->num_of_link_levels] = 658 skutable->PcieGenSpeed[link_level]; 659 pcie_table->pcie_lane[pcie_table->num_of_link_levels] = 660 skutable->PcieLaneCount[link_level]; 661 pcie_table->clk_freq[pcie_table->num_of_link_levels] = 662 skutable->LclkFreq[link_level]; 663 pcie_table->num_of_link_levels++; 664 } 665 666 return 0; 667 } 668 669 static bool smu_v13_0_7_is_dpm_running(struct smu_context *smu) 670 { 671 int ret = 0; 672 uint64_t feature_enabled; 673 674 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 675 if (ret) 676 return false; 677 678 return !!(feature_enabled & SMC_DPM_FEATURE); 679 } 680 681 static void smu_v13_0_7_dump_pptable(struct smu_context *smu) 682 { 683 struct smu_table_context *table_context = &smu->smu_table; 684 PPTable_t *pptable = table_context->driver_pptable; 685 SkuTable_t *skutable = &pptable->SkuTable; 686 687 dev_info(smu->adev->dev, "Dumped PPTable:\n"); 688 689 dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version); 690 dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]); 691 dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]); 692 } 693 694 static uint32_t 
/* Activity threshold (percent) below which post-deep-sleep clocks are reported. */
#define SMU_13_0_7_BUSY_THRESHOLD	15

/*
 * Fetch one metric from the (cached) PMFW metrics table.
 * @member selects the metric; the value is written to @value.
 * Temperatures are scaled by SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
 * unknown members yield UINT_MAX with a 0 return.
 */
static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu,
					    MetricsMember_t member,
					    uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics =
		&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
	int ret = 0;

	/* refresh the cached metrics table if it is stale */
	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->CurrClock[PPCLK_GFXCLK];
		break;
	case METRICS_CURR_SOCCLK:
		*value = metrics->CurrClock[PPCLK_SOCCLK];
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->CurrClock[PPCLK_UCLK];
		break;
	case METRICS_CURR_VCLK:
		*value = metrics->CurrClock[PPCLK_VCLK_0];
		break;
	case METRICS_CURR_VCLK1:
		*value = metrics->CurrClock[PPCLK_VCLK_1];
		break;
	case METRICS_CURR_DCLK:
		*value = metrics->CurrClock[PPCLK_DCLK_0];
		break;
	case METRICS_CURR_DCLK1:
		*value = metrics->CurrClock[PPCLK_DCLK_1];
		break;
	case METRICS_CURR_FCLK:
		*value = metrics->CurrClock[PPCLK_FCLK];
		break;
	case METRICS_AVERAGE_GFXCLK:
		*value = metrics->AverageGfxclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_FCLK:
		/* when memory is mostly idle, report the deep-sleep-filtered clock */
		if (metrics->AverageUclkActivity <= SMU_13_0_7_BUSY_THRESHOLD)
			*value = metrics->AverageFclkFrequencyPostDs;
		else
			*value = metrics->AverageFclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_UCLK:
		if (metrics->AverageUclkActivity <= SMU_13_0_7_BUSY_THRESHOLD)
			*value = metrics->AverageMemclkFrequencyPostDs;
		else
			*value = metrics->AverageMemclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->AverageVclk0Frequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->AverageDclk0Frequency;
		break;
	case METRICS_AVERAGE_VCLK1:
		*value = metrics->AverageVclk1Frequency;
		break;
	case METRICS_AVERAGE_DCLK1:
		*value = metrics->AverageDclk1Frequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = metrics->AverageUclkActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* << 8: fixed-point scaling of the reported power -- TODO confirm units */
		*value = metrics->AverageSocketPower << 8;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->AvgTemperature[TEMP_EDGE] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->AvgTemperature[TEMP_HOTSPOT] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = metrics->AvgTemperature[TEMP_MEM] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRGFX:
		*value = metrics->AvgTemperature[TEMP_VR_GFX] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRSOC:
		*value = metrics->AvgTemperature[TEMP_VR_SOC] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = smu_v13_0_7_get_throttler_status(metrics);
		break;
	case METRICS_CURR_FANSPEED:
		*value = metrics->AvgFanRpm;
		break;
	case METRICS_CURR_FANPWM:
		*value = metrics->AvgFanPwm;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		*value = metrics->AvgVoltage[SVI_PLANE_GFX];
		break;
	case METRICS_PCIE_RATE:
		*value = metrics->PcieRate;
		break;
	case METRICS_PCIE_WIDTH:
		*value = metrics->PcieWidth;
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}
/*
 * amd_pp sensor read entry point. Most sensors are served from the PMFW
 * metrics table; MAX_FAN_RPM comes straight from the pptable. Clock
 * sensors are converted from MHz-style metrics values by multiplying
 * by 100 (units follow the amd_pp sensor convention). Unknown sensors
 * return -EOPNOTSUPP.
 */
static int smu_v13_0_7_read_sensor(struct smu_context *smu,
				   enum amd_pp_sensors sensor,
				   void *data,
				   uint32_t *size)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	int ret = 0;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		/*
		 * NOTE(review): only 16 bits are written here while *size
		 * claims 4 bytes; the upper half of the caller's buffer is
		 * left untouched -- verify callers zero their buffer.
		 */
		*(uint16_t *)data = smc_pptable->SkuTable.FanMaximumRpm;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_LOAD:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_MEMACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_SOCKETPOWER,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_HOTSPOT,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_EDGE,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_MEM,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_UCLK,
						       (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXCLK,
						       (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_VOLTAGE_VDDGFX,
						       (uint32_t *)data);
		*size = 4;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
*size) 833 { 834 struct smu_table_context *table_context = &smu->smu_table; 835 PPTable_t *smc_pptable = table_context->driver_pptable; 836 int ret = 0; 837 838 switch (sensor) { 839 case AMDGPU_PP_SENSOR_MAX_FAN_RPM: 840 *(uint16_t *)data = smc_pptable->SkuTable.FanMaximumRpm; 841 *size = 4; 842 break; 843 case AMDGPU_PP_SENSOR_MEM_LOAD: 844 ret = smu_v13_0_7_get_smu_metrics_data(smu, 845 METRICS_AVERAGE_MEMACTIVITY, 846 (uint32_t *)data); 847 *size = 4; 848 break; 849 case AMDGPU_PP_SENSOR_GPU_LOAD: 850 ret = smu_v13_0_7_get_smu_metrics_data(smu, 851 METRICS_AVERAGE_GFXACTIVITY, 852 (uint32_t *)data); 853 *size = 4; 854 break; 855 case AMDGPU_PP_SENSOR_GPU_POWER: 856 ret = smu_v13_0_7_get_smu_metrics_data(smu, 857 METRICS_AVERAGE_SOCKETPOWER, 858 (uint32_t *)data); 859 *size = 4; 860 break; 861 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 862 ret = smu_v13_0_7_get_smu_metrics_data(smu, 863 METRICS_TEMPERATURE_HOTSPOT, 864 (uint32_t *)data); 865 *size = 4; 866 break; 867 case AMDGPU_PP_SENSOR_EDGE_TEMP: 868 ret = smu_v13_0_7_get_smu_metrics_data(smu, 869 METRICS_TEMPERATURE_EDGE, 870 (uint32_t *)data); 871 *size = 4; 872 break; 873 case AMDGPU_PP_SENSOR_MEM_TEMP: 874 ret = smu_v13_0_7_get_smu_metrics_data(smu, 875 METRICS_TEMPERATURE_MEM, 876 (uint32_t *)data); 877 *size = 4; 878 break; 879 case AMDGPU_PP_SENSOR_GFX_MCLK: 880 ret = smu_v13_0_7_get_smu_metrics_data(smu, 881 METRICS_AVERAGE_UCLK, 882 (uint32_t *)data); 883 *(uint32_t *)data *= 100; 884 *size = 4; 885 break; 886 case AMDGPU_PP_SENSOR_GFX_SCLK: 887 ret = smu_v13_0_7_get_smu_metrics_data(smu, 888 METRICS_AVERAGE_GFXCLK, 889 (uint32_t *)data); 890 *(uint32_t *)data *= 100; 891 *size = 4; 892 break; 893 case AMDGPU_PP_SENSOR_VDDGFX: 894 ret = smu_v13_0_7_get_smu_metrics_data(smu, 895 METRICS_VOLTAGE_VDDGFX, 896 (uint32_t *)data); 897 *size = 4; 898 break; 899 default: 900 ret = -EOPNOTSUPP; 901 break; 902 } 903 904 return ret; 905 } 906 907 static int smu_v13_0_7_get_current_clk_freq_by_table(struct smu_context 
/*
 * Emit the sysfs "pp_dpm_*" listing for a clock domain into @buf.
 * For clock domains the active level is marked with '*'; fine-grained
 * domains (two real levels) may be faked as three levels when the
 * current frequency lies strictly between min and max. SMU_PCIE prints
 * gen speed, lane width and lclk per link level. Returns the number of
 * bytes written, or a negative error code.
 */
static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					char *buf)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_13_0_dpm_table *single_dpm_table;
	struct smu_13_0_pcie_table *pcie_table;
	uint32_t gen_speed, lane_width;
	/*
	 * NOTE(review): curr_freq is int but is passed by address to a
	 * function taking uint32_t * -- works on this ABI but triggers a
	 * pointer-type warning; consider declaring it uint32_t.
	 */
	int i, curr_freq, size = 0;
	int ret = 0;

	smu_cmn_get_sysfs_buf(&buf, &size);

	/* after a fatal RAS interrupt the metrics path is not trustworthy */
	if (amdgpu_ras_intr_triggered()) {
		size += sysfs_emit_at(buf, size, "unavailable\n");
		return size;
	}

	/* pick the dpm table matching the requested clock domain */
	switch (clk_type) {
	case SMU_SCLK:
		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		break;
	case SMU_MCLK:
		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
		break;
	case SMU_SOCCLK:
		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
		break;
	case SMU_FCLK:
		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
		break;
	default:
		break;
	}

	switch (clk_type) {
	case SMU_SCLK:
	case SMU_MCLK:
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_VCLK1:
	case SMU_DCLK:
	case SMU_DCLK1:
		ret = smu_v13_0_7_get_current_clk_freq_by_table(smu, clk_type, &curr_freq);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to get current clock freq!");
			return ret;
		}

		if (single_dpm_table->is_fine_grained) {
			/*
			 * For fine grained dpms, there are only two dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> max clock freq
			 * And the current clock frequency can be any value between them.
			 * So, if the current clock frequency is not at level 0 or level 1,
			 * we will fake it as three dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> current actual clock freq
			 *   - level 2 -> max clock freq
			 */
			if ((single_dpm_table->dpm_levels[0].value != curr_freq) &&
			    (single_dpm_table->dpm_levels[1].value != curr_freq)) {
				size += sysfs_emit_at(buf, size, "0: %uMhz\n",
						      single_dpm_table->dpm_levels[0].value);
				size += sysfs_emit_at(buf, size, "1: %uMhz *\n",
						      curr_freq);
				size += sysfs_emit_at(buf, size, "2: %uMhz\n",
						      single_dpm_table->dpm_levels[1].value);
			} else {
				size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
						      single_dpm_table->dpm_levels[0].value,
						      single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : "");
				size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
						      single_dpm_table->dpm_levels[1].value,
						      single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : "");
			}
		} else {
			for (i = 0; i < single_dpm_table->count; i++)
				size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
						      i, single_dpm_table->dpm_levels[i].value,
						      single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : "");
		}
		break;
	case SMU_PCIE:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_PCIE_RATE,
						       &gen_speed);
		if (ret)
			return ret;

		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_PCIE_WIDTH,
						       &lane_width);
		if (ret)
			return ret;

		pcie_table = &(dpm_context->dpm_tables.pcie_table);
		for (i = 0; i < pcie_table->num_of_link_levels; i++)
			size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
					(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," :
					(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," :
					(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," :
					(pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," : "",
					(pcie_table->pcie_lane[i] == 1) ? "x1" :
					(pcie_table->pcie_lane[i] == 2) ? "x2" :
					(pcie_table->pcie_lane[i] == 3) ? "x4" :
					(pcie_table->pcie_lane[i] == 4) ? "x8" :
					(pcie_table->pcie_lane[i] == 5) ? "x12" :
					(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
					pcie_table->clk_freq[i],
					(gen_speed == pcie_table->pcie_gen[i]) &&
					(lane_width == pcie_table->pcie_lane[i]) ?
					"*" : "");
		break;

	default:
		break;
	}

	return size;
}
"*" : ""); 1040 } 1041 } else { 1042 for (i = 0; i < single_dpm_table->count; i++) 1043 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", 1044 i, single_dpm_table->dpm_levels[i].value, 1045 single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : ""); 1046 } 1047 break; 1048 case SMU_PCIE: 1049 ret = smu_v13_0_7_get_smu_metrics_data(smu, 1050 METRICS_PCIE_RATE, 1051 &gen_speed); 1052 if (ret) 1053 return ret; 1054 1055 ret = smu_v13_0_7_get_smu_metrics_data(smu, 1056 METRICS_PCIE_WIDTH, 1057 &lane_width); 1058 if (ret) 1059 return ret; 1060 1061 pcie_table = &(dpm_context->dpm_tables.pcie_table); 1062 for (i = 0; i < pcie_table->num_of_link_levels; i++) 1063 size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i, 1064 (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," : 1065 (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," : 1066 (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," : 1067 (pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," : "", 1068 (pcie_table->pcie_lane[i] == 1) ? "x1" : 1069 (pcie_table->pcie_lane[i] == 2) ? "x2" : 1070 (pcie_table->pcie_lane[i] == 3) ? "x4" : 1071 (pcie_table->pcie_lane[i] == 4) ? "x8" : 1072 (pcie_table->pcie_lane[i] == 5) ? "x12" : 1073 (pcie_table->pcie_lane[i] == 6) ? "x16" : "", 1074 pcie_table->clk_freq[i], 1075 (gen_speed == pcie_table->pcie_gen[i]) && 1076 (lane_width == pcie_table->pcie_lane[i]) ? 1077 "*" : ""); 1078 break; 1079 1080 default: 1081 break; 1082 } 1083 1084 return size; 1085 } 1086 1087 static int smu_v13_0_7_force_clk_levels(struct smu_context *smu, 1088 enum smu_clk_type clk_type, 1089 uint32_t mask) 1090 { 1091 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 1092 struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; 1093 struct smu_13_0_dpm_table *single_dpm_table; 1094 uint32_t soft_min_level, soft_max_level; 1095 uint32_t min_freq, max_freq; 1096 int ret = 0; 1097 1098 soft_min_level = mask ? (ffs(mask) - 1) : 0; 1099 soft_max_level = mask ? 
(fls(mask) - 1) : 0; 1100 1101 switch (clk_type) { 1102 case SMU_GFXCLK: 1103 case SMU_SCLK: 1104 single_dpm_table = &(dpm_context->dpm_tables.gfx_table); 1105 break; 1106 case SMU_MCLK: 1107 case SMU_UCLK: 1108 single_dpm_table = &(dpm_context->dpm_tables.uclk_table); 1109 break; 1110 case SMU_SOCCLK: 1111 single_dpm_table = &(dpm_context->dpm_tables.soc_table); 1112 break; 1113 case SMU_FCLK: 1114 single_dpm_table = &(dpm_context->dpm_tables.fclk_table); 1115 break; 1116 case SMU_VCLK: 1117 case SMU_VCLK1: 1118 single_dpm_table = &(dpm_context->dpm_tables.vclk_table); 1119 break; 1120 case SMU_DCLK: 1121 case SMU_DCLK1: 1122 single_dpm_table = &(dpm_context->dpm_tables.dclk_table); 1123 break; 1124 default: 1125 break; 1126 } 1127 1128 switch (clk_type) { 1129 case SMU_GFXCLK: 1130 case SMU_SCLK: 1131 case SMU_MCLK: 1132 case SMU_UCLK: 1133 case SMU_SOCCLK: 1134 case SMU_FCLK: 1135 case SMU_VCLK: 1136 case SMU_VCLK1: 1137 case SMU_DCLK: 1138 case SMU_DCLK1: 1139 if (single_dpm_table->is_fine_grained) { 1140 /* There is only 2 levels for fine grained DPM */ 1141 soft_max_level = (soft_max_level >= 1 ? 1 : 0); 1142 soft_min_level = (soft_min_level >= 1 ? 
1 : 0); 1143 } else { 1144 if ((soft_max_level >= single_dpm_table->count) || 1145 (soft_min_level >= single_dpm_table->count)) 1146 return -EINVAL; 1147 } 1148 1149 min_freq = single_dpm_table->dpm_levels[soft_min_level].value; 1150 max_freq = single_dpm_table->dpm_levels[soft_max_level].value; 1151 1152 ret = smu_v13_0_set_soft_freq_limited_range(smu, 1153 clk_type, 1154 min_freq, 1155 max_freq); 1156 break; 1157 case SMU_DCEFCLK: 1158 case SMU_PCIE: 1159 default: 1160 break; 1161 } 1162 1163 return ret; 1164 } 1165 1166 static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu, 1167 uint32_t pcie_gen_cap, 1168 uint32_t pcie_width_cap) 1169 { 1170 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; 1171 struct smu_13_0_pcie_table *pcie_table = 1172 &dpm_context->dpm_tables.pcie_table; 1173 uint32_t smu_pcie_arg; 1174 int ret, i; 1175 1176 for (i = 0; i < pcie_table->num_of_link_levels; i++) { 1177 if (pcie_table->pcie_gen[i] > pcie_gen_cap) 1178 pcie_table->pcie_gen[i] = pcie_gen_cap; 1179 if (pcie_table->pcie_lane[i] > pcie_width_cap) 1180 pcie_table->pcie_lane[i] = pcie_width_cap; 1181 1182 smu_pcie_arg = i << 16; 1183 smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; 1184 smu_pcie_arg |= pcie_table->pcie_lane[i]; 1185 1186 ret = smu_cmn_send_smc_msg_with_param(smu, 1187 SMU_MSG_OverridePcieParameters, 1188 smu_pcie_arg, 1189 NULL); 1190 if (ret) 1191 return ret; 1192 } 1193 1194 return 0; 1195 } 1196 1197 static const struct smu_temperature_range smu13_thermal_policy[] = 1198 { 1199 {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, 1200 { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, 1201 }; 1202 1203 static int smu_v13_0_7_get_thermal_temperature_range(struct smu_context *smu, 1204 struct smu_temperature_range *range) 1205 { 1206 struct smu_table_context *table_context = &smu->smu_table; 1207 struct smu_13_0_7_powerplay_table *powerplay_table = 1208 table_context->power_play_table; 1209 
PPTable_t *pptable = smu->smu_table.driver_pptable; 1210 1211 if (!range) 1212 return -EINVAL; 1213 1214 memcpy(range, &smu13_thermal_policy[0], sizeof(struct smu_temperature_range)); 1215 1216 range->max = pptable->SkuTable.TemperatureLimit[TEMP_EDGE] * 1217 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1218 range->edge_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE) * 1219 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1220 range->hotspot_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] * 1221 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1222 range->hotspot_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT) * 1223 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1224 range->mem_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_MEM] * 1225 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1226 range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)* 1227 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1228 range->software_shutdown_temp = powerplay_table->software_shutdown_temp; 1229 range->software_shutdown_temp_offset = pptable->SkuTable.FanAbnormalTempLimitOffset; 1230 1231 return 0; 1232 } 1233 1234 #define MAX(a, b) ((a) > (b) ? 
(a) : (b)) 1235 static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu, 1236 void **table) 1237 { 1238 struct smu_table_context *smu_table = &smu->smu_table; 1239 struct gpu_metrics_v1_3 *gpu_metrics = 1240 (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table; 1241 SmuMetricsExternal_t metrics_ext; 1242 SmuMetrics_t *metrics = &metrics_ext.SmuMetrics; 1243 int ret = 0; 1244 1245 ret = smu_cmn_get_metrics_table(smu, 1246 &metrics_ext, 1247 true); 1248 if (ret) 1249 return ret; 1250 1251 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3); 1252 1253 gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE]; 1254 gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT]; 1255 gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM]; 1256 gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX]; 1257 gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC]; 1258 gpu_metrics->temperature_vrmem = MAX(metrics->AvgTemperature[TEMP_VR_MEM0], 1259 metrics->AvgTemperature[TEMP_VR_MEM1]); 1260 1261 gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity; 1262 gpu_metrics->average_umc_activity = metrics->AverageUclkActivity; 1263 gpu_metrics->average_mm_activity = MAX(metrics->Vcn0ActivityPercentage, 1264 metrics->Vcn1ActivityPercentage); 1265 1266 gpu_metrics->average_socket_power = metrics->AverageSocketPower; 1267 gpu_metrics->energy_accumulator = metrics->EnergyAccumulator; 1268 1269 if (metrics->AverageGfxActivity <= SMU_13_0_7_BUSY_THRESHOLD) 1270 gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs; 1271 else 1272 gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs; 1273 1274 if (metrics->AverageUclkActivity <= SMU_13_0_7_BUSY_THRESHOLD) 1275 gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs; 1276 else 1277 gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs; 1278 1279 
gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency; 1280 gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency; 1281 gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency; 1282 gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency; 1283 1284 gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK]; 1285 gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0]; 1286 gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0]; 1287 gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1]; 1288 gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_1]; 1289 1290 gpu_metrics->throttle_status = 1291 smu_v13_0_7_get_throttler_status(metrics); 1292 gpu_metrics->indep_throttle_status = 1293 smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status, 1294 smu_v13_0_7_throttler_map); 1295 1296 gpu_metrics->current_fan_speed = metrics->AvgFanRpm; 1297 1298 gpu_metrics->pcie_link_width = metrics->PcieWidth; 1299 gpu_metrics->pcie_link_speed = metrics->PcieRate; 1300 1301 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1302 1303 gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_GFX]; 1304 gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_SOC]; 1305 gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VMEMP]; 1306 1307 *table = (void *)gpu_metrics; 1308 1309 return sizeof(struct gpu_metrics_v1_3); 1310 } 1311 1312 static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu) 1313 { 1314 struct smu_13_0_dpm_context *dpm_context = 1315 smu->smu_dpm.dpm_context; 1316 struct smu_13_0_dpm_table *gfx_table = 1317 &dpm_context->dpm_tables.gfx_table; 1318 struct smu_13_0_dpm_table *mem_table = 1319 &dpm_context->dpm_tables.uclk_table; 1320 struct smu_13_0_dpm_table *soc_table = 1321 &dpm_context->dpm_tables.soc_table; 1322 struct smu_13_0_dpm_table *vclk_table = 1323 &dpm_context->dpm_tables.vclk_table; 1324 struct smu_13_0_dpm_table *dclk_table = 1325 
&dpm_context->dpm_tables.dclk_table; 1326 struct smu_13_0_dpm_table *fclk_table = 1327 &dpm_context->dpm_tables.fclk_table; 1328 struct smu_umd_pstate_table *pstate_table = 1329 &smu->pstate_table; 1330 1331 pstate_table->gfxclk_pstate.min = gfx_table->min; 1332 pstate_table->gfxclk_pstate.peak = gfx_table->max; 1333 1334 pstate_table->uclk_pstate.min = mem_table->min; 1335 pstate_table->uclk_pstate.peak = mem_table->max; 1336 1337 pstate_table->socclk_pstate.min = soc_table->min; 1338 pstate_table->socclk_pstate.peak = soc_table->max; 1339 1340 pstate_table->vclk_pstate.min = vclk_table->min; 1341 pstate_table->vclk_pstate.peak = vclk_table->max; 1342 1343 pstate_table->dclk_pstate.min = dclk_table->min; 1344 pstate_table->dclk_pstate.peak = dclk_table->max; 1345 1346 pstate_table->fclk_pstate.min = fclk_table->min; 1347 pstate_table->fclk_pstate.peak = fclk_table->max; 1348 1349 /* 1350 * For now, just use the mininum clock frequency. 1351 * TODO: update them when the real pstate settings available 1352 */ 1353 pstate_table->gfxclk_pstate.standard = gfx_table->min; 1354 pstate_table->uclk_pstate.standard = mem_table->min; 1355 pstate_table->socclk_pstate.standard = soc_table->min; 1356 pstate_table->vclk_pstate.standard = vclk_table->min; 1357 pstate_table->dclk_pstate.standard = dclk_table->min; 1358 pstate_table->fclk_pstate.standard = fclk_table->min; 1359 1360 return 0; 1361 } 1362 1363 static int smu_v13_0_7_get_fan_speed_pwm(struct smu_context *smu, 1364 uint32_t *speed) 1365 { 1366 if (!speed) 1367 return -EINVAL; 1368 1369 return smu_v13_0_7_get_smu_metrics_data(smu, 1370 METRICS_CURR_FANPWM, 1371 speed); 1372 } 1373 1374 static int smu_v13_0_7_get_fan_speed_rpm(struct smu_context *smu, 1375 uint32_t *speed) 1376 { 1377 if (!speed) 1378 return -EINVAL; 1379 1380 return smu_v13_0_7_get_smu_metrics_data(smu, 1381 METRICS_CURR_FANSPEED, 1382 speed); 1383 } 1384 1385 static int smu_v13_0_7_enable_mgpu_fan_boost(struct smu_context *smu) 1386 { 1387 struct 
smu_table_context *table_context = &smu->smu_table; 1388 PPTable_t *pptable = table_context->driver_pptable; 1389 SkuTable_t *skutable = &pptable->SkuTable; 1390 1391 /* 1392 * Skip the MGpuFanBoost setting for those ASICs 1393 * which do not support it 1394 */ 1395 if (skutable->MGpuAcousticLimitRpmThreshold == 0) 1396 return 0; 1397 1398 return smu_cmn_send_smc_msg_with_param(smu, 1399 SMU_MSG_SetMGpuFanBoostLimitRpm, 1400 0, 1401 NULL); 1402 } 1403 1404 static int smu_v13_0_7_get_power_limit(struct smu_context *smu, 1405 uint32_t *current_power_limit, 1406 uint32_t *default_power_limit, 1407 uint32_t *max_power_limit) 1408 { 1409 struct smu_table_context *table_context = &smu->smu_table; 1410 struct smu_13_0_7_powerplay_table *powerplay_table = 1411 (struct smu_13_0_7_powerplay_table *)table_context->power_play_table; 1412 PPTable_t *pptable = table_context->driver_pptable; 1413 SkuTable_t *skutable = &pptable->SkuTable; 1414 uint32_t power_limit, od_percent; 1415 1416 if (smu_v13_0_get_current_power_limit(smu, &power_limit)) 1417 power_limit = smu->adev->pm.ac_power ? 
1418 skutable->SocketPowerLimitAc[PPT_THROTTLER_PPT0] : 1419 skutable->SocketPowerLimitDc[PPT_THROTTLER_PPT0]; 1420 1421 if (current_power_limit) 1422 *current_power_limit = power_limit; 1423 if (default_power_limit) 1424 *default_power_limit = power_limit; 1425 1426 if (max_power_limit) { 1427 if (smu->od_enabled) { 1428 od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]); 1429 1430 dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit); 1431 1432 power_limit *= (100 + od_percent); 1433 power_limit /= 100; 1434 } 1435 *max_power_limit = power_limit; 1436 } 1437 1438 return 0; 1439 } 1440 1441 static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf) 1442 { 1443 DpmActivityMonitorCoeffIntExternal_t activity_monitor_external[PP_SMC_POWER_PROFILE_COUNT]; 1444 uint32_t i, j, size = 0; 1445 int16_t workload_type = 0; 1446 int result = 0; 1447 1448 if (!buf) 1449 return -EINVAL; 1450 1451 size += sysfs_emit_at(buf, size, " "); 1452 for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++) 1453 size += sysfs_emit_at(buf, size, "%-14s%s", amdgpu_pp_profile_name[i], 1454 (i == smu->power_profile_mode) ? 
"* " : " "); 1455 1456 size += sysfs_emit_at(buf, size, "\n"); 1457 1458 for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++) { 1459 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 1460 workload_type = smu_cmn_to_asic_specific_index(smu, 1461 CMN2ASIC_MAPPING_WORKLOAD, 1462 i); 1463 if (workload_type < 0) 1464 return -EINVAL; 1465 1466 result = smu_cmn_update_table(smu, 1467 SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type, 1468 (void *)(&activity_monitor_external[i]), false); 1469 if (result) { 1470 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); 1471 return result; 1472 } 1473 } 1474 1475 #define PRINT_DPM_MONITOR(field) \ 1476 do { \ 1477 size += sysfs_emit_at(buf, size, "%-30s", #field); \ 1478 for (j = 0; j <= PP_SMC_POWER_PROFILE_WINDOW3D; j++) \ 1479 size += sysfs_emit_at(buf, size, "%-16d", activity_monitor_external[j].DpmActivityMonitorCoeffInt.field); \ 1480 size += sysfs_emit_at(buf, size, "\n"); \ 1481 } while (0) 1482 1483 PRINT_DPM_MONITOR(Gfx_ActiveHystLimit); 1484 PRINT_DPM_MONITOR(Gfx_IdleHystLimit); 1485 PRINT_DPM_MONITOR(Gfx_FPS); 1486 PRINT_DPM_MONITOR(Gfx_MinActiveFreqType); 1487 PRINT_DPM_MONITOR(Gfx_BoosterFreqType); 1488 PRINT_DPM_MONITOR(Gfx_MinActiveFreq); 1489 PRINT_DPM_MONITOR(Gfx_BoosterFreq); 1490 PRINT_DPM_MONITOR(Fclk_ActiveHystLimit); 1491 PRINT_DPM_MONITOR(Fclk_IdleHystLimit); 1492 PRINT_DPM_MONITOR(Fclk_FPS); 1493 PRINT_DPM_MONITOR(Fclk_MinActiveFreqType); 1494 PRINT_DPM_MONITOR(Fclk_BoosterFreqType); 1495 PRINT_DPM_MONITOR(Fclk_MinActiveFreq); 1496 PRINT_DPM_MONITOR(Fclk_BoosterFreq); 1497 #undef PRINT_DPM_MONITOR 1498 1499 return size; 1500 } 1501 1502 static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) 1503 { 1504 1505 DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; 1506 DpmActivityMonitorCoeffInt_t *activity_monitor = 1507 &(activity_monitor_external.DpmActivityMonitorCoeffInt); 1508 int workload_type, ret = 0; 1509 1510 
smu->power_profile_mode = input[size]; 1511 1512 if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_WINDOW3D) { 1513 dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); 1514 return -EINVAL; 1515 } 1516 1517 if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { 1518 1519 ret = smu_cmn_update_table(smu, 1520 SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, 1521 (void *)(&activity_monitor_external), false); 1522 if (ret) { 1523 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); 1524 return ret; 1525 } 1526 1527 switch (input[0]) { 1528 case 0: /* Gfxclk */ 1529 activity_monitor->Gfx_ActiveHystLimit = input[1]; 1530 activity_monitor->Gfx_IdleHystLimit = input[2]; 1531 activity_monitor->Gfx_FPS = input[3]; 1532 activity_monitor->Gfx_MinActiveFreqType = input[4]; 1533 activity_monitor->Gfx_BoosterFreqType = input[5]; 1534 activity_monitor->Gfx_MinActiveFreq = input[6]; 1535 activity_monitor->Gfx_BoosterFreq = input[7]; 1536 break; 1537 case 1: /* Fclk */ 1538 activity_monitor->Fclk_ActiveHystLimit = input[1]; 1539 activity_monitor->Fclk_IdleHystLimit = input[2]; 1540 activity_monitor->Fclk_FPS = input[3]; 1541 activity_monitor->Fclk_MinActiveFreqType = input[4]; 1542 activity_monitor->Fclk_BoosterFreqType = input[5]; 1543 activity_monitor->Fclk_MinActiveFreq = input[6]; 1544 activity_monitor->Fclk_BoosterFreq = input[7]; 1545 break; 1546 } 1547 1548 ret = smu_cmn_update_table(smu, 1549 SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, 1550 (void *)(&activity_monitor_external), true); 1551 if (ret) { 1552 dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); 1553 return ret; 1554 } 1555 } 1556 1557 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 1558 workload_type = smu_cmn_to_asic_specific_index(smu, 1559 CMN2ASIC_MAPPING_WORKLOAD, 1560 smu->power_profile_mode); 1561 if (workload_type < 0) 1562 return -EINVAL; 1563 smu_cmn_send_smc_msg_with_param(smu, 
SMU_MSG_SetWorkloadMask, 1564 1 << workload_type, NULL); 1565 1566 return ret; 1567 } 1568 1569 static int smu_v13_0_7_set_mp1_state(struct smu_context *smu, 1570 enum pp_mp1_state mp1_state) 1571 { 1572 int ret; 1573 1574 switch (mp1_state) { 1575 case PP_MP1_STATE_UNLOAD: 1576 ret = smu_cmn_set_mp1_state(smu, mp1_state); 1577 break; 1578 default: 1579 /* Ignore others */ 1580 ret = 0; 1581 } 1582 1583 return ret; 1584 } 1585 1586 static int smu_v13_0_7_baco_enter(struct smu_context *smu) 1587 { 1588 struct smu_baco_context *smu_baco = &smu->smu_baco; 1589 struct amdgpu_device *adev = smu->adev; 1590 1591 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) 1592 return smu_v13_0_baco_set_armd3_sequence(smu, 1593 smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO); 1594 else 1595 return smu_v13_0_baco_enter(smu); 1596 } 1597 1598 static int smu_v13_0_7_baco_exit(struct smu_context *smu) 1599 { 1600 struct amdgpu_device *adev = smu->adev; 1601 1602 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { 1603 /* Wait for PMFW handling for the Dstate change */ 1604 usleep_range(10000, 11000); 1605 return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); 1606 } else { 1607 return smu_v13_0_baco_exit(smu); 1608 } 1609 } 1610 1611 static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu) 1612 { 1613 struct amdgpu_device *adev = smu->adev; 1614 1615 /* SRIOV does not support SMU mode1 reset */ 1616 if (amdgpu_sriov_vf(adev)) 1617 return false; 1618 1619 return true; 1620 } 1621 1622 static int smu_v13_0_7_set_df_cstate(struct smu_context *smu, 1623 enum pp_df_cstate state) 1624 { 1625 return smu_cmn_send_smc_msg_with_param(smu, 1626 SMU_MSG_DFCstateControl, 1627 state, 1628 NULL); 1629 } 1630 1631 static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { 1632 .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask, 1633 .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table, 1634 .is_dpm_running = 
smu_v13_0_7_is_dpm_running, 1635 .dump_pptable = smu_v13_0_7_dump_pptable, 1636 .init_microcode = smu_v13_0_init_microcode, 1637 .load_microcode = smu_v13_0_load_microcode, 1638 .fini_microcode = smu_v13_0_fini_microcode, 1639 .init_smc_tables = smu_v13_0_7_init_smc_tables, 1640 .fini_smc_tables = smu_v13_0_fini_smc_tables, 1641 .init_power = smu_v13_0_init_power, 1642 .fini_power = smu_v13_0_fini_power, 1643 .check_fw_status = smu_v13_0_7_check_fw_status, 1644 .setup_pptable = smu_v13_0_7_setup_pptable, 1645 .check_fw_version = smu_v13_0_check_fw_version, 1646 .write_pptable = smu_cmn_write_pptable, 1647 .set_driver_table_location = smu_v13_0_set_driver_table_location, 1648 .system_features_control = smu_v13_0_system_features_control, 1649 .set_allowed_mask = smu_v13_0_set_allowed_mask, 1650 .get_enabled_mask = smu_cmn_get_enabled_mask, 1651 .dpm_set_vcn_enable = smu_v13_0_set_vcn_enable, 1652 .dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable, 1653 .init_pptable_microcode = smu_v13_0_init_pptable_microcode, 1654 .populate_umd_state_clk = smu_v13_0_7_populate_umd_state_clk, 1655 .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq, 1656 .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values, 1657 .read_sensor = smu_v13_0_7_read_sensor, 1658 .feature_is_enabled = smu_cmn_feature_is_enabled, 1659 .print_clk_levels = smu_v13_0_7_print_clk_levels, 1660 .force_clk_levels = smu_v13_0_7_force_clk_levels, 1661 .update_pcie_parameters = smu_v13_0_7_update_pcie_parameters, 1662 .get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range, 1663 .register_irq_handler = smu_v13_0_register_irq_handler, 1664 .enable_thermal_alert = smu_v13_0_enable_thermal_alert, 1665 .disable_thermal_alert = smu_v13_0_disable_thermal_alert, 1666 .notify_memory_pool_location = smu_v13_0_notify_memory_pool_location, 1667 .get_gpu_metrics = smu_v13_0_7_get_gpu_metrics, 1668 .set_soft_freq_limited_range = smu_v13_0_set_soft_freq_limited_range, 1669 .set_performance_level = 
smu_v13_0_set_performance_level, 1670 .gfx_off_control = smu_v13_0_gfx_off_control, 1671 .get_fan_speed_pwm = smu_v13_0_7_get_fan_speed_pwm, 1672 .get_fan_speed_rpm = smu_v13_0_7_get_fan_speed_rpm, 1673 .set_fan_speed_pwm = smu_v13_0_set_fan_speed_pwm, 1674 .set_fan_speed_rpm = smu_v13_0_set_fan_speed_rpm, 1675 .get_fan_control_mode = smu_v13_0_get_fan_control_mode, 1676 .set_fan_control_mode = smu_v13_0_set_fan_control_mode, 1677 .enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost, 1678 .get_power_limit = smu_v13_0_7_get_power_limit, 1679 .set_power_limit = smu_v13_0_set_power_limit, 1680 .get_power_profile_mode = smu_v13_0_7_get_power_profile_mode, 1681 .set_power_profile_mode = smu_v13_0_7_set_power_profile_mode, 1682 .set_tool_table_location = smu_v13_0_set_tool_table_location, 1683 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, 1684 .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, 1685 .baco_is_support = smu_v13_0_baco_is_support, 1686 .baco_get_state = smu_v13_0_baco_get_state, 1687 .baco_set_state = smu_v13_0_baco_set_state, 1688 .baco_enter = smu_v13_0_7_baco_enter, 1689 .baco_exit = smu_v13_0_7_baco_exit, 1690 .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported, 1691 .mode1_reset = smu_v13_0_mode1_reset, 1692 .set_mp1_state = smu_v13_0_7_set_mp1_state, 1693 .set_df_cstate = smu_v13_0_7_set_df_cstate, 1694 .gpo_control = smu_v13_0_gpo_control, 1695 }; 1696 1697 void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu) 1698 { 1699 smu->ppt_funcs = &smu_v13_0_7_ppt_funcs; 1700 smu->message_map = smu_v13_0_7_message_map; 1701 smu->clock_map = smu_v13_0_7_clk_map; 1702 smu->feature_map = smu_v13_0_7_feature_mask_map; 1703 smu->table_map = smu_v13_0_7_table_map; 1704 smu->pwr_src_map = smu_v13_0_7_pwr_src_map; 1705 smu->workload_map = smu_v13_0_7_workload_map; 1706 smu_v13_0_set_smu_mailbox_registers(smu); 1707 } 1708