1 /* 2 * Copyright 2021 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #define SWSMU_CODE_LAYER_L2 25 26 #include <linux/firmware.h> 27 #include <linux/pci.h> 28 #include <linux/i2c.h> 29 #include "amdgpu.h" 30 #include "amdgpu_smu.h" 31 #include "atomfirmware.h" 32 #include "amdgpu_atomfirmware.h" 33 #include "amdgpu_atombios.h" 34 #include "smu_v13_0.h" 35 #include "smu13_driver_if_v13_0_0.h" 36 #include "soc15_common.h" 37 #include "atom.h" 38 #include "smu_v13_0_0_ppt.h" 39 #include "smu_v13_0_0_pptable.h" 40 #include "smu_v13_0_0_ppsmc.h" 41 #include "nbio/nbio_4_3_0_offset.h" 42 #include "nbio/nbio_4_3_0_sh_mask.h" 43 #include "mp/mp_13_0_0_offset.h" 44 #include "mp/mp_13_0_0_sh_mask.h" 45 46 #include "asic_reg/mp/mp_13_0_0_sh_mask.h" 47 #include "smu_cmn.h" 48 #include "amdgpu_ras.h" 49 #include "umc_v8_10.h" 50 51 /* 52 * DO NOT use these for err/warn/info/debug messages. 53 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 54 * They are more MGPU friendly. 55 */ 56 #undef pr_err 57 #undef pr_warn 58 #undef pr_info 59 #undef pr_debug 60 61 #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) 62 63 #define FEATURE_MASK(feature) (1ULL << feature) 64 #define SMC_DPM_FEATURE ( \ 65 FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \ 66 FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \ 67 FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \ 68 FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ 69 FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \ 70 FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)) 71 72 #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000 73 74 #define mmMP1_SMN_C2PMSG_66 0x0282 75 #define mmMP1_SMN_C2PMSG_66_BASE_IDX 0 76 77 #define mmMP1_SMN_C2PMSG_82 0x0292 78 #define mmMP1_SMN_C2PMSG_82_BASE_IDX 0 79 80 #define mmMP1_SMN_C2PMSG_90 0x029a 81 #define mmMP1_SMN_C2PMSG_90_BASE_IDX 0 82 83 #define mmMP1_SMN_C2PMSG_75 0x028b 84 #define mmMP1_SMN_C2PMSG_75_BASE_IDX 0 85 86 #define mmMP1_SMN_C2PMSG_53 0x0275 87 #define mmMP1_SMN_C2PMSG_53_BASE_IDX 0 88 89 #define mmMP1_SMN_C2PMSG_54 0x0276 90 #define mmMP1_SMN_C2PMSG_54_BASE_IDX 0 91 92 #define DEBUGSMC_MSG_Mode1Reset 2 93 94 /* 95 * SMU_v13_0_10 supports ECCTABLE since version 80.34.0, 96 * use this to check ECCTABLE feature whether support 97 */ 98 #define SUPPORT_ECCTABLE_SMU_13_0_10_VERSION 0x00502200 99 100 static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = { 101 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), 102 MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), 103 MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1), 104 MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0), 105 MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0), 106 MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0), 107 MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0), 108 MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1), 109 MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1), 110 MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1), 111 MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1), 112 MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1), 113 MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1), 114 MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1), 115 MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), 116 MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), 117 MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), 118 MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0), 119 MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), 120 MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1), 121 MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), 122 MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0), 123 MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0), 124 MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0), 125 MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0), 126 MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1), 127 MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), 128 MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1), 129 MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0), 130 MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), 131 MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), 132 MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), 133 MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0), 134 MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0), 135 MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0), 136 MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0), 137 MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1), 138 MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0), 139 MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), 140 MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0), 141 MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0), 142 MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0), 143 MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0), 144 MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0), 145 MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), 146 MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0), 147 MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0), 148 MSG_MAP(Mode2Reset, PPSMC_MSG_Mode2Reset, 0), 149 MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0), 150 MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0), 151 MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0), 152 MSG_MAP(SetNumBadMemoryPagesRetired, PPSMC_MSG_SetNumBadMemoryPagesRetired, 0), 153 MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel, 154 PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0), 155 MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0), 156 MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0), 157 MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0), 158 }; 159 160 static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = { 161 CLK_MAP(GFXCLK, PPCLK_GFXCLK), 162 CLK_MAP(SCLK, PPCLK_GFXCLK), 163 CLK_MAP(SOCCLK, PPCLK_SOCCLK), 164 CLK_MAP(FCLK, PPCLK_FCLK), 165 CLK_MAP(UCLK, PPCLK_UCLK), 166 CLK_MAP(MCLK, PPCLK_UCLK), 167 CLK_MAP(VCLK, PPCLK_VCLK_0), 168 CLK_MAP(VCLK1, PPCLK_VCLK_1), 169 CLK_MAP(DCLK, PPCLK_DCLK_0), 170 CLK_MAP(DCLK1, PPCLK_DCLK_1), 171 }; 172 173 static struct cmn2asic_mapping smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] = { 174 FEA_MAP(FW_DATA_READ), 175 FEA_MAP(DPM_GFXCLK), 176 FEA_MAP(DPM_GFX_POWER_OPTIMIZER), 177 FEA_MAP(DPM_UCLK), 178 FEA_MAP(DPM_FCLK), 179 FEA_MAP(DPM_SOCCLK), 180 FEA_MAP(DPM_MP0CLK), 181 FEA_MAP(DPM_LINK), 182 FEA_MAP(DPM_DCN), 183 FEA_MAP(VMEMP_SCALING), 184 FEA_MAP(VDDIO_MEM_SCALING), 185 FEA_MAP(DS_GFXCLK), 186 FEA_MAP(DS_SOCCLK), 187 FEA_MAP(DS_FCLK), 188 FEA_MAP(DS_LCLK), 189 FEA_MAP(DS_DCFCLK), 190 FEA_MAP(DS_UCLK), 191 FEA_MAP(GFX_ULV), 192 FEA_MAP(FW_DSTATE), 193 FEA_MAP(GFXOFF), 194 FEA_MAP(BACO), 195 FEA_MAP(MM_DPM), 196 FEA_MAP(SOC_MPCLK_DS), 197 FEA_MAP(BACO_MPCLK_DS), 198 FEA_MAP(THROTTLERS), 199 FEA_MAP(SMARTSHIFT), 200 FEA_MAP(GTHR), 201 FEA_MAP(ACDC), 202 FEA_MAP(VR0HOT), 203 FEA_MAP(FW_CTF), 204 FEA_MAP(FAN_CONTROL), 205 FEA_MAP(GFX_DCS), 206 FEA_MAP(GFX_READ_MARGIN), 207 FEA_MAP(LED_DISPLAY), 208 FEA_MAP(GFXCLK_SPREAD_SPECTRUM), 209 FEA_MAP(OUT_OF_BAND_MONITOR), 210 FEA_MAP(OPTIMIZED_VMIN), 211 FEA_MAP(GFX_IMU), 212 FEA_MAP(BOOT_TIME_CAL), 213 FEA_MAP(GFX_PCC_DFLL), 214 FEA_MAP(SOC_CG), 215 FEA_MAP(DF_CSTATE), 216 FEA_MAP(GFX_EDC), 217 FEA_MAP(BOOT_POWER_OPT), 218 FEA_MAP(CLOCK_POWER_DOWN_BYPASS), 219 FEA_MAP(DS_VCN), 220 FEA_MAP(BACO_CG), 221 FEA_MAP(MEM_TEMP_READ), 222 FEA_MAP(ATHUB_MMHUB_PG), 223 FEA_MAP(SOC_PCC), 224 [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, 225 [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, 226 [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT}, 227 }; 228 229 static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = { 230 TAB_MAP(PPTABLE), 231 TAB_MAP(WATERMARKS), 232 TAB_MAP(AVFS_PSM_DEBUG), 233 TAB_MAP(PMSTATUSLOG), 234 TAB_MAP(SMU_METRICS), 235 TAB_MAP(DRIVER_SMU_CONFIG), 236 TAB_MAP(ACTIVITY_MONITOR_COEFF), 237 [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE}, 238 TAB_MAP(I2C_COMMANDS), 239 TAB_MAP(ECCINFO), 240 }; 241 242 static struct cmn2asic_mapping smu_v13_0_0_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { 243 PWR_MAP(AC), 244 PWR_MAP(DC), 245 }; 246 247 static struct cmn2asic_mapping smu_v13_0_0_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { 248 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT), 249 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), 250 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT), 251 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), 252 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), 253 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), 254 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), 255 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT), 256 }; 257 258 static const uint8_t smu_v13_0_0_throttler_map[] = { 259 [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT), 260 [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT), 261 [THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT), 262 [THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT), 263 [THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT), 264 [THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT), 265 [THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT), 266 [THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT), 267 [THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT), 268 [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT), 269 [THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT), 270 [THROTTLER_TEMP_VR_MEM0_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT), 271 [THROTTLER_TEMP_VR_MEM1_BIT] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT), 272 [THROTTLER_TEMP_LIQUID0_BIT] = (SMU_THROTTLER_TEMP_LIQUID0_BIT), 273 [THROTTLER_TEMP_LIQUID1_BIT] = (SMU_THROTTLER_TEMP_LIQUID1_BIT), 274 [THROTTLER_GFX_APCC_PLUS_BIT] = (SMU_THROTTLER_APCC_BIT), 275 [THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT), 276 }; 277 278 static int 279 smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu, 280 uint32_t *feature_mask, uint32_t num) 281 { 282 struct amdgpu_device *adev = smu->adev; 283 u32 smu_version; 284 285 if (num > 2) 286 return -EINVAL; 287 288 memset(feature_mask, 0xff, sizeof(uint32_t) * num); 289 290 if (!(adev->pm.pp_feature & PP_SCLK_DPM_MASK)) { 291 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT); 292 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_IMU_BIT); 293 } 294 295 if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) || 296 !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB)) 297 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT); 298 299 if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)) 300 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT); 301 302 /* PMFW 78.58 contains a critical fix for gfxoff feature */ 303 smu_cmn_get_smc_version(smu, NULL, &smu_version); 304 if ((smu_version < 0x004e3a00) || 305 !(adev->pm.pp_feature & PP_GFXOFF_MASK)) 306 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT); 307 308 if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) { 309 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT); 310 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT); 311 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT); 312 } 313 314 if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)) 315 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT); 316 317 if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { 318 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT); 319 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT); 320 } 321 322 if (!(adev->pm.pp_feature & PP_ULV_MASK)) 323 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT); 324 325 return 0; 326 } 327 328 static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu) 329 { 330 struct smu_table_context *table_context = &smu->smu_table; 331 struct smu_13_0_0_powerplay_table *powerplay_table = 332 table_context->power_play_table; 333 struct smu_baco_context *smu_baco = &smu->smu_baco; 334 335 if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC) 336 smu->dc_controlled_by_gpio = true; 337 338 if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO || 339 powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO) 340 smu_baco->platform_support = true; 341 342 if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO) 343 smu_baco->maco_support = true; 344 345 table_context->thermal_controller_type = 346 powerplay_table->thermal_controller_type; 347 348 /* 349 * Instead of having its own buffer space and get overdrive_table copied, 350 * smu->od_settings just points to the actual overdrive_table 351 */ 352 smu->od_settings = &powerplay_table->overdrive_table; 353 354 return 0; 355 } 356 357 static int smu_v13_0_0_store_powerplay_table(struct smu_context *smu) 358 { 359 struct smu_table_context *table_context = &smu->smu_table; 360 struct smu_13_0_0_powerplay_table *powerplay_table = 361 table_context->power_play_table; 362 363 memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable, 364 sizeof(PPTable_t)); 365 366 return 0; 367 } 368 369 #ifndef atom_smc_dpm_info_table_13_0_0 370 struct atom_smc_dpm_info_table_13_0_0 { 371 struct atom_common_table_header table_header; 372 BoardTable_t BoardTable; 373 }; 374 #endif 375 376 static int smu_v13_0_0_append_powerplay_table(struct smu_context *smu) 377 { 378 struct smu_table_context *table_context = &smu->smu_table; 379 PPTable_t *smc_pptable = table_context->driver_pptable; 380 struct atom_smc_dpm_info_table_13_0_0 *smc_dpm_table; 381 BoardTable_t *BoardTable = &smc_pptable->BoardTable; 382 int index, ret; 383 384 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 385 smc_dpm_info); 386 387 ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL, 388 (uint8_t **)&smc_dpm_table); 389 if (ret) 390 return ret; 391 392 memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t)); 393 394 return 0; 395 } 396 397 static int smu_v13_0_0_get_pptable_from_pmfw(struct smu_context *smu, 398 void **table, 399 uint32_t *size) 400 { 401 struct smu_table_context *smu_table = &smu->smu_table; 402 void *combo_pptable = smu_table->combo_pptable; 403 int ret = 0; 404 405 ret = smu_cmn_get_combo_pptable(smu); 406 if (ret) 407 return ret; 408 409 *table = combo_pptable; 410 *size = sizeof(struct smu_13_0_0_powerplay_table); 411 412 return 0; 413 } 414 415 static int smu_v13_0_0_setup_pptable(struct smu_context *smu) 416 { 417 struct smu_table_context *smu_table = &smu->smu_table; 418 struct amdgpu_device *adev = smu->adev; 419 int ret = 0; 420 421 if (amdgpu_sriov_vf(smu->adev)) 422 return 0; 423 424 ret = smu_v13_0_0_get_pptable_from_pmfw(smu, 425 &smu_table->power_play_table, 426 &smu_table->power_play_table_size); 427 if (ret) 428 return ret; 429 430 ret = smu_v13_0_0_store_powerplay_table(smu); 431 if (ret) 432 return ret; 433 434 /* 435 * With SCPM enabled, the operation below will be handled 436 * by PSP. Driver involvment is unnecessary and useless. 437 */ 438 if (!adev->scpm_enabled) { 439 ret = smu_v13_0_0_append_powerplay_table(smu); 440 if (ret) 441 return ret; 442 } 443 444 ret = smu_v13_0_0_check_powerplay_table(smu); 445 if (ret) 446 return ret; 447 448 return ret; 449 } 450 451 static int smu_v13_0_0_tables_init(struct smu_context *smu) 452 { 453 struct smu_table_context *smu_table = &smu->smu_table; 454 struct smu_table *tables = smu_table->tables; 455 456 SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t), 457 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 458 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), 459 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 460 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t), 461 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 462 SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t), 463 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 464 SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t), 465 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 466 SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE, 467 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 468 SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, 469 sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE, 470 AMDGPU_GEM_DOMAIN_VRAM); 471 SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE, 472 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 473 SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t), 474 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 475 476 smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL); 477 if (!smu_table->metrics_table) 478 goto err0_out; 479 smu_table->metrics_time = 0; 480 481 smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3); 482 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 483 if (!smu_table->gpu_metrics_table) 484 goto err1_out; 485 486 smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); 487 if (!smu_table->watermarks_table) 488 goto err2_out; 489 490 smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL); 491 if (!smu_table->ecc_table) 492 goto err3_out; 493 494 return 0; 495 496 err3_out: 497 kfree(smu_table->watermarks_table); 498 err2_out: 499 kfree(smu_table->gpu_metrics_table); 500 err1_out: 501 kfree(smu_table->metrics_table); 502 err0_out: 503 return -ENOMEM; 504 } 505 506 static int smu_v13_0_0_allocate_dpm_context(struct smu_context *smu) 507 { 508 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 509 510 smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context), 511 GFP_KERNEL); 512 if (!smu_dpm->dpm_context) 513 return -ENOMEM; 514 515 smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context); 516 517 return 0; 518 } 519 520 static int smu_v13_0_0_init_smc_tables(struct smu_context *smu) 521 { 522 int ret = 0; 523 524 ret = smu_v13_0_0_tables_init(smu); 525 if (ret) 526 return ret; 527 528 ret = smu_v13_0_0_allocate_dpm_context(smu); 529 if (ret) 530 return ret; 531 532 return smu_v13_0_init_smc_tables(smu); 533 } 534 535 static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu) 536 { 537 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; 538 struct smu_table_context *table_context = &smu->smu_table; 539 PPTable_t *pptable = table_context->driver_pptable; 540 SkuTable_t *skutable = &pptable->SkuTable; 541 struct smu_13_0_dpm_table *dpm_table; 542 struct smu_13_0_pcie_table *pcie_table; 543 uint32_t link_level; 544 int ret = 0; 545 546 /* socclk dpm table setup */ 547 dpm_table = &dpm_context->dpm_tables.soc_table; 548 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { 549 ret = smu_v13_0_set_single_dpm_table(smu, 550 SMU_SOCCLK, 551 dpm_table); 552 if (ret) 553 return ret; 554 } else { 555 dpm_table->count = 1; 556 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100; 557 dpm_table->dpm_levels[0].enabled = true; 558 dpm_table->min = dpm_table->dpm_levels[0].value; 559 dpm_table->max = dpm_table->dpm_levels[0].value; 560 } 561 562 /* gfxclk dpm table setup */ 563 dpm_table = &dpm_context->dpm_tables.gfx_table; 564 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) { 565 ret = smu_v13_0_set_single_dpm_table(smu, 566 SMU_GFXCLK, 567 dpm_table); 568 if (ret) 569 return ret; 570 571 /* 572 * Update the reported maximum shader clock to the value 573 * which can be guarded to be achieved on all cards. This 574 * is aligned with Window setting. And considering that value 575 * might be not the peak frequency the card can achieve, it 576 * is normal some real-time clock frequency can overtake this 577 * labelled maximum clock frequency(for example in pp_dpm_sclk 578 * sysfs output). 579 */ 580 if (skutable->DriverReportedClocks.GameClockAc && 581 (dpm_table->dpm_levels[dpm_table->count - 1].value > 582 skutable->DriverReportedClocks.GameClockAc)) { 583 dpm_table->dpm_levels[dpm_table->count - 1].value = 584 skutable->DriverReportedClocks.GameClockAc; 585 dpm_table->max = skutable->DriverReportedClocks.GameClockAc; 586 } 587 } else { 588 dpm_table->count = 1; 589 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100; 590 dpm_table->dpm_levels[0].enabled = true; 591 dpm_table->min = dpm_table->dpm_levels[0].value; 592 dpm_table->max = dpm_table->dpm_levels[0].value; 593 } 594 595 /* uclk dpm table setup */ 596 dpm_table = &dpm_context->dpm_tables.uclk_table; 597 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { 598 ret = smu_v13_0_set_single_dpm_table(smu, 599 SMU_UCLK, 600 dpm_table); 601 if (ret) 602 return ret; 603 } else { 604 dpm_table->count = 1; 605 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100; 606 dpm_table->dpm_levels[0].enabled = true; 607 dpm_table->min = dpm_table->dpm_levels[0].value; 608 dpm_table->max = dpm_table->dpm_levels[0].value; 609 } 610 611 /* fclk dpm table setup */ 612 dpm_table = &dpm_context->dpm_tables.fclk_table; 613 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) { 614 ret = smu_v13_0_set_single_dpm_table(smu, 615 SMU_FCLK, 616 dpm_table); 617 if (ret) 618 return ret; 619 } else { 620 dpm_table->count = 1; 621 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100; 622 dpm_table->dpm_levels[0].enabled = true; 623 dpm_table->min = dpm_table->dpm_levels[0].value; 624 dpm_table->max = dpm_table->dpm_levels[0].value; 625 } 626 627 /* vclk dpm table setup */ 628 dpm_table = &dpm_context->dpm_tables.vclk_table; 629 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) { 630 ret = smu_v13_0_set_single_dpm_table(smu, 631 SMU_VCLK, 632 dpm_table); 633 if (ret) 634 return ret; 635 } else { 636 dpm_table->count = 1; 637 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100; 638 dpm_table->dpm_levels[0].enabled = true; 639 dpm_table->min = dpm_table->dpm_levels[0].value; 640 dpm_table->max = dpm_table->dpm_levels[0].value; 641 } 642 643 /* dclk dpm table setup */ 644 dpm_table = &dpm_context->dpm_tables.dclk_table; 645 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) { 646 ret = smu_v13_0_set_single_dpm_table(smu, 647 SMU_DCLK, 648 dpm_table); 649 if (ret) 650 return ret; 651 } else { 652 dpm_table->count = 1; 653 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100; 654 dpm_table->dpm_levels[0].enabled = true; 655 dpm_table->min = dpm_table->dpm_levels[0].value; 656 dpm_table->max = dpm_table->dpm_levels[0].value; 657 } 658 659 /* lclk dpm table setup */ 660 pcie_table = &dpm_context->dpm_tables.pcie_table; 661 pcie_table->num_of_link_levels = 0; 662 for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { 663 if (!skutable->PcieGenSpeed[link_level] && 664 !skutable->PcieLaneCount[link_level] && 665 !skutable->LclkFreq[link_level]) 666 continue; 667 668 pcie_table->pcie_gen[pcie_table->num_of_link_levels] = 669 skutable->PcieGenSpeed[link_level]; 670 pcie_table->pcie_lane[pcie_table->num_of_link_levels] = 671 skutable->PcieLaneCount[link_level]; 672 pcie_table->clk_freq[pcie_table->num_of_link_levels] = 673 skutable->LclkFreq[link_level]; 674 pcie_table->num_of_link_levels++; 675 } 676 677 return 0; 678 } 679 680 static bool smu_v13_0_0_is_dpm_running(struct smu_context *smu) 681 { 682 int ret = 0; 683 uint64_t feature_enabled; 684 685 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 686 if (ret) 687 return false; 688 689 return !!(feature_enabled & SMC_DPM_FEATURE); 690 } 691 692 static void smu_v13_0_0_dump_pptable(struct smu_context *smu) 693 { 694 struct smu_table_context *table_context = &smu->smu_table; 695 PPTable_t *pptable = table_context->driver_pptable; 696 SkuTable_t *skutable = &pptable->SkuTable; 697 698 dev_info(smu->adev->dev, "Dumped PPTable:\n"); 699 700 dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version); 701 dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]); 702 dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]); 703 } 704 705 static int smu_v13_0_0_system_features_control(struct smu_context *smu, 706 bool en) 707 { 708 return smu_v13_0_system_features_control(smu, en); 709 } 710 711 static uint32_t smu_v13_0_get_throttler_status(SmuMetrics_t *metrics) 712 { 713 uint32_t throttler_status = 0; 714 int i; 715 716 for (i = 0; i < THROTTLER_COUNT; i++) 717 throttler_status |= 718 (metrics->ThrottlingPercentage[i] ? 1U << i : 0); 719 720 return throttler_status; 721 } 722 723 #define SMU_13_0_0_BUSY_THRESHOLD 15 724 static int smu_v13_0_0_get_smu_metrics_data(struct smu_context *smu, 725 MetricsMember_t member, 726 uint32_t *value) 727 { 728 struct smu_table_context *smu_table = &smu->smu_table; 729 SmuMetrics_t *metrics = 730 &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics); 731 int ret = 0; 732 733 ret = smu_cmn_get_metrics_table(smu, 734 NULL, 735 false); 736 if (ret) 737 return ret; 738 739 switch (member) { 740 case METRICS_CURR_GFXCLK: 741 *value = metrics->CurrClock[PPCLK_GFXCLK]; 742 break; 743 case METRICS_CURR_SOCCLK: 744 *value = metrics->CurrClock[PPCLK_SOCCLK]; 745 break; 746 case METRICS_CURR_UCLK: 747 *value = metrics->CurrClock[PPCLK_UCLK]; 748 break; 749 case METRICS_CURR_VCLK: 750 *value = metrics->CurrClock[PPCLK_VCLK_0]; 751 break; 752 case METRICS_CURR_VCLK1: 753 *value = metrics->CurrClock[PPCLK_VCLK_1]; 754 break; 755 case METRICS_CURR_DCLK: 756 *value = metrics->CurrClock[PPCLK_DCLK_0]; 757 break; 758 case METRICS_CURR_DCLK1: 759 *value = metrics->CurrClock[PPCLK_DCLK_1]; 760 break; 761 case METRICS_CURR_FCLK: 762 *value = metrics->CurrClock[PPCLK_FCLK]; 763 break; 764 case METRICS_AVERAGE_GFXCLK: 765 if (metrics->AverageGfxActivity <= SMU_13_0_0_BUSY_THRESHOLD) 766 *value = metrics->AverageGfxclkFrequencyPostDs; 767 else 768 *value = metrics->AverageGfxclkFrequencyPreDs; 769 break; 770 case METRICS_AVERAGE_FCLK: 771 if (metrics->AverageUclkActivity <= SMU_13_0_0_BUSY_THRESHOLD) 772 *value = metrics->AverageFclkFrequencyPostDs; 773 else 774 *value = metrics->AverageFclkFrequencyPreDs; 775 break; 776 case METRICS_AVERAGE_UCLK: 777 if (metrics->AverageUclkActivity <= SMU_13_0_0_BUSY_THRESHOLD) 778 *value = metrics->AverageMemclkFrequencyPostDs; 779 else 780 *value = metrics->AverageMemclkFrequencyPreDs; 781 break; 782 case METRICS_AVERAGE_VCLK: 783 *value = metrics->AverageVclk0Frequency; 784 break; 785 case METRICS_AVERAGE_DCLK: 786 *value = metrics->AverageDclk0Frequency; 787 break; 788 case METRICS_AVERAGE_VCLK1: 789 *value = metrics->AverageVclk1Frequency; 790 break; 791 case METRICS_AVERAGE_DCLK1: 792 *value = metrics->AverageDclk1Frequency; 793 break; 794 case METRICS_AVERAGE_GFXACTIVITY: 795 *value = metrics->AverageGfxActivity; 796 break; 797 case METRICS_AVERAGE_MEMACTIVITY: 798 *value = metrics->AverageUclkActivity; 799 break; 800 case METRICS_AVERAGE_SOCKETPOWER: 801 *value = metrics->AverageSocketPower << 8; 802 break; 803 case METRICS_TEMPERATURE_EDGE: 804 *value = metrics->AvgTemperature[TEMP_EDGE] * 805 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 806 break; 807 case METRICS_TEMPERATURE_HOTSPOT: 808 *value = metrics->AvgTemperature[TEMP_HOTSPOT] * 809 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 810 break; 811 case METRICS_TEMPERATURE_MEM: 812 *value = metrics->AvgTemperature[TEMP_MEM] * 813 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 814 break; 815 case METRICS_TEMPERATURE_VRGFX: 816 *value = metrics->AvgTemperature[TEMP_VR_GFX] * 817 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 818 break; 819 case METRICS_TEMPERATURE_VRSOC: 820 *value = metrics->AvgTemperature[TEMP_VR_SOC] * 821 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 822 break; 823 case METRICS_THROTTLER_STATUS: 824 *value = smu_v13_0_get_throttler_status(metrics); 825 break; 826 case METRICS_CURR_FANSPEED: 827 *value = metrics->AvgFanRpm; 828 break; 829 case METRICS_CURR_FANPWM: 830 *value = metrics->AvgFanPwm; 831 break; 832 case METRICS_VOLTAGE_VDDGFX: 833 *value = metrics->AvgVoltage[SVI_PLANE_GFX]; 834 break; 835 case METRICS_PCIE_RATE: 836 *value = metrics->PcieRate; 837 break; 838 case METRICS_PCIE_WIDTH: 839 *value = metrics->PcieWidth; 840 break; 841 default: 842 *value = UINT_MAX; 843 break; 844 } 845 846 return ret; 847 } 848 849 static int smu_v13_0_0_get_dpm_ultimate_freq(struct smu_context *smu, 850 enum smu_clk_type clk_type, 851 uint32_t *min, 852 uint32_t *max) 853 { 854 struct smu_13_0_dpm_context *dpm_context = 855 smu->smu_dpm.dpm_context; 856 struct smu_13_0_dpm_table *dpm_table; 857 858 switch (clk_type) { 859 case SMU_MCLK: 860 case SMU_UCLK: 861 /* uclk dpm table */ 862 dpm_table = &dpm_context->dpm_tables.uclk_table; 863 break; 864 case SMU_GFXCLK: 865 case SMU_SCLK: 866 /* gfxclk dpm table */ 867 dpm_table = &dpm_context->dpm_tables.gfx_table; 868 break; 869 case SMU_SOCCLK: 870 /* socclk dpm table */ 871 dpm_table = &dpm_context->dpm_tables.soc_table; 872 break; 873 case SMU_FCLK: 874 /* fclk dpm table */ 875 dpm_table = &dpm_context->dpm_tables.fclk_table; 876 break; 877 case SMU_VCLK: 878 case SMU_VCLK1: 879 /* vclk dpm table */ 880 dpm_table = &dpm_context->dpm_tables.vclk_table; 881 break; 882 case SMU_DCLK: 883 case SMU_DCLK1: 884 /* dclk dpm table */ 885 dpm_table = &dpm_context->dpm_tables.dclk_table; 886 break; 887 default: 888 dev_err(smu->adev->dev, "Unsupported clock type!\n"); 889 return -EINVAL; 890 } 891 892 if (min) 893 *min = dpm_table->min; 894 if (max) 895 *max = dpm_table->max; 896 897 return 0; 898 } 899 900 static int smu_v13_0_0_read_sensor(struct smu_context *smu, 901 enum amd_pp_sensors sensor, 902 void *data, 903 uint32_t *size) 904 { 905 struct smu_table_context *table_context = &smu->smu_table; 906 PPTable_t *smc_pptable = table_context->driver_pptable; 907 int ret = 0; 908 909 switch (sensor) { 910 case AMDGPU_PP_SENSOR_MAX_FAN_RPM: 911 *(uint16_t *)data = smc_pptable->SkuTable.FanMaximumRpm; 912 *size = 4; 913 break; 914 case AMDGPU_PP_SENSOR_MEM_LOAD: 915 ret = smu_v13_0_0_get_smu_metrics_data(smu, 916 METRICS_AVERAGE_MEMACTIVITY, 917 (uint32_t *)data); 918 *size = 4; 919 break; 920 case AMDGPU_PP_SENSOR_GPU_LOAD: 921 ret = smu_v13_0_0_get_smu_metrics_data(smu, 922 METRICS_AVERAGE_GFXACTIVITY, 923 (uint32_t *)data); 924 *size = 4; 925 break; 926 case AMDGPU_PP_SENSOR_GPU_POWER: 927 ret = smu_v13_0_0_get_smu_metrics_data(smu, 928 METRICS_AVERAGE_SOCKETPOWER, 929 (uint32_t *)data); 930 *size = 4; 931 break; 932 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 933 ret = smu_v13_0_0_get_smu_metrics_data(smu, 934 METRICS_TEMPERATURE_HOTSPOT, 935 (uint32_t *)data); 936 *size = 4; 937 break; 938 case AMDGPU_PP_SENSOR_EDGE_TEMP: 939 ret = smu_v13_0_0_get_smu_metrics_data(smu, 940 METRICS_TEMPERATURE_EDGE, 941 (uint32_t *)data); 942 *size = 4; 943 break; 944 case AMDGPU_PP_SENSOR_MEM_TEMP: 945 ret = smu_v13_0_0_get_smu_metrics_data(smu, 946 METRICS_TEMPERATURE_MEM, 947 (uint32_t *)data); 948 *size = 4; 949 break; 950 case AMDGPU_PP_SENSOR_GFX_MCLK: 951 ret = smu_v13_0_0_get_smu_metrics_data(smu, 952 METRICS_CURR_UCLK, 953 (uint32_t *)data); 954 *(uint32_t *)data *= 100; 955 *size = 4; 956 break; 957 case AMDGPU_PP_SENSOR_GFX_SCLK: 958 ret = smu_v13_0_0_get_smu_metrics_data(smu, 959 METRICS_AVERAGE_GFXCLK, 960 (uint32_t *)data); 961 *(uint32_t *)data *= 100; 962 *size = 4; 963 break; 964 case AMDGPU_PP_SENSOR_VDDGFX: 965 ret = smu_v13_0_0_get_smu_metrics_data(smu, 966 METRICS_VOLTAGE_VDDGFX, 967 (uint32_t *)data); 968 *size = 4; 969 break; 970 default: 971 ret = -EOPNOTSUPP; 972 break; 973 } 974 975 return ret; 976 } 977 978 static int smu_v13_0_0_get_current_clk_freq_by_table(struct smu_context *smu, 979 enum smu_clk_type clk_type, 980 uint32_t *value) 981 { 982 MetricsMember_t member_type; 983 int clk_id = 0; 984 985 clk_id = smu_cmn_to_asic_specific_index(smu, 986 CMN2ASIC_MAPPING_CLK, 987 clk_type); 988 if (clk_id < 0) 989 return -EINVAL; 990 991 switch (clk_id) { 992 case PPCLK_GFXCLK: 993 member_type = METRICS_AVERAGE_GFXCLK; 994 break; 995 case PPCLK_UCLK: 996 member_type = METRICS_CURR_UCLK; 997 break; 998 case PPCLK_FCLK: 999 member_type = METRICS_CURR_FCLK; 1000 break; 1001 case PPCLK_SOCCLK: 1002 member_type = METRICS_CURR_SOCCLK; 1003 break; 1004 case PPCLK_VCLK_0: 1005 member_type = METRICS_AVERAGE_VCLK; 1006 break; 1007 case PPCLK_DCLK_0: 1008 member_type = METRICS_AVERAGE_DCLK; 1009 break; 1010 case PPCLK_VCLK_1: 1011 member_type = METRICS_AVERAGE_VCLK1; 1012 break; 1013 case PPCLK_DCLK_1: 1014 member_type = METRICS_AVERAGE_DCLK1; 1015 break; 1016 default: 1017 return -EINVAL; 1018 } 1019 1020 return smu_v13_0_0_get_smu_metrics_data(smu, 1021 member_type, 1022 value); 1023 } 1024 1025 static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, 1026 enum smu_clk_type clk_type, 1027 char *buf) 1028 { 1029 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 1030 struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; 1031 struct smu_13_0_dpm_table *single_dpm_table; 1032 struct smu_13_0_pcie_table *pcie_table; 1033 const int link_width[] = {0, 1, 2, 4, 8, 12, 16}; 1034 uint32_t gen_speed, lane_width; 1035 int i, curr_freq, size = 0; 1036 int ret = 0; 1037 1038 smu_cmn_get_sysfs_buf(&buf, &size); 1039 1040 if (amdgpu_ras_intr_triggered()) { 1041 size += sysfs_emit_at(buf, size, "unavailable\n"); 1042 return size; 1043 } 1044 1045 switch (clk_type) { 1046 case SMU_SCLK: 1047 single_dpm_table = &(dpm_context->dpm_tables.gfx_table); 1048 break; 1049 case SMU_MCLK: 1050 single_dpm_table = &(dpm_context->dpm_tables.uclk_table); 1051 break; 1052 case SMU_SOCCLK: 1053 single_dpm_table = &(dpm_context->dpm_tables.soc_table); 1054 break; 1055 case SMU_FCLK: 1056 single_dpm_table = &(dpm_context->dpm_tables.fclk_table); 1057 break; 1058 case SMU_VCLK: 1059 case SMU_VCLK1: 1060 single_dpm_table = &(dpm_context->dpm_tables.vclk_table); 1061 break; 1062 case SMU_DCLK: 1063 case SMU_DCLK1: 1064 single_dpm_table = &(dpm_context->dpm_tables.dclk_table); 1065 break; 1066 default: 1067 break; 1068 } 1069 1070 switch (clk_type) { 1071 case SMU_SCLK: 1072 case SMU_MCLK: 1073 case SMU_SOCCLK: 1074 case SMU_FCLK: 1075 case SMU_VCLK: 1076 case SMU_VCLK1: 1077 case SMU_DCLK: 1078 case SMU_DCLK1: 1079 ret = smu_v13_0_0_get_current_clk_freq_by_table(smu, clk_type, &curr_freq); 1080 if (ret) { 1081 dev_err(smu->adev->dev, "Failed to get current clock freq!"); 1082 return ret; 1083 } 1084 1085 if (single_dpm_table->is_fine_grained) { 1086 /* 1087 * For fine grained dpms, there are only two dpm levels: 1088 * - level 0 -> min clock freq 1089 * - level 1 -> max clock freq 1090 * And the current clock frequency can be any value between them. 1091 * So, if the current clock frequency is not at level 0 or level 1, 1092 * we will fake it as three dpm levels: 1093 * - level 0 -> min clock freq 1094 * - level 1 -> current actual clock freq 1095 * - level 2 -> max clock freq 1096 */ 1097 if ((single_dpm_table->dpm_levels[0].value != curr_freq) && 1098 (single_dpm_table->dpm_levels[1].value != curr_freq)) { 1099 size += sysfs_emit_at(buf, size, "0: %uMhz\n", 1100 single_dpm_table->dpm_levels[0].value); 1101 size += sysfs_emit_at(buf, size, "1: %uMhz *\n", 1102 curr_freq); 1103 size += sysfs_emit_at(buf, size, "2: %uMhz\n", 1104 single_dpm_table->dpm_levels[1].value); 1105 } else { 1106 size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", 1107 single_dpm_table->dpm_levels[0].value, 1108 single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : ""); 1109 size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", 1110 single_dpm_table->dpm_levels[1].value, 1111 single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : ""); 1112 } 1113 } else { 1114 for (i = 0; i < single_dpm_table->count; i++) 1115 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", 1116 i, single_dpm_table->dpm_levels[i].value, 1117 single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : ""); 1118 } 1119 break; 1120 case SMU_PCIE: 1121 ret = smu_v13_0_0_get_smu_metrics_data(smu, 1122 METRICS_PCIE_RATE, 1123 &gen_speed); 1124 if (ret) 1125 return ret; 1126 1127 ret = smu_v13_0_0_get_smu_metrics_data(smu, 1128 METRICS_PCIE_WIDTH, 1129 &lane_width); 1130 if (ret) 1131 return ret; 1132 1133 pcie_table = &(dpm_context->dpm_tables.pcie_table); 1134 for (i = 0; i < pcie_table->num_of_link_levels; i++) 1135 size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i, 1136 (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," : 1137 (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," : 1138 (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," : 1139 (pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," : "", 1140 (pcie_table->pcie_lane[i] == 1) ? "x1" : 1141 (pcie_table->pcie_lane[i] == 2) ? "x2" : 1142 (pcie_table->pcie_lane[i] == 3) ? "x4" : 1143 (pcie_table->pcie_lane[i] == 4) ? "x8" : 1144 (pcie_table->pcie_lane[i] == 5) ? "x12" : 1145 (pcie_table->pcie_lane[i] == 6) ? "x16" : "", 1146 pcie_table->clk_freq[i], 1147 ((gen_speed - 1) == pcie_table->pcie_gen[i]) && 1148 (lane_width == link_width[pcie_table->pcie_lane[i]]) ? 1149 "*" : ""); 1150 break; 1151 1152 default: 1153 break; 1154 } 1155 1156 return size; 1157 } 1158 1159 static int smu_v13_0_0_force_clk_levels(struct smu_context *smu, 1160 enum smu_clk_type clk_type, 1161 uint32_t mask) 1162 { 1163 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 1164 struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; 1165 struct smu_13_0_dpm_table *single_dpm_table; 1166 uint32_t soft_min_level, soft_max_level; 1167 uint32_t min_freq, max_freq; 1168 int ret = 0; 1169 1170 soft_min_level = mask ? (ffs(mask) - 1) : 0; 1171 soft_max_level = mask ? (fls(mask) - 1) : 0; 1172 1173 switch (clk_type) { 1174 case SMU_GFXCLK: 1175 case SMU_SCLK: 1176 single_dpm_table = &(dpm_context->dpm_tables.gfx_table); 1177 break; 1178 case SMU_MCLK: 1179 case SMU_UCLK: 1180 single_dpm_table = &(dpm_context->dpm_tables.uclk_table); 1181 break; 1182 case SMU_SOCCLK: 1183 single_dpm_table = &(dpm_context->dpm_tables.soc_table); 1184 break; 1185 case SMU_FCLK: 1186 single_dpm_table = &(dpm_context->dpm_tables.fclk_table); 1187 break; 1188 case SMU_VCLK: 1189 case SMU_VCLK1: 1190 single_dpm_table = &(dpm_context->dpm_tables.vclk_table); 1191 break; 1192 case SMU_DCLK: 1193 case SMU_DCLK1: 1194 single_dpm_table = &(dpm_context->dpm_tables.dclk_table); 1195 break; 1196 default: 1197 break; 1198 } 1199 1200 switch (clk_type) { 1201 case SMU_GFXCLK: 1202 case SMU_SCLK: 1203 case SMU_MCLK: 1204 case SMU_UCLK: 1205 case SMU_SOCCLK: 1206 case SMU_FCLK: 1207 case SMU_VCLK: 1208 case SMU_VCLK1: 1209 case SMU_DCLK: 1210 case SMU_DCLK1: 1211 if (single_dpm_table->is_fine_grained) { 1212 /* There is only 2 levels for fine grained DPM */ 1213 soft_max_level = (soft_max_level >= 1 ? 1 : 0); 1214 soft_min_level = (soft_min_level >= 1 ? 1 : 0); 1215 } else { 1216 if ((soft_max_level >= single_dpm_table->count) || 1217 (soft_min_level >= single_dpm_table->count)) 1218 return -EINVAL; 1219 } 1220 1221 min_freq = single_dpm_table->dpm_levels[soft_min_level].value; 1222 max_freq = single_dpm_table->dpm_levels[soft_max_level].value; 1223 1224 ret = smu_v13_0_set_soft_freq_limited_range(smu, 1225 clk_type, 1226 min_freq, 1227 max_freq); 1228 break; 1229 case SMU_DCEFCLK: 1230 case SMU_PCIE: 1231 default: 1232 break; 1233 } 1234 1235 return ret; 1236 } 1237 1238 static int smu_v13_0_0_update_pcie_parameters(struct smu_context *smu, 1239 uint32_t pcie_gen_cap, 1240 uint32_t pcie_width_cap) 1241 { 1242 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; 1243 struct smu_13_0_pcie_table *pcie_table = 1244 &dpm_context->dpm_tables.pcie_table; 1245 uint32_t smu_pcie_arg; 1246 int ret, i; 1247 1248 for (i = 0; i < pcie_table->num_of_link_levels; i++) { 1249 if (pcie_table->pcie_gen[i] > pcie_gen_cap) 1250 pcie_table->pcie_gen[i] = pcie_gen_cap; 1251 if (pcie_table->pcie_lane[i] > pcie_width_cap) 1252 pcie_table->pcie_lane[i] = pcie_width_cap; 1253 1254 smu_pcie_arg = i << 16; 1255 smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; 1256 smu_pcie_arg |= pcie_table->pcie_lane[i]; 1257 1258 ret = smu_cmn_send_smc_msg_with_param(smu, 1259 SMU_MSG_OverridePcieParameters, 1260 smu_pcie_arg, 1261 NULL); 1262 if (ret) 1263 return ret; 1264 } 1265 1266 return 0; 1267 } 1268 1269 static const struct smu_temperature_range smu13_thermal_policy[] = { 1270 {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, 1271 { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, 1272 }; 1273 1274 static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu, 1275 struct smu_temperature_range *range) 1276 { 1277 struct smu_table_context *table_context = &smu->smu_table; 1278 struct smu_13_0_0_powerplay_table *powerplay_table = 1279 table_context->power_play_table; 1280 PPTable_t *pptable = smu->smu_table.driver_pptable; 1281 1282 if (amdgpu_sriov_vf(smu->adev)) 1283 return 0; 1284 1285 if (!range) 1286 return -EINVAL; 1287 1288 memcpy(range, &smu13_thermal_policy[0], sizeof(struct smu_temperature_range)); 1289 1290 range->max = pptable->SkuTable.TemperatureLimit[TEMP_EDGE] * 1291 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1292 range->edge_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE) * 1293 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1294 range->hotspot_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] * 1295 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1296 range->hotspot_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT) * 1297 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1298 range->mem_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_MEM] * 1299 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1300 range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)* 1301 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1302 range->software_shutdown_temp = powerplay_table->software_shutdown_temp; 1303 1304 return 0; 1305 } 1306 1307 #define MAX(a, b) ((a) > (b) ? (a) : (b)) 1308 static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu, 1309 void **table) 1310 { 1311 struct smu_table_context *smu_table = &smu->smu_table; 1312 struct gpu_metrics_v1_3 *gpu_metrics = 1313 (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table; 1314 SmuMetricsExternal_t metrics_ext; 1315 SmuMetrics_t *metrics = &metrics_ext.SmuMetrics; 1316 int ret = 0; 1317 1318 ret = smu_cmn_get_metrics_table(smu, 1319 &metrics_ext, 1320 true); 1321 if (ret) 1322 return ret; 1323 1324 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3); 1325 1326 gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE]; 1327 gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT]; 1328 gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM]; 1329 gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX]; 1330 gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC]; 1331 gpu_metrics->temperature_vrmem = MAX(metrics->AvgTemperature[TEMP_VR_MEM0], 1332 metrics->AvgTemperature[TEMP_VR_MEM1]); 1333 1334 gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity; 1335 gpu_metrics->average_umc_activity = metrics->AverageUclkActivity; 1336 gpu_metrics->average_mm_activity = MAX(metrics->Vcn0ActivityPercentage, 1337 metrics->Vcn1ActivityPercentage); 1338 1339 gpu_metrics->average_socket_power = metrics->AverageSocketPower; 1340 gpu_metrics->energy_accumulator = metrics->EnergyAccumulator; 1341 1342 if (metrics->AverageGfxActivity <= SMU_13_0_0_BUSY_THRESHOLD) 1343 gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs; 1344 else 1345 gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs; 1346 1347 if (metrics->AverageUclkActivity <= SMU_13_0_0_BUSY_THRESHOLD) 1348 gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs; 1349 else 1350 gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs; 1351 1352 gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency; 1353 gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency; 1354 gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency; 1355 gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency; 1356 1357 gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK]; 1358 gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK]; 1359 gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK]; 1360 gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0]; 1361 gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0]; 1362 gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1]; 1363 gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_1]; 1364 1365 gpu_metrics->throttle_status = 1366 smu_v13_0_get_throttler_status(metrics); 1367 gpu_metrics->indep_throttle_status = 1368 smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status, 1369 smu_v13_0_0_throttler_map); 1370 1371 gpu_metrics->current_fan_speed = metrics->AvgFanRpm; 1372 1373 gpu_metrics->pcie_link_width = metrics->PcieWidth; 1374 gpu_metrics->pcie_link_speed = metrics->PcieRate; 1375 1376 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1377 1378 gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_GFX]; 1379 gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_SOC]; 1380 gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VMEMP]; 1381 1382 *table = (void *)gpu_metrics; 1383 1384 return sizeof(struct gpu_metrics_v1_3); 1385 } 1386 1387 static int smu_v13_0_0_populate_umd_state_clk(struct smu_context *smu) 1388 { 1389 struct smu_13_0_dpm_context *dpm_context = 1390 smu->smu_dpm.dpm_context; 1391 struct smu_13_0_dpm_table *gfx_table = 1392 &dpm_context->dpm_tables.gfx_table; 1393 struct smu_13_0_dpm_table *mem_table = 1394 &dpm_context->dpm_tables.uclk_table; 1395 struct smu_13_0_dpm_table *soc_table = 1396 &dpm_context->dpm_tables.soc_table; 1397 struct smu_13_0_dpm_table *vclk_table = 1398 &dpm_context->dpm_tables.vclk_table; 1399 struct smu_13_0_dpm_table *dclk_table = 1400 &dpm_context->dpm_tables.dclk_table; 1401 struct smu_13_0_dpm_table *fclk_table = 1402 &dpm_context->dpm_tables.fclk_table; 1403 struct smu_umd_pstate_table *pstate_table = 1404 &smu->pstate_table; 1405 struct smu_table_context *table_context = &smu->smu_table; 1406 PPTable_t *pptable = table_context->driver_pptable; 1407 DriverReportedClocks_t driver_clocks = 1408 pptable->SkuTable.DriverReportedClocks; 1409 1410 pstate_table->gfxclk_pstate.min = gfx_table->min; 1411 if (driver_clocks.GameClockAc && 1412 (driver_clocks.GameClockAc < gfx_table->max)) 1413 pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc; 1414 else 1415 pstate_table->gfxclk_pstate.peak = gfx_table->max; 1416 1417 pstate_table->uclk_pstate.min = mem_table->min; 1418 pstate_table->uclk_pstate.peak = mem_table->max; 1419 1420 pstate_table->socclk_pstate.min = soc_table->min; 1421 pstate_table->socclk_pstate.peak = soc_table->max; 1422 1423 pstate_table->vclk_pstate.min = vclk_table->min; 1424 pstate_table->vclk_pstate.peak = vclk_table->max; 1425 1426 pstate_table->dclk_pstate.min = dclk_table->min; 1427 pstate_table->dclk_pstate.peak = dclk_table->max; 1428 1429 pstate_table->fclk_pstate.min = fclk_table->min; 1430 pstate_table->fclk_pstate.peak = fclk_table->max; 1431 1432 if (driver_clocks.BaseClockAc && 1433 driver_clocks.BaseClockAc < gfx_table->max) 1434 pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc; 1435 else 1436 pstate_table->gfxclk_pstate.standard = gfx_table->max; 1437 pstate_table->uclk_pstate.standard = mem_table->max; 1438 pstate_table->socclk_pstate.standard = soc_table->min; 1439 pstate_table->vclk_pstate.standard = vclk_table->min; 1440 pstate_table->dclk_pstate.standard = dclk_table->min; 1441 pstate_table->fclk_pstate.standard = fclk_table->min; 1442 1443 return 0; 1444 } 1445 1446 static void smu_v13_0_0_get_unique_id(struct smu_context *smu) 1447 { 1448 struct smu_table_context *smu_table = &smu->smu_table; 1449 SmuMetrics_t *metrics = 1450 &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics); 1451 struct amdgpu_device *adev = smu->adev; 1452 uint32_t upper32 = 0, lower32 = 0; 1453 int ret; 1454 1455 ret = smu_cmn_get_metrics_table(smu, NULL, false); 1456 if (ret) 1457 goto out; 1458 1459 upper32 = metrics->PublicSerialNumberUpper; 1460 lower32 = metrics->PublicSerialNumberLower; 1461 1462 out: 1463 adev->unique_id = ((uint64_t)upper32 << 32) | lower32; 1464 if (adev->serial[0] == '\0') 1465 sprintf(adev->serial, "%016llx", adev->unique_id); 1466 } 1467 1468 static int smu_v13_0_0_get_fan_speed_pwm(struct smu_context *smu, 1469 uint32_t *speed) 1470 { 1471 int ret; 1472 1473 if (!speed) 1474 return -EINVAL; 1475 1476 ret = smu_v13_0_0_get_smu_metrics_data(smu, 1477 METRICS_CURR_FANPWM, 1478 speed); 1479 if (ret) { 1480 dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!"); 1481 return ret; 1482 } 1483 1484 /* Convert the PMFW output which is in percent to pwm(255) based */ 1485 *speed = MIN(*speed * 255 / 100, 255); 1486 1487 return 0; 1488 } 1489 1490 static int smu_v13_0_0_get_fan_speed_rpm(struct smu_context *smu, 1491 uint32_t *speed) 1492 { 1493 if (!speed) 1494 return -EINVAL; 1495 1496 return smu_v13_0_0_get_smu_metrics_data(smu, 1497 METRICS_CURR_FANSPEED, 1498 speed); 1499 } 1500 1501 static int smu_v13_0_0_enable_mgpu_fan_boost(struct smu_context *smu) 1502 { 1503 struct smu_table_context *table_context = &smu->smu_table; 1504 PPTable_t *pptable = table_context->driver_pptable; 1505 SkuTable_t *skutable = &pptable->SkuTable; 1506 1507 /* 1508 * Skip the MGpuFanBoost setting for those ASICs 1509 * which do not support it 1510 */ 1511 if (skutable->MGpuAcousticLimitRpmThreshold == 0) 1512 return 0; 1513 1514 return smu_cmn_send_smc_msg_with_param(smu, 1515 SMU_MSG_SetMGpuFanBoostLimitRpm, 1516 0, 1517 NULL); 1518 } 1519 1520 static int smu_v13_0_0_get_power_limit(struct smu_context *smu, 1521 uint32_t *current_power_limit, 1522 uint32_t *default_power_limit, 1523 uint32_t *max_power_limit) 1524 { 1525 struct smu_table_context *table_context = &smu->smu_table; 1526 struct smu_13_0_0_powerplay_table *powerplay_table = 1527 (struct smu_13_0_0_powerplay_table *)table_context->power_play_table; 1528 PPTable_t *pptable = table_context->driver_pptable; 1529 SkuTable_t *skutable = &pptable->SkuTable; 1530 uint32_t power_limit, od_percent; 1531 1532 if (smu_v13_0_get_current_power_limit(smu, &power_limit)) 1533 power_limit = smu->adev->pm.ac_power ? 1534 skutable->SocketPowerLimitAc[PPT_THROTTLER_PPT0] : 1535 skutable->SocketPowerLimitDc[PPT_THROTTLER_PPT0]; 1536 1537 if (current_power_limit) 1538 *current_power_limit = power_limit; 1539 if (default_power_limit) 1540 *default_power_limit = power_limit; 1541 1542 if (max_power_limit) { 1543 if (smu->od_enabled) { 1544 od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]); 1545 1546 dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit); 1547 1548 power_limit *= (100 + od_percent); 1549 power_limit /= 100; 1550 } 1551 *max_power_limit = power_limit; 1552 } 1553 1554 return 0; 1555 } 1556 1557 static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu, 1558 char *buf) 1559 { 1560 DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; 1561 DpmActivityMonitorCoeffInt_t *activity_monitor = 1562 &(activity_monitor_external.DpmActivityMonitorCoeffInt); 1563 static const char *title[] = { 1564 "PROFILE_INDEX(NAME)", 1565 "CLOCK_TYPE(NAME)", 1566 "FPS", 1567 "MinActiveFreqType", 1568 "MinActiveFreq", 1569 "BoosterFreqType", 1570 "BoosterFreq", 1571 "PD_Data_limit_c", 1572 "PD_Data_error_coeff", 1573 "PD_Data_error_rate_coeff"}; 1574 int16_t workload_type = 0; 1575 uint32_t i, size = 0; 1576 int result = 0; 1577 1578 if (!buf) 1579 return -EINVAL; 1580 1581 size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s\n", 1582 title[0], title[1], title[2], title[3], title[4], title[5], 1583 title[6], title[7], title[8], title[9]); 1584 1585 for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) { 1586 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 1587 workload_type = smu_cmn_to_asic_specific_index(smu, 1588 CMN2ASIC_MAPPING_WORKLOAD, 1589 i); 1590 if (workload_type == -ENOTSUPP) 1591 continue; 1592 else if (workload_type < 0) 1593 return -EINVAL; 1594 1595 result = smu_cmn_update_table(smu, 1596 SMU_TABLE_ACTIVITY_MONITOR_COEFF, 1597 workload_type, 1598 (void *)(&activity_monitor_external), 1599 false); 1600 if (result) { 1601 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); 1602 return result; 1603 } 1604 1605 size += sysfs_emit_at(buf, size, "%2d %14s%s:\n", 1606 i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); 1607 1608 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n", 1609 " ", 1610 0, 1611 "GFXCLK", 1612 activity_monitor->Gfx_FPS, 1613 activity_monitor->Gfx_MinActiveFreqType, 1614 activity_monitor->Gfx_MinActiveFreq, 1615 activity_monitor->Gfx_BoosterFreqType, 1616 activity_monitor->Gfx_BoosterFreq, 1617 activity_monitor->Gfx_PD_Data_limit_c, 1618 activity_monitor->Gfx_PD_Data_error_coeff, 1619 activity_monitor->Gfx_PD_Data_error_rate_coeff); 1620 1621 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n", 1622 " ", 1623 1, 1624 "FCLK", 1625 activity_monitor->Fclk_FPS, 1626 activity_monitor->Fclk_MinActiveFreqType, 1627 activity_monitor->Fclk_MinActiveFreq, 1628 activity_monitor->Fclk_BoosterFreqType, 1629 activity_monitor->Fclk_BoosterFreq, 1630 activity_monitor->Fclk_PD_Data_limit_c, 1631 activity_monitor->Fclk_PD_Data_error_coeff, 1632 activity_monitor->Fclk_PD_Data_error_rate_coeff); 1633 } 1634 1635 return size; 1636 } 1637 1638 static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, 1639 long *input, 1640 uint32_t size) 1641 { 1642 DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; 1643 DpmActivityMonitorCoeffInt_t *activity_monitor = 1644 &(activity_monitor_external.DpmActivityMonitorCoeffInt); 1645 int workload_type, ret = 0; 1646 1647 smu->power_profile_mode = input[size]; 1648 1649 if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { 1650 dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); 1651 return -EINVAL; 1652 } 1653 1654 if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { 1655 ret = smu_cmn_update_table(smu, 1656 SMU_TABLE_ACTIVITY_MONITOR_COEFF, 1657 WORKLOAD_PPLIB_CUSTOM_BIT, 1658 (void *)(&activity_monitor_external), 1659 false); 1660 if (ret) { 1661 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); 1662 return ret; 1663 } 1664 1665 switch (input[0]) { 1666 case 0: /* Gfxclk */ 1667 activity_monitor->Gfx_FPS = input[1]; 1668 activity_monitor->Gfx_MinActiveFreqType = input[2]; 1669 activity_monitor->Gfx_MinActiveFreq = input[3]; 1670 activity_monitor->Gfx_BoosterFreqType = input[4]; 1671 activity_monitor->Gfx_BoosterFreq = input[5]; 1672 activity_monitor->Gfx_PD_Data_limit_c = input[6]; 1673 activity_monitor->Gfx_PD_Data_error_coeff = input[7]; 1674 activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8]; 1675 break; 1676 case 1: /* Fclk */ 1677 activity_monitor->Fclk_FPS = input[1]; 1678 activity_monitor->Fclk_MinActiveFreqType = input[2]; 1679 activity_monitor->Fclk_MinActiveFreq = input[3]; 1680 activity_monitor->Fclk_BoosterFreqType = input[4]; 1681 activity_monitor->Fclk_BoosterFreq = input[5]; 1682 activity_monitor->Fclk_PD_Data_limit_c = input[6]; 1683 activity_monitor->Fclk_PD_Data_error_coeff = input[7]; 1684 activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8]; 1685 break; 1686 } 1687 1688 ret = smu_cmn_update_table(smu, 1689 SMU_TABLE_ACTIVITY_MONITOR_COEFF, 1690 WORKLOAD_PPLIB_CUSTOM_BIT, 1691 (void *)(&activity_monitor_external), 1692 true); 1693 if (ret) { 1694 dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); 1695 return ret; 1696 } 1697 } 1698 1699 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 1700 workload_type = smu_cmn_to_asic_specific_index(smu, 1701 CMN2ASIC_MAPPING_WORKLOAD, 1702 smu->power_profile_mode); 1703 if (workload_type < 0) 1704 return -EINVAL; 1705 1706 return smu_cmn_send_smc_msg_with_param(smu, 1707 SMU_MSG_SetWorkloadMask, 1708 1 << workload_type, 1709 NULL); 1710 } 1711 1712 static int smu_v13_0_0_baco_enter(struct smu_context *smu) 1713 { 1714 struct smu_baco_context *smu_baco = &smu->smu_baco; 1715 struct amdgpu_device *adev = smu->adev; 1716 1717 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) 1718 return smu_v13_0_baco_set_armd3_sequence(smu, 1719 smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO); 1720 else 1721 return smu_v13_0_baco_enter(smu); 1722 } 1723 1724 static int smu_v13_0_0_baco_exit(struct smu_context *smu) 1725 { 1726 struct amdgpu_device *adev = smu->adev; 1727 1728 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { 1729 /* Wait for PMFW handling for the Dstate change */ 1730 usleep_range(10000, 11000); 1731 return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); 1732 } else { 1733 return smu_v13_0_baco_exit(smu); 1734 } 1735 } 1736 1737 static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu) 1738 { 1739 struct amdgpu_device *adev = smu->adev; 1740 u32 smu_version; 1741 1742 /* SRIOV does not support SMU mode1 reset */ 1743 if (amdgpu_sriov_vf(adev)) 1744 return false; 1745 1746 /* PMFW support is available since 78.41 */ 1747 smu_cmn_get_smc_version(smu, NULL, &smu_version); 1748 if (smu_version < 0x004e2900) 1749 return false; 1750 1751 return true; 1752 } 1753 1754 static int smu_v13_0_0_i2c_xfer(struct i2c_adapter *i2c_adap, 1755 struct i2c_msg *msg, int num_msgs) 1756 { 1757 struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap); 1758 struct amdgpu_device *adev = smu_i2c->adev; 1759 struct smu_context *smu = adev->powerplay.pp_handle; 1760 struct smu_table_context *smu_table = &smu->smu_table; 1761 struct smu_table *table = &smu_table->driver_table; 1762 SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr; 1763 int i, j, r, c; 1764 u16 dir; 1765 1766 if (!adev->pm.dpm_enabled) 1767 return -EBUSY; 1768 1769 req = kzalloc(sizeof(*req), GFP_KERNEL); 1770 if (!req) 1771 return -ENOMEM; 1772 1773 req->I2CcontrollerPort = smu_i2c->port; 1774 req->I2CSpeed = I2C_SPEED_FAST_400K; 1775 req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */ 1776 dir = msg[0].flags & I2C_M_RD; 1777 1778 for (c = i = 0; i < num_msgs; i++) { 1779 for (j = 0; j < msg[i].len; j++, c++) { 1780 SwI2cCmd_t *cmd = &req->SwI2cCmds[c]; 1781 1782 if (!(msg[i].flags & I2C_M_RD)) { 1783 /* write */ 1784 cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK; 1785 cmd->ReadWriteData = msg[i].buf[j]; 1786 } 1787 1788 if ((dir ^ msg[i].flags) & I2C_M_RD) { 1789 /* The direction changes. 1790 */ 1791 dir = msg[i].flags & I2C_M_RD; 1792 cmd->CmdConfig |= CMDCONFIG_RESTART_MASK; 1793 } 1794 1795 req->NumCmds++; 1796 1797 /* 1798 * Insert STOP if we are at the last byte of either last 1799 * message for the transaction or the client explicitly 1800 * requires a STOP at this particular message. 1801 */ 1802 if ((j == msg[i].len - 1) && 1803 ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) { 1804 cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK; 1805 cmd->CmdConfig |= CMDCONFIG_STOP_MASK; 1806 } 1807 } 1808 } 1809 mutex_lock(&adev->pm.mutex); 1810 r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); 1811 mutex_unlock(&adev->pm.mutex); 1812 if (r) 1813 goto fail; 1814 1815 for (c = i = 0; i < num_msgs; i++) { 1816 if (!(msg[i].flags & I2C_M_RD)) { 1817 c += msg[i].len; 1818 continue; 1819 } 1820 for (j = 0; j < msg[i].len; j++, c++) { 1821 SwI2cCmd_t *cmd = &res->SwI2cCmds[c]; 1822 1823 msg[i].buf[j] = cmd->ReadWriteData; 1824 } 1825 } 1826 r = num_msgs; 1827 fail: 1828 kfree(req); 1829 return r; 1830 } 1831 1832 static u32 smu_v13_0_0_i2c_func(struct i2c_adapter *adap) 1833 { 1834 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 1835 } 1836 1837 static const struct i2c_algorithm smu_v13_0_0_i2c_algo = { 1838 .master_xfer = smu_v13_0_0_i2c_xfer, 1839 .functionality = smu_v13_0_0_i2c_func, 1840 }; 1841 1842 static const struct i2c_adapter_quirks smu_v13_0_0_i2c_control_quirks = { 1843 .flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN, 1844 .max_read_len = MAX_SW_I2C_COMMANDS, 1845 .max_write_len = MAX_SW_I2C_COMMANDS, 1846 .max_comb_1st_msg_len = 2, 1847 .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2, 1848 }; 1849 1850 static int smu_v13_0_0_i2c_control_init(struct smu_context *smu) 1851 { 1852 struct amdgpu_device *adev = smu->adev; 1853 int res, i; 1854 1855 for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { 1856 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; 1857 struct i2c_adapter *control = &smu_i2c->adapter; 1858 1859 smu_i2c->adev = adev; 1860 smu_i2c->port = i; 1861 mutex_init(&smu_i2c->mutex); 1862 control->owner = THIS_MODULE; 1863 control->class = I2C_CLASS_SPD; 1864 control->dev.parent = &adev->pdev->dev; 1865 control->algo = &smu_v13_0_0_i2c_algo; 1866 snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i); 1867 control->quirks = &smu_v13_0_0_i2c_control_quirks; 1868 i2c_set_adapdata(control, smu_i2c); 1869 1870 res = i2c_add_adapter(control); 1871 if (res) { 1872 DRM_ERROR("Failed to register hw i2c, err: %d\n", res); 1873 goto Out_err; 1874 } 1875 } 1876 1877 /* assign the buses used for the FRU EEPROM and RAS EEPROM */ 1878 /* XXX ideally this would be something in a vbios data table */ 1879 adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter; 1880 adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; 1881 1882 return 0; 1883 Out_err: 1884 for ( ; i >= 0; i--) { 1885 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; 1886 struct i2c_adapter *control = &smu_i2c->adapter; 1887 1888 i2c_del_adapter(control); 1889 } 1890 return res; 1891 } 1892 1893 static void smu_v13_0_0_i2c_control_fini(struct smu_context *smu) 1894 { 1895 struct amdgpu_device *adev = smu->adev; 1896 int i; 1897 1898 for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { 1899 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; 1900 struct i2c_adapter *control = &smu_i2c->adapter; 1901 1902 i2c_del_adapter(control); 1903 } 1904 adev->pm.ras_eeprom_i2c_bus = NULL; 1905 adev->pm.fru_eeprom_i2c_bus = NULL; 1906 } 1907 1908 static int smu_v13_0_0_set_mp1_state(struct smu_context *smu, 1909 enum pp_mp1_state mp1_state) 1910 { 1911 int ret; 1912 1913 switch (mp1_state) { 1914 case PP_MP1_STATE_UNLOAD: 1915 ret = smu_cmn_set_mp1_state(smu, mp1_state); 1916 break; 1917 default: 1918 /* Ignore others */ 1919 ret = 0; 1920 } 1921 1922 return ret; 1923 } 1924 1925 static int smu_v13_0_0_set_df_cstate(struct smu_context *smu, 1926 enum pp_df_cstate state) 1927 { 1928 return smu_cmn_send_smc_msg_with_param(smu, 1929 SMU_MSG_DFCstateControl, 1930 state, 1931 NULL); 1932 } 1933 1934 static void smu_v13_0_0_set_mode1_reset_param(struct smu_context *smu, 1935 uint32_t supported_version, 1936 uint32_t *param) 1937 { 1938 uint32_t smu_version; 1939 struct amdgpu_device *adev = smu->adev; 1940 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 1941 1942 smu_cmn_get_smc_version(smu, NULL, &smu_version); 1943 1944 if ((smu_version >= supported_version) && 1945 ras && atomic_read(&ras->in_recovery)) 1946 /* Set RAS fatal error reset flag */ 1947 *param = 1 << 16; 1948 else 1949 *param = 0; 1950 } 1951 1952 static int smu_v13_0_0_mode1_reset(struct smu_context *smu) 1953 { 1954 int ret; 1955 uint32_t param; 1956 struct amdgpu_device *adev = smu->adev; 1957 1958 switch (adev->ip_versions[MP1_HWIP][0]) { 1959 case IP_VERSION(13, 0, 0): 1960 /* SMU 13_0_0 PMFW supports RAS fatal error reset from 78.77 */ 1961 smu_v13_0_0_set_mode1_reset_param(smu, 0x004e4d00, ¶m); 1962 1963 ret = smu_cmn_send_smc_msg_with_param(smu, 1964 SMU_MSG_Mode1Reset, param, NULL); 1965 break; 1966 1967 case IP_VERSION(13, 0, 10): 1968 /* SMU 13_0_10 PMFW supports RAS fatal error reset from 80.28 */ 1969 smu_v13_0_0_set_mode1_reset_param(smu, 0x00501c00, ¶m); 1970 1971 ret = smu_cmn_send_debug_smc_msg_with_param(smu, 1972 DEBUGSMC_MSG_Mode1Reset, param); 1973 break; 1974 1975 default: 1976 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL); 1977 break; 1978 } 1979 1980 if (!ret) 1981 msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS); 1982 1983 return ret; 1984 } 1985 1986 static int smu_v13_0_0_mode2_reset(struct smu_context *smu) 1987 { 1988 int ret; 1989 struct amdgpu_device *adev = smu->adev; 1990 1991 if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) 1992 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode2Reset, NULL); 1993 else 1994 return -EOPNOTSUPP; 1995 1996 return ret; 1997 } 1998 1999 static int smu_v13_0_0_enable_gfx_features(struct smu_context *smu) 2000 { 2001 struct amdgpu_device *adev = smu->adev; 2002 2003 if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) 2004 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures, 2005 FEATURE_PWR_GFX, NULL); 2006 else 2007 return -EOPNOTSUPP; 2008 } 2009 2010 static void smu_v13_0_0_set_smu_mailbox_registers(struct smu_context *smu) 2011 { 2012 struct amdgpu_device *adev = smu->adev; 2013 2014 smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82); 2015 smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66); 2016 smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); 2017 2018 smu->debug_param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_53); 2019 smu->debug_msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_75); 2020 smu->debug_resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_54); 2021 } 2022 2023 static int smu_v13_0_0_smu_send_bad_mem_page_num(struct smu_context *smu, 2024 uint32_t size) 2025 { 2026 int ret = 0; 2027 2028 /* message SMU to update the bad page number on SMUBUS */ 2029 ret = smu_cmn_send_smc_msg_with_param(smu, 2030 SMU_MSG_SetNumBadMemoryPagesRetired, 2031 size, NULL); 2032 if (ret) 2033 dev_err(smu->adev->dev, 2034 "[%s] failed to message SMU to update bad memory pages number\n", 2035 __func__); 2036 2037 return ret; 2038 } 2039 2040 static int smu_v13_0_0_send_bad_mem_channel_flag(struct smu_context *smu, 2041 uint32_t size) 2042 { 2043 int ret = 0; 2044 2045 /* message SMU to update the bad channel info on SMUBUS */ 2046 ret = smu_cmn_send_smc_msg_with_param(smu, 2047 SMU_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 2048 size, NULL); 2049 if (ret) 2050 dev_err(smu->adev->dev, 2051 "[%s] failed to message SMU to update bad memory pages channel info\n", 2052 __func__); 2053 2054 return ret; 2055 } 2056 2057 static int smu_v13_0_0_check_ecc_table_support(struct smu_context *smu) 2058 { 2059 struct amdgpu_device *adev = smu->adev; 2060 uint32_t if_version = 0xff, smu_version = 0xff; 2061 int ret = 0; 2062 2063 ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); 2064 if (ret) 2065 return -EOPNOTSUPP; 2066 2067 if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) && 2068 (smu_version >= SUPPORT_ECCTABLE_SMU_13_0_10_VERSION)) 2069 return ret; 2070 else 2071 return -EOPNOTSUPP; 2072 } 2073 2074 static ssize_t smu_v13_0_0_get_ecc_info(struct smu_context *smu, 2075 void *table) 2076 { 2077 struct smu_table_context *smu_table = &smu->smu_table; 2078 struct amdgpu_device *adev = smu->adev; 2079 EccInfoTable_t *ecc_table = NULL; 2080 struct ecc_info_per_ch *ecc_info_per_channel = NULL; 2081 int i, ret = 0; 2082 struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table; 2083 2084 ret = smu_v13_0_0_check_ecc_table_support(smu); 2085 if (ret) 2086 return ret; 2087 2088 ret = smu_cmn_update_table(smu, 2089 SMU_TABLE_ECCINFO, 2090 0, 2091 smu_table->ecc_table, 2092 false); 2093 if (ret) { 2094 dev_info(adev->dev, "Failed to export SMU ecc table!\n"); 2095 return ret; 2096 } 2097 2098 ecc_table = (EccInfoTable_t *)smu_table->ecc_table; 2099 2100 for (i = 0; i < UMC_V8_10_TOTAL_CHANNEL_NUM(adev); i++) { 2101 ecc_info_per_channel = &(eccinfo->ecc[i]); 2102 ecc_info_per_channel->ce_count_lo_chip = 2103 ecc_table->EccInfo[i].ce_count_lo_chip; 2104 ecc_info_per_channel->ce_count_hi_chip = 2105 ecc_table->EccInfo[i].ce_count_hi_chip; 2106 ecc_info_per_channel->mca_umc_status = 2107 ecc_table->EccInfo[i].mca_umc_status; 2108 ecc_info_per_channel->mca_umc_addr = 2109 ecc_table->EccInfo[i].mca_umc_addr; 2110 } 2111 2112 return ret; 2113 } 2114 2115 static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { 2116 .get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask, 2117 .set_default_dpm_table = smu_v13_0_0_set_default_dpm_table, 2118 .i2c_init = smu_v13_0_0_i2c_control_init, 2119 .i2c_fini = smu_v13_0_0_i2c_control_fini, 2120 .is_dpm_running = smu_v13_0_0_is_dpm_running, 2121 .dump_pptable = smu_v13_0_0_dump_pptable, 2122 .init_microcode = smu_v13_0_init_microcode, 2123 .load_microcode = smu_v13_0_load_microcode, 2124 .fini_microcode = smu_v13_0_fini_microcode, 2125 .init_smc_tables = smu_v13_0_0_init_smc_tables, 2126 .fini_smc_tables = smu_v13_0_fini_smc_tables, 2127 .init_power = smu_v13_0_init_power, 2128 .fini_power = smu_v13_0_fini_power, 2129 .check_fw_status = smu_v13_0_check_fw_status, 2130 .setup_pptable = smu_v13_0_0_setup_pptable, 2131 .check_fw_version = smu_v13_0_check_fw_version, 2132 .write_pptable = smu_cmn_write_pptable, 2133 .set_driver_table_location = smu_v13_0_set_driver_table_location, 2134 .system_features_control = smu_v13_0_0_system_features_control, 2135 .set_allowed_mask = smu_v13_0_set_allowed_mask, 2136 .get_enabled_mask = smu_cmn_get_enabled_mask, 2137 .dpm_set_vcn_enable = smu_v13_0_set_vcn_enable, 2138 .dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable, 2139 .get_dpm_ultimate_freq = smu_v13_0_0_get_dpm_ultimate_freq, 2140 .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values, 2141 .read_sensor = smu_v13_0_0_read_sensor, 2142 .feature_is_enabled = smu_cmn_feature_is_enabled, 2143 .print_clk_levels = smu_v13_0_0_print_clk_levels, 2144 .force_clk_levels = smu_v13_0_0_force_clk_levels, 2145 .update_pcie_parameters = smu_v13_0_0_update_pcie_parameters, 2146 .get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range, 2147 .register_irq_handler = smu_v13_0_register_irq_handler, 2148 .enable_thermal_alert = smu_v13_0_enable_thermal_alert, 2149 .disable_thermal_alert = smu_v13_0_disable_thermal_alert, 2150 .notify_memory_pool_location = smu_v13_0_notify_memory_pool_location, 2151 .get_gpu_metrics = smu_v13_0_0_get_gpu_metrics, 2152 .set_soft_freq_limited_range = smu_v13_0_set_soft_freq_limited_range, 2153 .init_pptable_microcode = smu_v13_0_init_pptable_microcode, 2154 .populate_umd_state_clk = smu_v13_0_0_populate_umd_state_clk, 2155 .set_performance_level = smu_v13_0_set_performance_level, 2156 .gfx_off_control = smu_v13_0_gfx_off_control, 2157 .get_unique_id = smu_v13_0_0_get_unique_id, 2158 .get_fan_speed_pwm = smu_v13_0_0_get_fan_speed_pwm, 2159 .get_fan_speed_rpm = smu_v13_0_0_get_fan_speed_rpm, 2160 .set_fan_speed_pwm = smu_v13_0_set_fan_speed_pwm, 2161 .set_fan_speed_rpm = smu_v13_0_set_fan_speed_rpm, 2162 .get_fan_control_mode = smu_v13_0_get_fan_control_mode, 2163 .set_fan_control_mode = smu_v13_0_set_fan_control_mode, 2164 .enable_mgpu_fan_boost = smu_v13_0_0_enable_mgpu_fan_boost, 2165 .get_power_limit = smu_v13_0_0_get_power_limit, 2166 .set_power_limit = smu_v13_0_set_power_limit, 2167 .set_power_source = smu_v13_0_set_power_source, 2168 .get_power_profile_mode = smu_v13_0_0_get_power_profile_mode, 2169 .set_power_profile_mode = smu_v13_0_0_set_power_profile_mode, 2170 .run_btc = smu_v13_0_run_btc, 2171 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, 2172 .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, 2173 .set_tool_table_location = smu_v13_0_set_tool_table_location, 2174 .deep_sleep_control = smu_v13_0_deep_sleep_control, 2175 .gfx_ulv_control = smu_v13_0_gfx_ulv_control, 2176 .baco_is_support = smu_v13_0_baco_is_support, 2177 .baco_get_state = smu_v13_0_baco_get_state, 2178 .baco_set_state = smu_v13_0_baco_set_state, 2179 .baco_enter = smu_v13_0_0_baco_enter, 2180 .baco_exit = smu_v13_0_0_baco_exit, 2181 .mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported, 2182 .mode1_reset = smu_v13_0_0_mode1_reset, 2183 .mode2_reset = smu_v13_0_0_mode2_reset, 2184 .enable_gfx_features = smu_v13_0_0_enable_gfx_features, 2185 .set_mp1_state = smu_v13_0_0_set_mp1_state, 2186 .set_df_cstate = smu_v13_0_0_set_df_cstate, 2187 .send_hbm_bad_pages_num = smu_v13_0_0_smu_send_bad_mem_page_num, 2188 .send_hbm_bad_channel_flag = smu_v13_0_0_send_bad_mem_channel_flag, 2189 .gpo_control = smu_v13_0_gpo_control, 2190 .get_ecc_info = smu_v13_0_0_get_ecc_info, 2191 }; 2192 2193 void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu) 2194 { 2195 smu->ppt_funcs = &smu_v13_0_0_ppt_funcs; 2196 smu->message_map = smu_v13_0_0_message_map; 2197 smu->clock_map = smu_v13_0_0_clk_map; 2198 smu->feature_map = smu_v13_0_0_feature_mask_map; 2199 smu->table_map = smu_v13_0_0_table_map; 2200 smu->pwr_src_map = smu_v13_0_0_pwr_src_map; 2201 smu->workload_map = smu_v13_0_0_workload_map; 2202 smu_v13_0_0_set_smu_mailbox_registers(smu); 2203 } 2204