1 /* 2 * Copyright 2021 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #define SWSMU_CODE_LAYER_L2 25 26 #include <linux/firmware.h> 27 #include <linux/pci.h> 28 #include <linux/i2c.h> 29 #include "amdgpu.h" 30 #include "amdgpu_smu.h" 31 #include "atomfirmware.h" 32 #include "amdgpu_atomfirmware.h" 33 #include "amdgpu_atombios.h" 34 #include "smu_v13_0.h" 35 #include "smu13_driver_if_v13_0_0.h" 36 #include "soc15_common.h" 37 #include "atom.h" 38 #include "smu_v13_0_0_ppt.h" 39 #include "smu_v13_0_0_pptable.h" 40 #include "smu_v13_0_0_ppsmc.h" 41 #include "nbio/nbio_4_3_0_offset.h" 42 #include "nbio/nbio_4_3_0_sh_mask.h" 43 #include "mp/mp_13_0_0_offset.h" 44 #include "mp/mp_13_0_0_sh_mask.h" 45 46 #include "asic_reg/mp/mp_13_0_0_sh_mask.h" 47 #include "smu_cmn.h" 48 #include "amdgpu_ras.h" 49 50 /* 51 * DO NOT use these for err/warn/info/debug messages. 
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

/* Recover the owning amdgpu_device from the embedded pm.smu_i2c member. */
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))

#define FEATURE_MASK(feature) (1ULL << feature)
/* Feature bits whose presence means "DPM is running" (see smu_v13_0_0_is_dpm_running). */
#define SMC_DPM_FEATURE ( \
	FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT))

/* Size reserved for the combo pptable in the MP0/MP1 shared data region. */
#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000

/* MP1 C2P message registers (offsets into the MP1 SMN aperture). */
#define mmMP1_SMN_C2PMSG_66 0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0

#define mmMP1_SMN_C2PMSG_82 0x0292
#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0

#define mmMP1_SMN_C2PMSG_90 0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0

#define mmMP1_SMN_C2PMSG_75 0x028b
#define mmMP1_SMN_C2PMSG_75_BASE_IDX 0

#define mmMP1_SMN_C2PMSG_53 0x0275
#define mmMP1_SMN_C2PMSG_53_BASE_IDX 0

#define mmMP1_SMN_C2PMSG_54 0x0276
#define mmMP1_SMN_C2PMSG_54_BASE_IDX 0

#define DEBUGSMC_MSG_Mode1Reset 2

/*
 * SMU_v13_0_10 supports ECCTABLE since version 80.34.0,
 * use this to check ECCTABLE feature whether support
 */
#define SUPPORT_ECCTABLE_SMU_13_0_10_VERSION 0x00502200

/* Indices for the overdrive settings handled by smu_v13_0_0_get_od_setting_limits(). */
#define PP_OD_FEATURE_GFXCLK_FMIN 0
#define PP_OD_FEATURE_GFXCLK_FMAX 1
#define PP_OD_FEATURE_UCLK_FMIN 2
#define PP_OD_FEATURE_UCLK_FMAX 3
#define PP_OD_FEATURE_GFX_VF_CURVE 4

#define LINK_SPEED_MAX 3

/*
 * Driver message -> PPSMC message mapping.
 * NOTE(review): the third MSG_MAP argument is a per-message flag whose
 * semantics come from the MSG_MAP macro defined elsewhere — confirm there.
 */
static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
	MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
	MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
	MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0),
	MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0),
	MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0),
	MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0),
	MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1),
	MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1),
	MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1),
	MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1),
	MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1),
	MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1),
	MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1),
	MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
	MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
	MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
	MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
	MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
	MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1),
	MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
	MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0),
	MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0),
	MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0),
	MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0),
	MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1),
	MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
	MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1),
	MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0),
	MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
	MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
	MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
	MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0),
	MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0),
	MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0),
	MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0),
	MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1),
	MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0),
	MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
	MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
	MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
	MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0),
	MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0),
	MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
	MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
	MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
	MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
	MSG_MAP(Mode2Reset, PPSMC_MSG_Mode2Reset, 0),
	MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
	MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
	MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
	MSG_MAP(SetNumBadMemoryPagesRetired, PPSMC_MSG_SetNumBadMemoryPagesRetired, 0),
	MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
		PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0),
	MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
	MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
	MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0),
	MSG_MAP(DALNotPresent, PPSMC_MSG_DALNotPresent, 0),
};

/*
 * Generic SMU clock id -> PPCLK id. SCLK/MCLK are aliases of
 * GFXCLK/UCLK respectively.
 */
static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(GFXCLK, PPCLK_GFXCLK),
	CLK_MAP(SCLK, PPCLK_GFXCLK),
	CLK_MAP(SOCCLK, PPCLK_SOCCLK),
	CLK_MAP(FCLK, PPCLK_FCLK),
	CLK_MAP(UCLK, PPCLK_UCLK),
	CLK_MAP(MCLK, PPCLK_UCLK),
	CLK_MAP(VCLK, PPCLK_VCLK_0),
	CLK_MAP(VCLK1, PPCLK_VCLK_1),
	CLK_MAP(DCLK, PPCLK_DCLK_0),
	CLK_MAP(DCLK1, PPCLK_DCLK_1),
};

/* Generic SMU feature bit -> PMFW feature bit. */
static struct cmn2asic_mapping smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(FW_DATA_READ),
	FEA_MAP(DPM_GFXCLK),
	FEA_MAP(DPM_GFX_POWER_OPTIMIZER),
	FEA_MAP(DPM_UCLK),
	FEA_MAP(DPM_FCLK),
	FEA_MAP(DPM_SOCCLK),
	FEA_MAP(DPM_MP0CLK),
	FEA_MAP(DPM_LINK),
	FEA_MAP(DPM_DCN),
	FEA_MAP(VMEMP_SCALING),
	FEA_MAP(VDDIO_MEM_SCALING),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_FCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_DCFCLK),
	FEA_MAP(DS_UCLK),
	FEA_MAP(GFX_ULV),
	FEA_MAP(FW_DSTATE),
	FEA_MAP(GFXOFF),
	FEA_MAP(BACO),
	FEA_MAP(MM_DPM),
	FEA_MAP(SOC_MPCLK_DS),
	FEA_MAP(BACO_MPCLK_DS),
	FEA_MAP(THROTTLERS),
	FEA_MAP(SMARTSHIFT),
	FEA_MAP(GTHR),
	FEA_MAP(ACDC),
	FEA_MAP(VR0HOT),
	FEA_MAP(FW_CTF),
	FEA_MAP(FAN_CONTROL),
	FEA_MAP(GFX_DCS),
	FEA_MAP(GFX_READ_MARGIN),
	FEA_MAP(LED_DISPLAY),
	FEA_MAP(GFXCLK_SPREAD_SPECTRUM),
	FEA_MAP(OUT_OF_BAND_MONITOR),
	FEA_MAP(OPTIMIZED_VMIN),
	FEA_MAP(GFX_IMU),
	FEA_MAP(BOOT_TIME_CAL),
	FEA_MAP(GFX_PCC_DFLL),
	FEA_MAP(SOC_CG),
	FEA_MAP(DF_CSTATE),
	FEA_MAP(GFX_EDC),
	FEA_MAP(BOOT_POWER_OPT),
	FEA_MAP(CLOCK_POWER_DOWN_BYPASS),
	FEA_MAP(DS_VCN),
	FEA_MAP(BACO_CG),
	FEA_MAP(MEM_TEMP_READ),
	FEA_MAP(ATHUB_MMHUB_PG),
	FEA_MAP(SOC_PCC),
	/* VCLK/DCLK DPM and PPT are folded into wider PMFW feature bits. */
	[SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
	[SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
	[SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
};

/* Generic SMU table id -> PMFW table id. */
static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(PPTABLE),
	TAB_MAP(WATERMARKS),
	TAB_MAP(AVFS_PSM_DEBUG),
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(DRIVER_SMU_CONFIG),
	TAB_MAP(ACTIVITY_MONITOR_COEFF),
	[SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
	TAB_MAP(I2C_COMMANDS),
	TAB_MAP(ECCINFO),
	TAB_MAP(OVERDRIVE),
};

static struct cmn2asic_mapping smu_v13_0_0_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
	PWR_MAP(AC),
	PWR_MAP(DC),
};

/* Driver power-profile -> PPLib workload bit. */
static struct cmn2asic_mapping
smu_v13_0_0_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { 257 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT), 258 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), 259 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT), 260 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), 261 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), 262 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), 263 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), 264 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT), 265 }; 266 267 static const uint8_t smu_v13_0_0_throttler_map[] = { 268 [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT), 269 [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT), 270 [THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT), 271 [THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT), 272 [THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT), 273 [THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT), 274 [THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT), 275 [THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT), 276 [THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT), 277 [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT), 278 [THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT), 279 [THROTTLER_TEMP_VR_MEM0_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT), 280 [THROTTLER_TEMP_VR_MEM1_BIT] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT), 281 [THROTTLER_TEMP_LIQUID0_BIT] = (SMU_THROTTLER_TEMP_LIQUID0_BIT), 282 [THROTTLER_TEMP_LIQUID1_BIT] = (SMU_THROTTLER_TEMP_LIQUID1_BIT), 283 [THROTTLER_GFX_APCC_PLUS_BIT] = (SMU_THROTTLER_APCC_BIT), 284 [THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT), 285 }; 286 287 static int 288 smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu, 289 uint32_t *feature_mask, uint32_t num) 290 { 291 struct amdgpu_device *adev = smu->adev; 292 u32 smu_version; 
293 294 if (num > 2) 295 return -EINVAL; 296 297 memset(feature_mask, 0xff, sizeof(uint32_t) * num); 298 299 if (!(adev->pm.pp_feature & PP_SCLK_DPM_MASK)) { 300 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT); 301 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_IMU_BIT); 302 } 303 304 if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) || 305 !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB)) 306 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT); 307 308 if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)) 309 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT); 310 311 /* PMFW 78.58 contains a critical fix for gfxoff feature */ 312 smu_cmn_get_smc_version(smu, NULL, &smu_version); 313 if ((smu_version < 0x004e3a00) || 314 !(adev->pm.pp_feature & PP_GFXOFF_MASK)) 315 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT); 316 317 if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) { 318 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT); 319 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT); 320 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT); 321 } 322 323 if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)) 324 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT); 325 326 if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { 327 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT); 328 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT); 329 } 330 331 if (!(adev->pm.pp_feature & PP_ULV_MASK)) 332 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT); 333 334 return 0; 335 } 336 337 static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu) 338 { 339 struct smu_table_context *table_context = &smu->smu_table; 340 struct smu_13_0_0_powerplay_table *powerplay_table = 341 table_context->power_play_table; 342 struct smu_baco_context *smu_baco = &smu->smu_baco; 343 PPTable_t *pptable = 
		smu->smu_table.driver_pptable;
#if 0
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	const OverDriveLimits_t * const overdrive_upperlimits =
		&pptable->SkuTable.OverDriveLimitsBasicMax;
	const OverDriveLimits_t * const overdrive_lowerlimits =
		&pptable->SkuTable.OverDriveLimitsMin;
#endif

	/* DC/AC switching handled by GPIO when the platform says so. */
	if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC)
		smu->dc_controlled_by_gpio = true;

	/* Either BACO or MACO capability enables BACO platform support. */
	if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO ||
	    powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
		smu_baco->platform_support = true;

	if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
		smu_baco->maco_support = true;

	/*
	 * We are in the transition to a new OD mechanism.
	 * Disable the OD feature support for SMU13 temporarily.
	 * TODO: get this reverted when new OD mechanism online
	 */
#if 0
	if (!overdrive_lowerlimits->FeatureCtrlMask ||
	    !overdrive_upperlimits->FeatureCtrlMask)
		smu->od_enabled = false;

	/*
	 * Instead of having its own buffer space and get overdrive_table copied,
	 * smu->od_settings just points to the actual overdrive_table
	 */
	smu->od_settings = &powerplay_table->overdrive_table;
#else
	smu->od_enabled = false;
#endif

	table_context->thermal_controller_type =
		powerplay_table->thermal_controller_type;

	/* No driver fan control when PMFW is not running FAN_CONTROL itself. */
	smu->adev->pm.no_fan =
		!(pptable->SkuTable.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT));

	return 0;
}

/* Copy the smc pptable out of the powerplay table into driver_pptable. */
static int smu_v13_0_0_store_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_13_0_0_powerplay_table *powerplay_table =
		table_context->power_play_table;

	memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
	       sizeof(PPTable_t));

	return 0;
}

/* Fallback layout of the VBIOS smc_dpm_info table if not already defined. */
#ifndef atom_smc_dpm_info_table_13_0_0
struct atom_smc_dpm_info_table_13_0_0 {
	struct atom_common_table_header table_header;
	BoardTable_t BoardTable;
};
#endif

/* Overwrite the pptable's BoardTable with the one from the VBIOS data table. */
static int smu_v13_0_0_append_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	struct atom_smc_dpm_info_table_13_0_0 *smc_dpm_table;
	BoardTable_t *BoardTable = &smc_pptable->BoardTable;
	int index, ret;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smc_dpm_info);

	ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
					     (uint8_t **)&smc_dpm_table);
	if (ret)
		return ret;

	memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t));

	return 0;
}

/*
 * Fetch the combo pptable from PMFW and report its location/size.
 * @table points at smu_table->combo_pptable on success.
 */
static int smu_v13_0_0_get_pptable_from_pmfw(struct smu_context *smu,
					     void **table,
					     uint32_t *size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	void *combo_pptable = smu_table->combo_pptable;
	int ret = 0;

	ret = smu_cmn_get_combo_pptable(smu);
	if (ret)
		return ret;

	*table = combo_pptable;
	*size = sizeof(struct smu_13_0_0_powerplay_table);

	return 0;
}

/* Retrieve, store, append (non-SCPM) and validate the powerplay table. */
static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (amdgpu_sriov_vf(smu->adev))
		return 0;

	ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
						&smu_table->power_play_table,
						&smu_table->power_play_table_size);
	if (ret)
		return ret;

	ret = smu_v13_0_0_store_powerplay_table(smu);
	if (ret)
		return ret;

	/*
	 * With SCPM enabled, the operation below will be handled
	 * by PSP. Driver involvement is unnecessary and useless.
470 */ 471 if (!adev->scpm_enabled) { 472 ret = smu_v13_0_0_append_powerplay_table(smu); 473 if (ret) 474 return ret; 475 } 476 477 ret = smu_v13_0_0_check_powerplay_table(smu); 478 if (ret) 479 return ret; 480 481 return ret; 482 } 483 484 static int smu_v13_0_0_tables_init(struct smu_context *smu) 485 { 486 struct smu_table_context *smu_table = &smu->smu_table; 487 struct smu_table *tables = smu_table->tables; 488 489 SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t), 490 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 491 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), 492 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 493 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t), 494 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 495 SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t), 496 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 497 SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTableExternal_t), 498 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 499 SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE, 500 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 501 SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, 502 sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE, 503 AMDGPU_GEM_DOMAIN_VRAM); 504 SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE, 505 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 506 SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t), 507 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 508 509 smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL); 510 if (!smu_table->metrics_table) 511 goto err0_out; 512 smu_table->metrics_time = 0; 513 514 smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3); 515 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 516 if (!smu_table->gpu_metrics_table) 517 goto err1_out; 518 519 smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); 520 if 
(!smu_table->watermarks_table) 521 goto err2_out; 522 523 smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL); 524 if (!smu_table->ecc_table) 525 goto err3_out; 526 527 return 0; 528 529 err3_out: 530 kfree(smu_table->watermarks_table); 531 err2_out: 532 kfree(smu_table->gpu_metrics_table); 533 err1_out: 534 kfree(smu_table->metrics_table); 535 err0_out: 536 return -ENOMEM; 537 } 538 539 static int smu_v13_0_0_allocate_dpm_context(struct smu_context *smu) 540 { 541 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 542 543 smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context), 544 GFP_KERNEL); 545 if (!smu_dpm->dpm_context) 546 return -ENOMEM; 547 548 smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context); 549 550 return 0; 551 } 552 553 static int smu_v13_0_0_init_smc_tables(struct smu_context *smu) 554 { 555 int ret = 0; 556 557 ret = smu_v13_0_0_tables_init(smu); 558 if (ret) 559 return ret; 560 561 ret = smu_v13_0_0_allocate_dpm_context(smu); 562 if (ret) 563 return ret; 564 565 return smu_v13_0_init_smc_tables(smu); 566 } 567 568 static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu) 569 { 570 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; 571 struct smu_table_context *table_context = &smu->smu_table; 572 PPTable_t *pptable = table_context->driver_pptable; 573 SkuTable_t *skutable = &pptable->SkuTable; 574 struct smu_13_0_dpm_table *dpm_table; 575 struct smu_13_0_pcie_table *pcie_table; 576 uint32_t link_level; 577 int ret = 0; 578 579 /* socclk dpm table setup */ 580 dpm_table = &dpm_context->dpm_tables.soc_table; 581 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { 582 ret = smu_v13_0_set_single_dpm_table(smu, 583 SMU_SOCCLK, 584 dpm_table); 585 if (ret) 586 return ret; 587 } else { 588 dpm_table->count = 1; 589 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100; 590 dpm_table->dpm_levels[0].enabled = true; 591 dpm_table->min = 
			dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* gfxclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.gfx_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
		ret = smu_v13_0_set_single_dpm_table(smu,
						     SMU_GFXCLK,
						     dpm_table);
		if (ret)
			return ret;

		/*
		 * Update the reported maximum shader clock to the value
		 * which can be guaranteed to be achieved on all cards. This
		 * is aligned with the Windows setting. And considering that value
		 * might be not the peak frequency the card can achieve, it
		 * is normal some real-time clock frequency can overtake this
		 * labelled maximum clock frequency (for example in pp_dpm_sclk
		 * sysfs output).
		 */
		if (skutable->DriverReportedClocks.GameClockAc &&
		    (dpm_table->dpm_levels[dpm_table->count - 1].value >
		     skutable->DriverReportedClocks.GameClockAc)) {
			dpm_table->dpm_levels[dpm_table->count - 1].value =
				skutable->DriverReportedClocks.GameClockAc;
			dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
		}
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* uclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.uclk_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v13_0_set_single_dpm_table(smu,
						     SMU_UCLK,
						     dpm_table);
		if (ret)
			return ret;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* fclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.fclk_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
		ret = smu_v13_0_set_single_dpm_table(smu,
						     SMU_FCLK,
						     dpm_table);
		if (ret)
			return ret;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* vclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.vclk_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) {
		ret = smu_v13_0_set_single_dpm_table(smu,
						     SMU_VCLK,
						     dpm_table);
		if (ret)
			return ret;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* dclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.dclk_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) {
		ret = smu_v13_0_set_single_dpm_table(smu,
						     SMU_DCLK,
						     dpm_table);
		if (ret)
			return ret;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* lclk dpm table setup: skip all-zero (unused) link levels. */
	pcie_table = &dpm_context->dpm_tables.pcie_table;
	pcie_table->num_of_link_levels = 0;
	for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
		if (!skutable->PcieGenSpeed[link_level] &&
		    !skutable->PcieLaneCount[link_level] &&
		    !skutable->LclkFreq[link_level])
			continue;

		pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
			skutable->PcieGenSpeed[link_level];
		pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
			skutable->PcieLaneCount[link_level];
		pcie_table->clk_freq[pcie_table->num_of_link_levels] =
			skutable->LclkFreq[link_level];
		pcie_table->num_of_link_levels++;
	}

	return 0;
}

/* DPM is considered running when any SMC_DPM_FEATURE bit is enabled. */
static bool smu_v13_0_0_is_dpm_running(struct smu_context *smu)
{
	int ret = 0;
	uint64_t feature_enabled;

	ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
	if (ret)
		return false;

	return !!(feature_enabled & SMC_DPM_FEATURE);
}

/* Log a short summary of the driver pptable (version + running features). */
static void smu_v13_0_0_dump_pptable(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;
	SkuTable_t *skutable = &pptable->SkuTable;

	dev_info(smu->adev->dev, "Dumped PPTable:\n");

	dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version);
	dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]);
	dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]);
}

/* Thin wrapper over the common SMU13 feature enable/disable path. */
static int smu_v13_0_0_system_features_control(struct smu_context *smu,
					       bool en)
{
	return smu_v13_0_system_features_control(smu, en);
}

/* Collapse per-throttler percentages into a bitmask of active throttlers. */
static uint32_t smu_v13_0_get_throttler_status(SmuMetrics_t *metrics)
{
	uint32_t throttler_status = 0;
	int i;

	for (i = 0; i < THROTTLER_COUNT; i++)
		throttler_status |=
			(metrics->ThrottlingPercentage[i] ?
			 1U << i : 0);

	return throttler_status;
}

/* Activity (%) at or below which the post-deep-sleep frequency is reported. */
#define SMU_13_0_0_BUSY_THRESHOLD 15

/*
 * Read one metric from the cached PMFW metrics table.
 * smu_cmn_get_metrics_table() refreshes the cache first (bypass=false).
 * Unknown members yield UINT_MAX rather than an error.
 */
static int smu_v13_0_0_get_smu_metrics_data(struct smu_context *smu,
					    MetricsMember_t member,
					    uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics =
		&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->CurrClock[PPCLK_GFXCLK];
		break;
	case METRICS_CURR_SOCCLK:
		*value = metrics->CurrClock[PPCLK_SOCCLK];
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->CurrClock[PPCLK_UCLK];
		break;
	case METRICS_CURR_VCLK:
		*value = metrics->CurrClock[PPCLK_VCLK_0];
		break;
	case METRICS_CURR_VCLK1:
		*value = metrics->CurrClock[PPCLK_VCLK_1];
		break;
	case METRICS_CURR_DCLK:
		*value = metrics->CurrClock[PPCLK_DCLK_0];
		break;
	case METRICS_CURR_DCLK1:
		*value = metrics->CurrClock[PPCLK_DCLK_1];
		break;
	case METRICS_CURR_FCLK:
		*value = metrics->CurrClock[PPCLK_FCLK];
		break;
	/* For averaged clocks, report the pre- or post-deep-sleep value
	 * depending on how busy the corresponding domain is. */
	case METRICS_AVERAGE_GFXCLK:
		if (metrics->AverageGfxActivity <= SMU_13_0_0_BUSY_THRESHOLD)
			*value = metrics->AverageGfxclkFrequencyPostDs;
		else
			*value = metrics->AverageGfxclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_FCLK:
		if (metrics->AverageUclkActivity <= SMU_13_0_0_BUSY_THRESHOLD)
			*value = metrics->AverageFclkFrequencyPostDs;
		else
			*value = metrics->AverageFclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_UCLK:
		if (metrics->AverageUclkActivity <= SMU_13_0_0_BUSY_THRESHOLD)
			*value = metrics->AverageMemclkFrequencyPostDs;
		else
			*value = metrics->AverageMemclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->AverageVclk0Frequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->AverageDclk0Frequency;
		break;
	case METRICS_AVERAGE_VCLK1:
		*value = metrics->AverageVclk1Frequency;
		break;
	case METRICS_AVERAGE_DCLK1:
		*value = metrics->AverageDclk1Frequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = metrics->AverageUclkActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* Scaled by 256 — presumably 8.8 fixed-point as consumed by
		 * the power readout path; confirm against the callers. */
		*value = metrics->AverageSocketPower << 8;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->AvgTemperature[TEMP_EDGE] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->AvgTemperature[TEMP_HOTSPOT] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = metrics->AvgTemperature[TEMP_MEM] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRGFX:
		*value = metrics->AvgTemperature[TEMP_VR_GFX] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRSOC:
		*value = metrics->AvgTemperature[TEMP_VR_SOC] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = smu_v13_0_get_throttler_status(metrics);
		break;
	case METRICS_CURR_FANSPEED:
		*value = metrics->AvgFanRpm;
		break;
	case METRICS_CURR_FANPWM:
		*value = metrics->AvgFanPwm;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		*value = metrics->AvgVoltage[SVI_PLANE_GFX];
		break;
	case METRICS_PCIE_RATE:
		*value = metrics->PcieRate;
		break;
	case METRICS_PCIE_WIDTH:
		*value = metrics->PcieWidth;
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}

/*
 * Report the min/max of the DPM table matching @clk_type.
 * Either @min or @max may be NULL when the caller only needs one bound.
 */
static int smu_v13_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     uint32_t *min,
					     uint32_t *max)
{
	struct smu_13_0_dpm_context *dpm_context =
		smu->smu_dpm.dpm_context;
	struct smu_13_0_dpm_table *dpm_table;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		/* uclk dpm table */
		dpm_table = &dpm_context->dpm_tables.uclk_table;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		/* gfxclk dpm table */
		dpm_table = &dpm_context->dpm_tables.gfx_table;
		break;
	case SMU_SOCCLK:
		/* socclk dpm table */
		dpm_table = &dpm_context->dpm_tables.soc_table;
		break;
	case SMU_FCLK:
		/* fclk dpm table */
		dpm_table = &dpm_context->dpm_tables.fclk_table;
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		/* vclk dpm table */
		dpm_table = &dpm_context->dpm_tables.vclk_table;
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		/* dclk dpm table */
		dpm_table = &dpm_context->dpm_tables.dclk_table;
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported clock type!\n");
		return -EINVAL;
	}

	if (min)
		*min = dpm_table->min;
	if (max)
		*max = dpm_table->max;

	return 0;
}

/*
 * amd_pp sensor readout. Clock sensors are converted from the metrics'
 * unit to 10 kHz via "* 100"; unsupported sensors return -EOPNOTSUPP.
 */
static int smu_v13_0_0_read_sensor(struct smu_context *smu,
				   enum amd_pp_sensors sensor,
				   void *data,
				   uint32_t *size)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	int ret = 0;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		/* NOTE(review): only 2 bytes are written here while *size
		 * reports 4 — appears intentional per this file's convention,
		 * but confirm the consumer only reads a u16. */
		*(uint16_t *)data = smc_pptable->SkuTable.FanMaximumRpm;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_LOAD:
		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_MEMACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_SOCKETPOWER,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_HOTSPOT,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_EDGE,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_MEM,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_CURR_UCLK,
						       (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXCLK,
						       (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_VOLTAGE_VDDGFX,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/* Translate a generic clk type to the matching metrics member and read it. */
static int smu_v13_0_0_get_current_clk_freq_by_table(struct smu_context *smu,
						     enum smu_clk_type clk_type,
						     uint32_t *value)
{
	MetricsMember_t member_type;
	int clk_id = 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return -EINVAL;

	switch (clk_id) {
	case PPCLK_GFXCLK:
		member_type = METRICS_AVERAGE_GFXCLK;
		break;
	case PPCLK_UCLK:
		member_type = METRICS_CURR_UCLK;
		break;
	case PPCLK_FCLK:
		member_type = METRICS_CURR_FCLK;
		break;
	case PPCLK_SOCCLK:
		member_type = METRICS_CURR_SOCCLK;
		break;
	case PPCLK_VCLK_0:
		member_type = METRICS_AVERAGE_VCLK;
		break;
	case PPCLK_DCLK_0:
		member_type = METRICS_AVERAGE_DCLK;
		break;
	case
/*
 * Check whether one overdrive knob (PP_OD_FEATURE_*_BIT) is supported on
 * this SKU. The PMFW advertises the supported knobs through the
 * FeatureCtrlMask of the basic-max overdrive limits in the pptable.
 */
static bool smu_v13_0_0_is_od_feature_supported(struct smu_context *smu,
						int od_feature_bit)
{
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	const OverDriveLimits_t * const overdrive_upperlimits =
				&pptable->SkuTable.OverDriveLimitsBasicMax;

	return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit);
}

/*
 * Fetch the legal [min, max] range for one overdrive setting
 * (PP_OD_FEATURE_GFXCLK_FMIN/FMAX, UCLK_FMIN/FMAX or GFX_VF_CURVE).
 *
 * The lower bound comes from OverDriveLimitsMin, the upper bound from
 * OverDriveLimitsBasicMax; both live in the static pptable. @min and
 * @max may each be NULL when the caller only needs one bound. An
 * unknown @od_feature_bit yields INT_MAX for both bounds.
 */
static void smu_v13_0_0_get_od_setting_limits(struct smu_context *smu,
					      int od_feature_bit,
					      int32_t *min,
					      int32_t *max)
{
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	const OverDriveLimits_t * const overdrive_upperlimits =
				&pptable->SkuTable.OverDriveLimitsBasicMax;
	const OverDriveLimits_t * const overdrive_lowerlimits =
				&pptable->SkuTable.OverDriveLimitsMin;
	int32_t od_min_setting, od_max_setting;

	switch (od_feature_bit) {
	case PP_OD_FEATURE_GFXCLK_FMIN:
		od_min_setting = overdrive_lowerlimits->GfxclkFmin;
		od_max_setting = overdrive_upperlimits->GfxclkFmin;
		break;
	case PP_OD_FEATURE_GFXCLK_FMAX:
		od_min_setting = overdrive_lowerlimits->GfxclkFmax;
		od_max_setting = overdrive_upperlimits->GfxclkFmax;
		break;
	case PP_OD_FEATURE_UCLK_FMIN:
		od_min_setting = overdrive_lowerlimits->UclkFmin;
		od_max_setting = overdrive_upperlimits->UclkFmin;
		break;
	case PP_OD_FEATURE_UCLK_FMAX:
		od_min_setting = overdrive_lowerlimits->UclkFmax;
		od_max_setting = overdrive_upperlimits->UclkFmax;
		break;
	case PP_OD_FEATURE_GFX_VF_CURVE:
		od_min_setting = overdrive_lowerlimits->VoltageOffsetPerZoneBoundary;
		od_max_setting = overdrive_upperlimits->VoltageOffsetPerZoneBoundary;
		break;
	default:
		/* unknown knob: report an impossible (empty) range */
		od_min_setting = od_max_setting = INT_MAX;
		break;
	}

	if (min)
		*min = od_min_setting;
	if (max)
		*max = od_max_setting;
}
/* Log the current OD clock window (debug builds only). */
static void smu_v13_0_0_dump_od_table(struct smu_context *smu,
				      OverDriveTableExternal_t *od_table)
{
	struct amdgpu_device *adev = smu->adev;

	dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin,
		od_table->OverDriveTable.GfxclkFmax);
	dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin,
		od_table->OverDriveTable.UclkFmax);
}

/* Download the current overdrive table from the PMFW into @od_table. */
static int smu_v13_0_0_get_overdrive_table(struct smu_context *smu,
					   OverDriveTableExternal_t *od_table)
{
	int ret = 0;

	ret = smu_cmn_update_table(smu,
				   SMU_TABLE_OVERDRIVE,
				   0,
				   (void *)od_table,
				   false);
	if (ret)
		dev_err(smu->adev->dev, "Failed to get overdrive table!\n");

	return ret;
}

/* Push @od_table down to the PMFW so the new OD settings take effect. */
static int smu_v13_0_0_upload_overdrive_table(struct smu_context *smu,
					      OverDriveTableExternal_t *od_table)
{
	int ret = 0;

	ret = smu_cmn_update_table(smu,
				   SMU_TABLE_OVERDRIVE,
				   0,
				   (void *)od_table,
				   true);
	if (ret)
		dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");

	return ret;
}

/*
 * Format the sysfs pp_dpm_*/pp_od_clk_voltage level listing for
 * @clk_type into @buf. For DPM clocks each level is printed with a '*'
 * marking the level closest to the current frequency; SMU_PCIE prints
 * gen/width/freq per link level; the SMU_OD_* types print the current
 * overdrive settings and SMU_OD_RANGE their legal ranges.
 *
 * Returns the number of bytes written, or a negative error code.
 */
static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					char *buf)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	OverDriveTableExternal_t *od_table =
		(OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
	struct smu_13_0_dpm_table *single_dpm_table;
	struct smu_13_0_pcie_table *pcie_table;
	uint32_t gen_speed, lane_width;
	/*
	 * NOTE(review): curr_freq is an int but is passed as the uint32_t *
	 * output of smu_v13_0_0_get_current_clk_freq_by_table() below —
	 * confirm this builds warning-free with the tree's flags.
	 */
	int i, curr_freq, size = 0;
	int32_t min_value, max_value;
	int ret = 0;

	smu_cmn_get_sysfs_buf(&buf, &size);

	/* after a fatal RAS interrupt the SMU data is not trustworthy */
	if (amdgpu_ras_intr_triggered()) {
		size += sysfs_emit_at(buf, size, "unavailable\n");
		return size;
	}

	/* first pass: pick the DPM table backing this clock type */
	switch (clk_type) {
	case SMU_SCLK:
		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		break;
	case SMU_MCLK:
		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
		break;
	case SMU_SOCCLK:
		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
		break;
	case SMU_FCLK:
		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
		break;
	default:
		break;
	}

	/* second pass: emit the listing */
	switch (clk_type) {
	case SMU_SCLK:
	case SMU_MCLK:
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_VCLK1:
	case SMU_DCLK:
	case SMU_DCLK1:
		ret = smu_v13_0_0_get_current_clk_freq_by_table(smu, clk_type, &curr_freq);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to get current clock freq!");
			return ret;
		}

		if (single_dpm_table->is_fine_grained) {
			/*
			 * For fine grained dpms, there are only two dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> max clock freq
			 * And the current clock frequency can be any value between them.
			 * So, if the current clock frequency is not at level 0 or level 1,
			 * we will fake it as three dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> current actual clock freq
			 *   - level 2 -> max clock freq
			 */
			if ((single_dpm_table->dpm_levels[0].value != curr_freq) &&
			    (single_dpm_table->dpm_levels[1].value != curr_freq)) {
				size += sysfs_emit_at(buf, size, "0: %uMhz\n",
						      single_dpm_table->dpm_levels[0].value);
				size += sysfs_emit_at(buf, size, "1: %uMhz *\n",
						      curr_freq);
				size += sysfs_emit_at(buf, size, "2: %uMhz\n",
						      single_dpm_table->dpm_levels[1].value);
			} else {
				size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
						      single_dpm_table->dpm_levels[0].value,
						      single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : "");
				size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
						      single_dpm_table->dpm_levels[1].value,
						      single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : "");
			}
		} else {
			for (i = 0; i < single_dpm_table->count; i++)
				size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
						      i, single_dpm_table->dpm_levels[i].value,
						      single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : "");
		}
		break;
	case SMU_PCIE:
		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_PCIE_RATE,
						       &gen_speed);
		if (ret)
			return ret;

		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_PCIE_WIDTH,
						       &lane_width);
		if (ret)
			return ret;

		pcie_table = &(dpm_context->dpm_tables.pcie_table);
		/* '*' marks the link level matching the live gen and width */
		for (i = 0; i < pcie_table->num_of_link_levels; i++)
			size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
					(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," :
					(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," :
					(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," :
					(pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," : "",
					(pcie_table->pcie_lane[i] == 1) ? "x1" :
					(pcie_table->pcie_lane[i] == 2) ? "x2" :
					(pcie_table->pcie_lane[i] == 3) ? "x4" :
					(pcie_table->pcie_lane[i] == 4) ? "x8" :
					(pcie_table->pcie_lane[i] == 5) ? "x12" :
					(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
					pcie_table->clk_freq[i],
					(gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
					(lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
					"*" : "");
		break;

	case SMU_OD_SCLK:
		if (!smu_v13_0_0_is_od_feature_supported(smu,
							 PP_OD_FEATURE_GFXCLK_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_SCLK:\n");
		size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
					od_table->OverDriveTable.GfxclkFmin,
					od_table->OverDriveTable.GfxclkFmax);
		break;

	case SMU_OD_MCLK:
		if (!smu_v13_0_0_is_od_feature_supported(smu,
							 PP_OD_FEATURE_UCLK_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_MCLK:\n");
		size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMHz\n",
					od_table->OverDriveTable.UclkFmin,
					od_table->OverDriveTable.UclkFmax);
		break;

	case SMU_OD_VDDC_CURVE:
		if (!smu_v13_0_0_is_od_feature_supported(smu,
							 PP_OD_FEATURE_GFX_VF_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_VDDC_CURVE:\n");
		for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
			size += sysfs_emit_at(buf, size, "%d: %dmv\n",
						i,
						od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i]);
		break;

	case SMU_OD_RANGE:
		if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) &&
		    !smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) &&
		    !smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");

		if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
			smu_v13_0_0_get_od_setting_limits(smu,
							  PP_OD_FEATURE_GFXCLK_FMIN,
							  &min_value,
							  NULL);
			smu_v13_0_0_get_od_setting_limits(smu,
							  PP_OD_FEATURE_GFXCLK_FMAX,
							  NULL,
							  &max_value);
			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
					      min_value, max_value);
		}

		if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
			smu_v13_0_0_get_od_setting_limits(smu,
							  PP_OD_FEATURE_UCLK_FMIN,
							  &min_value,
							  NULL);
			smu_v13_0_0_get_od_setting_limits(smu,
							  PP_OD_FEATURE_UCLK_FMAX,
							  NULL,
							  &max_value);
			size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
					      min_value, max_value);
		}

		if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
			smu_v13_0_0_get_od_setting_limits(smu,
							  PP_OD_FEATURE_GFX_VF_CURVE,
							  &min_value,
							  &max_value);
			size += sysfs_emit_at(buf, size, "VDDC_CURVE: %7dmv %10dmv\n",
					      min_value, max_value);
		}
		break;

	default:
		break;
	}

	return size;
}
/*
 * Apply a pp_od_clk_voltage edit command.
 *
 * @type:  which OD table/command (SCLK limits, MCLK limits, VF curve,
 *         restore defaults, or commit).
 * @input: user-supplied (index, value) pairs; for the VF curve a single
 *         (point, offset-mv) pair.
 * @size:  number of entries in @input.
 *
 * Edits are staged in the cached overdrive table and only sent to the
 * PMFW on PP_OD_COMMIT_DPM_TABLE (RESTORE falls through into COMMIT).
 * Every value is range-checked against the pptable OD limits first.
 *
 * Returns 0 on success, -ENOTSUPP when the knob is not supported on
 * this SKU, -EINVAL for malformed/out-of-range input, -ENOSYS for an
 * unknown command.
 */
static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu,
					 enum PP_OD_DPM_TABLE_COMMAND type,
					 long input[],
					 uint32_t size)
{
	struct smu_table_context *table_context = &smu->smu_table;
	OverDriveTableExternal_t *od_table =
		(OverDriveTableExternal_t *)table_context->overdrive_table;
	struct amdgpu_device *adev = smu->adev;
	uint32_t offset_of_voltageoffset;
	int32_t minimum, maximum;
	uint32_t feature_ctrlmask;
	int i, ret = 0;

	switch (type) {
	case PP_OD_EDIT_SCLK_VDDC_TABLE:
		if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
			dev_warn(adev->dev, "GFXCLK_LIMITS setting not supported!\n");
			return -ENOTSUPP;
		}

		/* input is a flat list of (index, value) pairs */
		for (i = 0; i < size; i += 2) {
			if (i + 2 > size) {
				dev_info(adev->dev, "invalid number of input parameters %d\n", size);
				return -EINVAL;
			}

			switch (input[i]) {
			case 0:
				/* index 0 -> gfxclk soft floor */
				smu_v13_0_0_get_od_setting_limits(smu,
								  PP_OD_FEATURE_GFXCLK_FMIN,
								  &minimum,
								  &maximum);
				if (input[i + 1] < minimum ||
				    input[i + 1] > maximum) {
					dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%u, %u]!\n",
						 input[i + 1], minimum, maximum);
					return -EINVAL;
				}

				od_table->OverDriveTable.GfxclkFmin = input[i + 1];
				od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
				break;

			case 1:
				/* index 1 -> gfxclk soft ceiling */
				smu_v13_0_0_get_od_setting_limits(smu,
								  PP_OD_FEATURE_GFXCLK_FMAX,
								  &minimum,
								  &maximum);
				if (input[i + 1] < minimum ||
				    input[i + 1] > maximum) {
					dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%u, %u]!\n",
						 input[i + 1], minimum, maximum);
					return -EINVAL;
				}

				od_table->OverDriveTable.GfxclkFmax = input[i + 1];
				od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
				break;

			default:
				dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
				dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
				return -EINVAL;
			}
		}

		/* reject an inverted window before it ever reaches the PMFW */
		if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) {
			dev_err(adev->dev,
				"Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n",
				(uint32_t)od_table->OverDriveTable.GfxclkFmin,
				(uint32_t)od_table->OverDriveTable.GfxclkFmax);
			return -EINVAL;
		}
		break;

	case PP_OD_EDIT_MCLK_VDDC_TABLE:
		if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
			dev_warn(adev->dev, "UCLK_LIMITS setting not supported!\n");
			return -ENOTSUPP;
		}

		for (i = 0; i < size; i += 2) {
			if (i + 2 > size) {
				dev_info(adev->dev, "invalid number of input parameters %d\n", size);
				return -EINVAL;
			}

			switch (input[i]) {
			case 0:
				smu_v13_0_0_get_od_setting_limits(smu,
								  PP_OD_FEATURE_UCLK_FMIN,
								  &minimum,
								  &maximum);
				if (input[i + 1] < minimum ||
				    input[i + 1] > maximum) {
					dev_info(adev->dev, "UclkFmin (%ld) must be within [%u, %u]!\n",
						 input[i + 1], minimum, maximum);
					return -EINVAL;
				}

				od_table->OverDriveTable.UclkFmin = input[i + 1];
				od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
				break;

			case 1:
				smu_v13_0_0_get_od_setting_limits(smu,
								  PP_OD_FEATURE_UCLK_FMAX,
								  &minimum,
								  &maximum);
				if (input[i + 1] < minimum ||
				    input[i + 1] > maximum) {
					dev_info(adev->dev, "UclkFmax (%ld) must be within [%u, %u]!\n",
						 input[i + 1], minimum, maximum);
					return -EINVAL;
				}

				od_table->OverDriveTable.UclkFmax = input[i + 1];
				od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
				break;

			default:
				dev_info(adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]);
				dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
				return -EINVAL;
			}
		}

		if (od_table->OverDriveTable.UclkFmin > od_table->OverDriveTable.UclkFmax) {
			dev_err(adev->dev,
				"Invalid setting: UclkFmin(%u) is bigger than UclkFmax(%u)\n",
				(uint32_t)od_table->OverDriveTable.UclkFmin,
				(uint32_t)od_table->OverDriveTable.UclkFmax);
			return -EINVAL;
		}
		break;

	case PP_OD_EDIT_VDDC_CURVE:
		if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
			dev_warn(adev->dev, "VF curve setting not supported!\n");
			return -ENOTSUPP;
		}

		/* input[0] selects the curve point, input[1] the mv offset */
		if (input[0] >= PP_NUM_OD_VF_CURVE_POINTS ||
		    input[0] < 0)
			return -EINVAL;

		smu_v13_0_0_get_od_setting_limits(smu,
						  PP_OD_FEATURE_GFX_VF_CURVE,
						  &minimum,
						  &maximum);
		if (input[1] < minimum ||
		    input[1] > maximum) {
			dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n",
				 input[1], minimum, maximum);
			return -EINVAL;
		}

		od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[input[0]] = input[1];
		od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT;
		break;

	case PP_OD_RESTORE_DEFAULT_TABLE:
		/* reload boot settings but keep the pending-feature mask */
		feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask;
		memcpy(od_table,
		       table_context->boot_overdrive_table,
		       sizeof(OverDriveTableExternal_t));
		od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask;
		fallthrough;

	case PP_OD_COMMIT_DPM_TABLE:
		/*
		 * The member below instructs PMFW the settings focused in
		 * this single operation.
		 * `uint32_t FeatureCtrlMask;`
		 * It does not contain actual informations about user's custom
		 * settings. Thus we do not cache it.
		 */
		offset_of_voltageoffset = offsetof(OverDriveTable_t, VoltageOffsetPerZoneBoundary);
		/* only bother the PMFW when the cached settings changed */
		if (memcmp((u8 *)od_table + offset_of_voltageoffset,
			   table_context->user_overdrive_table + offset_of_voltageoffset,
			   sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset)) {
			smu_v13_0_0_dump_od_table(smu, od_table);

			ret = smu_v13_0_0_upload_overdrive_table(smu, od_table);
			if (ret) {
				dev_err(adev->dev, "Failed to upload overdrive table!\n");
				return ret;
			}

			od_table->OverDriveTable.FeatureCtrlMask = 0;
			memcpy(table_context->user_overdrive_table + offset_of_voltageoffset,
			       (u8 *)od_table + offset_of_voltageoffset,
			       sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset);

			/* user_od reflects whether we differ from boot defaults */
			if (!memcmp(table_context->user_overdrive_table,
				    table_context->boot_overdrive_table,
				    sizeof(OverDriveTableExternal_t)))
				smu->user_dpm_profile.user_od = false;
			else
				smu->user_dpm_profile.user_od = true;
		}
		break;

	default:
		return -ENOSYS;
	}

	return ret;
}
/*
 * Constrain a clock domain to the DPM levels selected in @mask.
 *
 * The lowest and highest set bits of @mask select the soft min/max DPM
 * level; the corresponding frequencies are then programmed via
 * smu_v13_0_set_soft_freq_limited_range(). Fine-grained tables only
 * expose two levels, so the requested levels are clamped to {0, 1}.
 *
 * Returns 0 on success, -EINVAL when a requested level does not exist.
 */
static int smu_v13_0_0_force_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					uint32_t mask)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_13_0_dpm_table *single_dpm_table;
	uint32_t soft_min_level, soft_max_level;
	uint32_t min_freq, max_freq;
	int ret = 0;

	/* lowest/highest set bit -> requested min/max level */
	soft_min_level = mask ? (ffs(mask) - 1) : 0;
	soft_max_level = mask ? (fls(mask) - 1) : 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		break;
	case SMU_MCLK:
	case SMU_UCLK:
		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
		break;
	case SMU_SOCCLK:
		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
		break;
	case SMU_FCLK:
		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
		break;
	default:
		break;
	}

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
	case SMU_MCLK:
	case SMU_UCLK:
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_VCLK1:
	case SMU_DCLK:
	case SMU_DCLK1:
		if (single_dpm_table->is_fine_grained) {
			/* There is only 2 levels for fine grained DPM */
			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
			soft_min_level = (soft_min_level >= 1 ? 1 : 0);
		} else {
			if ((soft_max_level >= single_dpm_table->count) ||
			    (soft_min_level >= single_dpm_table->count))
				return -EINVAL;
		}

		min_freq = single_dpm_table->dpm_levels[soft_min_level].value;
		max_freq = single_dpm_table->dpm_levels[soft_max_level].value;

		ret = smu_v13_0_set_soft_freq_limited_range(smu,
							    clk_type,
							    min_freq,
							    max_freq);
		break;
	case SMU_DCEFCLK:
	case SMU_PCIE:
	default:
		break;
	}

	return ret;
}
/*
 * Baseline thermal policy: entry 0 is the permissive default range
 * (min -273.15C, max 99C in millidegrees); entry 1 is a uniform 120C
 * limit set. Values are refined from the pptable below.
 */
static const struct smu_temperature_range smu13_thermal_policy[] = {
	{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
	{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};

/*
 * Fill @range with the thermal limits for this board.
 *
 * Starts from smu13_thermal_policy[0] and overrides edge/hotspot/mem
 * limits with the pptable TemperatureLimit values (converted to the
 * millidegree units SMU_TEMPERATURE_UNITS_PER_CENTIGRADES implies);
 * the emergency maxima add the per-sensor CTF offsets. No-op under
 * SR-IOV. Returns 0, or -EINVAL when @range is NULL.
 */
static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu,
						     struct smu_temperature_range *range)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_13_0_0_powerplay_table *powerplay_table =
		table_context->power_play_table;
	PPTable_t *pptable = smu->smu_table.driver_pptable;

	if (amdgpu_sriov_vf(smu->adev))
		return 0;

	if (!range)
		return -EINVAL;

	memcpy(range, &smu13_thermal_policy[0], sizeof(struct smu_temperature_range));

	range->max = pptable->SkuTable.TemperatureLimit[TEMP_EDGE] *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->edge_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE) *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->hotspot_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->hotspot_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT) *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->mem_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_MEM] *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM) *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
	range->software_shutdown_temp_offset = pptable->SkuTable.FanAbnormalTempLimitOffset;

	return 0;
}
/* Local two-argument max; beware double evaluation of macro arguments. */
#define MAX(a, b)	((a) > (b) ? (a) : (b))

/*
 * Produce a gpu_metrics_v1_3 snapshot for the amdgpu_pm gpu_metrics
 * sysfs interface.
 *
 * Fetches a fresh (bypass-cache) copy of the SMU metrics table and
 * translates it field by field. On success, *table points at the
 * context-owned gpu_metrics buffer and the struct size is returned;
 * otherwise the metrics-fetch error is returned.
 */
static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
					   void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_3 *gpu_metrics =
		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
	SmuMetricsExternal_t metrics_ext;
	SmuMetrics_t *metrics = &metrics_ext.SmuMetrics;
	int ret = 0;

	/* bypass the cache so the snapshot is current */
	ret = smu_cmn_get_metrics_table(smu,
					&metrics_ext,
					true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

	gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE];
	gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT];
	gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM];
	gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX];
	gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC];
	/* single vrmem field: report the hotter of the two VR_MEM sensors */
	gpu_metrics->temperature_vrmem = MAX(metrics->AvgTemperature[TEMP_VR_MEM0],
					     metrics->AvgTemperature[TEMP_VR_MEM1]);

	gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
	gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
	/* single mm field: report the busier of the two VCN instances */
	gpu_metrics->average_mm_activity = MAX(metrics->Vcn0ActivityPercentage,
					       metrics->Vcn1ActivityPercentage);

	gpu_metrics->average_socket_power = metrics->AverageSocketPower;
	gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;

	/*
	 * Below the busy threshold, the post-deep-sleep frequency is the
	 * representative one; above it, the pre-deep-sleep value.
	 */
	if (metrics->AverageGfxActivity <= SMU_13_0_0_BUSY_THRESHOLD)
		gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs;
	else
		gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;

	if (metrics->AverageUclkActivity <= SMU_13_0_0_BUSY_THRESHOLD)
		gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs;
	else
		gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs;

	gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency;
	gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency;
	gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
	gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;

	gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency;
	gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
	gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
	gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
	gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
	gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1];
	gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_1];

	gpu_metrics->throttle_status =
			smu_v13_0_get_throttler_status(metrics);
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
							   smu_v13_0_0_throttler_map);

	gpu_metrics->current_fan_speed = metrics->AvgFanRpm;

	gpu_metrics->pcie_link_width = metrics->PcieWidth;
	/*
	 * NOTE(review): the (PcieRate - 1) comparison appears to guard
	 * against an out-of-range rate (and relies on PcieRate == 0
	 * wrapping to a huge unsigned value); such rates fall back to
	 * gen 1 speed — confirm against the PMFW metrics spec.
	 */
	if ((metrics->PcieRate - 1) > LINK_SPEED_MAX)
		gpu_metrics->pcie_link_speed = pcie_gen_to_speed(1);
	else
		gpu_metrics->pcie_link_speed = pcie_gen_to_speed(metrics->PcieRate);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_GFX];
	gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_SOC];
	gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VMEMP];

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_3);
}
/*
 * Initialize the cached overdrive tables from the PMFW boot values.
 *
 * The boot OD table is downloaded from the PMFW and copied into the
 * working table. On a cold driver load the user table is also reset to
 * the boot values; on S3/S4/runpm resume with user OD settings active,
 * the user-tweaked knobs (clock windows and VF-curve offsets) are
 * preserved across the re-initialization.
 *
 * Returns 0 on success or the error from the table download.
 */
static int smu_v13_0_0_set_default_od_settings(struct smu_context *smu)
{
	OverDriveTableExternal_t *od_table =
		(OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
	OverDriveTableExternal_t *boot_od_table =
		(OverDriveTableExternal_t *)smu->smu_table.boot_overdrive_table;
	OverDriveTableExternal_t *user_od_table =
		(OverDriveTableExternal_t *)smu->smu_table.user_overdrive_table;
	OverDriveTableExternal_t user_od_table_bak;
	int ret = 0;
	int i;

	ret = smu_v13_0_0_get_overdrive_table(smu, boot_od_table);
	if (ret)
		return ret;

	smu_v13_0_0_dump_od_table(smu, boot_od_table);

	memcpy(od_table,
	       boot_od_table,
	       sizeof(OverDriveTableExternal_t));

	/*
	 * For S3/S4/Runpm resume, we need to setup those overdrive tables again,
	 * but we have to preserve user defined values in "user_od_table".
	 */
	if (!smu->adev->in_suspend) {
		memcpy(user_od_table,
		       boot_od_table,
		       sizeof(OverDriveTableExternal_t));
		smu->user_dpm_profile.user_od = false;
	} else if (smu->user_dpm_profile.user_od) {
		memcpy(&user_od_table_bak,
		       user_od_table,
		       sizeof(OverDriveTableExternal_t));
		memcpy(user_od_table,
		       boot_od_table,
		       sizeof(OverDriveTableExternal_t));
		/* re-apply only the user-controlled knobs on top of boot values */
		user_od_table->OverDriveTable.GfxclkFmin =
				user_od_table_bak.OverDriveTable.GfxclkFmin;
		user_od_table->OverDriveTable.GfxclkFmax =
				user_od_table_bak.OverDriveTable.GfxclkFmax;
		user_od_table->OverDriveTable.UclkFmin =
				user_od_table_bak.OverDriveTable.UclkFmin;
		user_od_table->OverDriveTable.UclkFmax =
				user_od_table_bak.OverDriveTable.UclkFmax;
		for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
			user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] =
				user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i];
	}

	return 0;
}
/*
 * Re-apply the cached user overdrive settings after a reset/resume.
 *
 * All three OD feature bits are set in FeatureCtrlMask for the upload
 * (the mask only tells the PMFW which knobs this operation touches and
 * is cleared again afterwards — it is not cached state). On success the
 * working table is synced to the user table. Returns the upload result.
 */
static int smu_v13_0_0_restore_user_od_settings(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	OverDriveTableExternal_t *od_table = table_context->overdrive_table;
	OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table;
	int res;

	user_od_table->OverDriveTable.FeatureCtrlMask = 1U << PP_OD_FEATURE_GFXCLK_BIT |
							1U << PP_OD_FEATURE_UCLK_BIT |
							1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT;
	res = smu_v13_0_0_upload_overdrive_table(smu, user_od_table);
	user_od_table->OverDriveTable.FeatureCtrlMask = 0;
	if (res == 0)
		memcpy(od_table, user_od_table, sizeof(OverDriveTableExternal_t));

	return res;
}

/*
 * Populate the UMD pstate (profile) clock table.
 *
 * min/peak come from the DPM table extremes; for gfxclk the peak and
 * standard levels are capped by the pptable DriverReportedClocks
 * (GameClockAc/BaseClockAc) when those are set and below the DPM max.
 * Standard levels for the other domains follow the historical choices
 * below (uclk uses max, the rest use min). Always returns 0.
 */
static int smu_v13_0_0_populate_umd_state_clk(struct smu_context *smu)
{
	struct smu_13_0_dpm_context *dpm_context =
				smu->smu_dpm.dpm_context;
	struct smu_13_0_dpm_table *gfx_table =
				&dpm_context->dpm_tables.gfx_table;
	struct smu_13_0_dpm_table *mem_table =
				&dpm_context->dpm_tables.uclk_table;
	struct smu_13_0_dpm_table *soc_table =
				&dpm_context->dpm_tables.soc_table;
	struct smu_13_0_dpm_table *vclk_table =
				&dpm_context->dpm_tables.vclk_table;
	struct smu_13_0_dpm_table *dclk_table =
				&dpm_context->dpm_tables.dclk_table;
	struct smu_13_0_dpm_table *fclk_table =
				&dpm_context->dpm_tables.fclk_table;
	struct smu_umd_pstate_table *pstate_table =
				&smu->pstate_table;
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;
	DriverReportedClocks_t driver_clocks =
			pptable->SkuTable.DriverReportedClocks;

	pstate_table->gfxclk_pstate.min = gfx_table->min;
	if (driver_clocks.GameClockAc &&
	    (driver_clocks.GameClockAc < gfx_table->max))
		pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
	else
		pstate_table->gfxclk_pstate.peak = gfx_table->max;

	pstate_table->uclk_pstate.min = mem_table->min;
	pstate_table->uclk_pstate.peak = mem_table->max;

	pstate_table->socclk_pstate.min = soc_table->min;
	pstate_table->socclk_pstate.peak = soc_table->max;

	pstate_table->vclk_pstate.min = vclk_table->min;
	pstate_table->vclk_pstate.peak = vclk_table->max;

	pstate_table->dclk_pstate.min = dclk_table->min;
	pstate_table->dclk_pstate.peak = dclk_table->max;

	pstate_table->fclk_pstate.min = fclk_table->min;
	pstate_table->fclk_pstate.peak = fclk_table->max;

	if (driver_clocks.BaseClockAc &&
	    driver_clocks.BaseClockAc < gfx_table->max)
		pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
	else
		pstate_table->gfxclk_pstate.standard = gfx_table->max;
	pstate_table->uclk_pstate.standard = mem_table->max;
	pstate_table->socclk_pstate.standard = soc_table->min;
	pstate_table->vclk_pstate.standard = vclk_table->min;
	pstate_table->dclk_pstate.standard = dclk_table->min;
	pstate_table->fclk_pstate.standard = fclk_table->min;

	return 0;
}
/*
 * Derive the adapter's unique id from the PMFW-reported public serial
 * number (upper/lower 32 bits from the metrics table). If the metrics
 * fetch fails, the id degrades to 0 rather than erroring out. Also
 * seeds adev->serial when it has not been set yet.
 */
static void smu_v13_0_0_get_unique_id(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics =
		&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
	struct amdgpu_device *adev = smu->adev;
	uint32_t upper32 = 0, lower32 = 0;
	int ret;

	ret = smu_cmn_get_metrics_table(smu, NULL, false);
	if (ret)
		goto out;

	upper32 = metrics->PublicSerialNumberUpper;
	lower32 = metrics->PublicSerialNumberLower;

out:
	adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
	if (adev->serial[0] == '\0')
		sprintf(adev->serial, "%016llx", adev->unique_id);
}

/*
 * Read the current fan speed as a 0-255 PWM duty value.
 *
 * The PMFW reports fan PWM in percent; it is rescaled to the 0-255
 * range hwmon expects (clamped at 255). Returns 0 on success, -EINVAL
 * for a NULL output pointer, or the metrics-fetch error.
 */
static int smu_v13_0_0_get_fan_speed_pwm(struct smu_context *smu,
					 uint32_t *speed)
{
	int ret;

	if (!speed)
		return -EINVAL;

	ret = smu_v13_0_0_get_smu_metrics_data(smu,
					       METRICS_CURR_FANPWM,
					       speed);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!");
		return ret;
	}

	/* Convert the PMFW output which is in percent to pwm(255) based */
	*speed = MIN(*speed * 255 / 100, 255);

	return 0;
}
smu_v13_0_0_get_smu_metrics_data(smu, 1944 METRICS_CURR_FANPWM, 1945 speed); 1946 if (ret) { 1947 dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!"); 1948 return ret; 1949 } 1950 1951 /* Convert the PMFW output which is in percent to pwm(255) based */ 1952 *speed = MIN(*speed * 255 / 100, 255); 1953 1954 return 0; 1955 } 1956 1957 static int smu_v13_0_0_get_fan_speed_rpm(struct smu_context *smu, 1958 uint32_t *speed) 1959 { 1960 if (!speed) 1961 return -EINVAL; 1962 1963 return smu_v13_0_0_get_smu_metrics_data(smu, 1964 METRICS_CURR_FANSPEED, 1965 speed); 1966 } 1967 1968 static int smu_v13_0_0_enable_mgpu_fan_boost(struct smu_context *smu) 1969 { 1970 struct smu_table_context *table_context = &smu->smu_table; 1971 PPTable_t *pptable = table_context->driver_pptable; 1972 SkuTable_t *skutable = &pptable->SkuTable; 1973 1974 /* 1975 * Skip the MGpuFanBoost setting for those ASICs 1976 * which do not support it 1977 */ 1978 if (skutable->MGpuAcousticLimitRpmThreshold == 0) 1979 return 0; 1980 1981 return smu_cmn_send_smc_msg_with_param(smu, 1982 SMU_MSG_SetMGpuFanBoostLimitRpm, 1983 0, 1984 NULL); 1985 } 1986 1987 static int smu_v13_0_0_get_power_limit(struct smu_context *smu, 1988 uint32_t *current_power_limit, 1989 uint32_t *default_power_limit, 1990 uint32_t *max_power_limit) 1991 { 1992 struct smu_table_context *table_context = &smu->smu_table; 1993 struct smu_13_0_0_powerplay_table *powerplay_table = 1994 (struct smu_13_0_0_powerplay_table *)table_context->power_play_table; 1995 PPTable_t *pptable = table_context->driver_pptable; 1996 SkuTable_t *skutable = &pptable->SkuTable; 1997 uint32_t power_limit, od_percent; 1998 1999 if (smu_v13_0_get_current_power_limit(smu, &power_limit)) 2000 power_limit = smu->adev->pm.ac_power ? 
2001 skutable->SocketPowerLimitAc[PPT_THROTTLER_PPT0] : 2002 skutable->SocketPowerLimitDc[PPT_THROTTLER_PPT0]; 2003 2004 if (current_power_limit) 2005 *current_power_limit = power_limit; 2006 if (default_power_limit) 2007 *default_power_limit = power_limit; 2008 2009 if (max_power_limit) { 2010 if (smu->od_enabled) { 2011 od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]); 2012 2013 dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit); 2014 2015 power_limit *= (100 + od_percent); 2016 power_limit /= 100; 2017 } 2018 *max_power_limit = power_limit; 2019 } 2020 2021 return 0; 2022 } 2023 2024 static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu, 2025 char *buf) 2026 { 2027 DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; 2028 DpmActivityMonitorCoeffInt_t *activity_monitor = 2029 &(activity_monitor_external.DpmActivityMonitorCoeffInt); 2030 static const char *title[] = { 2031 "PROFILE_INDEX(NAME)", 2032 "CLOCK_TYPE(NAME)", 2033 "FPS", 2034 "MinActiveFreqType", 2035 "MinActiveFreq", 2036 "BoosterFreqType", 2037 "BoosterFreq", 2038 "PD_Data_limit_c", 2039 "PD_Data_error_coeff", 2040 "PD_Data_error_rate_coeff"}; 2041 int16_t workload_type = 0; 2042 uint32_t i, size = 0; 2043 int result = 0; 2044 2045 if (!buf) 2046 return -EINVAL; 2047 2048 size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s\n", 2049 title[0], title[1], title[2], title[3], title[4], title[5], 2050 title[6], title[7], title[8], title[9]); 2051 2052 for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) { 2053 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 2054 workload_type = smu_cmn_to_asic_specific_index(smu, 2055 CMN2ASIC_MAPPING_WORKLOAD, 2056 i); 2057 if (workload_type == -ENOTSUPP) 2058 continue; 2059 else if (workload_type < 0) 2060 return -EINVAL; 2061 2062 result = smu_cmn_update_table(smu, 2063 SMU_TABLE_ACTIVITY_MONITOR_COEFF, 2064 workload_type, 2065 
(void *)(&activity_monitor_external), 2066 false); 2067 if (result) { 2068 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); 2069 return result; 2070 } 2071 2072 size += sysfs_emit_at(buf, size, "%2d %14s%s:\n", 2073 i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); 2074 2075 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n", 2076 " ", 2077 0, 2078 "GFXCLK", 2079 activity_monitor->Gfx_FPS, 2080 activity_monitor->Gfx_MinActiveFreqType, 2081 activity_monitor->Gfx_MinActiveFreq, 2082 activity_monitor->Gfx_BoosterFreqType, 2083 activity_monitor->Gfx_BoosterFreq, 2084 activity_monitor->Gfx_PD_Data_limit_c, 2085 activity_monitor->Gfx_PD_Data_error_coeff, 2086 activity_monitor->Gfx_PD_Data_error_rate_coeff); 2087 2088 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n", 2089 " ", 2090 1, 2091 "FCLK", 2092 activity_monitor->Fclk_FPS, 2093 activity_monitor->Fclk_MinActiveFreqType, 2094 activity_monitor->Fclk_MinActiveFreq, 2095 activity_monitor->Fclk_BoosterFreqType, 2096 activity_monitor->Fclk_BoosterFreq, 2097 activity_monitor->Fclk_PD_Data_limit_c, 2098 activity_monitor->Fclk_PD_Data_error_coeff, 2099 activity_monitor->Fclk_PD_Data_error_rate_coeff); 2100 } 2101 2102 return size; 2103 } 2104 2105 static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, 2106 long *input, 2107 uint32_t size) 2108 { 2109 DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; 2110 DpmActivityMonitorCoeffInt_t *activity_monitor = 2111 &(activity_monitor_external.DpmActivityMonitorCoeffInt); 2112 int workload_type, ret = 0; 2113 2114 smu->power_profile_mode = input[size]; 2115 2116 if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { 2117 dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); 2118 return -EINVAL; 2119 } 2120 2121 if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { 2122 ret = 
smu_cmn_update_table(smu, 2123 SMU_TABLE_ACTIVITY_MONITOR_COEFF, 2124 WORKLOAD_PPLIB_CUSTOM_BIT, 2125 (void *)(&activity_monitor_external), 2126 false); 2127 if (ret) { 2128 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); 2129 return ret; 2130 } 2131 2132 switch (input[0]) { 2133 case 0: /* Gfxclk */ 2134 activity_monitor->Gfx_FPS = input[1]; 2135 activity_monitor->Gfx_MinActiveFreqType = input[2]; 2136 activity_monitor->Gfx_MinActiveFreq = input[3]; 2137 activity_monitor->Gfx_BoosterFreqType = input[4]; 2138 activity_monitor->Gfx_BoosterFreq = input[5]; 2139 activity_monitor->Gfx_PD_Data_limit_c = input[6]; 2140 activity_monitor->Gfx_PD_Data_error_coeff = input[7]; 2141 activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8]; 2142 break; 2143 case 1: /* Fclk */ 2144 activity_monitor->Fclk_FPS = input[1]; 2145 activity_monitor->Fclk_MinActiveFreqType = input[2]; 2146 activity_monitor->Fclk_MinActiveFreq = input[3]; 2147 activity_monitor->Fclk_BoosterFreqType = input[4]; 2148 activity_monitor->Fclk_BoosterFreq = input[5]; 2149 activity_monitor->Fclk_PD_Data_limit_c = input[6]; 2150 activity_monitor->Fclk_PD_Data_error_coeff = input[7]; 2151 activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8]; 2152 break; 2153 } 2154 2155 ret = smu_cmn_update_table(smu, 2156 SMU_TABLE_ACTIVITY_MONITOR_COEFF, 2157 WORKLOAD_PPLIB_CUSTOM_BIT, 2158 (void *)(&activity_monitor_external), 2159 true); 2160 if (ret) { 2161 dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); 2162 return ret; 2163 } 2164 } 2165 2166 if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE && 2167 (((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) || 2168 ((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xCC)))) { 2169 ret = smu_cmn_update_table(smu, 2170 SMU_TABLE_ACTIVITY_MONITOR_COEFF, 2171 WORKLOAD_PPLIB_COMPUTE_BIT, 2172 (void *)(&activity_monitor_external), 2173 false); 2174 if (ret) { 2175 
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); 2176 return ret; 2177 } 2178 2179 ret = smu_cmn_update_table(smu, 2180 SMU_TABLE_ACTIVITY_MONITOR_COEFF, 2181 WORKLOAD_PPLIB_CUSTOM_BIT, 2182 (void *)(&activity_monitor_external), 2183 true); 2184 if (ret) { 2185 dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); 2186 return ret; 2187 } 2188 2189 workload_type = smu_cmn_to_asic_specific_index(smu, 2190 CMN2ASIC_MAPPING_WORKLOAD, 2191 PP_SMC_POWER_PROFILE_CUSTOM); 2192 } else { 2193 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 2194 workload_type = smu_cmn_to_asic_specific_index(smu, 2195 CMN2ASIC_MAPPING_WORKLOAD, 2196 smu->power_profile_mode); 2197 } 2198 2199 if (workload_type < 0) 2200 return -EINVAL; 2201 2202 return smu_cmn_send_smc_msg_with_param(smu, 2203 SMU_MSG_SetWorkloadMask, 2204 1 << workload_type, 2205 NULL); 2206 } 2207 2208 static int smu_v13_0_0_baco_enter(struct smu_context *smu) 2209 { 2210 struct smu_baco_context *smu_baco = &smu->smu_baco; 2211 struct amdgpu_device *adev = smu->adev; 2212 2213 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) 2214 return smu_v13_0_baco_set_armd3_sequence(smu, 2215 (smu_baco->maco_support && amdgpu_runtime_pm != 1) ? 
				BACO_SEQ_BAMACO : BACO_SEQ_BACO);
	else
		return smu_v13_0_baco_enter(smu);
}

/*
 * Exit BACO; on the runtime-pm/audio path wait for PMFW to finish the
 * Dstate change, then arm the ULPS sequence instead of a direct exit.
 */
static int smu_v13_0_0_baco_exit(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
		/* Wait for PMFW handling for the Dstate change */
		usleep_range(10000, 11000);
		return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
	} else {
		return smu_v13_0_baco_exit(smu);
	}
}

/*
 * Mode1 reset is available on bare metal with PMFW >= 78.41.
 *
 * NOTE(review): the smu_cmn_get_smc_version() return value is ignored;
 * if it fails, smu_version is read uninitialized — confirm the call
 * cannot fail at this point, or initialize smu_version.
 */
static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	u32 smu_version;

	/* SRIOV does not support SMU mode1 reset */
	if (amdgpu_sriov_vf(adev))
		return false;

	/* PMFW support is available since 78.41 */
	smu_cmn_get_smc_version(smu, NULL, &smu_version);
	if (smu_version < 0x004e2900)
		return false;

	return true;
}

/*
 * I2C master transfer implemented by packing all message bytes into a
 * single SwI2cRequest_t, shipping it to PMFW via the driver table, and
 * copying read data back out of the response in the same table.
 */
static int smu_v13_0_0_i2c_xfer(struct i2c_adapter *i2c_adap,
				struct i2c_msg *msg, int num_msgs)
{
	struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
	struct amdgpu_device *adev = smu_i2c->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = &smu_table->driver_table;
	SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
	int i, j, r, c;
	u16 dir;

	if (!adev->pm.dpm_enabled)
		return -EBUSY;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->I2CcontrollerPort = smu_i2c->port;
	req->I2CSpeed = I2C_SPEED_FAST_400K;
	req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
	dir = msg[0].flags & I2C_M_RD;

	/* flatten all messages into one command stream; c counts commands */
	for (c = i = 0; i < num_msgs; i++) {
		for (j = 0; j < msg[i].len; j++, c++) {
			SwI2cCmd_t *cmd = &req->SwI2cCmds[c];

			if (!(msg[i].flags & I2C_M_RD)) {
				/* write */
				cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK;
				cmd->ReadWriteData = msg[i].buf[j];
			}

			if ((dir ^ msg[i].flags) & I2C_M_RD) {
				/* The direction changed: issue a RESTART */
				dir = msg[i].flags & I2C_M_RD;
				cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
			}

			req->NumCmds++;

			/*
			 * Insert STOP if we are at the last byte of either last
			 * message for the transaction or the client explicitly
			 * requires a STOP at this particular message.
			 */
			if ((j == msg[i].len - 1) &&
			    ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
				cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
				cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
			}
		}
	}
	mutex_lock(&adev->pm.mutex);
	r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
	if (r)
		goto fail;

	/* copy read data back; c must walk the same command layout as above */
	for (c = i = 0; i < num_msgs; i++) {
		if (!(msg[i].flags & I2C_M_RD)) {
			c += msg[i].len;
			continue;
		}
		for (j = 0; j < msg[i].len; j++, c++) {
			SwI2cCmd_t *cmd = &res->SwI2cCmds[c];

			msg[i].buf[j] = cmd->ReadWriteData;
		}
	}
	r = num_msgs;
fail:
	mutex_unlock(&adev->pm.mutex);
	kfree(req);
	return r;
}

static u32 smu_v13_0_0_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm smu_v13_0_0_i2c_algo = {
	.master_xfer = smu_v13_0_0_i2c_xfer,
	.functionality = smu_v13_0_0_i2c_func,
};

/* limits reflect the fixed SwI2cRequest_t command array size */
static const struct i2c_adapter_quirks smu_v13_0_0_i2c_control_quirks = {
	.flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
	.max_read_len = MAX_SW_I2C_COMMANDS,
	.max_write_len = MAX_SW_I2C_COMMANDS,
	.max_comb_1st_msg_len = 2,
	.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
};

/*
 * Register one i2c adapter per SMU i2c bus and wire up the buses used
 * for the FRU and RAS EEPROMs.
 */
static int smu_v13_0_0_i2c_control_init(struct smu_context *smu)
{
	struct amdgpu_device *adev =
		smu->adev;
	int res, i;

	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		smu_i2c->adev = adev;
		smu_i2c->port = i;
		mutex_init(&smu_i2c->mutex);
		control->owner = THIS_MODULE;
		control->class = I2C_CLASS_SPD;
		control->dev.parent = &adev->pdev->dev;
		control->algo = &smu_v13_0_0_i2c_algo;
		snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
		control->quirks = &smu_v13_0_0_i2c_control_quirks;
		i2c_set_adapdata(control, smu_i2c);

		res = i2c_add_adapter(control);
		if (res) {
			DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
			goto Out_err;
		}
	}

	/* assign the buses used for the FRU EEPROM and RAS EEPROM */
	/* XXX ideally this would be something in a vbios data table */
	adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
	adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;

	return 0;
Out_err:
	/*
	 * NOTE(review): this unwind starts at the index whose
	 * i2c_add_adapter() just failed, so i2c_del_adapter() is also
	 * called once on an adapter that was never added — verify that
	 * is safe here or start the loop at i - 1.
	 */
	for ( ; i >= 0; i--) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		i2c_del_adapter(control);
	}
	return res;
}

/* Unregister all SMU i2c adapters and drop the EEPROM bus pointers. */
static void smu_v13_0_0_i2c_control_fini(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int i;

	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		i2c_del_adapter(control);
	}
	adev->pm.ras_eeprom_i2c_bus = NULL;
	adev->pm.fru_eeprom_i2c_bus = NULL;
}

/* Only the UNLOAD transition is forwarded to PMFW; others are no-ops. */
static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
				     enum pp_mp1_state mp1_state)
{
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_UNLOAD:
		ret = smu_cmn_set_mp1_state(smu, mp1_state);
		break;
	default:
		/* Ignore others */
		ret = 0;
	}

	return ret;
}

static int smu_v13_0_0_set_df_cstate(struct smu_context *smu,
				     enum pp_df_cstate state)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_DFCstateControl,
					       state,
					       NULL);
}

/*
 * Build the mode1-reset message parameter: bit 16 flags a RAS fatal
 * error reset, set only when PMFW is new enough (>= supported_version)
 * and RAS recovery is in progress; otherwise 0.
 */
static void smu_v13_0_0_set_mode1_reset_param(struct smu_context *smu,
					      uint32_t supported_version,
					      uint32_t *param)
{
	uint32_t smu_version;
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	smu_cmn_get_smc_version(smu, NULL, &smu_version);

	if ((smu_version >= supported_version) &&
	    ras && atomic_read(&ras->in_recovery))
		/* Set RAS fatal error reset flag */
		*param = 1 << 16;
	else
		*param = 0;
}

/*
 * Issue a mode1 reset, using the IP-version-specific message path, then
 * give PMFW time to complete it before returning.
 */
static int smu_v13_0_0_mode1_reset(struct smu_context *smu)
{
	int ret;
	uint32_t param;
	struct amdgpu_device *adev = smu->adev;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(13, 0, 0):
		/* SMU 13_0_0 PMFW supports RAS fatal error reset from 78.77 */
		smu_v13_0_0_set_mode1_reset_param(smu, 0x004e4d00, &param);

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_Mode1Reset, param, NULL);
		break;

	case IP_VERSION(13, 0, 10):
		/* SMU 13_0_10 PMFW supports RAS fatal error reset from 80.28 */
		smu_v13_0_0_set_mode1_reset_param(smu, 0x00501c00, &param);

		/* 13.0.10 uses the debug mailbox for mode1 reset */
		ret = smu_cmn_send_debug_smc_msg_with_param(smu,
							    DEBUGSMC_MSG_Mode1Reset, param);
		break;

	default:
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
		break;
	}

	if (!ret)
		msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);

	return ret;
}

/* Mode2 reset is only implemented for SMU 13.0.10. */
static int smu_v13_0_0_mode2_reset(struct smu_context *smu)
{
	int ret;
	struct amdgpu_device *adev = smu->adev;

	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode2Reset, NULL);
	else
		return -EOPNOTSUPP;

	return ret;
}

/* Re-enable GFX power features after reset; SMU 13.0.10 only. */
static int smu_v13_0_0_enable_gfx_features(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
		return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures,
						       FEATURE_PWR_GFX, NULL);
	else
		return -EOPNOTSUPP;
}

/*
 * Wire up the C2PMSG mailbox registers used for normal (82/66/90) and
 * debug (53/75/54) message traffic with PMFW.
 */
static void smu_v13_0_0_set_smu_mailbox_registers(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
	smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
	smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);

	smu->debug_param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_53);
	smu->debug_msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_75);
	smu->debug_resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_54);
}

static int smu_v13_0_0_smu_send_bad_mem_page_num(struct smu_context *smu,
						 uint32_t size)
{
	int ret = 0;

	/* message SMU to update the bad page number on SMUBUS */
	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetNumBadMemoryPagesRetired,
					      size, NULL);
	if (ret)
		dev_err(smu->adev->dev,
			"[%s] failed to message SMU to update bad memory pages number\n",
			__func__);

	return ret;
}

static int smu_v13_0_0_send_bad_mem_channel_flag(struct smu_context *smu,
						 uint32_t size)
{
	int ret = 0;

	/* message SMU to update the bad channel info on SMUBUS */
	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetBadMemoryPagesRetiredFlagsPerChannel,
					      size, NULL);
	if (ret)
		dev_err(smu->adev->dev,
			"[%s] failed to message SMU to update bad memory pages channel info\n",
			__func__);

	return ret;
}

2554 static int smu_v13_0_0_check_ecc_table_support(struct smu_context *smu) 2555 { 2556 struct amdgpu_device *adev = smu->adev; 2557 uint32_t if_version = 0xff, smu_version = 0xff; 2558 int ret = 0; 2559 2560 ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); 2561 if (ret) 2562 return -EOPNOTSUPP; 2563 2564 if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) && 2565 (smu_version >= SUPPORT_ECCTABLE_SMU_13_0_10_VERSION)) 2566 return ret; 2567 else 2568 return -EOPNOTSUPP; 2569 } 2570 2571 static ssize_t smu_v13_0_0_get_ecc_info(struct smu_context *smu, 2572 void *table) 2573 { 2574 struct smu_table_context *smu_table = &smu->smu_table; 2575 struct amdgpu_device *adev = smu->adev; 2576 EccInfoTable_t *ecc_table = NULL; 2577 struct ecc_info_per_ch *ecc_info_per_channel = NULL; 2578 int i, ret = 0; 2579 struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table; 2580 2581 ret = smu_v13_0_0_check_ecc_table_support(smu); 2582 if (ret) 2583 return ret; 2584 2585 ret = smu_cmn_update_table(smu, 2586 SMU_TABLE_ECCINFO, 2587 0, 2588 smu_table->ecc_table, 2589 false); 2590 if (ret) { 2591 dev_info(adev->dev, "Failed to export SMU ecc table!\n"); 2592 return ret; 2593 } 2594 2595 ecc_table = (EccInfoTable_t *)smu_table->ecc_table; 2596 2597 for (i = 0; i < ARRAY_SIZE(ecc_table->EccInfo); i++) { 2598 ecc_info_per_channel = &(eccinfo->ecc[i]); 2599 ecc_info_per_channel->ce_count_lo_chip = 2600 ecc_table->EccInfo[i].ce_count_lo_chip; 2601 ecc_info_per_channel->ce_count_hi_chip = 2602 ecc_table->EccInfo[i].ce_count_hi_chip; 2603 ecc_info_per_channel->mca_umc_status = 2604 ecc_table->EccInfo[i].mca_umc_status; 2605 ecc_info_per_channel->mca_umc_addr = 2606 ecc_table->EccInfo[i].mca_umc_addr; 2607 } 2608 2609 return ret; 2610 } 2611 2612 static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { 2613 .get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask, 2614 .set_default_dpm_table = smu_v13_0_0_set_default_dpm_table, 2615 .i2c_init = 
smu_v13_0_0_i2c_control_init, 2616 .i2c_fini = smu_v13_0_0_i2c_control_fini, 2617 .is_dpm_running = smu_v13_0_0_is_dpm_running, 2618 .dump_pptable = smu_v13_0_0_dump_pptable, 2619 .init_microcode = smu_v13_0_init_microcode, 2620 .load_microcode = smu_v13_0_load_microcode, 2621 .fini_microcode = smu_v13_0_fini_microcode, 2622 .init_smc_tables = smu_v13_0_0_init_smc_tables, 2623 .fini_smc_tables = smu_v13_0_fini_smc_tables, 2624 .init_power = smu_v13_0_init_power, 2625 .fini_power = smu_v13_0_fini_power, 2626 .check_fw_status = smu_v13_0_check_fw_status, 2627 .setup_pptable = smu_v13_0_0_setup_pptable, 2628 .check_fw_version = smu_v13_0_check_fw_version, 2629 .write_pptable = smu_cmn_write_pptable, 2630 .set_driver_table_location = smu_v13_0_set_driver_table_location, 2631 .system_features_control = smu_v13_0_0_system_features_control, 2632 .set_allowed_mask = smu_v13_0_set_allowed_mask, 2633 .get_enabled_mask = smu_cmn_get_enabled_mask, 2634 .dpm_set_vcn_enable = smu_v13_0_set_vcn_enable, 2635 .dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable, 2636 .get_dpm_ultimate_freq = smu_v13_0_0_get_dpm_ultimate_freq, 2637 .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values, 2638 .read_sensor = smu_v13_0_0_read_sensor, 2639 .feature_is_enabled = smu_cmn_feature_is_enabled, 2640 .print_clk_levels = smu_v13_0_0_print_clk_levels, 2641 .force_clk_levels = smu_v13_0_0_force_clk_levels, 2642 .update_pcie_parameters = smu_v13_0_update_pcie_parameters, 2643 .get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range, 2644 .register_irq_handler = smu_v13_0_register_irq_handler, 2645 .enable_thermal_alert = smu_v13_0_enable_thermal_alert, 2646 .disable_thermal_alert = smu_v13_0_disable_thermal_alert, 2647 .notify_memory_pool_location = smu_v13_0_notify_memory_pool_location, 2648 .get_gpu_metrics = smu_v13_0_0_get_gpu_metrics, 2649 .set_soft_freq_limited_range = smu_v13_0_set_soft_freq_limited_range, 2650 .set_default_od_settings = 
smu_v13_0_0_set_default_od_settings, 2651 .restore_user_od_settings = smu_v13_0_0_restore_user_od_settings, 2652 .od_edit_dpm_table = smu_v13_0_0_od_edit_dpm_table, 2653 .init_pptable_microcode = smu_v13_0_init_pptable_microcode, 2654 .populate_umd_state_clk = smu_v13_0_0_populate_umd_state_clk, 2655 .set_performance_level = smu_v13_0_set_performance_level, 2656 .gfx_off_control = smu_v13_0_gfx_off_control, 2657 .get_unique_id = smu_v13_0_0_get_unique_id, 2658 .get_fan_speed_pwm = smu_v13_0_0_get_fan_speed_pwm, 2659 .get_fan_speed_rpm = smu_v13_0_0_get_fan_speed_rpm, 2660 .set_fan_speed_pwm = smu_v13_0_set_fan_speed_pwm, 2661 .set_fan_speed_rpm = smu_v13_0_set_fan_speed_rpm, 2662 .get_fan_control_mode = smu_v13_0_get_fan_control_mode, 2663 .set_fan_control_mode = smu_v13_0_set_fan_control_mode, 2664 .enable_mgpu_fan_boost = smu_v13_0_0_enable_mgpu_fan_boost, 2665 .get_power_limit = smu_v13_0_0_get_power_limit, 2666 .set_power_limit = smu_v13_0_set_power_limit, 2667 .set_power_source = smu_v13_0_set_power_source, 2668 .get_power_profile_mode = smu_v13_0_0_get_power_profile_mode, 2669 .set_power_profile_mode = smu_v13_0_0_set_power_profile_mode, 2670 .run_btc = smu_v13_0_run_btc, 2671 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, 2672 .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, 2673 .set_tool_table_location = smu_v13_0_set_tool_table_location, 2674 .deep_sleep_control = smu_v13_0_deep_sleep_control, 2675 .gfx_ulv_control = smu_v13_0_gfx_ulv_control, 2676 .baco_is_support = smu_v13_0_baco_is_support, 2677 .baco_get_state = smu_v13_0_baco_get_state, 2678 .baco_set_state = smu_v13_0_baco_set_state, 2679 .baco_enter = smu_v13_0_0_baco_enter, 2680 .baco_exit = smu_v13_0_0_baco_exit, 2681 .mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported, 2682 .mode1_reset = smu_v13_0_0_mode1_reset, 2683 .mode2_reset = smu_v13_0_0_mode2_reset, 2684 .enable_gfx_features = smu_v13_0_0_enable_gfx_features, 2685 .set_mp1_state = smu_v13_0_0_set_mp1_state, 2686 
.set_df_cstate = smu_v13_0_0_set_df_cstate, 2687 .send_hbm_bad_pages_num = smu_v13_0_0_smu_send_bad_mem_page_num, 2688 .send_hbm_bad_channel_flag = smu_v13_0_0_send_bad_mem_channel_flag, 2689 .gpo_control = smu_v13_0_gpo_control, 2690 .get_ecc_info = smu_v13_0_0_get_ecc_info, 2691 .notify_display_change = smu_v13_0_notify_display_change, 2692 }; 2693 2694 void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu) 2695 { 2696 smu->ppt_funcs = &smu_v13_0_0_ppt_funcs; 2697 smu->message_map = smu_v13_0_0_message_map; 2698 smu->clock_map = smu_v13_0_0_clk_map; 2699 smu->feature_map = smu_v13_0_0_feature_mask_map; 2700 smu->table_map = smu_v13_0_0_table_map; 2701 smu->pwr_src_map = smu_v13_0_0_pwr_src_map; 2702 smu->workload_map = smu_v13_0_0_workload_map; 2703 smu->smc_driver_if_version = SMU13_0_0_DRIVER_IF_VERSION; 2704 smu_v13_0_0_set_smu_mailbox_registers(smu); 2705 } 2706