1 /* 2 * Copyright 2021 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #define SWSMU_CODE_LAYER_L2 25 26 #include <linux/firmware.h> 27 #include <linux/pci.h> 28 #include <linux/i2c.h> 29 #include "amdgpu.h" 30 #include "amdgpu_smu.h" 31 #include "atomfirmware.h" 32 #include "amdgpu_atomfirmware.h" 33 #include "amdgpu_atombios.h" 34 #include "smu_v13_0.h" 35 #include "smu13_driver_if_v13_0_0.h" 36 #include "soc15_common.h" 37 #include "atom.h" 38 #include "smu_v13_0_0_ppt.h" 39 #include "smu_v13_0_0_pptable.h" 40 #include "smu_v13_0_0_ppsmc.h" 41 #include "nbio/nbio_4_3_0_offset.h" 42 #include "nbio/nbio_4_3_0_sh_mask.h" 43 #include "mp/mp_13_0_0_offset.h" 44 #include "mp/mp_13_0_0_sh_mask.h" 45 46 #include "asic_reg/mp/mp_13_0_0_sh_mask.h" 47 #include "smu_cmn.h" 48 #include "amdgpu_ras.h" 49 50 /* 51 * DO NOT use these for err/warn/info/debug messages. 52 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 53 * They are more MGPU friendly. 
54 */ 55 #undef pr_err 56 #undef pr_warn 57 #undef pr_info 58 #undef pr_debug 59 60 #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) 61 62 #define FEATURE_MASK(feature) (1ULL << feature) 63 #define SMC_DPM_FEATURE ( \ 64 FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \ 65 FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \ 66 FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \ 67 FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ 68 FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \ 69 FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)) 70 71 #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000 72 73 static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = { 74 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), 75 MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), 76 MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1), 77 MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0), 78 MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0), 79 MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0), 80 MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0), 81 MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1), 82 MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1), 83 MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1), 84 MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1), 85 MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1), 86 MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1), 87 MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1), 88 MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), 89 MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), 90 MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), 91 MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0), 92 MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), 93 MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1), 94 MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), 95 MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0), 96 MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0), 97 MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0), 98 MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0), 99 MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1), 100 MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), 101 MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1), 102 MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0), 103 MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), 104 MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), 105 MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), 106 MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0), 107 MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0), 108 MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0), 109 MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0), 110 MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1), 111 MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0), 112 MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), 113 MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0), 114 MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0), 115 MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0), 116 MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0), 117 MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0), 118 MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), 119 MSG_MAP(NotifyPowerSource, 
PPSMC_MSG_NotifyPowerSource, 0), 120 MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0), 121 MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0), 122 }; 123 124 static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = { 125 CLK_MAP(GFXCLK, PPCLK_GFXCLK), 126 CLK_MAP(SCLK, PPCLK_GFXCLK), 127 CLK_MAP(SOCCLK, PPCLK_SOCCLK), 128 CLK_MAP(FCLK, PPCLK_FCLK), 129 CLK_MAP(UCLK, PPCLK_UCLK), 130 CLK_MAP(MCLK, PPCLK_UCLK), 131 CLK_MAP(VCLK, PPCLK_VCLK_0), 132 CLK_MAP(VCLK1, PPCLK_VCLK_1), 133 CLK_MAP(DCLK, PPCLK_DCLK_0), 134 CLK_MAP(DCLK1, PPCLK_DCLK_1), 135 }; 136 137 static struct cmn2asic_mapping smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] = { 138 FEA_MAP(FW_DATA_READ), 139 FEA_MAP(DPM_GFXCLK), 140 FEA_MAP(DPM_GFX_POWER_OPTIMIZER), 141 FEA_MAP(DPM_UCLK), 142 FEA_MAP(DPM_FCLK), 143 FEA_MAP(DPM_SOCCLK), 144 FEA_MAP(DPM_MP0CLK), 145 FEA_MAP(DPM_LINK), 146 FEA_MAP(DPM_DCN), 147 FEA_MAP(VMEMP_SCALING), 148 FEA_MAP(VDDIO_MEM_SCALING), 149 FEA_MAP(DS_GFXCLK), 150 FEA_MAP(DS_SOCCLK), 151 FEA_MAP(DS_FCLK), 152 FEA_MAP(DS_LCLK), 153 FEA_MAP(DS_DCFCLK), 154 FEA_MAP(DS_UCLK), 155 FEA_MAP(GFX_ULV), 156 FEA_MAP(FW_DSTATE), 157 FEA_MAP(GFXOFF), 158 FEA_MAP(BACO), 159 FEA_MAP(MM_DPM), 160 FEA_MAP(SOC_MPCLK_DS), 161 FEA_MAP(BACO_MPCLK_DS), 162 FEA_MAP(THROTTLERS), 163 FEA_MAP(SMARTSHIFT), 164 FEA_MAP(GTHR), 165 FEA_MAP(ACDC), 166 FEA_MAP(VR0HOT), 167 FEA_MAP(FW_CTF), 168 FEA_MAP(FAN_CONTROL), 169 FEA_MAP(GFX_DCS), 170 FEA_MAP(GFX_READ_MARGIN), 171 FEA_MAP(LED_DISPLAY), 172 FEA_MAP(GFXCLK_SPREAD_SPECTRUM), 173 FEA_MAP(OUT_OF_BAND_MONITOR), 174 FEA_MAP(OPTIMIZED_VMIN), 175 FEA_MAP(GFX_IMU), 176 FEA_MAP(BOOT_TIME_CAL), 177 FEA_MAP(GFX_PCC_DFLL), 178 FEA_MAP(SOC_CG), 179 FEA_MAP(DF_CSTATE), 180 FEA_MAP(GFX_EDC), 181 FEA_MAP(BOOT_POWER_OPT), 182 FEA_MAP(CLOCK_POWER_DOWN_BYPASS), 183 FEA_MAP(DS_VCN), 184 FEA_MAP(BACO_CG), 185 FEA_MAP(MEM_TEMP_READ), 186 FEA_MAP(ATHUB_MMHUB_PG), 187 FEA_MAP(SOC_PCC), 188 }; 189 190 static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = { 191 TAB_MAP(PPTABLE), 192 TAB_MAP(WATERMARKS), 193 TAB_MAP(AVFS_PSM_DEBUG), 194 TAB_MAP(PMSTATUSLOG), 195 TAB_MAP(SMU_METRICS), 196 TAB_MAP(DRIVER_SMU_CONFIG), 197 TAB_MAP(ACTIVITY_MONITOR_COEFF), 198 [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE}, 199 TAB_MAP(I2C_COMMANDS), 200 }; 201 202 static struct cmn2asic_mapping smu_v13_0_0_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { 203 PWR_MAP(AC), 204 PWR_MAP(DC), 205 }; 206 207 static struct cmn2asic_mapping smu_v13_0_0_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { 208 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT), 209 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), 210 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT), 211 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), 212 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), 213 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), 214 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), 215 }; 216 217 static const uint8_t smu_v13_0_0_throttler_map[] = { 218 [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT), 219 [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT), 220 [THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT), 221 [THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT), 222 [THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT), 223 [THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT), 224 [THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT), 225 
[THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT), 226 [THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT), 227 [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT), 228 [THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT), 229 [THROTTLER_TEMP_VR_MEM0_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT), 230 [THROTTLER_TEMP_VR_MEM1_BIT] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT), 231 [THROTTLER_TEMP_LIQUID0_BIT] = (SMU_THROTTLER_TEMP_LIQUID0_BIT), 232 [THROTTLER_TEMP_LIQUID1_BIT] = (SMU_THROTTLER_TEMP_LIQUID1_BIT), 233 [THROTTLER_GFX_APCC_PLUS_BIT] = (SMU_THROTTLER_APCC_BIT), 234 [THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT), 235 }; 236 237 static int 238 smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu, 239 uint32_t *feature_mask, uint32_t num) 240 { 241 struct amdgpu_device *adev = smu->adev; 242 243 if (num > 2) 244 return -EINVAL; 245 246 memset(feature_mask, 0, sizeof(uint32_t) * num); 247 248 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DATA_READ_BIT); 249 250 if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) { 251 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT); 252 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT); 253 } 254 255 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MM_DPM_BIT); 256 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_VCN_BIT); 257 258 if ((adev->pg_flags & AMD_PG_SUPPORT_ATHUB) && 259 (adev->pg_flags & AMD_PG_SUPPORT_MMHUB)) 260 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT); 261 262 if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK) 263 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT); 264 265 #if 0 266 if (adev->pm.pp_feature & PP_GFXOFF_MASK) 267 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT); 268 #endif 269 270 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_THROTTLERS_BIT); 271 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FAN_CONTROL_BIT); 272 273 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DF_CSTATE_BIT); 274 275 if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) { 276 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT); 277 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT); 278 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT); 279 } 280 281 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MEM_TEMP_READ_BIT); 282 283 if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK) 284 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT); 285 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_SOCCLK_BIT); 286 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_MPCLK_DS_BIT); 287 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_MPCLK_DS_BIT); 288 289 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_FCLK_BIT); 290 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCN_BIT); 291 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_DCFCLK_BIT); 292 293 if (adev->pm.pp_feature & PP_PCIE_DPM_MASK) { 294 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT); 295 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT); 296 } 297 298 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_BIT); 299 300 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT); 301 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DSTATE_BIT); 302 303 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT); 304 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT); 305 306 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_FCLK_BIT); 
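	/*
	 * Note: the caller passes feature_mask as an array of (at most two)
	 * uint32_t dwords, matching the SetAllowedFeaturesMaskLow/High split
	 * used by the PMFW interface. The casts above and below treat that
	 * pair as a single 64-bit bitmap so the FEATURE_*_BIT flags can be
	 * OR-ed in directly.
	 */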

	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_CTF_BIT);

	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_UCLK_BIT);

	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VR0HOT_BIT);

	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT);

	return 0;
}

static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_13_0_0_powerplay_table *powerplay_table =
		table_context->power_play_table;
	struct smu_baco_context *smu_baco = &smu->smu_baco;

	if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC)
		smu->dc_controlled_by_gpio = true;

	if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO ||
	    powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
		smu_baco->platform_support = true;

	if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
		smu_baco->maco_support = true;

	table_context->thermal_controller_type =
		powerplay_table->thermal_controller_type;

	/*
	 * Instead of having its own buffer space and getting a copy of the
	 * overdrive_table, smu->od_settings just points to the actual
	 * overdrive_table.
	 */
	smu->od_settings = &powerplay_table->overdrive_table;

	return 0;
}

static int smu_v13_0_0_store_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_13_0_0_powerplay_table *powerplay_table =
		table_context->power_play_table;

	memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
	       sizeof(PPTable_t));

	return 0;
}

#ifndef atom_smc_dpm_info_table_13_0_0
struct atom_smc_dpm_info_table_13_0_0 {
	struct atom_common_table_header table_header;
	BoardTable_t BoardTable;
};
#endif

static int smu_v13_0_0_append_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	struct atom_smc_dpm_info_table_13_0_0 *smc_dpm_table;
	BoardTable_t *BoardTable = &smc_pptable->BoardTable;
	int index, ret;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smc_dpm_info);

	ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
					     (uint8_t **)&smc_dpm_table);
	if (ret)
		return ret;

	memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t));

	return 0;
}

static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	void *combo_pptable = smu_table->combo_pptable;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	/*
	 * With SCPM enabled, the pptable used will be signed. It cannot
	 * be used directly by the driver. To get the raw pptable, we need to
	 * rely on the combo pptable (and its relevant SMU message).
	 */
	if (adev->scpm_enabled) {
		ret = smu_cmn_get_combo_pptable(smu);
		if (ret)
			return ret;

		smu->smu_table.power_play_table = combo_pptable;
		smu->smu_table.power_play_table_size = sizeof(struct smu_13_0_0_powerplay_table);
	} else {
		ret = smu_v13_0_setup_pptable(smu);
		if (ret)
			return ret;
	}

	ret = smu_v13_0_0_store_powerplay_table(smu);
	if (ret)
		return ret;

	/*
	 * With SCPM enabled, the operation below will be handled
	 * by PSP. Driver involvement is unnecessary and useless.
	 */
	if (!adev->scpm_enabled) {
		ret = smu_v13_0_0_append_powerplay_table(smu);
		if (ret)
			return ret;
	}

	ret = smu_v13_0_0_check_powerplay_table(smu);
	if (ret)
		return ret;

	return ret;
}

static int smu_v13_0_0_tables_init(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
		       sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
		       AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
	if (!smu_table->metrics_table)
		goto err0_out;
	smu_table->metrics_time = 0;

	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table)
		goto err1_out;

	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
	if (!smu_table->watermarks_table)
		goto err2_out;

	return 0;

err2_out:
	kfree(smu_table->gpu_metrics_table);
err1_out:
	kfree(smu_table->metrics_table);
err0_out:
	return -ENOMEM;
}

static int smu_v13_0_0_allocate_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
				       GFP_KERNEL);
	if (!smu_dpm->dpm_context)
		return -ENOMEM;

	smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);

	return 0;
}

static int smu_v13_0_0_init_smc_tables(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v13_0_0_tables_init(smu);
	if (ret)
		return ret;

	ret = smu_v13_0_0_allocate_dpm_context(smu);
	if (ret)
		return ret;

	return smu_v13_0_init_smc_tables(smu);
}

static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t
*pptable = table_context->driver_pptable; 515 SkuTable_t *skutable = &pptable->SkuTable; 516 struct smu_13_0_dpm_table *dpm_table; 517 struct smu_13_0_pcie_table *pcie_table; 518 uint32_t link_level; 519 int ret = 0; 520 521 /* socclk dpm table setup */ 522 dpm_table = &dpm_context->dpm_tables.soc_table; 523 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { 524 ret = smu_v13_0_set_single_dpm_table(smu, 525 SMU_SOCCLK, 526 dpm_table); 527 if (ret) 528 return ret; 529 } else { 530 dpm_table->count = 1; 531 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100; 532 dpm_table->dpm_levels[0].enabled = true; 533 dpm_table->min = dpm_table->dpm_levels[0].value; 534 dpm_table->max = dpm_table->dpm_levels[0].value; 535 } 536 537 /* gfxclk dpm table setup */ 538 dpm_table = &dpm_context->dpm_tables.gfx_table; 539 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) { 540 ret = smu_v13_0_set_single_dpm_table(smu, 541 SMU_GFXCLK, 542 dpm_table); 543 if (ret) 544 return ret; 545 } else { 546 dpm_table->count = 1; 547 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100; 548 dpm_table->dpm_levels[0].enabled = true; 549 dpm_table->min = dpm_table->dpm_levels[0].value; 550 dpm_table->max = dpm_table->dpm_levels[0].value; 551 } 552 553 /* uclk dpm table setup */ 554 dpm_table = &dpm_context->dpm_tables.uclk_table; 555 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { 556 ret = smu_v13_0_set_single_dpm_table(smu, 557 SMU_UCLK, 558 dpm_table); 559 if (ret) 560 return ret; 561 } else { 562 dpm_table->count = 1; 563 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100; 564 dpm_table->dpm_levels[0].enabled = true; 565 dpm_table->min = dpm_table->dpm_levels[0].value; 566 dpm_table->max = dpm_table->dpm_levels[0].value; 567 } 568 569 /* fclk dpm table setup */ 570 dpm_table = &dpm_context->dpm_tables.fclk_table; 571 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) { 572 ret = smu_v13_0_set_single_dpm_table(smu, 573 SMU_FCLK, 574 dpm_table); 575 if (ret) 576 return ret; 577 } else { 578 dpm_table->count = 1; 579 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100; 580 dpm_table->dpm_levels[0].enabled = true; 581 dpm_table->min = dpm_table->dpm_levels[0].value; 582 dpm_table->max = dpm_table->dpm_levels[0].value; 583 } 584 585 /* vclk dpm table setup */ 586 dpm_table = &dpm_context->dpm_tables.vclk_table; 587 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) { 588 ret = smu_v13_0_set_single_dpm_table(smu, 589 SMU_VCLK, 590 dpm_table); 591 if (ret) 592 return ret; 593 } else { 594 dpm_table->count = 1; 595 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100; 596 dpm_table->dpm_levels[0].enabled = true; 597 dpm_table->min = dpm_table->dpm_levels[0].value; 598 dpm_table->max = dpm_table->dpm_levels[0].value; 599 } 600 601 /* dclk dpm table setup */ 602 dpm_table = &dpm_context->dpm_tables.dclk_table; 603 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) { 604 ret = smu_v13_0_set_single_dpm_table(smu, 605 SMU_DCLK, 606 dpm_table); 607 if (ret) 608 return ret; 609 } else { 610 dpm_table->count = 1; 611 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100; 612 dpm_table->dpm_levels[0].enabled = true; 613 dpm_table->min = dpm_table->dpm_levels[0].value; 614 dpm_table->max = dpm_table->dpm_levels[0].value; 615 } 616 617 /* lclk dpm table setup */ 618 pcie_table = &dpm_context->dpm_tables.pcie_table; 619 
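	/*
	 * The SKU table always carries NUM_LINK_LEVELS entries; the loop
	 * below copies only the populated ones (a level with zero gen speed,
	 * lane count and LclkFreq is treated as unused and skipped), so
	 * num_of_link_levels ends up as the count of valid link levels.
	 */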
pcie_table->num_of_link_levels = 0; 620 for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { 621 if (!skutable->PcieGenSpeed[link_level] && 622 !skutable->PcieLaneCount[link_level] && 623 !skutable->LclkFreq[link_level]) 624 continue; 625 626 pcie_table->pcie_gen[pcie_table->num_of_link_levels] = 627 skutable->PcieGenSpeed[link_level]; 628 pcie_table->pcie_lane[pcie_table->num_of_link_levels] = 629 skutable->PcieLaneCount[link_level]; 630 pcie_table->clk_freq[pcie_table->num_of_link_levels] = 631 skutable->LclkFreq[link_level]; 632 pcie_table->num_of_link_levels++; 633 } 634 635 return 0; 636 } 637 638 static bool smu_v13_0_0_is_dpm_running(struct smu_context *smu) 639 { 640 int ret = 0; 641 uint64_t feature_enabled; 642 643 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 644 if (ret) 645 return false; 646 647 return !!(feature_enabled & SMC_DPM_FEATURE); 648 } 649 650 static void smu_v13_0_0_dump_pptable(struct smu_context *smu) 651 { 652 struct smu_table_context *table_context = &smu->smu_table; 653 PPTable_t *pptable = table_context->driver_pptable; 654 SkuTable_t *skutable = &pptable->SkuTable; 655 656 dev_info(smu->adev->dev, "Dumped PPTable:\n"); 657 658 dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version); 659 dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]); 660 dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]); 661 } 662 663 static int smu_v13_0_0_system_features_control(struct smu_context *smu, 664 bool en) 665 { 666 return smu_v13_0_system_features_control(smu, en); 667 } 668 669 static uint32_t smu_v13_0_get_throttler_status(SmuMetrics_t *metrics) 670 { 671 uint32_t throttler_status = 0; 672 int i; 673 674 for (i = 0; i < THROTTLER_COUNT; i++) 675 throttler_status |= 676 (metrics->ThrottlingPercentage[i] ? 
1U << i : 0); 677 678 return throttler_status; 679 } 680 681 #define SMU_13_0_0_BUSY_THRESHOLD 15 682 static int smu_v13_0_0_get_smu_metrics_data(struct smu_context *smu, 683 MetricsMember_t member, 684 uint32_t *value) 685 { 686 struct smu_table_context *smu_table = &smu->smu_table; 687 SmuMetrics_t *metrics = 688 &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics); 689 int ret = 0; 690 691 ret = smu_cmn_get_metrics_table(smu, 692 NULL, 693 false); 694 if (ret) 695 return ret; 696 697 switch (member) { 698 case METRICS_CURR_GFXCLK: 699 *value = metrics->CurrClock[PPCLK_GFXCLK]; 700 break; 701 case METRICS_CURR_SOCCLK: 702 *value = metrics->CurrClock[PPCLK_SOCCLK]; 703 break; 704 case METRICS_CURR_UCLK: 705 *value = metrics->CurrClock[PPCLK_UCLK]; 706 break; 707 case METRICS_CURR_VCLK: 708 *value = metrics->CurrClock[PPCLK_VCLK_0]; 709 break; 710 case METRICS_CURR_VCLK1: 711 *value = metrics->CurrClock[PPCLK_VCLK_1]; 712 break; 713 case METRICS_CURR_DCLK: 714 *value = metrics->CurrClock[PPCLK_DCLK_0]; 715 break; 716 case METRICS_CURR_DCLK1: 717 *value = metrics->CurrClock[PPCLK_DCLK_1]; 718 break; 719 case METRICS_CURR_FCLK: 720 *value = metrics->CurrClock[PPCLK_FCLK]; 721 break; 722 case METRICS_AVERAGE_GFXCLK: 723 if (metrics->AverageGfxActivity <= SMU_13_0_0_BUSY_THRESHOLD) 724 *value = metrics->AverageGfxclkFrequencyPostDs; 725 else 726 *value = metrics->AverageGfxclkFrequencyPreDs; 727 break; 728 case METRICS_AVERAGE_FCLK: 729 if (metrics->AverageUclkActivity <= SMU_13_0_0_BUSY_THRESHOLD) 730 *value = metrics->AverageFclkFrequencyPostDs; 731 else 732 *value = metrics->AverageFclkFrequencyPreDs; 733 break; 734 case METRICS_AVERAGE_UCLK: 735 if (metrics->AverageUclkActivity <= SMU_13_0_0_BUSY_THRESHOLD) 736 *value = metrics->AverageMemclkFrequencyPostDs; 737 else 738 *value = metrics->AverageMemclkFrequencyPreDs; 739 break; 740 case METRICS_AVERAGE_VCLK: 741 *value = metrics->AverageVclk0Frequency; 742 break; 743 case METRICS_AVERAGE_DCLK: 744 *value = metrics->AverageDclk0Frequency; 745 break; 746 case METRICS_AVERAGE_VCLK1: 747 *value = metrics->AverageVclk1Frequency; 748 break; 749 case METRICS_AVERAGE_DCLK1: 750 *value = metrics->AverageDclk1Frequency; 751 break; 752 case METRICS_AVERAGE_GFXACTIVITY: 753 *value = metrics->AverageGfxActivity; 754 break; 755 case METRICS_AVERAGE_MEMACTIVITY: 756 *value = metrics->AverageUclkActivity; 757 break; 758 case METRICS_AVERAGE_SOCKETPOWER: 759 *value = metrics->AverageSocketPower << 8; 760 break; 761 case METRICS_TEMPERATURE_EDGE: 762 *value = metrics->AvgTemperature[TEMP_EDGE] * 763 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 764 break; 765 case METRICS_TEMPERATURE_HOTSPOT: 766 *value = metrics->AvgTemperature[TEMP_HOTSPOT] * 767 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 768 break; 769 case METRICS_TEMPERATURE_MEM: 770 *value = metrics->AvgTemperature[TEMP_MEM] * 771 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 772 break; 773 case METRICS_TEMPERATURE_VRGFX: 774 *value = metrics->AvgTemperature[TEMP_VR_GFX] * 775 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 776 break; 777 case METRICS_TEMPERATURE_VRSOC: 778 *value = metrics->AvgTemperature[TEMP_VR_SOC] * 779 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 780 break; 781 case METRICS_THROTTLER_STATUS: 782 *value = smu_v13_0_get_throttler_status(metrics); 783 break; 784 case METRICS_CURR_FANSPEED: 785 *value = metrics->AvgFanRpm; 786 break; 787 case METRICS_CURR_FANPWM: 788 *value = metrics->AvgFanPwm; 789 break; 790 case METRICS_VOLTAGE_VDDGFX: 791 *value = metrics->AvgVoltage[SVI_PLANE_GFX]; 792 break; 
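	/*
	 * The PCIe metrics below are raw PMFW encodings: PcieRate is one
	 * above the pcie_gen[] index used elsewhere (see the
	 * "(gen_speed - 1) == pcie_table->pcie_gen[i]" check in
	 * smu_v13_0_0_print_clk_levels()), while PcieWidth is an actual
	 * lane count matched through the link_width[] lookup table there.
	 */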
793 case METRICS_PCIE_RATE: 794 *value = metrics->PcieRate; 795 break; 796 case METRICS_PCIE_WIDTH: 797 *value = metrics->PcieWidth; 798 break; 799 default: 800 *value = UINT_MAX; 801 break; 802 } 803 804 return ret; 805 } 806 807 static int smu_v13_0_0_read_sensor(struct smu_context *smu, 808 enum amd_pp_sensors sensor, 809 void *data, 810 uint32_t *size) 811 { 812 struct smu_table_context *table_context = &smu->smu_table; 813 PPTable_t *smc_pptable = table_context->driver_pptable; 814 int ret = 0; 815 816 switch (sensor) { 817 case AMDGPU_PP_SENSOR_MAX_FAN_RPM: 818 *(uint16_t *)data = smc_pptable->SkuTable.FanMaximumRpm; 819 *size = 4; 820 break; 821 case AMDGPU_PP_SENSOR_MEM_LOAD: 822 ret = smu_v13_0_0_get_smu_metrics_data(smu, 823 METRICS_AVERAGE_MEMACTIVITY, 824 (uint32_t *)data); 825 *size = 4; 826 break; 827 case AMDGPU_PP_SENSOR_GPU_LOAD: 828 ret = smu_v13_0_0_get_smu_metrics_data(smu, 829 METRICS_AVERAGE_GFXACTIVITY, 830 (uint32_t *)data); 831 *size = 4; 832 break; 833 case AMDGPU_PP_SENSOR_GPU_POWER: 834 ret = smu_v13_0_0_get_smu_metrics_data(smu, 835 METRICS_AVERAGE_SOCKETPOWER, 836 (uint32_t *)data); 837 *size = 4; 838 break; 839 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 840 ret = smu_v13_0_0_get_smu_metrics_data(smu, 841 METRICS_TEMPERATURE_HOTSPOT, 842 (uint32_t *)data); 843 *size = 4; 844 break; 845 case AMDGPU_PP_SENSOR_EDGE_TEMP: 846 ret = smu_v13_0_0_get_smu_metrics_data(smu, 847 METRICS_TEMPERATURE_EDGE, 848 (uint32_t *)data); 849 *size = 4; 850 break; 851 case AMDGPU_PP_SENSOR_MEM_TEMP: 852 ret = smu_v13_0_0_get_smu_metrics_data(smu, 853 METRICS_TEMPERATURE_MEM, 854 (uint32_t *)data); 855 *size = 4; 856 break; 857 case AMDGPU_PP_SENSOR_GFX_MCLK: 858 ret = smu_v13_0_0_get_smu_metrics_data(smu, 859 METRICS_CURR_UCLK, 860 (uint32_t *)data); 861 *(uint32_t *)data *= 100; 862 *size = 4; 863 break; 864 case AMDGPU_PP_SENSOR_GFX_SCLK: 865 ret = smu_v13_0_0_get_smu_metrics_data(smu, 866 METRICS_AVERAGE_GFXCLK, 867 (uint32_t *)data); 868 *(uint32_t *)data *= 100; 869 *size = 4; 870 break; 871 case AMDGPU_PP_SENSOR_VDDGFX: 872 ret = smu_v13_0_0_get_smu_metrics_data(smu, 873 METRICS_VOLTAGE_VDDGFX, 874 (uint32_t *)data); 875 *size = 4; 876 break; 877 default: 878 ret = -EOPNOTSUPP; 879 break; 880 } 881 882 return ret; 883 } 884 885 static int smu_v13_0_0_get_current_clk_freq_by_table(struct smu_context *smu, 886 enum smu_clk_type clk_type, 887 uint32_t *value) 888 { 889 MetricsMember_t member_type; 890 int clk_id = 0; 891 892 clk_id = smu_cmn_to_asic_specific_index(smu, 893 CMN2ASIC_MAPPING_CLK, 894 clk_type); 895 if (clk_id < 0) 896 return -EINVAL; 897 898 switch (clk_id) { 899 case PPCLK_GFXCLK: 900 member_type = METRICS_AVERAGE_GFXCLK; 901 break; 902 case PPCLK_UCLK: 903 member_type = METRICS_CURR_UCLK; 904 break; 905 case PPCLK_FCLK: 906 member_type = METRICS_CURR_FCLK; 907 break; 908 case PPCLK_SOCCLK: 909 member_type = METRICS_CURR_SOCCLK; 910 break; 911 case PPCLK_VCLK_0: 912 member_type = METRICS_AVERAGE_VCLK; 913 break; 914 case PPCLK_DCLK_0: 915 member_type = METRICS_AVERAGE_DCLK; 916 break; 917 case PPCLK_VCLK_1: 918 member_type = METRICS_AVERAGE_VCLK1; 919 break; 920 case PPCLK_DCLK_1: 921 member_type = METRICS_AVERAGE_DCLK1; 922 break; 923 default: 924 return -EINVAL; 925 } 926 927 return smu_v13_0_0_get_smu_metrics_data(smu, 928 member_type, 929 value); 930 } 931 932 static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, 933 enum smu_clk_type clk_type, 934 char *buf) 935 { 936 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 937 struct smu_13_0_dpm_context 
*dpm_context = smu_dpm->dpm_context; 938 struct smu_13_0_dpm_table *single_dpm_table; 939 struct smu_13_0_pcie_table *pcie_table; 940 const int link_width[] = {0, 1, 2, 4, 8, 12, 16}; 941 uint32_t gen_speed, lane_width; 942 int i, curr_freq, size = 0; 943 int ret = 0; 944 945 smu_cmn_get_sysfs_buf(&buf, &size); 946 947 if (amdgpu_ras_intr_triggered()) { 948 size += sysfs_emit_at(buf, size, "unavailable\n"); 949 return size; 950 } 951 952 switch (clk_type) { 953 case SMU_SCLK: 954 single_dpm_table = &(dpm_context->dpm_tables.gfx_table); 955 break; 956 case SMU_MCLK: 957 single_dpm_table = &(dpm_context->dpm_tables.uclk_table); 958 break; 959 case SMU_SOCCLK: 960 single_dpm_table = &(dpm_context->dpm_tables.soc_table); 961 break; 962 case SMU_FCLK: 963 single_dpm_table = &(dpm_context->dpm_tables.fclk_table); 964 break; 965 case SMU_VCLK: 966 case SMU_VCLK1: 967 single_dpm_table = &(dpm_context->dpm_tables.vclk_table); 968 break; 969 case SMU_DCLK: 970 case SMU_DCLK1: 971 single_dpm_table = &(dpm_context->dpm_tables.dclk_table); 972 break; 973 default: 974 break; 975 } 976 977 switch (clk_type) { 978 case SMU_SCLK: 979 case SMU_MCLK: 980 case SMU_SOCCLK: 981 case SMU_FCLK: 982 case SMU_VCLK: 983 case SMU_VCLK1: 984 case SMU_DCLK: 985 case SMU_DCLK1: 986 ret = smu_v13_0_0_get_current_clk_freq_by_table(smu, clk_type, &curr_freq); 987 if (ret) { 988 dev_err(smu->adev->dev, "Failed to get current clock freq!"); 989 return ret; 990 } 991 992 if (single_dpm_table->is_fine_grained) { 993 /* 994 * For fine grained dpms, there are only two dpm levels: 995 * - level 0 -> min clock freq 996 * - level 1 -> max clock freq 997 * And the current clock frequency can be any value between them. 998 * So, if the current clock frequency is not at level 0 or level 1, 999 * we will fake it as three dpm levels: 1000 * - level 0 -> min clock freq 1001 * - level 1 -> current actual clock freq 1002 * - level 2 -> max clock freq 1003 */ 1004 if ((single_dpm_table->dpm_levels[0].value != curr_freq) && 1005 (single_dpm_table->dpm_levels[1].value != curr_freq)) { 1006 size += sysfs_emit_at(buf, size, "0: %uMhz\n", 1007 single_dpm_table->dpm_levels[0].value); 1008 size += sysfs_emit_at(buf, size, "1: %uMhz *\n", 1009 curr_freq); 1010 size += sysfs_emit_at(buf, size, "2: %uMhz\n", 1011 single_dpm_table->dpm_levels[1].value); 1012 } else { 1013 size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", 1014 single_dpm_table->dpm_levels[0].value, 1015 single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : ""); 1016 size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", 1017 single_dpm_table->dpm_levels[1].value, 1018 single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : ""); 1019 } 1020 } else { 1021 for (i = 0; i < single_dpm_table->count; i++) 1022 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", 1023 i, single_dpm_table->dpm_levels[i].value, 1024 single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : ""); 1025 } 1026 break; 1027 case SMU_PCIE: 1028 ret = smu_v13_0_0_get_smu_metrics_data(smu, 1029 METRICS_PCIE_RATE, 1030 &gen_speed); 1031 if (ret) 1032 return ret; 1033 1034 ret = smu_v13_0_0_get_smu_metrics_data(smu, 1035 METRICS_PCIE_WIDTH, 1036 &lane_width); 1037 if (ret) 1038 return ret; 1039 1040 pcie_table = &(dpm_context->dpm_tables.pcie_table); 1041 for (i = 0; i < pcie_table->num_of_link_levels; i++) 1042 size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i, 1043 (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," : 1044 (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," : 1045 (pcie_table->pcie_gen[i] == 2) ? 
"8.0GT/s," : 1046 (pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," : "", 1047 (pcie_table->pcie_lane[i] == 1) ? "x1" : 1048 (pcie_table->pcie_lane[i] == 2) ? "x2" : 1049 (pcie_table->pcie_lane[i] == 3) ? "x4" : 1050 (pcie_table->pcie_lane[i] == 4) ? "x8" : 1051 (pcie_table->pcie_lane[i] == 5) ? "x12" : 1052 (pcie_table->pcie_lane[i] == 6) ? "x16" : "", 1053 pcie_table->clk_freq[i], 1054 ((gen_speed - 1) == pcie_table->pcie_gen[i]) && 1055 (lane_width == link_width[pcie_table->pcie_lane[i]]) ? 1056 "*" : ""); 1057 break; 1058 1059 default: 1060 break; 1061 } 1062 1063 return size; 1064 } 1065 1066 static int smu_v13_0_0_force_clk_levels(struct smu_context *smu, 1067 enum smu_clk_type clk_type, 1068 uint32_t mask) 1069 { 1070 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 1071 struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; 1072 struct smu_13_0_dpm_table *single_dpm_table; 1073 uint32_t soft_min_level, soft_max_level; 1074 uint32_t min_freq, max_freq; 1075 int ret = 0; 1076 1077 soft_min_level = mask ? (ffs(mask) - 1) : 0; 1078 soft_max_level = mask ? (fls(mask) - 1) : 0; 1079 1080 switch (clk_type) { 1081 case SMU_GFXCLK: 1082 case SMU_SCLK: 1083 single_dpm_table = &(dpm_context->dpm_tables.gfx_table); 1084 break; 1085 case SMU_MCLK: 1086 case SMU_UCLK: 1087 single_dpm_table = &(dpm_context->dpm_tables.uclk_table); 1088 break; 1089 case SMU_SOCCLK: 1090 single_dpm_table = &(dpm_context->dpm_tables.soc_table); 1091 break; 1092 case SMU_FCLK: 1093 single_dpm_table = &(dpm_context->dpm_tables.fclk_table); 1094 break; 1095 case SMU_VCLK: 1096 case SMU_VCLK1: 1097 single_dpm_table = &(dpm_context->dpm_tables.vclk_table); 1098 break; 1099 case SMU_DCLK: 1100 case SMU_DCLK1: 1101 single_dpm_table = &(dpm_context->dpm_tables.dclk_table); 1102 break; 1103 default: 1104 break; 1105 } 1106 1107 switch (clk_type) { 1108 case SMU_GFXCLK: 1109 case SMU_SCLK: 1110 case SMU_MCLK: 1111 case SMU_UCLK: 1112 case SMU_SOCCLK: 1113 case SMU_FCLK: 1114 case SMU_VCLK: 1115 case SMU_VCLK1: 1116 case SMU_DCLK: 1117 case SMU_DCLK1: 1118 if (single_dpm_table->is_fine_grained) { 1119 /* There is only 2 levels for fine grained DPM */ 1120 soft_max_level = (soft_max_level >= 1 ? 1 : 0); 1121 soft_min_level = (soft_min_level >= 1 ? 
1 : 0); 1122 } else { 1123 if ((soft_max_level >= single_dpm_table->count) || 1124 (soft_min_level >= single_dpm_table->count)) 1125 return -EINVAL; 1126 } 1127 1128 min_freq = single_dpm_table->dpm_levels[soft_min_level].value; 1129 max_freq = single_dpm_table->dpm_levels[soft_max_level].value; 1130 1131 ret = smu_v13_0_set_soft_freq_limited_range(smu, 1132 clk_type, 1133 min_freq, 1134 max_freq); 1135 break; 1136 case SMU_DCEFCLK: 1137 case SMU_PCIE: 1138 default: 1139 break; 1140 } 1141 1142 return ret; 1143 } 1144 1145 static int smu_v13_0_0_update_pcie_parameters(struct smu_context *smu, 1146 uint32_t pcie_gen_cap, 1147 uint32_t pcie_width_cap) 1148 { 1149 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; 1150 struct smu_13_0_pcie_table *pcie_table = 1151 &dpm_context->dpm_tables.pcie_table; 1152 uint32_t smu_pcie_arg; 1153 int ret, i; 1154 1155 for (i = 0; i < pcie_table->num_of_link_levels; i++) { 1156 if (pcie_table->pcie_gen[i] > pcie_gen_cap) 1157 pcie_table->pcie_gen[i] = pcie_gen_cap; 1158 if (pcie_table->pcie_lane[i] > pcie_width_cap) 1159 pcie_table->pcie_lane[i] = pcie_width_cap; 1160 1161 smu_pcie_arg = i << 16; 1162 smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; 1163 smu_pcie_arg |= pcie_table->pcie_lane[i]; 1164 1165 ret = smu_cmn_send_smc_msg_with_param(smu, 1166 SMU_MSG_OverridePcieParameters, 1167 smu_pcie_arg, 1168 NULL); 1169 if (ret) 1170 return ret; 1171 } 1172 1173 return 0; 1174 } 1175 1176 static const struct smu_temperature_range smu13_thermal_policy[] = { 1177 {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, 1178 { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, 1179 }; 1180 1181 static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu, 1182 struct smu_temperature_range *range) 1183 { 1184 struct smu_table_context *table_context = &smu->smu_table; 1185 struct smu_13_0_0_powerplay_table *powerplay_table = 1186 table_context->power_play_table; 1187 PPTable_t *pptable = smu->smu_table.driver_pptable; 1188 1189 if (!range) 1190 return -EINVAL; 1191 1192 memcpy(range, &smu13_thermal_policy[0], sizeof(struct smu_temperature_range)); 1193 1194 range->max = pptable->SkuTable.TemperatureLimit[TEMP_EDGE] * 1195 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1196 range->edge_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE) * 1197 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1198 range->hotspot_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] * 1199 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1200 range->hotspot_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT) * 1201 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1202 range->mem_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_MEM] * 1203 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1204 range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)* 1205 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1206 range->software_shutdown_temp = powerplay_table->software_shutdown_temp; 1207 1208 return 0; 1209 } 1210 1211 #define MAX(a, b) ((a) > (b) ? 
(a) : (b)) 1212 static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu, 1213 void **table) 1214 { 1215 struct smu_table_context *smu_table = &smu->smu_table; 1216 struct gpu_metrics_v1_3 *gpu_metrics = 1217 (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table; 1218 SmuMetricsExternal_t metrics_ext; 1219 SmuMetrics_t *metrics = &metrics_ext.SmuMetrics; 1220 int ret = 0; 1221 1222 ret = smu_cmn_get_metrics_table(smu, 1223 &metrics_ext, 1224 true); 1225 if (ret) 1226 return ret; 1227 1228 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3); 1229 1230 gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE]; 1231 gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT]; 1232 gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM]; 1233 gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX]; 1234 gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC]; 1235 gpu_metrics->temperature_vrmem = MAX(metrics->AvgTemperature[TEMP_VR_MEM0], 1236 metrics->AvgTemperature[TEMP_VR_MEM1]); 1237 1238 gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity; 1239 gpu_metrics->average_umc_activity = metrics->AverageUclkActivity; 1240 gpu_metrics->average_mm_activity = MAX(metrics->Vcn0ActivityPercentage, 1241 metrics->Vcn1ActivityPercentage); 1242 1243 gpu_metrics->average_socket_power = metrics->AverageSocketPower; 1244 gpu_metrics->energy_accumulator = metrics->EnergyAccumulator; 1245 1246 if (metrics->AverageGfxActivity <= SMU_13_0_0_BUSY_THRESHOLD) 1247 gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs; 1248 else 1249 gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs; 1250 1251 if (metrics->AverageUclkActivity <= SMU_13_0_0_BUSY_THRESHOLD) 1252 gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs; 1253 else 1254 gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs; 1255 1256 gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency; 1257 gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency; 1258 gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency; 1259 gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency; 1260 1261 gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK]; 1262 gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK]; 1263 gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK]; 1264 gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0]; 1265 gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0]; 1266 gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1]; 1267 gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_1]; 1268 1269 gpu_metrics->throttle_status = 1270 smu_v13_0_get_throttler_status(metrics); 1271 gpu_metrics->indep_throttle_status = 1272 smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status, 1273 smu_v13_0_0_throttler_map); 1274 1275 gpu_metrics->current_fan_speed = metrics->AvgFanRpm; 1276 1277 gpu_metrics->pcie_link_width = metrics->PcieWidth; 1278 gpu_metrics->pcie_link_speed = metrics->PcieRate; 1279 1280 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1281 1282 gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_GFX]; 1283 gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_SOC]; 1284 gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VMEMP]; 1285 1286 *table = (void *)gpu_metrics; 1287 1288 return sizeof(struct gpu_metrics_v1_3); 1289 } 1290 1291 
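/*
 * The UMD pstate table filled in below is what the common SMU code
 * consults when a profile_min_sclk/profile_min_mclk, profile_standard
 * or profile_peak performance level is forced. min/peak come straight
 * from the DPM tables built in smu_v13_0_0_set_default_dpm_table();
 * the "standard" levels currently just reuse the minimum (see the TODO
 * in the function body).
 */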
static int smu_v13_0_0_populate_umd_state_clk(struct smu_context *smu)
{
	struct smu_13_0_dpm_context *dpm_context =
		smu->smu_dpm.dpm_context;
	struct smu_13_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;
	struct smu_13_0_dpm_table *mem_table =
		&dpm_context->dpm_tables.uclk_table;
	struct smu_13_0_dpm_table *soc_table =
		&dpm_context->dpm_tables.soc_table;
	struct smu_13_0_dpm_table *vclk_table =
		&dpm_context->dpm_tables.vclk_table;
	struct smu_13_0_dpm_table *dclk_table =
		&dpm_context->dpm_tables.dclk_table;
	struct smu_13_0_dpm_table *fclk_table =
		&dpm_context->dpm_tables.fclk_table;
	struct smu_umd_pstate_table *pstate_table =
		&smu->pstate_table;

	pstate_table->gfxclk_pstate.min = gfx_table->min;
	pstate_table->gfxclk_pstate.peak = gfx_table->max;

	pstate_table->uclk_pstate.min = mem_table->min;
	pstate_table->uclk_pstate.peak = mem_table->max;

	pstate_table->socclk_pstate.min = soc_table->min;
	pstate_table->socclk_pstate.peak = soc_table->max;

	pstate_table->vclk_pstate.min = vclk_table->min;
	pstate_table->vclk_pstate.peak = vclk_table->max;

	pstate_table->dclk_pstate.min = dclk_table->min;
	pstate_table->dclk_pstate.peak = dclk_table->max;

	pstate_table->fclk_pstate.min = fclk_table->min;
	pstate_table->fclk_pstate.peak = fclk_table->max;

	/*
	 * For now, just use the minimum clock frequency.
	 * TODO: update them when the real pstate settings are available.
	 */
	pstate_table->gfxclk_pstate.standard = gfx_table->min;
	pstate_table->uclk_pstate.standard = mem_table->min;
	pstate_table->socclk_pstate.standard = soc_table->min;
	pstate_table->vclk_pstate.standard = vclk_table->min;
	pstate_table->dclk_pstate.standard = dclk_table->min;
	pstate_table->fclk_pstate.standard = fclk_table->min;

	return 0;
}

static void smu_v13_0_0_get_unique_id(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics =
		&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
	struct amdgpu_device *adev = smu->adev;
	uint32_t upper32 = 0, lower32 = 0;
	int ret;

	ret = smu_cmn_get_metrics_table(smu, NULL, false);
	if (ret)
		goto out;

	upper32 = metrics->PublicSerialNumberUpper;
	lower32 = metrics->PublicSerialNumberLower;

out:
	adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
	if (adev->serial[0] == '\0')
		sprintf(adev->serial, "%016llx", adev->unique_id);
}

static int smu_v13_0_0_get_fan_speed_pwm(struct smu_context *smu,
					 uint32_t *speed)
{
	if (!speed)
		return -EINVAL;

	return smu_v13_0_0_get_smu_metrics_data(smu,
						METRICS_CURR_FANPWM,
						speed);
}

static int smu_v13_0_0_get_fan_speed_rpm(struct smu_context *smu,
					 uint32_t *speed)
{
	if (!speed)
		return -EINVAL;

	return smu_v13_0_0_get_smu_metrics_data(smu,
						METRICS_CURR_FANSPEED,
						speed);
}

static int smu_v13_0_0_enable_mgpu_fan_boost(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;
	SkuTable_t *skutable = &pptable->SkuTable;

	/*
	 * Skip the MGpuFanBoost setting for those ASICs
	 * which do not support it
	 */
	if
(skutable->MGpuAcousticLimitRpmThreshold == 0) 1397 return 0; 1398 1399 return smu_cmn_send_smc_msg_with_param(smu, 1400 SMU_MSG_SetMGpuFanBoostLimitRpm, 1401 0, 1402 NULL); 1403 } 1404 1405 static int smu_v13_0_0_get_power_limit(struct smu_context *smu, 1406 uint32_t *current_power_limit, 1407 uint32_t *default_power_limit, 1408 uint32_t *max_power_limit) 1409 { 1410 struct smu_table_context *table_context = &smu->smu_table; 1411 struct smu_13_0_0_powerplay_table *powerplay_table = 1412 (struct smu_13_0_0_powerplay_table *)table_context->power_play_table; 1413 PPTable_t *pptable = table_context->driver_pptable; 1414 SkuTable_t *skutable = &pptable->SkuTable; 1415 uint32_t power_limit, od_percent; 1416 1417 if (smu_v13_0_get_current_power_limit(smu, &power_limit)) 1418 power_limit = smu->adev->pm.ac_power ? 1419 skutable->SocketPowerLimitAc[PPT_THROTTLER_PPT0] : 1420 skutable->SocketPowerLimitDc[PPT_THROTTLER_PPT0]; 1421 1422 if (current_power_limit) 1423 *current_power_limit = power_limit; 1424 if (default_power_limit) 1425 *default_power_limit = power_limit; 1426 1427 if (max_power_limit) { 1428 if (smu->od_enabled) { 1429 od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]); 1430 1431 dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit); 1432 1433 power_limit *= (100 + od_percent); 1434 power_limit /= 100; 1435 } 1436 *max_power_limit = power_limit; 1437 } 1438 1439 return 0; 1440 } 1441 1442 static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu, 1443 char *buf) 1444 { 1445 DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; 1446 DpmActivityMonitorCoeffInt_t *activity_monitor = 1447 &(activity_monitor_external.DpmActivityMonitorCoeffInt); 1448 static const char *title[] = { 1449 "PROFILE_INDEX(NAME)", 1450 "CLOCK_TYPE(NAME)", 1451 "FPS", 1452 "MinActiveFreqType", 1453 "MinActiveFreq", 1454 "BoosterFreqType", 1455 "BoosterFreq", 1456 "PD_Data_limit_c", 1457 "PD_Data_error_coeff", 1458 "PD_Data_error_rate_coeff"}; 1459 int16_t workload_type = 0; 1460 uint32_t i, size = 0; 1461 int result = 0; 1462 1463 if (!buf) 1464 return -EINVAL; 1465 1466 size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s\n", 1467 title[0], title[1], title[2], title[3], title[4], title[5], 1468 title[6], title[7], title[8], title[9]); 1469 1470 for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { 1471 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 1472 workload_type = smu_cmn_to_asic_specific_index(smu, 1473 CMN2ASIC_MAPPING_WORKLOAD, 1474 i); 1475 if (workload_type < 0) 1476 return -EINVAL; 1477 1478 result = smu_cmn_update_table(smu, 1479 SMU_TABLE_ACTIVITY_MONITOR_COEFF, 1480 workload_type, 1481 (void *)(&activity_monitor_external), 1482 false); 1483 if (result) { 1484 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); 1485 return result; 1486 } 1487 1488 size += sysfs_emit_at(buf, size, "%2d %14s%s:\n", 1489 i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? 
"*" : " "); 1490 1491 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n", 1492 " ", 1493 0, 1494 "GFXCLK", 1495 activity_monitor->Gfx_FPS, 1496 activity_monitor->Gfx_MinActiveFreqType, 1497 activity_monitor->Gfx_MinActiveFreq, 1498 activity_monitor->Gfx_BoosterFreqType, 1499 activity_monitor->Gfx_BoosterFreq, 1500 activity_monitor->Gfx_PD_Data_limit_c, 1501 activity_monitor->Gfx_PD_Data_error_coeff, 1502 activity_monitor->Gfx_PD_Data_error_rate_coeff); 1503 1504 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n", 1505 " ", 1506 1, 1507 "FCLK", 1508 activity_monitor->Fclk_FPS, 1509 activity_monitor->Fclk_MinActiveFreqType, 1510 activity_monitor->Fclk_MinActiveFreq, 1511 activity_monitor->Fclk_BoosterFreqType, 1512 activity_monitor->Fclk_BoosterFreq, 1513 activity_monitor->Fclk_PD_Data_limit_c, 1514 activity_monitor->Fclk_PD_Data_error_coeff, 1515 activity_monitor->Fclk_PD_Data_error_rate_coeff); 1516 } 1517 1518 return size; 1519 } 1520 1521 static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, 1522 long *input, 1523 uint32_t size) 1524 { 1525 DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; 1526 DpmActivityMonitorCoeffInt_t *activity_monitor = 1527 &(activity_monitor_external.DpmActivityMonitorCoeffInt); 1528 int workload_type, ret = 0; 1529 1530 smu->power_profile_mode = input[size]; 1531 1532 if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { 1533 dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); 1534 return -EINVAL; 1535 } 1536 1537 if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { 1538 ret = smu_cmn_update_table(smu, 1539 SMU_TABLE_ACTIVITY_MONITOR_COEFF, 1540 WORKLOAD_PPLIB_CUSTOM_BIT, 1541 (void *)(&activity_monitor_external), 1542 false); 1543 if (ret) { 1544 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); 1545 return ret; 1546 } 1547 1548 switch (input[0]) { 1549 case 0: /* Gfxclk */ 1550 activity_monitor->Gfx_FPS = input[1]; 1551 activity_monitor->Gfx_MinActiveFreqType = input[2]; 1552 activity_monitor->Gfx_MinActiveFreq = input[3]; 1553 activity_monitor->Gfx_BoosterFreqType = input[4]; 1554 activity_monitor->Gfx_BoosterFreq = input[5]; 1555 activity_monitor->Gfx_PD_Data_limit_c = input[6]; 1556 activity_monitor->Gfx_PD_Data_error_coeff = input[7]; 1557 activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8]; 1558 break; 1559 case 1: /* Fclk */ 1560 activity_monitor->Fclk_FPS = input[1]; 1561 activity_monitor->Fclk_MinActiveFreqType = input[2]; 1562 activity_monitor->Fclk_MinActiveFreq = input[3]; 1563 activity_monitor->Fclk_BoosterFreqType = input[4]; 1564 activity_monitor->Fclk_BoosterFreq = input[5]; 1565 activity_monitor->Fclk_PD_Data_limit_c = input[6]; 1566 activity_monitor->Fclk_PD_Data_error_coeff = input[7]; 1567 activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8]; 1568 break; 1569 } 1570 1571 ret = smu_cmn_update_table(smu, 1572 SMU_TABLE_ACTIVITY_MONITOR_COEFF, 1573 WORKLOAD_PPLIB_CUSTOM_BIT, 1574 (void *)(&activity_monitor_external), 1575 true); 1576 if (ret) { 1577 dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); 1578 return ret; 1579 } 1580 } 1581 1582 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 1583 workload_type = smu_cmn_to_asic_specific_index(smu, 1584 CMN2ASIC_MAPPING_WORKLOAD, 1585 smu->power_profile_mode); 1586 if (workload_type < 0) 1587 return -EINVAL; 1588 1589 return smu_cmn_send_smc_msg_with_param(smu, 1590 SMU_MSG_SetWorkloadMask, 
1591 1 << workload_type, 1592 NULL); 1593 } 1594 1595 static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu) 1596 { 1597 struct amdgpu_device *adev = smu->adev; 1598 u32 smu_version; 1599 1600 /* SRIOV does not support SMU mode1 reset */ 1601 if (amdgpu_sriov_vf(adev)) 1602 return false; 1603 1604 /* PMFW support is available since 78.41 */ 1605 smu_cmn_get_smc_version(smu, NULL, &smu_version); 1606 if (smu_version < 0x004e2900) 1607 return false; 1608 1609 return true; 1610 } 1611 1612 static int smu_v13_0_0_i2c_xfer(struct i2c_adapter *i2c_adap, 1613 struct i2c_msg *msg, int num_msgs) 1614 { 1615 struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap); 1616 struct amdgpu_device *adev = smu_i2c->adev; 1617 struct smu_context *smu = adev->powerplay.pp_handle; 1618 struct smu_table_context *smu_table = &smu->smu_table; 1619 struct smu_table *table = &smu_table->driver_table; 1620 SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr; 1621 int i, j, r, c; 1622 u16 dir; 1623 1624 if (!adev->pm.dpm_enabled) 1625 return -EBUSY; 1626 1627 req = kzalloc(sizeof(*req), GFP_KERNEL); 1628 if (!req) 1629 return -ENOMEM; 1630 1631 req->I2CcontrollerPort = smu_i2c->port; 1632 req->I2CSpeed = I2C_SPEED_FAST_400K; 1633 req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */ 1634 dir = msg[0].flags & I2C_M_RD; 1635 1636 for (c = i = 0; i < num_msgs; i++) { 1637 for (j = 0; j < msg[i].len; j++, c++) { 1638 SwI2cCmd_t *cmd = &req->SwI2cCmds[c]; 1639 1640 if (!(msg[i].flags & I2C_M_RD)) { 1641 /* write */ 1642 cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK; 1643 cmd->ReadWriteData = msg[i].buf[j]; 1644 } 1645 1646 if ((dir ^ msg[i].flags) & I2C_M_RD) { 1647 /* The direction changes. 1648 */ 1649 dir = msg[i].flags & I2C_M_RD; 1650 cmd->CmdConfig |= CMDCONFIG_RESTART_MASK; 1651 } 1652 1653 req->NumCmds++; 1654 1655 /* 1656 * Insert STOP if we are at the last byte of either last 1657 * message for the transaction or the client explicitly 1658 * requires a STOP at this particular message. 
1659 */ 1660 if ((j == msg[i].len - 1) && 1661 ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) { 1662 cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK; 1663 cmd->CmdConfig |= CMDCONFIG_STOP_MASK; 1664 } 1665 } 1666 } 1667 mutex_lock(&adev->pm.mutex); 1668 r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); 1669 mutex_unlock(&adev->pm.mutex); 1670 if (r) 1671 goto fail; 1672 1673 for (c = i = 0; i < num_msgs; i++) { 1674 if (!(msg[i].flags & I2C_M_RD)) { 1675 c += msg[i].len; 1676 continue; 1677 } 1678 for (j = 0; j < msg[i].len; j++, c++) { 1679 SwI2cCmd_t *cmd = &res->SwI2cCmds[c]; 1680 1681 msg[i].buf[j] = cmd->ReadWriteData; 1682 } 1683 } 1684 r = num_msgs; 1685 fail: 1686 kfree(req); 1687 return r; 1688 } 1689 1690 static u32 smu_v13_0_0_i2c_func(struct i2c_adapter *adap) 1691 { 1692 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 1693 } 1694 1695 static const struct i2c_algorithm smu_v13_0_0_i2c_algo = { 1696 .master_xfer = smu_v13_0_0_i2c_xfer, 1697 .functionality = smu_v13_0_0_i2c_func, 1698 }; 1699 1700 static const struct i2c_adapter_quirks smu_v13_0_0_i2c_control_quirks = { 1701 .flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN, 1702 .max_read_len = MAX_SW_I2C_COMMANDS, 1703 .max_write_len = MAX_SW_I2C_COMMANDS, 1704 .max_comb_1st_msg_len = 2, 1705 .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2, 1706 }; 1707 1708 static int smu_v13_0_0_i2c_control_init(struct smu_context *smu) 1709 { 1710 struct amdgpu_device *adev = smu->adev; 1711 int res, i; 1712 1713 for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { 1714 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; 1715 struct i2c_adapter *control = &smu_i2c->adapter; 1716 1717 smu_i2c->adev = adev; 1718 smu_i2c->port = i; 1719 mutex_init(&smu_i2c->mutex); 1720 control->owner = THIS_MODULE; 1721 control->class = I2C_CLASS_SPD; 1722 control->dev.parent = &adev->pdev->dev; 1723 control->algo = &smu_v13_0_0_i2c_algo; 1724 snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i); 1725 control->quirks = &smu_v13_0_0_i2c_control_quirks; 1726 i2c_set_adapdata(control, smu_i2c); 1727 1728 res = i2c_add_adapter(control); 1729 if (res) { 1730 DRM_ERROR("Failed to register hw i2c, err: %d\n", res); 1731 goto Out_err; 1732 } 1733 } 1734 1735 /* assign the buses used for the FRU EEPROM and RAS EEPROM */ 1736 /* XXX ideally this would be something in a vbios data table */ 1737 adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter; 1738 adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; 1739 1740 return 0; 1741 Out_err: 1742 for ( ; i >= 0; i--) { 1743 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; 1744 struct i2c_adapter *control = &smu_i2c->adapter; 1745 1746 i2c_del_adapter(control); 1747 } 1748 return res; 1749 } 1750 1751 static void smu_v13_0_0_i2c_control_fini(struct smu_context *smu) 1752 { 1753 struct amdgpu_device *adev = smu->adev; 1754 int i; 1755 1756 for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { 1757 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; 1758 struct i2c_adapter *control = &smu_i2c->adapter; 1759 1760 i2c_del_adapter(control); 1761 } 1762 adev->pm.ras_eeprom_i2c_bus = NULL; 1763 adev->pm.fru_eeprom_i2c_bus = NULL; 1764 } 1765 1766 static int smu_v13_0_0_set_mp1_state(struct smu_context *smu, 1767 enum pp_mp1_state mp1_state) 1768 { 1769 int ret; 1770 1771 switch (mp1_state) { 1772 case PP_MP1_STATE_UNLOAD: 1773 ret = smu_cmn_set_mp1_state(smu, mp1_state); 1774 break; 1775 default: 1776 /* Ignore others */ 1777 ret = 0; 1778 } 1779 1780 return ret; 1781 } 1782 
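/*
 * Top-level callback table for SMU v13.0.0: the ASIC-specific handlers
 * implemented in this file are combined with the shared smu_v13_0_* and
 * smu_cmn_* helpers for the operations this ASIC does not need to
 * override.
 */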
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
	.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
	.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
	.i2c_init = smu_v13_0_0_i2c_control_init,
	.i2c_fini = smu_v13_0_0_i2c_control_fini,
	.is_dpm_running = smu_v13_0_0_is_dpm_running,
	.dump_pptable = smu_v13_0_0_dump_pptable,
	.init_microcode = smu_v13_0_init_microcode,
	.load_microcode = smu_v13_0_load_microcode,
	.init_smc_tables = smu_v13_0_0_init_smc_tables,
	.init_power = smu_v13_0_init_power,
	.fini_power = smu_v13_0_fini_power,
	.check_fw_status = smu_v13_0_check_fw_status,
	.setup_pptable = smu_v13_0_0_setup_pptable,
	.check_fw_version = smu_v13_0_check_fw_version,
	.write_pptable = smu_cmn_write_pptable,
	.set_driver_table_location = smu_v13_0_set_driver_table_location,
	.system_features_control = smu_v13_0_0_system_features_control,
	.set_allowed_mask = smu_v13_0_set_allowed_mask,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.dpm_set_vcn_enable = smu_v13_0_set_vcn_enable,
	.dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
	.get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
	.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
	.read_sensor = smu_v13_0_0_read_sensor,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.print_clk_levels = smu_v13_0_0_print_clk_levels,
	.force_clk_levels = smu_v13_0_0_force_clk_levels,
	.update_pcie_parameters = smu_v13_0_0_update_pcie_parameters,
	.get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range,
	.register_irq_handler = smu_v13_0_register_irq_handler,
	.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
	.disable_thermal_alert = smu_v13_0_disable_thermal_alert,
	.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
	.get_gpu_metrics = smu_v13_0_0_get_gpu_metrics,
	.set_soft_freq_limited_range = smu_v13_0_set_soft_freq_limited_range,
	.init_pptable_microcode = smu_v13_0_init_pptable_microcode,
	.populate_umd_state_clk = smu_v13_0_0_populate_umd_state_clk,
	.set_performance_level = smu_v13_0_set_performance_level,
	.gfx_off_control = smu_v13_0_gfx_off_control,
	.get_unique_id = smu_v13_0_0_get_unique_id,
	.get_fan_speed_pwm = smu_v13_0_0_get_fan_speed_pwm,
	.get_fan_speed_rpm = smu_v13_0_0_get_fan_speed_rpm,
	.set_fan_speed_pwm = smu_v13_0_set_fan_speed_pwm,
	.set_fan_speed_rpm = smu_v13_0_set_fan_speed_rpm,
	.get_fan_control_mode = smu_v13_0_get_fan_control_mode,
	.set_fan_control_mode = smu_v13_0_set_fan_control_mode,
	.enable_mgpu_fan_boost = smu_v13_0_0_enable_mgpu_fan_boost,
	.get_power_limit = smu_v13_0_0_get_power_limit,
	.set_power_limit = smu_v13_0_set_power_limit,
	.set_power_source = smu_v13_0_set_power_source,
	.get_power_profile_mode = smu_v13_0_0_get_power_profile_mode,
	.set_power_profile_mode = smu_v13_0_0_set_power_profile_mode,
	.run_btc = smu_v13_0_run_btc,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
	.set_tool_table_location = smu_v13_0_set_tool_table_location,
	.deep_sleep_control = smu_v13_0_deep_sleep_control,
	.gfx_ulv_control = smu_v13_0_gfx_ulv_control,
	.baco_is_support = smu_v13_0_baco_is_support,
	.baco_get_state = smu_v13_0_baco_get_state,
	.baco_set_state = smu_v13_0_baco_set_state,
	.baco_enter = smu_v13_0_baco_enter,
	.baco_exit = smu_v13_0_baco_exit,
	.mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported,
	.mode1_reset = smu_v13_0_mode1_reset,
	.set_mp1_state = smu_v13_0_0_set_mp1_state,
};

void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &smu_v13_0_0_ppt_funcs;
	smu->message_map = smu_v13_0_0_message_map;
	smu->clock_map = smu_v13_0_0_clk_map;
	smu->feature_map = smu_v13_0_0_feature_mask_map;
	smu->table_map = smu_v13_0_0_table_map;
	smu->pwr_src_map = smu_v13_0_0_pwr_src_map;
	smu->workload_map = smu_v13_0_0_workload_map;
	smu_v13_0_set_smu_mailbox_registers(smu);
}
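
/*
 * Illustrative sketch only (not part of the driver): once the adapters above
 * are registered, a client could drive one of them through the regular kernel
 * I2C API, e.g. a combined write-offset/read transfer bounded by
 * smu_v13_0_0_i2c_control_quirks (2-byte first message, up to
 * MAX_SW_I2C_COMMANDS - 2 bytes read back).  The 0x54 slave address and the
 * offset below are made-up values for this example; real FRU/RAS EEPROM
 * access in amdgpu goes through its own helpers.
 *
 *	u8 offset[2] = { 0x00, 0x00 };	// 16-bit EEPROM offset (example)
 *	u8 data[16];
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x54, .flags = 0, .len = sizeof(offset), .buf = offset },
 *		{ .addr = 0x54, .flags = I2C_M_RD, .len = sizeof(data), .buf = data },
 *	};
 *
 *	// i2c_transfer() returns the number of messages transferred
 *	// (2 here) on success, or a negative errno.
 *	int num = i2c_transfer(adev->pm.ras_eeprom_i2c_bus, msgs, ARRAY_SIZE(msgs));
 */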