1 /* 2 * Copyright 2020 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #define SWSMU_CODE_LAYER_L2 25 26 #include "amdgpu.h" 27 #include "amdgpu_smu.h" 28 #include "smu_v11_0.h" 29 #include "smu11_driver_if_vangogh.h" 30 #include "vangogh_ppt.h" 31 #include "smu_v11_5_ppsmc.h" 32 #include "smu_v11_5_pmfw.h" 33 #include "smu_cmn.h" 34 #include "soc15_common.h" 35 #include "asic_reg/gc/gc_10_3_0_offset.h" 36 #include "asic_reg/gc/gc_10_3_0_sh_mask.h" 37 #include <asm/processor.h> 38 39 /* 40 * DO NOT use these for err/warn/info/debug messages. 41 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 42 * They are more MGPU friendly. 43 */ 44 #undef pr_err 45 #undef pr_warn 46 #undef pr_info 47 #undef pr_debug 48 49 // Registers related to GFXOFF 50 // addressBlock: smuio_smuio_SmuSmuioDec 51 // base address: 0x5a000 52 #define mmSMUIO_GFX_MISC_CNTL 0x00c5 53 #define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0 54 55 //SMUIO_GFX_MISC_CNTL 56 #define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff__SHIFT 0x0 57 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1 58 #define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff_MASK 0x00000001L 59 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L 60 61 #define FEATURE_MASK(feature) (1ULL << feature) 62 #define SMC_DPM_FEATURE ( \ 63 FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ 64 FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \ 65 FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ 66 FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \ 67 FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \ 68 FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \ 69 FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \ 70 FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \ 71 FEATURE_MASK(FEATURE_GFX_DPM_BIT)) 72 73 static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = { 74 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), 75 MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0), 76 MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 0), 77 MSG_MAP(EnableGfxOff, PPSMC_MSG_EnableGfxOff, 0), 78 MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0), 79 MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0), 80 MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 0), 81 MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 0), 82 MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0), 83 MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0), 84 MSG_MAP(RlcPowerNotify, PPSMC_MSG_RlcPowerNotify, 0), 85 MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 0), 86 MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 0), 87 MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 0), 88 MSG_MAP(SetHardMinIspiclkByFreq, PPSMC_MSG_SetHardMinIspiclkByFreq, 0), 89 MSG_MAP(SetHardMinIspxclkByFreq, PPSMC_MSG_SetHardMinIspxclkByFreq, 0), 90 MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 0), 91 MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 0), 92 MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0), 93 MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), 94 MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset, 0), 95 MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 0), 96 MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 0), 97 MSG_MAP(SetSoftMinFclk, PPSMC_MSG_SetSoftMinFclk, 0), 98 MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 0), 99 MSG_MAP(EnablePostCode, PPSMC_MSG_EnablePostCode, 0), 100 MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency, 0), 101 MSG_MAP(GetFclkFrequency, PPSMC_MSG_GetFclkFrequency, 0), 102 MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0), 103 MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 0), 104 MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 0), 105 MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 0), 106 MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 0), 107 MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage, 0), 108 MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0), 109 MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0), 110 MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 0), 111 MSG_MAP(SetSoftMinSocclkByFreq, PPSMC_MSG_SetSoftMinSocclkByFreq, 0), 112 MSG_MAP(PowerUpCvip, PPSMC_MSG_PowerUpCvip, 0), 113 MSG_MAP(PowerDownCvip, PPSMC_MSG_PowerDownCvip, 0), 114 MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), 115 MSG_MAP(GetThermalLimit, PPSMC_MSG_GetThermalLimit, 0), 116 MSG_MAP(GetCurrentTemperature, PPSMC_MSG_GetCurrentTemperature, 0), 117 MSG_MAP(GetCurrentPower, PPSMC_MSG_GetCurrentPower, 0), 118 MSG_MAP(GetCurrentVoltage, PPSMC_MSG_GetCurrentVoltage, 0), 119 MSG_MAP(GetCurrentCurrent, PPSMC_MSG_GetCurrentCurrent, 0), 120 MSG_MAP(GetAverageCpuActivity, PPSMC_MSG_GetAverageCpuActivity, 0), 121 MSG_MAP(GetAverageGfxActivity, PPSMC_MSG_GetAverageGfxActivity, 0), 122 MSG_MAP(GetAveragePower, PPSMC_MSG_GetAveragePower, 0), 123 MSG_MAP(GetAverageTemperature, PPSMC_MSG_GetAverageTemperature, 0), 124 MSG_MAP(SetAveragePowerTimeConstant, PPSMC_MSG_SetAveragePowerTimeConstant, 0), 125 MSG_MAP(SetAverageActivityTimeConstant, PPSMC_MSG_SetAverageActivityTimeConstant, 0), 126 MSG_MAP(SetAverageTemperatureTimeConstant, PPSMC_MSG_SetAverageTemperatureTimeConstant, 0), 127 MSG_MAP(SetMitigationEndHysteresis, PPSMC_MSG_SetMitigationEndHysteresis, 0), 128 MSG_MAP(GetCurrentFreq, PPSMC_MSG_GetCurrentFreq, 0), 129 MSG_MAP(SetReducedPptLimit, PPSMC_MSG_SetReducedPptLimit, 0), 130 MSG_MAP(SetReducedThermalLimit, PPSMC_MSG_SetReducedThermalLimit, 0), 131 MSG_MAP(DramLogSetDramAddr, PPSMC_MSG_DramLogSetDramAddr, 0), 132 MSG_MAP(StartDramLogging, PPSMC_MSG_StartDramLogging, 0), 133 MSG_MAP(StopDramLogging, PPSMC_MSG_StopDramLogging, 0), 134 MSG_MAP(SetSoftMinCclk, PPSMC_MSG_SetSoftMinCclk, 0), 135 MSG_MAP(SetSoftMaxCclk, PPSMC_MSG_SetSoftMaxCclk, 0), 136 MSG_MAP(RequestActiveWgp, PPSMC_MSG_RequestActiveWgp, 0), 137 MSG_MAP(SetFastPPTLimit, PPSMC_MSG_SetFastPPTLimit, 0), 138 MSG_MAP(SetSlowPPTLimit, PPSMC_MSG_SetSlowPPTLimit, 0), 139 MSG_MAP(GetFastPPTLimit, PPSMC_MSG_GetFastPPTLimit, 0), 140 MSG_MAP(GetSlowPPTLimit, PPSMC_MSG_GetSlowPPTLimit, 0), 141 MSG_MAP(GetGfxOffStatus, PPSMC_MSG_GetGfxOffStatus, 0), 142 MSG_MAP(GetGfxOffEntryCount, PPSMC_MSG_GetGfxOffEntryCount, 0), 143 MSG_MAP(LogGfxOffResidency, PPSMC_MSG_LogGfxOffResidency, 0), 144 }; 145 146 static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = { 147 FEA_MAP(PPT), 148 FEA_MAP(TDC), 149 FEA_MAP(THERMAL), 150 FEA_MAP(DS_GFXCLK), 151 FEA_MAP(DS_SOCCLK), 152 FEA_MAP(DS_LCLK), 153 FEA_MAP(DS_FCLK), 154 FEA_MAP(DS_MP1CLK), 155 FEA_MAP(DS_MP0CLK), 156 FEA_MAP(ATHUB_PG), 157 FEA_MAP(CCLK_DPM), 158 FEA_MAP(FAN_CONTROLLER), 159 FEA_MAP(ULV), 160 FEA_MAP(VCN_DPM), 161 FEA_MAP(LCLK_DPM), 162 FEA_MAP(SHUBCLK_DPM), 163 FEA_MAP(DCFCLK_DPM), 164 FEA_MAP(DS_DCFCLK), 165 FEA_MAP(S0I2), 166 FEA_MAP(SMU_LOW_POWER), 167 FEA_MAP(GFX_DEM), 168 FEA_MAP(PSI), 169 FEA_MAP(PROCHOT), 170 FEA_MAP(CPUOFF), 171 FEA_MAP(STAPM), 172 FEA_MAP(S0I3), 173 FEA_MAP(DF_CSTATES), 174 FEA_MAP(PERF_LIMIT), 175 FEA_MAP(CORE_DLDO), 176 FEA_MAP(RSMU_LOW_POWER), 177 FEA_MAP(SMN_LOW_POWER), 178 FEA_MAP(THM_LOW_POWER), 179 FEA_MAP(SMUIO_LOW_POWER), 180 FEA_MAP(MP1_LOW_POWER), 181 FEA_MAP(DS_VCN), 182 FEA_MAP(CPPC), 183 FEA_MAP(OS_CSTATES), 184 FEA_MAP(ISP_DPM), 185 FEA_MAP(A55_DPM), 186 FEA_MAP(CVIP_DSP_DPM), 187 FEA_MAP(MSMU_LOW_POWER), 188 FEA_MAP_REVERSE(SOCCLK), 189 FEA_MAP_REVERSE(FCLK), 190 FEA_MAP_HALF_REVERSE(GFX), 191 }; 192 193 static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = { 194 TAB_MAP_VALID(WATERMARKS), 195 TAB_MAP_VALID(SMU_METRICS), 196 TAB_MAP_VALID(CUSTOM_DPM), 197 TAB_MAP_VALID(DPMCLOCKS), 198 }; 199 200 static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { 201 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), 202 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), 203 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), 204 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), 205 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), 206 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CAPPED, WORKLOAD_PPLIB_CAPPED_BIT), 207 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_UNCAPPED, WORKLOAD_PPLIB_UNCAPPED_BIT), 208 }; 209 210 static const uint8_t vangogh_throttler_map[] = { 211 [THROTTLER_STATUS_BIT_SPL] = (SMU_THROTTLER_SPL_BIT), 212 [THROTTLER_STATUS_BIT_FPPT] = (SMU_THROTTLER_FPPT_BIT), 213 [THROTTLER_STATUS_BIT_SPPT] = (SMU_THROTTLER_SPPT_BIT), 214 [THROTTLER_STATUS_BIT_SPPT_APU] = (SMU_THROTTLER_SPPT_APU_BIT), 215 [THROTTLER_STATUS_BIT_THM_CORE] = (SMU_THROTTLER_TEMP_CORE_BIT), 216 [THROTTLER_STATUS_BIT_THM_GFX] = (SMU_THROTTLER_TEMP_GPU_BIT), 217 [THROTTLER_STATUS_BIT_THM_SOC] = (SMU_THROTTLER_TEMP_SOC_BIT), 218 [THROTTLER_STATUS_BIT_TDC_VDD] = (SMU_THROTTLER_TDC_VDD_BIT), 219 [THROTTLER_STATUS_BIT_TDC_SOC] = (SMU_THROTTLER_TDC_SOC_BIT), 220 [THROTTLER_STATUS_BIT_TDC_GFX] = (SMU_THROTTLER_TDC_GFX_BIT), 221 [THROTTLER_STATUS_BIT_TDC_CVIP] = (SMU_THROTTLER_TDC_CVIP_BIT), 222 }; 223 224 static int vangogh_tables_init(struct smu_context *smu) 225 { 226 struct smu_table_context *smu_table = &smu->smu_table; 227 struct smu_table *tables = smu_table->tables; 228 uint32_t if_version; 229 uint32_t smu_version; 230 uint32_t ret = 0; 231 232 ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); 233 if (ret) { 234 return ret; 235 } 236 237 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), 238 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 239 SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t), 240 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 241 SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, 242 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 243 SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t), 244 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 245 246 if (if_version < 0x3) { 247 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t), 248 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 249 smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL); 250 } else { 251 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), 252 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 253 smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); 254 } 255 if (!smu_table->metrics_table) 256 goto err0_out; 257 smu_table->metrics_time = 0; 258 259 if (smu_version >= 0x043F3E00) 260 smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_3); 261 else 262 smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2); 263 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 264 if (!smu_table->gpu_metrics_table) 265 goto err1_out; 266 267 smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); 268 if (!smu_table->watermarks_table) 269 goto err2_out; 270 271 smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL); 272 if (!smu_table->clocks_table) 273 goto err3_out; 274 275 return 0; 276 277 err3_out: 278 kfree(smu_table->watermarks_table); 279 err2_out: 280 kfree(smu_table->gpu_metrics_table); 281 err1_out: 282 kfree(smu_table->metrics_table); 283 err0_out: 284 return -ENOMEM; 285 } 286 287 static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu, 288 MetricsMember_t member, 289 uint32_t *value) 290 { 291 struct smu_table_context *smu_table = &smu->smu_table; 292 SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table; 293 int ret = 0; 294 295 ret = smu_cmn_get_metrics_table(smu, 296 NULL, 297 false); 298 if (ret) 299 return ret; 300 301 switch (member) { 302 case METRICS_CURR_GFXCLK: 303 *value = metrics->GfxclkFrequency; 304 break; 305 case METRICS_AVERAGE_SOCCLK: 306 *value = metrics->SocclkFrequency; 307 break; 308 case METRICS_AVERAGE_VCLK: 309 *value = metrics->VclkFrequency; 310 break; 311 case METRICS_AVERAGE_DCLK: 312 *value = metrics->DclkFrequency; 313 break; 314 case METRICS_CURR_UCLK: 315 *value = metrics->MemclkFrequency; 316 break; 317 case METRICS_AVERAGE_GFXACTIVITY: 318 *value = metrics->GfxActivity / 100; 319 break; 320 case METRICS_AVERAGE_VCNACTIVITY: 321 *value = metrics->UvdActivity; 322 break; 323 case METRICS_AVERAGE_SOCKETPOWER: 324 *value = (metrics->CurrentSocketPower << 8) / 325 1000 ; 326 break; 327 case METRICS_TEMPERATURE_EDGE: 328 *value = metrics->GfxTemperature / 100 * 329 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 330 break; 331 case METRICS_TEMPERATURE_HOTSPOT: 332 *value = metrics->SocTemperature / 100 * 333 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 334 break; 335 case METRICS_THROTTLER_STATUS: 336 *value = metrics->ThrottlerStatus; 337 break; 338 case METRICS_VOLTAGE_VDDGFX: 339 *value = metrics->Voltage[2]; 340 break; 341 case METRICS_VOLTAGE_VDDSOC: 342 *value = metrics->Voltage[1]; 343 break; 344 case METRICS_AVERAGE_CPUCLK: 345 memcpy(value, &metrics->CoreFrequency[0], 346 smu->cpu_core_num * sizeof(uint16_t)); 347 break; 348 default: 349 *value = UINT_MAX; 350 break; 351 } 352 353 return ret; 354 } 355 356 static int vangogh_get_smu_metrics_data(struct smu_context *smu, 357 MetricsMember_t member, 358 uint32_t *value) 359 { 360 struct smu_table_context *smu_table = &smu->smu_table; 361 SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; 362 int ret = 0; 363 364 ret = smu_cmn_get_metrics_table(smu, 365 NULL, 366 false); 367 if (ret) 368 return ret; 369 370 switch (member) { 371 case METRICS_CURR_GFXCLK: 372 *value = metrics->Current.GfxclkFrequency; 373 break; 374 case METRICS_AVERAGE_SOCCLK: 375 *value = metrics->Current.SocclkFrequency; 376 break; 377 case METRICS_AVERAGE_VCLK: 378 *value = metrics->Current.VclkFrequency; 379 break; 380 case METRICS_AVERAGE_DCLK: 381 *value = metrics->Current.DclkFrequency; 382 break; 383 case METRICS_CURR_UCLK: 384 *value = metrics->Current.MemclkFrequency; 385 break; 386 case METRICS_AVERAGE_GFXACTIVITY: 387 *value = metrics->Current.GfxActivity; 388 break; 389 case METRICS_AVERAGE_VCNACTIVITY: 390 *value = metrics->Current.UvdActivity; 391 break; 392 case METRICS_AVERAGE_SOCKETPOWER: 393 *value = (metrics->Current.CurrentSocketPower << 8) / 394 1000; 395 break; 396 case METRICS_TEMPERATURE_EDGE: 397 *value = metrics->Current.GfxTemperature / 100 * 398 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 399 break; 400 case METRICS_TEMPERATURE_HOTSPOT: 401 *value = metrics->Current.SocTemperature / 100 * 402 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 403 break; 404 case METRICS_THROTTLER_STATUS: 405 *value = metrics->Current.ThrottlerStatus; 406 break; 407 case METRICS_VOLTAGE_VDDGFX: 408 *value = metrics->Current.Voltage[2]; 409 break; 410 case METRICS_VOLTAGE_VDDSOC: 411 *value = metrics->Current.Voltage[1]; 412 break; 413 case METRICS_AVERAGE_CPUCLK: 414 memcpy(value, &metrics->Current.CoreFrequency[0], 415 smu->cpu_core_num * sizeof(uint16_t)); 416 break; 417 default: 418 *value = UINT_MAX; 419 break; 420 } 421 422 return ret; 423 } 424 425 static int vangogh_common_get_smu_metrics_data(struct smu_context *smu, 426 MetricsMember_t member, 427 uint32_t *value) 428 { 429 struct amdgpu_device *adev = smu->adev; 430 uint32_t if_version; 431 int ret = 0; 432 433 ret = smu_cmn_get_smc_version(smu, &if_version, NULL); 434 if (ret) { 435 dev_err(adev->dev, "Failed to get smu if version!\n"); 436 return ret; 437 } 438 439 if (if_version < 0x3) 440 ret = vangogh_get_legacy_smu_metrics_data(smu, member, value); 441 else 442 ret = vangogh_get_smu_metrics_data(smu, member, value); 443 444 return ret; 445 } 446 447 static int vangogh_allocate_dpm_context(struct smu_context *smu) 448 { 449 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 450 451 smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context), 452 GFP_KERNEL); 453 if (!smu_dpm->dpm_context) 454 return -ENOMEM; 455 456 smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context); 457 458 return 0; 459 } 460 461 static int vangogh_init_smc_tables(struct smu_context *smu) 462 { 463 int ret = 0; 464 465 ret = vangogh_tables_init(smu); 466 if (ret) 467 return ret; 468 469 ret = vangogh_allocate_dpm_context(smu); 470 if (ret) 471 return ret; 472 473 #ifdef CONFIG_X86 474 /* AMD x86 APU only */ 475 smu->cpu_core_num = boot_cpu_data.x86_max_cores; 476 #else 477 smu->cpu_core_num = 4; 478 #endif 479 480 return smu_v11_0_init_smc_tables(smu); 481 } 482 483 static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable) 484 { 485 int ret = 0; 486 487 if (enable) { 488 /* vcn dpm on is a prerequisite for vcn power gate messages */ 489 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL); 490 if (ret) 491 return ret; 492 } else { 493 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL); 494 if (ret) 495 return ret; 496 } 497 498 return ret; 499 } 500 501 static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) 502 { 503 int ret = 0; 504 505 if (enable) { 506 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL); 507 if (ret) 508 return ret; 509 } else { 510 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL); 511 if (ret) 512 return ret; 513 } 514 515 return ret; 516 } 517 518 static bool vangogh_is_dpm_running(struct smu_context *smu) 519 { 520 struct amdgpu_device *adev = smu->adev; 521 int ret = 0; 522 uint64_t feature_enabled; 523 524 /* we need to re-init after suspend so return false */ 525 if (adev->in_suspend) 526 return false; 527 528 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 529 530 if (ret) 531 return false; 532 533 return !!(feature_enabled & SMC_DPM_FEATURE); 534 } 535 536 static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type, 537 uint32_t dpm_level, uint32_t *freq) 538 { 539 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 540 541 if (!clk_table || clk_type >= SMU_CLK_COUNT) 542 return -EINVAL; 543 544 switch (clk_type) { 545 case SMU_SOCCLK: 546 if (dpm_level >= clk_table->NumSocClkLevelsEnabled) 547 return -EINVAL; 548 *freq = clk_table->SocClocks[dpm_level]; 549 break; 550 case SMU_VCLK: 551 if (dpm_level >= clk_table->VcnClkLevelsEnabled) 552 return -EINVAL; 553 *freq = clk_table->VcnClocks[dpm_level].vclk; 554 break; 555 case SMU_DCLK: 556 if (dpm_level >= clk_table->VcnClkLevelsEnabled) 557 return -EINVAL; 558 *freq = clk_table->VcnClocks[dpm_level].dclk; 559 break; 560 case SMU_UCLK: 561 case SMU_MCLK: 562 if (dpm_level >= clk_table->NumDfPstatesEnabled) 563 return -EINVAL; 564 *freq = clk_table->DfPstateTable[dpm_level].memclk; 565 566 break; 567 case SMU_FCLK: 568 if (dpm_level >= clk_table->NumDfPstatesEnabled) 569 return -EINVAL; 570 *freq = clk_table->DfPstateTable[dpm_level].fclk; 571 break; 572 default: 573 return -EINVAL; 574 } 575 576 return 0; 577 } 578 579 static int vangogh_print_legacy_clk_levels(struct smu_context *smu, 580 enum smu_clk_type clk_type, char *buf) 581 { 582 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 583 SmuMetrics_legacy_t metrics; 584 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 585 int i, idx, size = 0, ret = 0; 586 uint32_t cur_value = 0, value = 0, count = 0; 587 bool cur_value_match_level = false; 588 589 memset(&metrics, 0, sizeof(metrics)); 590 591 ret = smu_cmn_get_metrics_table(smu, &metrics, false); 592 if (ret) 593 return ret; 594 595 smu_cmn_get_sysfs_buf(&buf, &size); 596 597 switch (clk_type) { 598 case SMU_OD_SCLK: 599 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 600 size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); 601 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 602 (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); 603 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 604 (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); 605 } 606 break; 607 case SMU_OD_CCLK: 608 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 609 size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); 610 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 611 (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); 612 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 613 (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); 614 } 615 break; 616 case SMU_OD_RANGE: 617 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 618 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); 619 size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", 620 smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); 621 size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", 622 smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); 623 } 624 break; 625 case SMU_SOCCLK: 626 /* the level 3 ~ 6 of socclk use the same frequency for vangogh */ 627 count = clk_table->NumSocClkLevelsEnabled; 628 cur_value = metrics.SocclkFrequency; 629 break; 630 case SMU_VCLK: 631 count = clk_table->VcnClkLevelsEnabled; 632 cur_value = metrics.VclkFrequency; 633 break; 634 case SMU_DCLK: 635 count = clk_table->VcnClkLevelsEnabled; 636 cur_value = metrics.DclkFrequency; 637 break; 638 case SMU_MCLK: 639 count = clk_table->NumDfPstatesEnabled; 640 cur_value = metrics.MemclkFrequency; 641 break; 642 case SMU_FCLK: 643 count = clk_table->NumDfPstatesEnabled; 644 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value); 645 if (ret) 646 return ret; 647 break; 648 default: 649 break; 650 } 651 652 switch (clk_type) { 653 case SMU_SOCCLK: 654 case SMU_VCLK: 655 case SMU_DCLK: 656 case SMU_MCLK: 657 case SMU_FCLK: 658 for (i = 0; i < count; i++) { 659 idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; 660 ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value); 661 if (ret) 662 return ret; 663 if (!value) 664 continue; 665 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value, 666 cur_value == value ? "*" : ""); 667 if (cur_value == value) 668 cur_value_match_level = true; 669 } 670 671 if (!cur_value_match_level) 672 size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value); 673 break; 674 default: 675 break; 676 } 677 678 return size; 679 } 680 681 static int vangogh_print_clk_levels(struct smu_context *smu, 682 enum smu_clk_type clk_type, char *buf) 683 { 684 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 685 SmuMetrics_t metrics; 686 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 687 int i, idx, size = 0, ret = 0; 688 uint32_t cur_value = 0, value = 0, count = 0; 689 bool cur_value_match_level = false; 690 uint32_t min, max; 691 692 memset(&metrics, 0, sizeof(metrics)); 693 694 ret = smu_cmn_get_metrics_table(smu, &metrics, false); 695 if (ret) 696 return ret; 697 698 smu_cmn_get_sysfs_buf(&buf, &size); 699 700 switch (clk_type) { 701 case SMU_OD_SCLK: 702 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 703 size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); 704 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 705 (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); 706 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 707 (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); 708 } 709 break; 710 case SMU_OD_CCLK: 711 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 712 size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); 713 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 714 (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); 715 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 716 (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); 717 } 718 break; 719 case SMU_OD_RANGE: 720 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 721 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); 722 size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", 723 smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); 724 size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", 725 smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); 726 } 727 break; 728 case SMU_SOCCLK: 729 /* the level 3 ~ 6 of socclk use the same frequency for vangogh */ 730 count = clk_table->NumSocClkLevelsEnabled; 731 cur_value = metrics.Current.SocclkFrequency; 732 break; 733 case SMU_VCLK: 734 count = clk_table->VcnClkLevelsEnabled; 735 cur_value = metrics.Current.VclkFrequency; 736 break; 737 case SMU_DCLK: 738 count = clk_table->VcnClkLevelsEnabled; 739 cur_value = metrics.Current.DclkFrequency; 740 break; 741 case SMU_MCLK: 742 count = clk_table->NumDfPstatesEnabled; 743 cur_value = metrics.Current.MemclkFrequency; 744 break; 745 case SMU_FCLK: 746 count = clk_table->NumDfPstatesEnabled; 747 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value); 748 if (ret) 749 return ret; 750 break; 751 case SMU_GFXCLK: 752 case SMU_SCLK: 753 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetGfxclkFrequency, 0, &cur_value); 754 if (ret) { 755 return ret; 756 } 757 break; 758 default: 759 break; 760 } 761 762 switch (clk_type) { 763 case SMU_SOCCLK: 764 case SMU_VCLK: 765 case SMU_DCLK: 766 case SMU_MCLK: 767 case SMU_FCLK: 768 for (i = 0; i < count; i++) { 769 idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; 770 ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value); 771 if (ret) 772 return ret; 773 if (!value) 774 continue; 775 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value, 776 cur_value == value ? "*" : ""); 777 if (cur_value == value) 778 cur_value_match_level = true; 779 } 780 781 if (!cur_value_match_level) 782 size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value); 783 break; 784 case SMU_GFXCLK: 785 case SMU_SCLK: 786 min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; 787 max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; 788 if (cur_value == max) 789 i = 2; 790 else if (cur_value == min) 791 i = 0; 792 else 793 i = 1; 794 size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min, 795 i == 0 ? "*" : ""); 796 size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", 797 i == 1 ? cur_value : VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, 798 i == 1 ? "*" : ""); 799 size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, 800 i == 2 ? "*" : ""); 801 break; 802 default: 803 break; 804 } 805 806 return size; 807 } 808 809 static int vangogh_common_print_clk_levels(struct smu_context *smu, 810 enum smu_clk_type clk_type, char *buf) 811 { 812 struct amdgpu_device *adev = smu->adev; 813 uint32_t if_version; 814 int ret = 0; 815 816 ret = smu_cmn_get_smc_version(smu, &if_version, NULL); 817 if (ret) { 818 dev_err(adev->dev, "Failed to get smu if version!\n"); 819 return ret; 820 } 821 822 if (if_version < 0x3) 823 ret = vangogh_print_legacy_clk_levels(smu, clk_type, buf); 824 else 825 ret = vangogh_print_clk_levels(smu, clk_type, buf); 826 827 return ret; 828 } 829 830 static int vangogh_get_profiling_clk_mask(struct smu_context *smu, 831 enum amd_dpm_forced_level level, 832 uint32_t *vclk_mask, 833 uint32_t *dclk_mask, 834 uint32_t *mclk_mask, 835 uint32_t *fclk_mask, 836 uint32_t *soc_mask) 837 { 838 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 839 840 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { 841 if (mclk_mask) 842 *mclk_mask = clk_table->NumDfPstatesEnabled - 1; 843 844 if (fclk_mask) 845 *fclk_mask = clk_table->NumDfPstatesEnabled - 1; 846 847 if (soc_mask) 848 *soc_mask = 0; 849 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 850 if (mclk_mask) 851 *mclk_mask = 0; 852 853 if (fclk_mask) 854 *fclk_mask = 0; 855 856 if (soc_mask) 857 *soc_mask = 1; 858 859 if (vclk_mask) 860 *vclk_mask = 1; 861 862 if (dclk_mask) 863 *dclk_mask = 1; 864 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) { 865 if (mclk_mask) 866 *mclk_mask = 0; 867 868 if (fclk_mask) 869 *fclk_mask = 0; 870 871 if (soc_mask) 872 *soc_mask = 1; 873 874 if (vclk_mask) 875 *vclk_mask = 1; 876 877 if (dclk_mask) 878 *dclk_mask = 1; 879 } 880 881 return 0; 882 } 883 884 static bool vangogh_clk_dpm_is_enabled(struct smu_context *smu, 885 enum smu_clk_type clk_type) 886 { 887 enum smu_feature_mask feature_id = 0; 888 889 switch (clk_type) { 890 case SMU_MCLK: 891 case SMU_UCLK: 892 case SMU_FCLK: 893 feature_id = SMU_FEATURE_DPM_FCLK_BIT; 894 break; 895 case SMU_GFXCLK: 896 case SMU_SCLK: 897 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT; 898 break; 899 case SMU_SOCCLK: 900 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT; 901 break; 902 case SMU_VCLK: 903 case SMU_DCLK: 904 feature_id = SMU_FEATURE_VCN_DPM_BIT; 905 break; 906 default: 907 return true; 908 } 909 910 if (!smu_cmn_feature_is_enabled(smu, feature_id)) 911 return false; 912 913 return true; 914 } 915 916 static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu, 917 enum smu_clk_type clk_type, 918 uint32_t *min, 919 uint32_t *max) 920 { 921 int ret = 0; 922 uint32_t soc_mask; 923 uint32_t vclk_mask; 924 uint32_t dclk_mask; 925 uint32_t mclk_mask; 926 uint32_t fclk_mask; 927 uint32_t clock_limit; 928 929 if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) { 930 switch (clk_type) { 931 case SMU_MCLK: 932 case SMU_UCLK: 933 clock_limit = smu->smu_table.boot_values.uclk; 934 break; 935 case SMU_FCLK: 936 clock_limit = smu->smu_table.boot_values.fclk; 937 break; 938 case SMU_GFXCLK: 939 case SMU_SCLK: 940 clock_limit = smu->smu_table.boot_values.gfxclk; 941 break; 942 case SMU_SOCCLK: 943 clock_limit = smu->smu_table.boot_values.socclk; 944 break; 945 case SMU_VCLK: 946 clock_limit = smu->smu_table.boot_values.vclk; 947 break; 948 case SMU_DCLK: 949 clock_limit = smu->smu_table.boot_values.dclk; 950 break; 951 default: 952 clock_limit = 0; 953 break; 954 } 955 956 /* clock in Mhz unit */ 957 if (min) 958 *min = clock_limit / 100; 959 if (max) 960 *max = clock_limit / 100; 961 962 return 0; 963 } 964 if (max) { 965 ret = vangogh_get_profiling_clk_mask(smu, 966 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK, 967 &vclk_mask, 968 &dclk_mask, 969 &mclk_mask, 970 &fclk_mask, 971 &soc_mask); 972 if (ret) 973 goto failed; 974 975 switch (clk_type) { 976 case SMU_UCLK: 977 case SMU_MCLK: 978 ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, max); 979 if (ret) 980 goto failed; 981 break; 982 case SMU_SOCCLK: 983 ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, max); 984 if (ret) 985 goto failed; 986 break; 987 case SMU_FCLK: 988 ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, max); 989 if (ret) 990 goto failed; 991 break; 992 case SMU_VCLK: 993 ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, max); 994 if (ret) 995 goto failed; 996 break; 997 case SMU_DCLK: 998 ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, max); 999 if (ret) 1000 goto failed; 1001 break; 1002 default: 1003 ret = -EINVAL; 1004 goto failed; 1005 } 1006 } 1007 if (min) { 1008 switch (clk_type) { 1009 case SMU_UCLK: 1010 case SMU_MCLK: 1011 ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, min); 1012 if (ret) 1013 goto failed; 1014 break; 1015 case SMU_SOCCLK: 1016 ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, min); 1017 if (ret) 1018 goto failed; 1019 break; 1020 case SMU_FCLK: 1021 ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, min); 1022 if (ret) 1023 goto failed; 1024 break; 1025 case SMU_VCLK: 1026 ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, min); 1027 if (ret) 1028 goto failed; 1029 break; 1030 case SMU_DCLK: 1031 ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, min); 1032 if (ret) 1033 goto failed; 1034 break; 1035 default: 1036 ret = -EINVAL; 1037 goto failed; 1038 } 1039 } 1040 failed: 1041 return ret; 1042 } 1043 1044 static int vangogh_get_power_profile_mode(struct smu_context *smu, 1045 char *buf) 1046 { 1047 uint32_t i, size = 0; 1048 int16_t workload_type = 0; 1049 1050 if (!buf) 1051 return -EINVAL; 1052 1053 for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) { 1054 /* 1055 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT 1056 * Not all profile modes are supported on vangogh. 1057 */ 1058 workload_type = smu_cmn_to_asic_specific_index(smu, 1059 CMN2ASIC_MAPPING_WORKLOAD, 1060 i); 1061 1062 if (workload_type < 0) 1063 continue; 1064 1065 size += sysfs_emit_at(buf, size, "%2d %14s%s\n", 1066 i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); 1067 } 1068 1069 return size; 1070 } 1071 1072 static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) 1073 { 1074 int workload_type, ret; 1075 uint32_t profile_mode = input[size]; 1076 1077 if (profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { 1078 dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); 1079 return -EINVAL; 1080 } 1081 1082 if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT || 1083 profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) 1084 return 0; 1085 1086 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 1087 workload_type = smu_cmn_to_asic_specific_index(smu, 1088 CMN2ASIC_MAPPING_WORKLOAD, 1089 profile_mode); 1090 if (workload_type < 0) { 1091 dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n", 1092 profile_mode); 1093 return -EINVAL; 1094 } 1095 1096 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, 1097 1 << workload_type, 1098 NULL); 1099 if (ret) { 1100 dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", 1101 workload_type); 1102 return ret; 1103 } 1104 1105 smu->power_profile_mode = profile_mode; 1106 1107 return 0; 1108 } 1109 1110 static int vangogh_set_soft_freq_limited_range(struct smu_context *smu, 1111 enum smu_clk_type clk_type, 1112 uint32_t min, 1113 uint32_t max) 1114 { 1115 int ret = 0; 1116 1117 if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) 1118 return 0; 1119 1120 switch (clk_type) { 1121 case SMU_GFXCLK: 1122 case SMU_SCLK: 1123 ret = smu_cmn_send_smc_msg_with_param(smu, 1124 SMU_MSG_SetHardMinGfxClk, 1125 min, NULL); 1126 if (ret) 1127 return ret; 1128 1129 ret = smu_cmn_send_smc_msg_with_param(smu, 1130 SMU_MSG_SetSoftMaxGfxClk, 1131 max, NULL); 1132 if (ret) 1133 return ret; 1134 break; 1135 case SMU_FCLK: 1136 ret = smu_cmn_send_smc_msg_with_param(smu, 1137 SMU_MSG_SetHardMinFclkByFreq, 1138 min, NULL); 1139 if (ret) 1140 return ret; 1141 1142 ret = smu_cmn_send_smc_msg_with_param(smu, 1143 SMU_MSG_SetSoftMaxFclkByFreq, 1144 max, NULL); 1145 if (ret) 1146 return ret; 1147 break; 1148 case SMU_SOCCLK: 1149 ret = smu_cmn_send_smc_msg_with_param(smu, 1150 SMU_MSG_SetHardMinSocclkByFreq, 1151 min, NULL); 1152 if (ret) 1153 return ret; 1154 1155 ret = smu_cmn_send_smc_msg_with_param(smu, 1156 SMU_MSG_SetSoftMaxSocclkByFreq, 1157 max, NULL); 1158 if (ret) 1159 return ret; 1160 break; 1161 case SMU_VCLK: 1162 ret = smu_cmn_send_smc_msg_with_param(smu, 1163 SMU_MSG_SetHardMinVcn, 1164 min << 16, NULL); 1165 if (ret) 1166 return ret; 1167 ret = smu_cmn_send_smc_msg_with_param(smu, 1168 SMU_MSG_SetSoftMaxVcn, 1169 max << 16, NULL); 1170 if (ret) 1171 return ret; 1172 break; 1173 case SMU_DCLK: 1174 ret = smu_cmn_send_smc_msg_with_param(smu, 1175 SMU_MSG_SetHardMinVcn, 1176 min, NULL); 1177 if (ret) 1178 return ret; 1179 ret = smu_cmn_send_smc_msg_with_param(smu, 1180 SMU_MSG_SetSoftMaxVcn, 1181 max, NULL); 1182 if (ret) 1183 return ret; 1184 break; 1185 default: 1186 return -EINVAL; 1187 } 1188 1189 return ret; 1190 } 1191 1192 static int vangogh_force_clk_levels(struct smu_context *smu, 1193 enum smu_clk_type clk_type, uint32_t mask) 1194 { 1195 uint32_t soft_min_level = 0, soft_max_level = 0; 1196 uint32_t min_freq = 0, max_freq = 0; 1197 int ret = 0 ; 1198 1199 soft_min_level = mask ? (ffs(mask) - 1) : 0; 1200 soft_max_level = mask ? (fls(mask) - 1) : 0; 1201 1202 switch (clk_type) { 1203 case SMU_SOCCLK: 1204 ret = vangogh_get_dpm_clk_limited(smu, clk_type, 1205 soft_min_level, &min_freq); 1206 if (ret) 1207 return ret; 1208 ret = vangogh_get_dpm_clk_limited(smu, clk_type, 1209 soft_max_level, &max_freq); 1210 if (ret) 1211 return ret; 1212 ret = smu_cmn_send_smc_msg_with_param(smu, 1213 SMU_MSG_SetSoftMaxSocclkByFreq, 1214 max_freq, NULL); 1215 if (ret) 1216 return ret; 1217 ret = smu_cmn_send_smc_msg_with_param(smu, 1218 SMU_MSG_SetHardMinSocclkByFreq, 1219 min_freq, NULL); 1220 if (ret) 1221 return ret; 1222 break; 1223 case SMU_FCLK: 1224 ret = vangogh_get_dpm_clk_limited(smu, 1225 clk_type, soft_min_level, &min_freq); 1226 if (ret) 1227 return ret; 1228 ret = vangogh_get_dpm_clk_limited(smu, 1229 clk_type, soft_max_level, &max_freq); 1230 if (ret) 1231 return ret; 1232 ret = smu_cmn_send_smc_msg_with_param(smu, 1233 SMU_MSG_SetSoftMaxFclkByFreq, 1234 max_freq, NULL); 1235 if (ret) 1236 return ret; 1237 ret = smu_cmn_send_smc_msg_with_param(smu, 1238 SMU_MSG_SetHardMinFclkByFreq, 1239 min_freq, NULL); 1240 if (ret) 1241 return ret; 1242 break; 1243 case SMU_VCLK: 1244 ret = vangogh_get_dpm_clk_limited(smu, 1245 clk_type, soft_min_level, &min_freq); 1246 if (ret) 1247 return ret; 1248 1249 ret = vangogh_get_dpm_clk_limited(smu, 1250 clk_type, soft_max_level, &max_freq); 1251 if (ret) 1252 return ret; 1253 1254 1255 ret = smu_cmn_send_smc_msg_with_param(smu, 1256 SMU_MSG_SetHardMinVcn, 1257 min_freq << 16, NULL); 1258 if (ret) 1259 return ret; 1260 1261 ret = smu_cmn_send_smc_msg_with_param(smu, 1262 SMU_MSG_SetSoftMaxVcn, 1263 max_freq << 16, NULL); 1264 if (ret) 1265 return ret; 1266 1267 break; 1268 case SMU_DCLK: 1269 ret = vangogh_get_dpm_clk_limited(smu, 1270 clk_type, soft_min_level, &min_freq); 1271 if (ret) 1272 return ret; 1273 1274 ret = vangogh_get_dpm_clk_limited(smu, 1275 clk_type, soft_max_level, &max_freq); 1276 if (ret) 1277 return ret; 1278 1279 ret = smu_cmn_send_smc_msg_with_param(smu, 1280 SMU_MSG_SetHardMinVcn, 1281 min_freq, NULL); 1282 if (ret) 1283 return ret; 1284 1285 ret = smu_cmn_send_smc_msg_with_param(smu, 1286 SMU_MSG_SetSoftMaxVcn, 1287 max_freq, NULL); 1288 if (ret) 1289 return ret; 1290 1291 break; 1292 default: 1293 break; 1294 } 1295 1296 return ret; 1297 } 1298 1299 static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest) 1300 { 1301 int ret = 0, i = 0; 1302 uint32_t min_freq, max_freq, force_freq; 1303 enum smu_clk_type clk_type; 1304 1305 enum smu_clk_type clks[] = { 1306 SMU_SOCCLK, 1307 SMU_VCLK, 1308 SMU_DCLK, 1309 SMU_FCLK, 1310 }; 1311 1312 for (i = 0; i < ARRAY_SIZE(clks); i++) { 1313 clk_type = clks[i]; 1314 ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq); 1315 if (ret) 1316 return ret; 1317 1318 force_freq = highest ? max_freq : min_freq; 1319 ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq); 1320 if (ret) 1321 return ret; 1322 } 1323 1324 return ret; 1325 } 1326 1327 static int vangogh_unforce_dpm_levels(struct smu_context *smu) 1328 { 1329 int ret = 0, i = 0; 1330 uint32_t min_freq, max_freq; 1331 enum smu_clk_type clk_type; 1332 1333 struct clk_feature_map { 1334 enum smu_clk_type clk_type; 1335 uint32_t feature; 1336 } clk_feature_map[] = { 1337 {SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT}, 1338 {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT}, 1339 {SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT}, 1340 {SMU_DCLK, SMU_FEATURE_VCN_DPM_BIT}, 1341 }; 1342 1343 for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) { 1344 1345 if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature)) 1346 continue; 1347 1348 clk_type = clk_feature_map[i].clk_type; 1349 1350 ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq); 1351 1352 if (ret) 1353 return ret; 1354 1355 ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); 1356 1357 if (ret) 1358 return ret; 1359 } 1360 1361 return ret; 1362 } 1363 1364 static int vangogh_set_peak_clock_by_device(struct smu_context *smu) 1365 { 1366 int ret = 0; 1367 uint32_t socclk_freq = 0, fclk_freq = 0; 1368 uint32_t vclk_freq = 0, dclk_freq = 0; 1369 1370 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_freq); 1371 if (ret) 1372 return ret; 1373 1374 ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq); 1375 if (ret) 1376 return ret; 1377 1378 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_freq); 1379 if (ret) 1380 return ret; 1381 1382 ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq); 1383 if (ret) 1384 return ret; 1385 1386 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_freq); 1387 if (ret) 1388 return ret; 1389 1390 ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq); 1391 if (ret) 1392 return ret; 1393 1394 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_freq); 1395 if (ret) 1396 return ret; 1397 1398 ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq); 1399 if (ret) 1400 return ret; 1401 1402 return ret; 1403 } 1404 1405 static int vangogh_set_performance_level(struct smu_context *smu, 1406 enum amd_dpm_forced_level level) 1407 { 1408 int ret = 0, i; 1409 uint32_t soc_mask, mclk_mask, fclk_mask; 1410 uint32_t vclk_mask = 0, dclk_mask = 0; 1411 1412 smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; 1413 smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; 1414 1415 switch (level) { 1416 case AMD_DPM_FORCED_LEVEL_HIGH: 1417 smu->gfx_actual_hard_min_freq = smu->gfx_default_soft_max_freq; 1418 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 1419 1420 1421 ret = vangogh_force_dpm_limit_value(smu, true); 1422 if (ret) 1423 return ret; 1424 break; 1425 case AMD_DPM_FORCED_LEVEL_LOW: 1426 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1427 smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq; 1428 1429 ret = vangogh_force_dpm_limit_value(smu, false); 1430 if (ret) 1431 return ret; 1432 break; 1433 case AMD_DPM_FORCED_LEVEL_AUTO: 1434 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1435 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 1436 1437 ret = vangogh_unforce_dpm_levels(smu); 1438 if (ret) 1439 return ret; 1440 break; 1441 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 1442 smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK; 1443 smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK; 1444 1445 ret = vangogh_get_profiling_clk_mask(smu, level, 1446 &vclk_mask, 1447 &dclk_mask, 1448 &mclk_mask, 1449 &fclk_mask, 1450 &soc_mask); 1451 if (ret) 1452 return ret; 1453 1454 vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); 1455 vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); 1456 vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask); 1457 vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask); 1458 break; 1459 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 1460 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1461 smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq; 1462 break; 1463 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 1464 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1465 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 1466 1467 ret = vangogh_get_profiling_clk_mask(smu, level, 1468 NULL, 1469 NULL, 1470 &mclk_mask, 1471 &fclk_mask, 1472 NULL); 1473 if (ret) 1474 return ret; 1475 1476 vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); 1477 break; 1478 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 1479 smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK; 1480 smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK; 1481 1482 ret = vangogh_set_peak_clock_by_device(smu); 1483 if (ret) 1484 return ret; 1485 break; 1486 case AMD_DPM_FORCED_LEVEL_MANUAL: 1487 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 1488 default: 1489 return 0; 1490 } 1491 1492 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, 1493 smu->gfx_actual_hard_min_freq, NULL); 1494 if (ret) 1495 return ret; 1496 1497 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, 1498 smu->gfx_actual_soft_max_freq, NULL); 1499 if (ret) 1500 return ret; 1501 1502 if (smu->adev->pm.fw_version >= 0x43f1b00) { 1503 for (i = 0; i < smu->cpu_core_num; i++) { 1504 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk, 1505 ((i << 20) 1506 | smu->cpu_actual_soft_min_freq), 1507 NULL); 1508 if (ret) 1509 return ret; 1510 1511 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk, 1512 ((i << 20) 1513 | smu->cpu_actual_soft_max_freq), 1514 NULL); 1515 if (ret) 1516 return ret; 1517 } 1518 } 1519 1520 return ret; 1521 } 1522 1523 static int vangogh_read_sensor(struct smu_context *smu, 1524 enum amd_pp_sensors sensor, 1525 void *data, uint32_t *size) 1526 { 1527 int ret = 0; 1528 1529 if (!data || !size) 1530 return -EINVAL; 1531 1532 switch (sensor) { 1533 case AMDGPU_PP_SENSOR_GPU_LOAD: 1534 ret = vangogh_common_get_smu_metrics_data(smu, 1535 METRICS_AVERAGE_GFXACTIVITY, 1536 (uint32_t *)data); 1537 *size = 4; 1538 break; 1539 case AMDGPU_PP_SENSOR_GPU_POWER: 1540 ret = vangogh_common_get_smu_metrics_data(smu, 1541 METRICS_AVERAGE_SOCKETPOWER, 1542 (uint32_t *)data); 1543 *size = 4; 1544 break; 1545 case AMDGPU_PP_SENSOR_EDGE_TEMP: 1546 ret = vangogh_common_get_smu_metrics_data(smu, 1547 METRICS_TEMPERATURE_EDGE, 1548 (uint32_t *)data); 1549 *size = 4; 1550 break; 1551 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 1552 ret = vangogh_common_get_smu_metrics_data(smu, 1553 METRICS_TEMPERATURE_HOTSPOT, 1554 (uint32_t *)data); 1555 *size = 4; 1556 break; 1557 case AMDGPU_PP_SENSOR_GFX_MCLK: 1558 ret = vangogh_common_get_smu_metrics_data(smu, 1559 METRICS_CURR_UCLK, 1560 (uint32_t *)data); 1561 *(uint32_t *)data *= 100; 1562 *size = 4; 1563 break; 1564 case AMDGPU_PP_SENSOR_GFX_SCLK: 1565 ret = vangogh_common_get_smu_metrics_data(smu, 1566 METRICS_CURR_GFXCLK, 1567 (uint32_t *)data); 1568 *(uint32_t *)data *= 100; 1569 *size = 4; 1570 break; 1571 case AMDGPU_PP_SENSOR_VDDGFX: 1572 ret = vangogh_common_get_smu_metrics_data(smu, 1573 METRICS_VOLTAGE_VDDGFX, 1574 (uint32_t *)data); 1575 *size = 4; 1576 break; 1577 case AMDGPU_PP_SENSOR_VDDNB: 1578 ret = vangogh_common_get_smu_metrics_data(smu, 1579 METRICS_VOLTAGE_VDDSOC, 1580 (uint32_t *)data); 1581 *size = 4; 1582 break; 1583 case AMDGPU_PP_SENSOR_CPU_CLK: 1584 ret = vangogh_common_get_smu_metrics_data(smu, 1585 METRICS_AVERAGE_CPUCLK, 1586 (uint32_t *)data); 1587 *size = smu->cpu_core_num * sizeof(uint16_t); 1588 break; 1589 default: 1590 ret = -EOPNOTSUPP; 1591 break; 1592 } 1593 1594 return ret; 1595 } 1596 1597 static int vangogh_get_apu_thermal_limit(struct smu_context *smu, uint32_t *limit) 1598 { 1599 return smu_cmn_send_smc_msg_with_param(smu, 1600 SMU_MSG_GetThermalLimit, 1601 0, limit); 1602 } 1603 1604 static int vangogh_set_apu_thermal_limit(struct smu_context *smu, uint32_t limit) 1605 { 1606 return smu_cmn_send_smc_msg_with_param(smu, 1607 SMU_MSG_SetReducedThermalLimit, 1608 limit, NULL); 1609 } 1610 1611 1612 static int vangogh_set_watermarks_table(struct smu_context *smu, 1613 struct pp_smu_wm_range_sets *clock_ranges) 1614 { 1615 int i; 1616 int ret = 0; 1617 Watermarks_t *table = smu->smu_table.watermarks_table; 1618 1619 if (!table || !clock_ranges) 1620 return -EINVAL; 1621 1622 if (clock_ranges) { 1623 if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES || 1624 clock_ranges->num_writer_wm_sets > NUM_WM_RANGES) 1625 return -EINVAL; 1626 1627 for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) { 1628 table->WatermarkRow[WM_DCFCLK][i].MinClock = 1629 clock_ranges->reader_wm_sets[i].min_drain_clk_mhz; 1630 table->WatermarkRow[WM_DCFCLK][i].MaxClock = 1631 clock_ranges->reader_wm_sets[i].max_drain_clk_mhz; 1632 table->WatermarkRow[WM_DCFCLK][i].MinMclk = 1633 clock_ranges->reader_wm_sets[i].min_fill_clk_mhz; 1634 table->WatermarkRow[WM_DCFCLK][i].MaxMclk = 1635 clock_ranges->reader_wm_sets[i].max_fill_clk_mhz; 1636 1637 table->WatermarkRow[WM_DCFCLK][i].WmSetting = 1638 clock_ranges->reader_wm_sets[i].wm_inst; 1639 } 1640 1641 for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) { 1642 table->WatermarkRow[WM_SOCCLK][i].MinClock = 1643 clock_ranges->writer_wm_sets[i].min_fill_clk_mhz; 1644 table->WatermarkRow[WM_SOCCLK][i].MaxClock = 1645 clock_ranges->writer_wm_sets[i].max_fill_clk_mhz; 1646 table->WatermarkRow[WM_SOCCLK][i].MinMclk = 1647 clock_ranges->writer_wm_sets[i].min_drain_clk_mhz; 1648 table->WatermarkRow[WM_SOCCLK][i].MaxMclk = 1649 clock_ranges->writer_wm_sets[i].max_drain_clk_mhz; 1650 1651 table->WatermarkRow[WM_SOCCLK][i].WmSetting = 1652 clock_ranges->writer_wm_sets[i].wm_inst; 1653 } 1654 1655 smu->watermarks_bitmap |= WATERMARKS_EXIST; 1656 } 1657 1658 /* pass data to smu controller */ 1659 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && 1660 !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { 1661 ret = smu_cmn_write_watermarks_table(smu); 1662 if (ret) { 1663 dev_err(smu->adev->dev, "Failed to update WMTABLE!"); 1664 return ret; 1665 } 1666 smu->watermarks_bitmap |= WATERMARKS_LOADED; 1667 } 1668 1669 return 0; 1670 } 1671 1672 static ssize_t vangogh_get_legacy_gpu_metrics_v2_3(struct smu_context *smu, 1673 void **table) 1674 { 1675 struct smu_table_context *smu_table = &smu->smu_table; 1676 struct gpu_metrics_v2_3 *gpu_metrics = 1677 (struct gpu_metrics_v2_3 *)smu_table->gpu_metrics_table; 1678 SmuMetrics_legacy_t metrics; 1679 int ret = 0; 1680 1681 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 1682 if (ret) 1683 return ret; 1684 1685 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 3); 1686 1687 gpu_metrics->temperature_gfx = metrics.GfxTemperature; 1688 gpu_metrics->temperature_soc = metrics.SocTemperature; 1689 memcpy(&gpu_metrics->temperature_core[0], 1690 &metrics.CoreTemperature[0], 1691 sizeof(uint16_t) * 4); 1692 gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0]; 1693 1694 gpu_metrics->average_gfx_activity = metrics.GfxActivity; 1695 gpu_metrics->average_mm_activity = metrics.UvdActivity; 1696 1697 gpu_metrics->average_socket_power = metrics.CurrentSocketPower; 1698 gpu_metrics->average_cpu_power = metrics.Power[0]; 1699 gpu_metrics->average_soc_power = metrics.Power[1]; 1700 gpu_metrics->average_gfx_power = metrics.Power[2]; 1701 memcpy(&gpu_metrics->average_core_power[0], 1702 &metrics.CorePower[0], 1703 sizeof(uint16_t) * 4); 1704 1705 gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency; 1706 gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency; 1707 gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency; 1708 gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency; 1709 gpu_metrics->average_vclk_frequency = metrics.VclkFrequency; 1710 gpu_metrics->average_dclk_frequency = metrics.DclkFrequency; 1711 1712 memcpy(&gpu_metrics->current_coreclk[0], 1713 &metrics.CoreFrequency[0], 1714 sizeof(uint16_t) * 4); 1715 gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0]; 1716 1717 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 1718 gpu_metrics->indep_throttle_status = 1719 smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus, 1720 vangogh_throttler_map); 1721 1722 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1723 1724 *table = (void *)gpu_metrics; 1725 1726 return sizeof(struct gpu_metrics_v2_3); 1727 } 1728 1729 static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu, 1730 void **table) 1731 { 1732 struct smu_table_context *smu_table = &smu->smu_table; 1733 struct gpu_metrics_v2_2 *gpu_metrics = 1734 (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table; 1735 SmuMetrics_legacy_t metrics; 1736 int ret = 0; 1737 1738 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 1739 if (ret) 1740 return ret; 1741 1742 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2); 1743 1744 gpu_metrics->temperature_gfx = metrics.GfxTemperature; 1745 gpu_metrics->temperature_soc = metrics.SocTemperature; 1746 memcpy(&gpu_metrics->temperature_core[0], 1747 &metrics.CoreTemperature[0], 1748 sizeof(uint16_t) * 4); 1749 gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0]; 1750 1751 gpu_metrics->average_gfx_activity = metrics.GfxActivity; 1752 gpu_metrics->average_mm_activity = metrics.UvdActivity; 1753 1754 gpu_metrics->average_socket_power = metrics.CurrentSocketPower; 1755 gpu_metrics->average_cpu_power = metrics.Power[0]; 1756 gpu_metrics->average_soc_power = metrics.Power[1]; 1757 gpu_metrics->average_gfx_power = metrics.Power[2]; 1758 memcpy(&gpu_metrics->average_core_power[0], 1759 &metrics.CorePower[0], 1760 sizeof(uint16_t) * 4); 1761 1762 gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency; 1763 gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency; 1764 gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency; 1765 gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency; 1766 gpu_metrics->average_vclk_frequency = metrics.VclkFrequency; 1767 gpu_metrics->average_dclk_frequency = metrics.DclkFrequency; 1768 1769 memcpy(&gpu_metrics->current_coreclk[0], 1770 &metrics.CoreFrequency[0], 1771 sizeof(uint16_t) * 4); 1772 gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0]; 1773 1774 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 1775 gpu_metrics->indep_throttle_status = 1776 smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus, 1777 vangogh_throttler_map); 1778 1779 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1780 1781 *table = (void *)gpu_metrics; 1782 1783 return sizeof(struct gpu_metrics_v2_2); 1784 } 1785 1786 static ssize_t vangogh_get_gpu_metrics_v2_3(struct smu_context *smu, 1787 void **table) 1788 { 1789 struct smu_table_context *smu_table = &smu->smu_table; 1790 struct gpu_metrics_v2_3 *gpu_metrics = 1791 (struct gpu_metrics_v2_3 *)smu_table->gpu_metrics_table; 1792 SmuMetrics_t metrics; 1793 int ret = 0; 1794 1795 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 1796 if (ret) 1797 return ret; 1798 1799 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 3); 1800 1801 gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature; 1802 gpu_metrics->temperature_soc = metrics.Current.SocTemperature; 1803 memcpy(&gpu_metrics->temperature_core[0], 1804 &metrics.Current.CoreTemperature[0], 1805 sizeof(uint16_t) * 4); 1806 gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0]; 1807 1808 gpu_metrics->average_temperature_gfx = metrics.Average.GfxTemperature; 1809 gpu_metrics->average_temperature_soc = metrics.Average.SocTemperature; 1810 memcpy(&gpu_metrics->average_temperature_core[0], 1811 &metrics.Average.CoreTemperature[0], 1812 sizeof(uint16_t) * 4); 1813 gpu_metrics->average_temperature_l3[0] = metrics.Average.L3Temperature[0]; 1814 1815 gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity; 1816 gpu_metrics->average_mm_activity = metrics.Current.UvdActivity; 1817 1818 gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower; 1819 gpu_metrics->average_cpu_power = metrics.Current.Power[0]; 1820 gpu_metrics->average_soc_power = metrics.Current.Power[1]; 1821 gpu_metrics->average_gfx_power = metrics.Current.Power[2]; 1822 memcpy(&gpu_metrics->average_core_power[0], 1823 &metrics.Average.CorePower[0], 1824 sizeof(uint16_t) * 4); 1825 1826 gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency; 1827 gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency; 1828 gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency; 1829 gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency; 1830 gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency; 1831 gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency; 1832 1833 gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency; 1834 gpu_metrics->current_socclk = metrics.Current.SocclkFrequency; 1835 gpu_metrics->current_uclk = metrics.Current.MemclkFrequency; 1836 gpu_metrics->current_fclk = metrics.Current.MemclkFrequency; 1837 gpu_metrics->current_vclk = metrics.Current.VclkFrequency; 1838 gpu_metrics->current_dclk = metrics.Current.DclkFrequency; 1839 1840 memcpy(&gpu_metrics->current_coreclk[0], 1841 &metrics.Current.CoreFrequency[0], 1842 sizeof(uint16_t) * 4); 1843 gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0]; 1844 1845 gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus; 1846 gpu_metrics->indep_throttle_status = 1847 smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus, 1848 vangogh_throttler_map); 1849 1850 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1851 1852 *table = (void *)gpu_metrics; 1853 1854 return sizeof(struct gpu_metrics_v2_3); 1855 } 1856 1857 static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, 1858 void **table) 1859 { 1860 struct smu_table_context *smu_table = &smu->smu_table; 1861 struct gpu_metrics_v2_2 *gpu_metrics = 1862 (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table; 1863 SmuMetrics_t metrics; 1864 int ret = 0; 1865 1866 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 1867 if (ret) 1868 return ret; 1869 1870 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2); 1871 1872 gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature; 1873 gpu_metrics->temperature_soc = metrics.Current.SocTemperature; 1874 memcpy(&gpu_metrics->temperature_core[0], 1875 &metrics.Current.CoreTemperature[0], 1876 sizeof(uint16_t) * 4); 1877 gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0]; 1878 1879 gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity; 1880 gpu_metrics->average_mm_activity = metrics.Current.UvdActivity; 1881 1882 gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower; 1883 gpu_metrics->average_cpu_power = metrics.Current.Power[0]; 1884 gpu_metrics->average_soc_power = metrics.Current.Power[1]; 1885 gpu_metrics->average_gfx_power = metrics.Current.Power[2]; 1886 memcpy(&gpu_metrics->average_core_power[0], 1887 &metrics.Average.CorePower[0], 1888 sizeof(uint16_t) * 4); 1889 1890 gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency; 1891 gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency; 1892 gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency; 1893 gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency; 1894 gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency; 1895 gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency; 1896 1897 gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency; 1898 gpu_metrics->current_socclk = metrics.Current.SocclkFrequency; 1899 gpu_metrics->current_uclk = metrics.Current.MemclkFrequency; 1900 gpu_metrics->current_fclk = metrics.Current.MemclkFrequency; 1901 gpu_metrics->current_vclk = metrics.Current.VclkFrequency; 1902 gpu_metrics->current_dclk = metrics.Current.DclkFrequency; 1903 1904 memcpy(&gpu_metrics->current_coreclk[0], 1905 &metrics.Current.CoreFrequency[0], 1906 sizeof(uint16_t) * 4); 1907 gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0]; 1908 1909 gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus; 1910 gpu_metrics->indep_throttle_status = 1911 smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus, 1912 vangogh_throttler_map); 1913 1914 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1915 1916 *table = (void *)gpu_metrics; 1917 1918 return sizeof(struct gpu_metrics_v2_2); 1919 } 1920 1921 static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu, 1922 void **table) 1923 { 1924 uint32_t if_version; 1925 uint32_t smu_version; 1926 int ret = 0; 1927 1928 ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); 1929 if (ret) { 1930 return ret; 1931 } 1932 1933 if (smu_version >= 0x043F3E00) { 1934 if (if_version < 0x3) 1935 ret = vangogh_get_legacy_gpu_metrics_v2_3(smu, table); 1936 else 1937 ret = vangogh_get_gpu_metrics_v2_3(smu, table); 1938 } else { 1939 if (if_version < 0x3) 1940 ret = vangogh_get_legacy_gpu_metrics(smu, table); 1941 else 1942 ret = vangogh_get_gpu_metrics(smu, table); 1943 } 1944 1945 return ret; 1946 } 1947 1948 static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, 1949 long input[], uint32_t size) 1950 { 1951 int ret = 0; 1952 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1953 1954 if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) { 1955 dev_warn(smu->adev->dev, 1956 "pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n"); 1957 return -EINVAL; 1958 } 1959 1960 switch (type) { 1961 case PP_OD_EDIT_CCLK_VDDC_TABLE: 1962 if (size != 3) { 1963 dev_err(smu->adev->dev, "Input parameter number not correct (should be 4 for processor)\n"); 1964 return -EINVAL; 1965 } 1966 if (input[0] >= smu->cpu_core_num) { 1967 dev_err(smu->adev->dev, "core index is overflow, should be less than %d\n", 1968 smu->cpu_core_num); 1969 } 1970 smu->cpu_core_id_select = input[0]; 1971 if (input[1] == 0) { 1972 if (input[2] < smu->cpu_default_soft_min_freq) { 1973 dev_warn(smu->adev->dev, "Fine grain setting minimum cclk (%ld) MHz is less than the minimum allowed (%d) MHz\n", 1974 input[2], smu->cpu_default_soft_min_freq); 1975 return -EINVAL; 1976 } 1977 smu->cpu_actual_soft_min_freq = input[2]; 1978 } else if (input[1] == 1) { 1979 if (input[2] > smu->cpu_default_soft_max_freq) { 1980 dev_warn(smu->adev->dev, "Fine grain setting maximum cclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n", 1981 input[2], smu->cpu_default_soft_max_freq); 1982 return -EINVAL; 1983 } 1984 smu->cpu_actual_soft_max_freq = input[2]; 1985 } else { 1986 return -EINVAL; 1987 } 1988 break; 1989 case PP_OD_EDIT_SCLK_VDDC_TABLE: 1990 if (size != 2) { 1991 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 1992 return -EINVAL; 1993 } 1994 1995 if (input[0] == 0) { 1996 if (input[1] < smu->gfx_default_hard_min_freq) { 1997 dev_warn(smu->adev->dev, 1998 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n", 1999 input[1], smu->gfx_default_hard_min_freq); 2000 return -EINVAL; 2001 } 2002 smu->gfx_actual_hard_min_freq = input[1]; 2003 } else if (input[0] == 1) { 2004 if (input[1] > smu->gfx_default_soft_max_freq) { 2005 dev_warn(smu->adev->dev, 2006 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n", 2007 input[1], smu->gfx_default_soft_max_freq); 2008 return -EINVAL; 2009 } 2010 smu->gfx_actual_soft_max_freq = input[1]; 2011 } else { 2012 return -EINVAL; 2013 } 2014 break; 2015 case PP_OD_RESTORE_DEFAULT_TABLE: 2016 if (size != 0) { 2017 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 2018 return -EINVAL; 2019 } else { 2020 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 2021 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 2022 smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; 2023 smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; 2024 } 2025 break; 2026 case PP_OD_COMMIT_DPM_TABLE: 2027 if (size != 0) { 2028 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 2029 return -EINVAL; 2030 } else { 2031 if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) { 2032 dev_err(smu->adev->dev, 2033 "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n", 2034 smu->gfx_actual_hard_min_freq, 2035 smu->gfx_actual_soft_max_freq); 2036 return -EINVAL; 2037 } 2038 2039 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, 2040 smu->gfx_actual_hard_min_freq, NULL); 2041 if (ret) { 2042 dev_err(smu->adev->dev, "Set hard min sclk failed!"); 2043 return ret; 2044 } 2045 2046 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, 2047 smu->gfx_actual_soft_max_freq, NULL); 2048 if (ret) { 2049 dev_err(smu->adev->dev, "Set soft max sclk failed!"); 2050 return ret; 2051 } 2052 2053 if (smu->adev->pm.fw_version < 0x43f1b00) { 2054 dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n"); 2055 break; 2056 } 2057 2058 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk, 2059 ((smu->cpu_core_id_select << 20) 2060 | smu->cpu_actual_soft_min_freq), 2061 NULL); 2062 if (ret) { 2063 dev_err(smu->adev->dev, "Set hard min cclk failed!"); 2064 return ret; 2065 } 2066 2067 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk, 2068 ((smu->cpu_core_id_select << 20) 2069 | smu->cpu_actual_soft_max_freq), 2070 NULL); 2071 if (ret) { 2072 dev_err(smu->adev->dev, "Set soft max cclk failed!"); 2073 return ret; 2074 } 2075 } 2076 break; 2077 default: 2078 return -ENOSYS; 2079 } 2080 2081 return ret; 2082 } 2083 2084 static int vangogh_set_default_dpm_tables(struct smu_context *smu) 2085 { 2086 struct smu_table_context *smu_table = &smu->smu_table; 2087 2088 return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); 2089 } 2090 2091 static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) 2092 { 2093 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 2094 2095 smu->gfx_default_hard_min_freq = clk_table->MinGfxClk; 2096 smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk; 2097 smu->gfx_actual_hard_min_freq = 0; 2098 smu->gfx_actual_soft_max_freq = 0; 2099 2100 smu->cpu_default_soft_min_freq = 1400; 2101 smu->cpu_default_soft_max_freq = 3500; 2102 smu->cpu_actual_soft_min_freq = 0; 2103 smu->cpu_actual_soft_max_freq = 0; 2104 2105 return 0; 2106 } 2107 2108 static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table) 2109 { 2110 DpmClocks_t *table = smu->smu_table.clocks_table; 2111 int i; 2112 2113 if (!clock_table || !table) 2114 return -EINVAL; 2115 2116 for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) { 2117 clock_table->SocClocks[i].Freq = table->SocClocks[i]; 2118 clock_table->SocClocks[i].Vol = table->SocVoltage[i]; 2119 } 2120 2121 for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) { 2122 clock_table->FClocks[i].Freq = table->DfPstateTable[i].fclk; 2123 clock_table->FClocks[i].Vol = table->DfPstateTable[i].voltage; 2124 } 2125 2126 for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) { 2127 clock_table->MemClocks[i].Freq = table->DfPstateTable[i].memclk; 2128 clock_table->MemClocks[i].Vol = table->DfPstateTable[i].voltage; 2129 } 2130 2131 return 0; 2132 } 2133 2134 2135 static int vangogh_system_features_control(struct smu_context *smu, bool en) 2136 { 2137 struct amdgpu_device *adev = smu->adev; 2138 int ret = 0; 2139 2140 if (adev->pm.fw_version >= 0x43f1700 && !en) 2141 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify, 2142 RLC_STATUS_OFF, NULL); 2143 2144 return ret; 2145 } 2146 2147 static int vangogh_post_smu_init(struct smu_context *smu) 2148 { 2149 struct amdgpu_device *adev = smu->adev; 2150 uint32_t tmp; 2151 int ret = 0; 2152 uint8_t aon_bits = 0; 2153 /* Two CUs in one WGP */ 2154 uint32_t req_active_wgps = adev->gfx.cu_info.number/2; 2155 uint32_t total_cu = adev->gfx.config.max_cu_per_sh * 2156 adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines; 2157 2158 /* allow message will be sent after enable message on Vangogh*/ 2159 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) && 2160 (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { 2161 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL); 2162 if (ret) { 2163 dev_err(adev->dev, "Failed to Enable GfxOff!\n"); 2164 return ret; 2165 } 2166 } else { 2167 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 2168 dev_info(adev->dev, "If GFX DPM or power gate disabled, disable GFXOFF\n"); 2169 } 2170 2171 /* if all CUs are active, no need to power off any WGPs */ 2172 if (total_cu == adev->gfx.cu_info.number) 2173 return 0; 2174 2175 /* 2176 * Calculate the total bits number of always on WGPs for all SA/SEs in 2177 * RLC_PG_ALWAYS_ON_WGP_MASK. 2178 */ 2179 tmp = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_ALWAYS_ON_WGP_MASK)); 2180 tmp &= RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK_MASK; 2181 2182 aon_bits = hweight32(tmp) * adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines; 2183 2184 /* Do not request any WGPs less than set in the AON_WGP_MASK */ 2185 if (aon_bits > req_active_wgps) { 2186 dev_info(adev->dev, "Number of always on WGPs greater than active WGPs: WGP power save not requested.\n"); 2187 return 0; 2188 } else { 2189 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestActiveWgp, req_active_wgps, NULL); 2190 } 2191 } 2192 2193 static int vangogh_mode_reset(struct smu_context *smu, int type) 2194 { 2195 int ret = 0, index = 0; 2196 2197 index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, 2198 SMU_MSG_GfxDeviceDriverReset); 2199 if (index < 0) 2200 return index == -EACCES ? 0 : index; 2201 2202 mutex_lock(&smu->message_lock); 2203 2204 ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type); 2205 2206 mutex_unlock(&smu->message_lock); 2207 2208 mdelay(10); 2209 2210 return ret; 2211 } 2212 2213 static int vangogh_mode2_reset(struct smu_context *smu) 2214 { 2215 return vangogh_mode_reset(smu, SMU_RESET_MODE_2); 2216 } 2217 2218 /** 2219 * vangogh_get_gfxoff_status - Get gfxoff status 2220 * 2221 * @smu: amdgpu_device pointer 2222 * 2223 * Get current gfxoff status 2224 * 2225 * Return: 2226 * * 0 - GFXOFF (default if enabled). 2227 * * 1 - Transition out of GFX State. 2228 * * 2 - Not in GFXOFF. 2229 * * 3 - Transition into GFXOFF. 2230 */ 2231 static u32 vangogh_get_gfxoff_status(struct smu_context *smu) 2232 { 2233 struct amdgpu_device *adev = smu->adev; 2234 u32 reg, gfxoff_status; 2235 2236 reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL); 2237 gfxoff_status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK) 2238 >> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT; 2239 2240 return gfxoff_status; 2241 } 2242 2243 static int vangogh_get_power_limit(struct smu_context *smu, 2244 uint32_t *current_power_limit, 2245 uint32_t *default_power_limit, 2246 uint32_t *max_power_limit) 2247 { 2248 struct smu_11_5_power_context *power_context = 2249 smu->smu_power.power_context; 2250 uint32_t ppt_limit; 2251 int ret = 0; 2252 2253 if (smu->adev->pm.fw_version < 0x43f1e00) 2254 return ret; 2255 2256 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSlowPPTLimit, &ppt_limit); 2257 if (ret) { 2258 dev_err(smu->adev->dev, "Get slow PPT limit failed!\n"); 2259 return ret; 2260 } 2261 /* convert from milliwatt to watt */ 2262 if (current_power_limit) 2263 *current_power_limit = ppt_limit / 1000; 2264 if (default_power_limit) 2265 *default_power_limit = ppt_limit / 1000; 2266 if (max_power_limit) 2267 *max_power_limit = 29; 2268 2269 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit); 2270 if (ret) { 2271 dev_err(smu->adev->dev, "Get fast PPT limit failed!\n"); 2272 return ret; 2273 } 2274 /* convert from milliwatt to watt */ 2275 power_context->current_fast_ppt_limit = 2276 power_context->default_fast_ppt_limit = ppt_limit / 1000; 2277 power_context->max_fast_ppt_limit = 30; 2278 2279 return ret; 2280 } 2281 2282 static int vangogh_get_ppt_limit(struct smu_context *smu, 2283 uint32_t *ppt_limit, 2284 enum smu_ppt_limit_type type, 2285 enum smu_ppt_limit_level level) 2286 { 2287 struct smu_11_5_power_context *power_context = 2288 smu->smu_power.power_context; 2289 2290 if (!power_context) 2291 return -EOPNOTSUPP; 2292 2293 if (type == SMU_FAST_PPT_LIMIT) { 2294 switch (level) { 2295 case SMU_PPT_LIMIT_MAX: 2296 *ppt_limit = power_context->max_fast_ppt_limit; 2297 break; 2298 case SMU_PPT_LIMIT_CURRENT: 2299 *ppt_limit = power_context->current_fast_ppt_limit; 2300 break; 2301 case SMU_PPT_LIMIT_DEFAULT: 2302 *ppt_limit = power_context->default_fast_ppt_limit; 2303 break; 2304 default: 2305 break; 2306 } 2307 } 2308 2309 return 0; 2310 } 2311 2312 static int vangogh_set_power_limit(struct smu_context *smu, 2313 enum smu_ppt_limit_type limit_type, 2314 uint32_t ppt_limit) 2315 { 2316 struct smu_11_5_power_context *power_context = 2317 smu->smu_power.power_context; 2318 int ret = 0; 2319 2320 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { 2321 dev_err(smu->adev->dev, "Setting new power limit is not supported!\n"); 2322 return -EOPNOTSUPP; 2323 } 2324 2325 switch (limit_type) { 2326 case SMU_DEFAULT_PPT_LIMIT: 2327 ret = smu_cmn_send_smc_msg_with_param(smu, 2328 SMU_MSG_SetSlowPPTLimit, 2329 ppt_limit * 1000, /* convert from watt to milliwatt */ 2330 NULL); 2331 if (ret) 2332 return ret; 2333 2334 smu->current_power_limit = ppt_limit; 2335 break; 2336 case SMU_FAST_PPT_LIMIT: 2337 ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24); 2338 if (ppt_limit > power_context->max_fast_ppt_limit) { 2339 dev_err(smu->adev->dev, 2340 "New power limit (%d) is over the max allowed %d\n", 2341 ppt_limit, power_context->max_fast_ppt_limit); 2342 return ret; 2343 } 2344 2345 ret = smu_cmn_send_smc_msg_with_param(smu, 2346 SMU_MSG_SetFastPPTLimit, 2347 ppt_limit * 1000, /* convert from watt to milliwatt */ 2348 NULL); 2349 if (ret) 2350 return ret; 2351 2352 power_context->current_fast_ppt_limit = ppt_limit; 2353 break; 2354 default: 2355 return -EINVAL; 2356 } 2357 2358 return ret; 2359 } 2360 2361 /** 2362 * vangogh_set_gfxoff_residency 2363 * 2364 * @smu: amdgpu_device pointer 2365 * @start: start/stop residency log 2366 * 2367 * This function will be used to log gfxoff residency 2368 * 2369 * 2370 * Returns standard response codes. 2371 */ 2372 static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start) 2373 { 2374 int ret = 0; 2375 u32 residency; 2376 struct amdgpu_device *adev = smu->adev; 2377 2378 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) 2379 return 0; 2380 2381 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LogGfxOffResidency, 2382 start, &residency); 2383 2384 if (!start) 2385 adev->gfx.gfx_off_residency = residency; 2386 2387 return ret; 2388 } 2389 2390 /** 2391 * vangogh_get_gfxoff_residency 2392 * 2393 * @smu: amdgpu_device pointer 2394 * @residency: placeholder for return value 2395 * 2396 * This function will be used to get gfxoff residency. 2397 * 2398 * Returns standard response codes. 2399 */ 2400 static u32 vangogh_get_gfxoff_residency(struct smu_context *smu, uint32_t *residency) 2401 { 2402 struct amdgpu_device *adev = smu->adev; 2403 2404 *residency = adev->gfx.gfx_off_residency; 2405 2406 return 0; 2407 } 2408 2409 /** 2410 * vangogh_get_gfxoff_entrycount - get gfxoff entry count 2411 * 2412 * @smu: amdgpu_device pointer 2413 * @entrycount: placeholder for return value 2414 * 2415 * This function will be used to get gfxoff entry count 2416 * 2417 * Returns standard response codes. 2418 */ 2419 static u32 vangogh_get_gfxoff_entrycount(struct smu_context *smu, uint64_t *entrycount) 2420 { 2421 int ret = 0, value = 0; 2422 struct amdgpu_device *adev = smu->adev; 2423 2424 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) 2425 return 0; 2426 2427 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetGfxOffEntryCount, &value); 2428 *entrycount = value + adev->gfx.gfx_off_entrycount; 2429 2430 return ret; 2431 } 2432 2433 static const struct pptable_funcs vangogh_ppt_funcs = { 2434 2435 .check_fw_status = smu_v11_0_check_fw_status, 2436 .check_fw_version = smu_v11_0_check_fw_version, 2437 .init_smc_tables = vangogh_init_smc_tables, 2438 .fini_smc_tables = smu_v11_0_fini_smc_tables, 2439 .init_power = smu_v11_0_init_power, 2440 .fini_power = smu_v11_0_fini_power, 2441 .register_irq_handler = smu_v11_0_register_irq_handler, 2442 .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, 2443 .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param, 2444 .send_smc_msg = smu_cmn_send_smc_msg, 2445 .dpm_set_vcn_enable = vangogh_dpm_set_vcn_enable, 2446 .dpm_set_jpeg_enable = vangogh_dpm_set_jpeg_enable, 2447 .is_dpm_running = vangogh_is_dpm_running, 2448 .read_sensor = vangogh_read_sensor, 2449 .get_apu_thermal_limit = vangogh_get_apu_thermal_limit, 2450 .set_apu_thermal_limit = vangogh_set_apu_thermal_limit, 2451 .get_enabled_mask = smu_cmn_get_enabled_mask, 2452 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, 2453 .set_watermarks_table = vangogh_set_watermarks_table, 2454 .set_driver_table_location = smu_v11_0_set_driver_table_location, 2455 .interrupt_work = smu_v11_0_interrupt_work, 2456 .get_gpu_metrics = vangogh_common_get_gpu_metrics, 2457 .od_edit_dpm_table = vangogh_od_edit_dpm_table, 2458 .print_clk_levels = vangogh_common_print_clk_levels, 2459 .set_default_dpm_table = vangogh_set_default_dpm_tables, 2460 .set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters, 2461 .system_features_control = vangogh_system_features_control, 2462 .feature_is_enabled = smu_cmn_feature_is_enabled, 2463 .set_power_profile_mode = vangogh_set_power_profile_mode, 2464 .get_power_profile_mode = vangogh_get_power_profile_mode, 2465 .get_dpm_clock_table = vangogh_get_dpm_clock_table, 2466 .force_clk_levels = vangogh_force_clk_levels, 2467 .set_performance_level = vangogh_set_performance_level, 2468 .post_init = vangogh_post_smu_init, 2469 .mode2_reset = vangogh_mode2_reset, 2470 .gfx_off_control = smu_v11_0_gfx_off_control, 2471 .get_gfx_off_status = vangogh_get_gfxoff_status, 2472 .get_gfx_off_entrycount = vangogh_get_gfxoff_entrycount, 2473 .get_gfx_off_residency = vangogh_get_gfxoff_residency, 2474 .set_gfx_off_residency = vangogh_set_gfxoff_residency, 2475 .get_ppt_limit = vangogh_get_ppt_limit, 2476 .get_power_limit = vangogh_get_power_limit, 2477 .set_power_limit = vangogh_set_power_limit, 2478 .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values, 2479 }; 2480 2481 void vangogh_set_ppt_funcs(struct smu_context *smu) 2482 { 2483 smu->ppt_funcs = &vangogh_ppt_funcs; 2484 smu->message_map = vangogh_message_map; 2485 smu->feature_map = vangogh_feature_mask_map; 2486 smu->table_map = vangogh_table_map; 2487 smu->workload_map = vangogh_workload_map; 2488 smu->is_apu = true; 2489 smu_v11_0_set_smu_mailbox_registers(smu); 2490 } 2491