1 /* 2 * Copyright 2020 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #define SWSMU_CODE_LAYER_L2 25 26 #include "amdgpu.h" 27 #include "amdgpu_smu.h" 28 #include "smu_v11_0.h" 29 #include "smu11_driver_if_vangogh.h" 30 #include "vangogh_ppt.h" 31 #include "smu_v11_5_ppsmc.h" 32 #include "smu_v11_5_pmfw.h" 33 #include "smu_cmn.h" 34 #include "soc15_common.h" 35 #include "asic_reg/gc/gc_10_3_0_offset.h" 36 #include "asic_reg/gc/gc_10_3_0_sh_mask.h" 37 #include <asm/processor.h> 38 39 /* 40 * DO NOT use these for err/warn/info/debug messages. 41 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 42 * They are more MGPU friendly. 
43 */ 44 #undef pr_err 45 #undef pr_warn 46 #undef pr_info 47 #undef pr_debug 48 49 // Registers related to GFXOFF 50 // addressBlock: smuio_smuio_SmuSmuioDec 51 // base address: 0x5a000 52 #define mmSMUIO_GFX_MISC_CNTL 0x00c5 53 #define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0 54 55 //SMUIO_GFX_MISC_CNTL 56 #define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff__SHIFT 0x0 57 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1 58 #define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff_MASK 0x00000001L 59 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L 60 61 #define FEATURE_MASK(feature) (1ULL << feature) 62 #define SMC_DPM_FEATURE ( \ 63 FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ 64 FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \ 65 FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ 66 FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \ 67 FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \ 68 FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \ 69 FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \ 70 FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \ 71 FEATURE_MASK(FEATURE_GFX_DPM_BIT)) 72 73 static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = { 74 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), 75 MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0), 76 MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 0), 77 MSG_MAP(EnableGfxOff, PPSMC_MSG_EnableGfxOff, 0), 78 MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0), 79 MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0), 80 MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 0), 81 MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 0), 82 MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0), 83 MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0), 84 MSG_MAP(RlcPowerNotify, PPSMC_MSG_RlcPowerNotify, 0), 85 MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 0), 86 MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 0), 87 MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 0), 88 MSG_MAP(SetHardMinIspiclkByFreq, PPSMC_MSG_SetHardMinIspiclkByFreq, 0), 89 
MSG_MAP(SetHardMinIspxclkByFreq, PPSMC_MSG_SetHardMinIspxclkByFreq, 0), 90 MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 0), 91 MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 0), 92 MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0), 93 MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), 94 MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset, 0), 95 MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 0), 96 MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 0), 97 MSG_MAP(SetSoftMinFclk, PPSMC_MSG_SetSoftMinFclk, 0), 98 MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 0), 99 MSG_MAP(EnablePostCode, PPSMC_MSG_EnablePostCode, 0), 100 MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency, 0), 101 MSG_MAP(GetFclkFrequency, PPSMC_MSG_GetFclkFrequency, 0), 102 MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0), 103 MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 0), 104 MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 0), 105 MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 0), 106 MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 0), 107 MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage, 0), 108 MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0), 109 MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0), 110 MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 0), 111 MSG_MAP(SetSoftMinSocclkByFreq, PPSMC_MSG_SetSoftMinSocclkByFreq, 0), 112 MSG_MAP(PowerUpCvip, PPSMC_MSG_PowerUpCvip, 0), 113 MSG_MAP(PowerDownCvip, PPSMC_MSG_PowerDownCvip, 0), 114 MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), 115 MSG_MAP(GetThermalLimit, PPSMC_MSG_GetThermalLimit, 0), 116 MSG_MAP(GetCurrentTemperature, PPSMC_MSG_GetCurrentTemperature, 0), 117 MSG_MAP(GetCurrentPower, PPSMC_MSG_GetCurrentPower, 0), 118 MSG_MAP(GetCurrentVoltage, PPSMC_MSG_GetCurrentVoltage, 0), 119 MSG_MAP(GetCurrentCurrent, PPSMC_MSG_GetCurrentCurrent, 0), 120 
MSG_MAP(GetAverageCpuActivity, PPSMC_MSG_GetAverageCpuActivity, 0), 121 MSG_MAP(GetAverageGfxActivity, PPSMC_MSG_GetAverageGfxActivity, 0), 122 MSG_MAP(GetAveragePower, PPSMC_MSG_GetAveragePower, 0), 123 MSG_MAP(GetAverageTemperature, PPSMC_MSG_GetAverageTemperature, 0), 124 MSG_MAP(SetAveragePowerTimeConstant, PPSMC_MSG_SetAveragePowerTimeConstant, 0), 125 MSG_MAP(SetAverageActivityTimeConstant, PPSMC_MSG_SetAverageActivityTimeConstant, 0), 126 MSG_MAP(SetAverageTemperatureTimeConstant, PPSMC_MSG_SetAverageTemperatureTimeConstant, 0), 127 MSG_MAP(SetMitigationEndHysteresis, PPSMC_MSG_SetMitigationEndHysteresis, 0), 128 MSG_MAP(GetCurrentFreq, PPSMC_MSG_GetCurrentFreq, 0), 129 MSG_MAP(SetReducedPptLimit, PPSMC_MSG_SetReducedPptLimit, 0), 130 MSG_MAP(SetReducedThermalLimit, PPSMC_MSG_SetReducedThermalLimit, 0), 131 MSG_MAP(DramLogSetDramAddr, PPSMC_MSG_DramLogSetDramAddr, 0), 132 MSG_MAP(StartDramLogging, PPSMC_MSG_StartDramLogging, 0), 133 MSG_MAP(StopDramLogging, PPSMC_MSG_StopDramLogging, 0), 134 MSG_MAP(SetSoftMinCclk, PPSMC_MSG_SetSoftMinCclk, 0), 135 MSG_MAP(SetSoftMaxCclk, PPSMC_MSG_SetSoftMaxCclk, 0), 136 MSG_MAP(RequestActiveWgp, PPSMC_MSG_RequestActiveWgp, 0), 137 MSG_MAP(SetFastPPTLimit, PPSMC_MSG_SetFastPPTLimit, 0), 138 MSG_MAP(SetSlowPPTLimit, PPSMC_MSG_SetSlowPPTLimit, 0), 139 MSG_MAP(GetFastPPTLimit, PPSMC_MSG_GetFastPPTLimit, 0), 140 MSG_MAP(GetSlowPPTLimit, PPSMC_MSG_GetSlowPPTLimit, 0), 141 MSG_MAP(GetGfxOffStatus, PPSMC_MSG_GetGfxOffStatus, 0), 142 MSG_MAP(GetGfxOffEntryCount, PPSMC_MSG_GetGfxOffEntryCount, 0), 143 MSG_MAP(LogGfxOffResidency, PPSMC_MSG_LogGfxOffResidency, 0), 144 }; 145 146 static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = { 147 FEA_MAP(PPT), 148 FEA_MAP(TDC), 149 FEA_MAP(THERMAL), 150 FEA_MAP(DS_GFXCLK), 151 FEA_MAP(DS_SOCCLK), 152 FEA_MAP(DS_LCLK), 153 FEA_MAP(DS_FCLK), 154 FEA_MAP(DS_MP1CLK), 155 FEA_MAP(DS_MP0CLK), 156 FEA_MAP(ATHUB_PG), 157 FEA_MAP(CCLK_DPM), 158 FEA_MAP(FAN_CONTROLLER), 159 
FEA_MAP(ULV), 160 FEA_MAP(VCN_DPM), 161 FEA_MAP(LCLK_DPM), 162 FEA_MAP(SHUBCLK_DPM), 163 FEA_MAP(DCFCLK_DPM), 164 FEA_MAP(DS_DCFCLK), 165 FEA_MAP(S0I2), 166 FEA_MAP(SMU_LOW_POWER), 167 FEA_MAP(GFX_DEM), 168 FEA_MAP(PSI), 169 FEA_MAP(PROCHOT), 170 FEA_MAP(CPUOFF), 171 FEA_MAP(STAPM), 172 FEA_MAP(S0I3), 173 FEA_MAP(DF_CSTATES), 174 FEA_MAP(PERF_LIMIT), 175 FEA_MAP(CORE_DLDO), 176 FEA_MAP(RSMU_LOW_POWER), 177 FEA_MAP(SMN_LOW_POWER), 178 FEA_MAP(THM_LOW_POWER), 179 FEA_MAP(SMUIO_LOW_POWER), 180 FEA_MAP(MP1_LOW_POWER), 181 FEA_MAP(DS_VCN), 182 FEA_MAP(CPPC), 183 FEA_MAP(OS_CSTATES), 184 FEA_MAP(ISP_DPM), 185 FEA_MAP(A55_DPM), 186 FEA_MAP(CVIP_DSP_DPM), 187 FEA_MAP(MSMU_LOW_POWER), 188 FEA_MAP_REVERSE(SOCCLK), 189 FEA_MAP_REVERSE(FCLK), 190 FEA_MAP_HALF_REVERSE(GFX), 191 }; 192 193 static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = { 194 TAB_MAP_VALID(WATERMARKS), 195 TAB_MAP_VALID(SMU_METRICS), 196 TAB_MAP_VALID(CUSTOM_DPM), 197 TAB_MAP_VALID(DPMCLOCKS), 198 }; 199 200 static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { 201 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), 202 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), 203 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), 204 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), 205 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), 206 }; 207 208 static const uint8_t vangogh_throttler_map[] = { 209 [THROTTLER_STATUS_BIT_SPL] = (SMU_THROTTLER_SPL_BIT), 210 [THROTTLER_STATUS_BIT_FPPT] = (SMU_THROTTLER_FPPT_BIT), 211 [THROTTLER_STATUS_BIT_SPPT] = (SMU_THROTTLER_SPPT_BIT), 212 [THROTTLER_STATUS_BIT_SPPT_APU] = (SMU_THROTTLER_SPPT_APU_BIT), 213 [THROTTLER_STATUS_BIT_THM_CORE] = (SMU_THROTTLER_TEMP_CORE_BIT), 214 [THROTTLER_STATUS_BIT_THM_GFX] = (SMU_THROTTLER_TEMP_GPU_BIT), 215 [THROTTLER_STATUS_BIT_THM_SOC] = (SMU_THROTTLER_TEMP_SOC_BIT), 216 
[THROTTLER_STATUS_BIT_TDC_VDD] = (SMU_THROTTLER_TDC_VDD_BIT), 217 [THROTTLER_STATUS_BIT_TDC_SOC] = (SMU_THROTTLER_TDC_SOC_BIT), 218 [THROTTLER_STATUS_BIT_TDC_GFX] = (SMU_THROTTLER_TDC_GFX_BIT), 219 [THROTTLER_STATUS_BIT_TDC_CVIP] = (SMU_THROTTLER_TDC_CVIP_BIT), 220 }; 221 222 static int vangogh_tables_init(struct smu_context *smu) 223 { 224 struct smu_table_context *smu_table = &smu->smu_table; 225 struct smu_table *tables = smu_table->tables; 226 uint32_t if_version; 227 uint32_t smu_version; 228 uint32_t ret = 0; 229 230 ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); 231 if (ret) { 232 return ret; 233 } 234 235 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), 236 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 237 SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t), 238 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 239 SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, 240 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 241 SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t), 242 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 243 244 if (if_version < 0x3) { 245 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t), 246 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 247 smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL); 248 } else { 249 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), 250 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 251 smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); 252 } 253 if (!smu_table->metrics_table) 254 goto err0_out; 255 smu_table->metrics_time = 0; 256 257 if (smu_version >= 0x043F3E00) 258 smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_3); 259 else 260 smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2); 261 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 262 if (!smu_table->gpu_metrics_table) 263 goto err1_out; 264 265 
	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
	if (!smu_table->watermarks_table)
		goto err2_out;

	smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
	if (!smu_table->clocks_table)
		goto err3_out;

	return 0;

	/* unwind in reverse allocation order; err0_out frees nothing */
err3_out:
	kfree(smu_table->watermarks_table);
err2_out:
	kfree(smu_table->gpu_metrics_table);
err1_out:
	kfree(smu_table->metrics_table);
err0_out:
	return -ENOMEM;
}

/*
 * vangogh_get_legacy_smu_metrics_data - read a single metric out of the
 * cached metrics table, using the SmuMetrics_legacy_t layout (driver
 * interface version < 0x3).
 *
 * @member: which metric to fetch
 * @value:  output; set to UINT_MAX for unsupported members
 *
 * Returns 0 on success or the error from refreshing the metrics table.
 */
static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu,
					       MetricsMember_t member,
					       uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table;
	int ret = 0;

	/* refresh smu_table->metrics_table from firmware if the cache is stale */
	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->GfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->SocclkFrequency;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->VclkFrequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->DclkFrequency;
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->MemclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		/* legacy layout is scaled by 100 relative to the newer
		 * SmuMetrics_t layout, which reports GfxActivity directly */
		*value = metrics->GfxActivity / 100;
		break;
	case METRICS_AVERAGE_VCNACTIVITY:
		*value = metrics->UvdActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* presumably mW -> 8.8 fixed-point W; confirm against PMFW spec */
		*value = (metrics->CurrentSocketPower << 8) /
		1000;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->GfxTemperature / 100 *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->SocTemperature / 100 *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->ThrottlerStatus;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		*value = metrics->Voltage[2];
		break;
	case METRICS_VOLTAGE_VDDSOC:
		*value = metrics->Voltage[1];
		break;
	case METRICS_AVERAGE_CPUCLK:
		/* NOTE(review): copies cpu_core_num u16 entries through a
		 * uint32_t *; caller must provide a large enough buffer */
		memcpy(value, &metrics->CoreFrequency[0],
		       smu->cpu_core_num * sizeof(uint16_t));
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}

/*
 * vangogh_get_smu_metrics_data - read a single metric out of the cached
 * metrics table, using the SmuMetrics_t layout (driver interface
 * version >= 0x3); mirrors the legacy reader above.
 */
static int vangogh_get_smu_metrics_data(struct smu_context *smu,
					MetricsMember_t member,
					uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
	int ret = 0;

	/* refresh smu_table->metrics_table from firmware if the cache is stale */
	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->Current.GfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->Current.SocclkFrequency;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->Current.VclkFrequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->Current.DclkFrequency;
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->Current.MemclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->Current.GfxActivity;
		break;
	case METRICS_AVERAGE_VCNACTIVITY:
		*value = metrics->Current.UvdActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* presumably mW -> 8.8 fixed-point W; confirm against PMFW spec */
		*value = (metrics->Current.CurrentSocketPower << 8) /
		1000;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->Current.GfxTemperature / 100 *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->Current.SocTemperature / 100 *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->Current.ThrottlerStatus;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		*value = metrics->Current.Voltage[2];
		break;
	case METRICS_VOLTAGE_VDDSOC:
		*value = metrics->Current.Voltage[1];
		break;
	case METRICS_AVERAGE_CPUCLK:
		/* NOTE(review): copies cpu_core_num u16 entries through a
		 * uint32_t *; caller must provide a large enough buffer */
		memcpy(value, &metrics->Current.CoreFrequency[0],
		       smu->cpu_core_num * sizeof(uint16_t));
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}

/*
 * vangogh_common_get_smu_metrics_data - dispatch to the legacy or current
 * metrics reader depending on the driver interface version reported by
 * the firmware (layout switched at version 0x3).
 */
static int vangogh_common_get_smu_metrics_data(struct smu_context *smu,
				       MetricsMember_t member,
				       uint32_t *value)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t if_version;
	int ret = 0;

	ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
	if (ret) {
		dev_err(adev->dev, "Failed to get smu if version!\n");
		return ret;
	}

	if (if_version < 0x3)
		ret = vangogh_get_legacy_smu_metrics_data(smu, member, value);
	else
		ret = vangogh_get_smu_metrics_data(smu, member, value);

	return ret;
}

/* Allocate the smu_v11 DPM context attached to smu->smu_dpm */
static int vangogh_allocate_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
				       GFP_KERNEL);
	if (!smu_dpm->dpm_context)
		return -ENOMEM;

	smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context);

	return 0;
}

/*
 * vangogh_init_smc_tables - set up SMC tables, the DPM context and the CPU
 * core count, then run the common smu_v11_0 table init.
 */
static int vangogh_init_smc_tables(struct smu_context *smu)
{
	int ret = 0;

	ret = vangogh_tables_init(smu);
	if (ret)
		return ret;

	ret = vangogh_allocate_dpm_context(smu);
	if (ret)
		return ret;

#ifdef CONFIG_X86
	/* AMD x86 APU only */
	smu->cpu_core_num = boot_cpu_data.x86_max_cores;
#else
	smu->cpu_core_num = 4;
#endif

	return smu_v11_0_init_smc_tables(smu);
}

/* Power the VCN block up or down through the SMU */
static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (enable) {
		/* vcn dpm on is a prerequisite for vcn power gate messages */
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
		if (ret)
			return ret;
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_PowerDownVcn, 0, NULL);
		if (ret)
			return ret;
	}

	return ret;
}

/* Power the JPEG block up or down through the SMU */
static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (enable) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
		if (ret)
			return ret;
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * vangogh_is_dpm_running - true when any SMC_DPM_FEATURE bit is enabled in
 * the firmware feature mask.
 */
static bool vangogh_is_dpm_running(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
	uint64_t feature_enabled;

	/* we need to re-init after suspend so return false */
	if (adev->in_suspend)
		return false;

	ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);

	if (ret)
		return false;

	return !!(feature_enabled & SMC_DPM_FEATURE);
}

/*
 * vangogh_get_dpm_clk_limited - look up the frequency of DPM level
 * @dpm_level for @clk_type in the cached DpmClocks_t table.
 *
 * Returns 0 and writes @freq, or -EINVAL for an unknown clock type, a
 * missing clock table, or an out-of-range level.
 */
static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
				uint32_t dpm_level, uint32_t *freq)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	if (!clk_table || clk_type >= SMU_CLK_COUNT)
		return -EINVAL;

	switch (clk_type) {
	case SMU_SOCCLK:
		if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->SocClocks[dpm_level];
		break;
	case SMU_VCLK:
		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->VcnClocks[dpm_level].vclk;
		break;
	case SMU_DCLK:
		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->VcnClocks[dpm_level].dclk;
		break;
	case SMU_UCLK:
	case SMU_MCLK:
		if (dpm_level >= clk_table->NumDfPstatesEnabled)
			return -EINVAL;
		*freq = clk_table->DfPstateTable[dpm_level].memclk;

		break;
	case SMU_FCLK:
		if (dpm_level >= clk_table->NumDfPstatesEnabled)
			return -EINVAL;
		*freq =
		clk_table->DfPstateTable[dpm_level].fclk;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * vangogh_print_legacy_clk_levels - sysfs printer for clock levels using
 * the SmuMetrics_legacy_t layout (driver interface version < 0x3).
 *
 * Returns the number of bytes written into @buf, or a negative error.
 */
static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
			enum smu_clk_type clk_type, char *buf)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
	SmuMetrics_legacy_t metrics;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int i, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	bool cur_value_match_level = false;

	memset(&metrics, 0, sizeof(metrics));

	ret = smu_cmn_get_metrics_table(smu, &metrics, false);
	if (ret)
		return ret;

	smu_cmn_get_sysfs_buf(&buf, &size);

	switch (clk_type) {
	case SMU_OD_SCLK:
		/* OD values are only exposed in manual dpm level */
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
		}
		break;
	case SMU_OD_CCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_OD_RANGE:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
				smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
			size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
				smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_SOCCLK:
		/* the level 3 ~ 6 of socclk use the same frequency for vangogh */
		count = clk_table->NumSocClkLevelsEnabled;
		cur_value = metrics.SocclkFrequency;
		break;
	case SMU_VCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.VclkFrequency;
		break;
	case SMU_DCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.DclkFrequency;
		break;
	case SMU_MCLK:
		count = clk_table->NumDfPstatesEnabled;
		cur_value = metrics.MemclkFrequency;
		break;
	case SMU_FCLK:
		/* fclk current value comes from a dedicated message */
		count = clk_table->NumDfPstatesEnabled;
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
		if (ret)
			return ret;
		break;
	default:
		break;
	}

	switch (clk_type) {
	case SMU_SOCCLK:
	case SMU_VCLK:
	case SMU_DCLK:
	case SMU_MCLK:
	case SMU_FCLK:
		/* print each enabled level, marking the current one */
		for (i = 0; i < count; i++) {
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
			if (ret)
				return ret;
			if (!value)
				continue;
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
				cur_value == value ?
				"*" : "");
			if (cur_value == value)
				cur_value_match_level = true;
		}

		/* current value matched no listed level: print it separately */
		if (!cur_value_match_level)
			size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value);
		break;
	default:
		break;
	}

	return size;
}

/*
 * vangogh_print_clk_levels - sysfs printer for clock levels using the
 * SmuMetrics_t layout (driver interface version >= 0x3); additionally
 * supports SMU_GFXCLK/SMU_SCLK via a synthetic three-level view.
 *
 * Returns the number of bytes written into @buf, or a negative error.
 */
static int vangogh_print_clk_levels(struct smu_context *smu,
			enum smu_clk_type clk_type, char *buf)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
	SmuMetrics_t metrics;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int i, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	bool cur_value_match_level = false;
	uint32_t min, max;

	memset(&metrics, 0, sizeof(metrics));

	ret = smu_cmn_get_metrics_table(smu, &metrics, false);
	if (ret)
		return ret;

	smu_cmn_get_sysfs_buf(&buf, &size);

	switch (clk_type) {
	case SMU_OD_SCLK:
		/* OD values are only exposed in manual dpm level */
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
		}
		break;
	case SMU_OD_CCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_OD_RANGE:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
				smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
			size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
				smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_SOCCLK:
		/* the level 3 ~ 6 of socclk use the same frequency for vangogh */
		count = clk_table->NumSocClkLevelsEnabled;
		cur_value = metrics.Current.SocclkFrequency;
		break;
	case SMU_VCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.Current.VclkFrequency;
		break;
	case SMU_DCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.Current.DclkFrequency;
		break;
	case SMU_MCLK:
		count = clk_table->NumDfPstatesEnabled;
		cur_value = metrics.Current.MemclkFrequency;
		break;
	case SMU_FCLK:
		/* fclk current value comes from a dedicated message */
		count = clk_table->NumDfPstatesEnabled;
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
		if (ret)
			return ret;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		/* gfxclk current value comes from a dedicated message */
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetGfxclkFrequency, 0, &cur_value);
		if (ret) {
			return ret;
		}
		break;
	default:
		break;
	}

	switch (clk_type) {
	case SMU_SOCCLK:
	case SMU_VCLK:
	case SMU_DCLK:
	case SMU_MCLK:
	case SMU_FCLK:
		/* print each enabled level, marking the current one */
		for (i = 0; i < count; i++) {
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
			if (ret)
				return ret;
			if (!value)
				continue;
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
				cur_value == value ?
				"*" : "");
			if (cur_value == value)
				cur_value_match_level = true;
		}

		/* current value matched no listed level: print it separately */
		if (!cur_value_match_level)
			size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value);
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		/* synthetic three-level view: 0 = min, 1 = current/standard, 2 = max */
		min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
		max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
		if (cur_value == max)
			i = 2;
		else if (cur_value == min)
			i = 0;
		else
			i = 1;
		size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
				i == 0 ? "*" : "");
		size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
				i == 1 ? cur_value : VANGOGH_UMD_PSTATE_STANDARD_GFXCLK,
				i == 1 ? "*" : "");
		size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
				i == 2 ? "*" : "");
		break;
	default:
		break;
	}

	return size;
}

/*
 * vangogh_common_print_clk_levels - dispatch to the legacy or current clock
 * level printer depending on the driver interface version.
 */
static int vangogh_common_print_clk_levels(struct smu_context *smu,
			enum smu_clk_type clk_type, char *buf)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t if_version;
	int ret = 0;

	ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
	if (ret) {
		dev_err(adev->dev, "Failed to get smu if version!\n");
		return ret;
	}

	if (if_version < 0x3)
		ret = vangogh_print_legacy_clk_levels(smu, clk_type, buf);
	else
		ret = vangogh_print_clk_levels(smu, clk_type, buf);

	return ret;
}

/*
 * vangogh_get_profiling_clk_mask - map a forced dpm level to per-domain DPM
 * level indices. Each output pointer may be NULL if the caller does not
 * need that domain. Levels not handled here leave the outputs untouched.
 */
static int vangogh_get_profiling_clk_mask(struct smu_context *smu,
					 enum amd_dpm_forced_level level,
					 uint32_t *vclk_mask,
					 uint32_t *dclk_mask,
					 uint32_t *mclk_mask,
					 uint32_t *fclk_mask,
					 uint32_t *soc_mask)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
		if (mclk_mask)
			*mclk_mask = clk_table->NumDfPstatesEnabled - 1;

		if (fclk_mask)
			*fclk_mask = clk_table->NumDfPstatesEnabled - 1;

		if (soc_mask)
			*soc_mask = 0;
	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
		if (mclk_mask)
			*mclk_mask = 0;

		if (fclk_mask)
			*fclk_mask = 0;

		if (soc_mask)
			*soc_mask = 1;

		if (vclk_mask)
			*vclk_mask = 1;

		if (dclk_mask)
			*dclk_mask = 1;
	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) {
		/* NOTE(review): identical to the PEAK branch above — verify
		 * whether STANDARD was meant to use different levels */
		if (mclk_mask)
			*mclk_mask = 0;

		if (fclk_mask)
			*fclk_mask = 0;

		if (soc_mask)
			*soc_mask = 1;

		if (vclk_mask)
			*vclk_mask = 1;

		if (dclk_mask)
			*dclk_mask = 1;
	}

	return 0;
}

/*
 * vangogh_clk_dpm_is_enabled - whether the DPM feature covering @clk_type
 * is enabled in the firmware feature mask. Unknown clock types are treated
 * as enabled.
 */
static bool vangogh_clk_dpm_is_enabled(struct smu_context *smu,
				enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
	case SMU_FCLK:
		feature_id = SMU_FEATURE_DPM_FCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	case SMU_VCLK:
	case SMU_DCLK:
		feature_id = SMU_FEATURE_VCN_DPM_BIT;
		break;
	default:
		return true;
	}

	if (!smu_cmn_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}

/*
 * vangogh_get_dpm_ultimate_freq - report the lowest (@min) and highest
 * (@max) attainable frequency for @clk_type; either pointer may be NULL.
 * Falls back to boot values (converted to MHz) when DPM for the clock is
 * disabled.
 *
 * NOTE(review): the min branch below reuses the *_mask values computed in
 * the max branch; if a caller passes min != NULL with max == NULL they are
 * used uninitialized — confirm all callers request max.
 */
static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu,
					enum smu_clk_type clk_type,
					uint32_t *min,
					uint32_t *max)
{
	int ret = 0;
	uint32_t soc_mask;
	uint32_t vclk_mask;
	uint32_t dclk_mask;
	uint32_t mclk_mask;
	uint32_t fclk_mask;
	uint32_t clock_limit;

	if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_FCLK:
			clock_limit = smu->smu_table.boot_values.fclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		case SMU_VCLK:
			clock_limit = smu->smu_table.boot_values.vclk;
			break;
		case SMU_DCLK:
			clock_limit = smu->smu_table.boot_values.dclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in Mhz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}
	if (max) {
		/* PEAK profile levels give the per-domain maximums */
		ret = vangogh_get_profiling_clk_mask(smu,
			 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
			 &vclk_mask,
			 &dclk_mask,
			 &mclk_mask,
			 &fclk_mask,
			 &soc_mask);
		if (ret)
			goto failed;

		switch (clk_type) {
		case SMU_UCLK:
		case SMU_MCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
			if (ret)
				goto failed;
			break;
		case SMU_SOCCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
			if (ret)
				goto failed;
			break;
		case SMU_FCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, max);
			if (ret)
				goto failed;
			break;
		case SMU_VCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, max);
			if (ret)
				goto failed;
			break;
		case SMU_DCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, max);
			if (ret)
				goto failed;
			break;
		default:
			ret = -EINVAL;
			goto failed;
		}
	}
	if (min) {
		/* NOTE(review): masks come from the max branch above */
		switch (clk_type) {
		case SMU_UCLK:
		case SMU_MCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, min);
			if (ret)
				goto failed;
			break;
		case SMU_SOCCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, min);
			if (ret)
				goto failed;
			break;
		case SMU_FCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, min);
			if (ret)
				goto failed;
			break;
		case SMU_VCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, min);
			if (ret)
				goto failed;
			break;
		case SMU_DCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, min);
			if (ret)
				goto failed;
			break;
		default:
			ret = -EINVAL;
			goto failed;
		}
	}
failed:
	return ret;
}

/*
 * vangogh_get_power_profile_mode - list the power profile modes supported
 * on vangogh into @buf, marking the active one with '*'.
 *
 * Returns the number of bytes written, or -EINVAL for a NULL buffer.
 */
static int vangogh_get_power_profile_mode(struct smu_context *smu,
					   char *buf)
{
	uint32_t i, size = 0;
	int16_t workload_type = 0;

	if (!buf)
		return -EINVAL;

	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
		/*
		 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
		 * Not all profile modes are supported on vangogh.
		 */
		workload_type = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_WORKLOAD,
							       i);

		if (workload_type < 0)
			continue;

		size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
			i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
	}

	return size;
}

/*
 * vangogh_set_power_profile_mode - select the active power profile mode.
 * The requested mode is passed in input[size]. BOOTUP_DEFAULT and
 * POWERSAVING are accepted but not forwarded to the firmware.
 */
static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
{
	int workload_type, ret;
	uint32_t profile_mode = input[size];

	if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
		return -EINVAL;
	}

	if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
			profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
		return 0;

	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
	workload_type = smu_cmn_to_asic_specific_index(smu,
						       CMN2ASIC_MAPPING_WORKLOAD,
						       profile_mode);
	if (workload_type < 0) {
		dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n",
					profile_mode);
		return -EINVAL;
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
				    1 << workload_type,
				    NULL);
	if (ret) {
		dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
					workload_type);
return ret; 1099 } 1100 1101 smu->power_profile_mode = profile_mode; 1102 1103 return 0; 1104 } 1105 1106 static int vangogh_set_soft_freq_limited_range(struct smu_context *smu, 1107 enum smu_clk_type clk_type, 1108 uint32_t min, 1109 uint32_t max) 1110 { 1111 int ret = 0; 1112 1113 if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) 1114 return 0; 1115 1116 switch (clk_type) { 1117 case SMU_GFXCLK: 1118 case SMU_SCLK: 1119 ret = smu_cmn_send_smc_msg_with_param(smu, 1120 SMU_MSG_SetHardMinGfxClk, 1121 min, NULL); 1122 if (ret) 1123 return ret; 1124 1125 ret = smu_cmn_send_smc_msg_with_param(smu, 1126 SMU_MSG_SetSoftMaxGfxClk, 1127 max, NULL); 1128 if (ret) 1129 return ret; 1130 break; 1131 case SMU_FCLK: 1132 ret = smu_cmn_send_smc_msg_with_param(smu, 1133 SMU_MSG_SetHardMinFclkByFreq, 1134 min, NULL); 1135 if (ret) 1136 return ret; 1137 1138 ret = smu_cmn_send_smc_msg_with_param(smu, 1139 SMU_MSG_SetSoftMaxFclkByFreq, 1140 max, NULL); 1141 if (ret) 1142 return ret; 1143 break; 1144 case SMU_SOCCLK: 1145 ret = smu_cmn_send_smc_msg_with_param(smu, 1146 SMU_MSG_SetHardMinSocclkByFreq, 1147 min, NULL); 1148 if (ret) 1149 return ret; 1150 1151 ret = smu_cmn_send_smc_msg_with_param(smu, 1152 SMU_MSG_SetSoftMaxSocclkByFreq, 1153 max, NULL); 1154 if (ret) 1155 return ret; 1156 break; 1157 case SMU_VCLK: 1158 ret = smu_cmn_send_smc_msg_with_param(smu, 1159 SMU_MSG_SetHardMinVcn, 1160 min << 16, NULL); 1161 if (ret) 1162 return ret; 1163 ret = smu_cmn_send_smc_msg_with_param(smu, 1164 SMU_MSG_SetSoftMaxVcn, 1165 max << 16, NULL); 1166 if (ret) 1167 return ret; 1168 break; 1169 case SMU_DCLK: 1170 ret = smu_cmn_send_smc_msg_with_param(smu, 1171 SMU_MSG_SetHardMinVcn, 1172 min, NULL); 1173 if (ret) 1174 return ret; 1175 ret = smu_cmn_send_smc_msg_with_param(smu, 1176 SMU_MSG_SetSoftMaxVcn, 1177 max, NULL); 1178 if (ret) 1179 return ret; 1180 break; 1181 default: 1182 return -EINVAL; 1183 } 1184 1185 return ret; 1186 } 1187 1188 static int vangogh_force_clk_levels(struct 
smu_context *smu, 1189 enum smu_clk_type clk_type, uint32_t mask) 1190 { 1191 uint32_t soft_min_level = 0, soft_max_level = 0; 1192 uint32_t min_freq = 0, max_freq = 0; 1193 int ret = 0 ; 1194 1195 soft_min_level = mask ? (ffs(mask) - 1) : 0; 1196 soft_max_level = mask ? (fls(mask) - 1) : 0; 1197 1198 switch (clk_type) { 1199 case SMU_SOCCLK: 1200 ret = vangogh_get_dpm_clk_limited(smu, clk_type, 1201 soft_min_level, &min_freq); 1202 if (ret) 1203 return ret; 1204 ret = vangogh_get_dpm_clk_limited(smu, clk_type, 1205 soft_max_level, &max_freq); 1206 if (ret) 1207 return ret; 1208 ret = smu_cmn_send_smc_msg_with_param(smu, 1209 SMU_MSG_SetSoftMaxSocclkByFreq, 1210 max_freq, NULL); 1211 if (ret) 1212 return ret; 1213 ret = smu_cmn_send_smc_msg_with_param(smu, 1214 SMU_MSG_SetHardMinSocclkByFreq, 1215 min_freq, NULL); 1216 if (ret) 1217 return ret; 1218 break; 1219 case SMU_FCLK: 1220 ret = vangogh_get_dpm_clk_limited(smu, 1221 clk_type, soft_min_level, &min_freq); 1222 if (ret) 1223 return ret; 1224 ret = vangogh_get_dpm_clk_limited(smu, 1225 clk_type, soft_max_level, &max_freq); 1226 if (ret) 1227 return ret; 1228 ret = smu_cmn_send_smc_msg_with_param(smu, 1229 SMU_MSG_SetSoftMaxFclkByFreq, 1230 max_freq, NULL); 1231 if (ret) 1232 return ret; 1233 ret = smu_cmn_send_smc_msg_with_param(smu, 1234 SMU_MSG_SetHardMinFclkByFreq, 1235 min_freq, NULL); 1236 if (ret) 1237 return ret; 1238 break; 1239 case SMU_VCLK: 1240 ret = vangogh_get_dpm_clk_limited(smu, 1241 clk_type, soft_min_level, &min_freq); 1242 if (ret) 1243 return ret; 1244 1245 ret = vangogh_get_dpm_clk_limited(smu, 1246 clk_type, soft_max_level, &max_freq); 1247 if (ret) 1248 return ret; 1249 1250 1251 ret = smu_cmn_send_smc_msg_with_param(smu, 1252 SMU_MSG_SetHardMinVcn, 1253 min_freq << 16, NULL); 1254 if (ret) 1255 return ret; 1256 1257 ret = smu_cmn_send_smc_msg_with_param(smu, 1258 SMU_MSG_SetSoftMaxVcn, 1259 max_freq << 16, NULL); 1260 if (ret) 1261 return ret; 1262 1263 break; 1264 case SMU_DCLK: 1265 
ret = vangogh_get_dpm_clk_limited(smu, 1266 clk_type, soft_min_level, &min_freq); 1267 if (ret) 1268 return ret; 1269 1270 ret = vangogh_get_dpm_clk_limited(smu, 1271 clk_type, soft_max_level, &max_freq); 1272 if (ret) 1273 return ret; 1274 1275 ret = smu_cmn_send_smc_msg_with_param(smu, 1276 SMU_MSG_SetHardMinVcn, 1277 min_freq, NULL); 1278 if (ret) 1279 return ret; 1280 1281 ret = smu_cmn_send_smc_msg_with_param(smu, 1282 SMU_MSG_SetSoftMaxVcn, 1283 max_freq, NULL); 1284 if (ret) 1285 return ret; 1286 1287 break; 1288 default: 1289 break; 1290 } 1291 1292 return ret; 1293 } 1294 1295 static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest) 1296 { 1297 int ret = 0, i = 0; 1298 uint32_t min_freq, max_freq, force_freq; 1299 enum smu_clk_type clk_type; 1300 1301 enum smu_clk_type clks[] = { 1302 SMU_SOCCLK, 1303 SMU_VCLK, 1304 SMU_DCLK, 1305 SMU_FCLK, 1306 }; 1307 1308 for (i = 0; i < ARRAY_SIZE(clks); i++) { 1309 clk_type = clks[i]; 1310 ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq); 1311 if (ret) 1312 return ret; 1313 1314 force_freq = highest ? 
max_freq : min_freq;
		ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Restore each DPM-enabled clock to its full [min, max] range,
 * undoing any forced limits.  Clocks whose feature is disabled
 * are skipped.
 */
static int vangogh_unforce_dpm_levels(struct smu_context *smu)
{
	int ret = 0, i = 0;
	uint32_t min_freq, max_freq;
	enum smu_clk_type clk_type;

	struct clk_feature_map {
		enum smu_clk_type clk_type;
		uint32_t	feature;
	} clk_feature_map[] = {
		{SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT},
		{SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
		{SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT},
		{SMU_DCLK, SMU_FEATURE_VCN_DPM_BIT},
	};

	for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {

		if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature))
			continue;

		clk_type = clk_feature_map[i].clk_type;

		ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);

		if (ret)
			return ret;

		ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);

		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Pin fclk, socclk, vclk and dclk each to its maximum attainable
 * frequency (used by the PROFILE_PEAK performance level).
 */
static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
{
	int ret = 0;
	uint32_t socclk_freq = 0, fclk_freq = 0;
	uint32_t vclk_freq = 0, dclk_freq = 0;

	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_freq);
	if (ret)
		return ret;

	ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq);
	if (ret)
		return ret;

	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_freq);
	if (ret)
		return ret;

	ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq);
	if (ret)
		return ret;

	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_freq);
	if (ret)
		return ret;

	ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq);
	if (ret)
		return ret;

	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_freq);
	if (ret)
		return ret;

	ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq);
	if (ret)
		return ret;

	return ret;
}

/*
 * Apply a dpm forced performance level: pick gfx hard-min / soft-max
 * targets per level, force or unforce the other clocks, then push the
 * gfx limits and (on new enough PMFW) per-core cclk soft limits to FW.
 */
static int vangogh_set_performance_level(struct smu_context *smu,
					enum amd_dpm_forced_level level)
{
	int ret = 0, i;
	uint32_t soc_mask, mclk_mask, fclk_mask;
	uint32_t vclk_mask = 0, dclk_mask = 0;

	smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
	smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		smu->gfx_actual_hard_min_freq = smu->gfx_default_soft_max_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;


		ret = vangogh_force_dpm_limit_value(smu, true);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq;

		ret = vangogh_force_dpm_limit_value(smu, false);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;

		ret = vangogh_unforce_dpm_levels(smu);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK;
		smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK;

		ret = vangogh_get_profiling_clk_mask(smu, level,
							&vclk_mask,
							&dclk_mask,
							&mclk_mask,
							&fclk_mask,
							&soc_mask);
		if (ret)
			return ret;

		vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
		vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
		vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask);
		vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;

		ret = vangogh_get_profiling_clk_mask(smu, level,
							NULL,
							NULL,
							&mclk_mask,
							&fclk_mask,
							NULL);
		if (ret)
			return ret;

		vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK;
		smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK;

		ret = vangogh_set_peak_clock_by_device(smu);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		return 0;
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
					      smu->gfx_actual_hard_min_freq, NULL);
	if (ret)
		return ret;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
					      smu->gfx_actual_soft_max_freq, NULL);
	if (ret)
		return ret;

	/* cclk soft limit messages require PMFW >= 0x43f1b00 */
	if (smu->adev->pm.fw_version >= 0x43f1b00) {
		for (i = 0; i < smu->cpu_core_num; i++) {
			/* core index is carried in bits 20+ of the parameter */
			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
							      ((i << 20)
							       | smu->cpu_actual_soft_min_freq),
							      NULL);
			if (ret)
				return ret;

			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
							      ((i << 20)
							       | smu->cpu_actual_soft_max_freq),
							      NULL);
			if (ret)
				return ret;
		}
	}

return ret; 1517 } 1518 1519 static int vangogh_read_sensor(struct smu_context *smu, 1520 enum amd_pp_sensors sensor, 1521 void *data, uint32_t *size) 1522 { 1523 int ret = 0; 1524 1525 if (!data || !size) 1526 return -EINVAL; 1527 1528 switch (sensor) { 1529 case AMDGPU_PP_SENSOR_GPU_LOAD: 1530 ret = vangogh_common_get_smu_metrics_data(smu, 1531 METRICS_AVERAGE_GFXACTIVITY, 1532 (uint32_t *)data); 1533 *size = 4; 1534 break; 1535 case AMDGPU_PP_SENSOR_GPU_POWER: 1536 ret = vangogh_common_get_smu_metrics_data(smu, 1537 METRICS_AVERAGE_SOCKETPOWER, 1538 (uint32_t *)data); 1539 *size = 4; 1540 break; 1541 case AMDGPU_PP_SENSOR_EDGE_TEMP: 1542 ret = vangogh_common_get_smu_metrics_data(smu, 1543 METRICS_TEMPERATURE_EDGE, 1544 (uint32_t *)data); 1545 *size = 4; 1546 break; 1547 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 1548 ret = vangogh_common_get_smu_metrics_data(smu, 1549 METRICS_TEMPERATURE_HOTSPOT, 1550 (uint32_t *)data); 1551 *size = 4; 1552 break; 1553 case AMDGPU_PP_SENSOR_GFX_MCLK: 1554 ret = vangogh_common_get_smu_metrics_data(smu, 1555 METRICS_CURR_UCLK, 1556 (uint32_t *)data); 1557 *(uint32_t *)data *= 100; 1558 *size = 4; 1559 break; 1560 case AMDGPU_PP_SENSOR_GFX_SCLK: 1561 ret = vangogh_common_get_smu_metrics_data(smu, 1562 METRICS_CURR_GFXCLK, 1563 (uint32_t *)data); 1564 *(uint32_t *)data *= 100; 1565 *size = 4; 1566 break; 1567 case AMDGPU_PP_SENSOR_VDDGFX: 1568 ret = vangogh_common_get_smu_metrics_data(smu, 1569 METRICS_VOLTAGE_VDDGFX, 1570 (uint32_t *)data); 1571 *size = 4; 1572 break; 1573 case AMDGPU_PP_SENSOR_VDDNB: 1574 ret = vangogh_common_get_smu_metrics_data(smu, 1575 METRICS_VOLTAGE_VDDSOC, 1576 (uint32_t *)data); 1577 *size = 4; 1578 break; 1579 case AMDGPU_PP_SENSOR_CPU_CLK: 1580 ret = vangogh_common_get_smu_metrics_data(smu, 1581 METRICS_AVERAGE_CPUCLK, 1582 (uint32_t *)data); 1583 *size = smu->cpu_core_num * sizeof(uint16_t); 1584 break; 1585 default: 1586 ret = -EOPNOTSUPP; 1587 break; 1588 } 1589 1590 return ret; 1591 } 1592 1593 static int 
vangogh_set_watermarks_table(struct smu_context *smu, 1594 struct pp_smu_wm_range_sets *clock_ranges) 1595 { 1596 int i; 1597 int ret = 0; 1598 Watermarks_t *table = smu->smu_table.watermarks_table; 1599 1600 if (!table || !clock_ranges) 1601 return -EINVAL; 1602 1603 if (clock_ranges) { 1604 if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES || 1605 clock_ranges->num_writer_wm_sets > NUM_WM_RANGES) 1606 return -EINVAL; 1607 1608 for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) { 1609 table->WatermarkRow[WM_DCFCLK][i].MinClock = 1610 clock_ranges->reader_wm_sets[i].min_drain_clk_mhz; 1611 table->WatermarkRow[WM_DCFCLK][i].MaxClock = 1612 clock_ranges->reader_wm_sets[i].max_drain_clk_mhz; 1613 table->WatermarkRow[WM_DCFCLK][i].MinMclk = 1614 clock_ranges->reader_wm_sets[i].min_fill_clk_mhz; 1615 table->WatermarkRow[WM_DCFCLK][i].MaxMclk = 1616 clock_ranges->reader_wm_sets[i].max_fill_clk_mhz; 1617 1618 table->WatermarkRow[WM_DCFCLK][i].WmSetting = 1619 clock_ranges->reader_wm_sets[i].wm_inst; 1620 } 1621 1622 for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) { 1623 table->WatermarkRow[WM_SOCCLK][i].MinClock = 1624 clock_ranges->writer_wm_sets[i].min_fill_clk_mhz; 1625 table->WatermarkRow[WM_SOCCLK][i].MaxClock = 1626 clock_ranges->writer_wm_sets[i].max_fill_clk_mhz; 1627 table->WatermarkRow[WM_SOCCLK][i].MinMclk = 1628 clock_ranges->writer_wm_sets[i].min_drain_clk_mhz; 1629 table->WatermarkRow[WM_SOCCLK][i].MaxMclk = 1630 clock_ranges->writer_wm_sets[i].max_drain_clk_mhz; 1631 1632 table->WatermarkRow[WM_SOCCLK][i].WmSetting = 1633 clock_ranges->writer_wm_sets[i].wm_inst; 1634 } 1635 1636 smu->watermarks_bitmap |= WATERMARKS_EXIST; 1637 } 1638 1639 /* pass data to smu controller */ 1640 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && 1641 !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { 1642 ret = smu_cmn_write_watermarks_table(smu); 1643 if (ret) { 1644 dev_err(smu->adev->dev, "Failed to update WMTABLE!"); 1645 return ret; 1646 } 1647 
smu->watermarks_bitmap |= WATERMARKS_LOADED; 1648 } 1649 1650 return 0; 1651 } 1652 1653 static ssize_t vangogh_get_legacy_gpu_metrics_v2_3(struct smu_context *smu, 1654 void **table) 1655 { 1656 struct smu_table_context *smu_table = &smu->smu_table; 1657 struct gpu_metrics_v2_3 *gpu_metrics = 1658 (struct gpu_metrics_v2_3 *)smu_table->gpu_metrics_table; 1659 SmuMetrics_legacy_t metrics; 1660 int ret = 0; 1661 1662 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 1663 if (ret) 1664 return ret; 1665 1666 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 3); 1667 1668 gpu_metrics->temperature_gfx = metrics.GfxTemperature; 1669 gpu_metrics->temperature_soc = metrics.SocTemperature; 1670 memcpy(&gpu_metrics->temperature_core[0], 1671 &metrics.CoreTemperature[0], 1672 sizeof(uint16_t) * 4); 1673 gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0]; 1674 1675 gpu_metrics->average_gfx_activity = metrics.GfxActivity; 1676 gpu_metrics->average_mm_activity = metrics.UvdActivity; 1677 1678 gpu_metrics->average_socket_power = metrics.CurrentSocketPower; 1679 gpu_metrics->average_cpu_power = metrics.Power[0]; 1680 gpu_metrics->average_soc_power = metrics.Power[1]; 1681 gpu_metrics->average_gfx_power = metrics.Power[2]; 1682 memcpy(&gpu_metrics->average_core_power[0], 1683 &metrics.CorePower[0], 1684 sizeof(uint16_t) * 4); 1685 1686 gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency; 1687 gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency; 1688 gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency; 1689 gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency; 1690 gpu_metrics->average_vclk_frequency = metrics.VclkFrequency; 1691 gpu_metrics->average_dclk_frequency = metrics.DclkFrequency; 1692 1693 memcpy(&gpu_metrics->current_coreclk[0], 1694 &metrics.CoreFrequency[0], 1695 sizeof(uint16_t) * 4); 1696 gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0]; 1697 1698 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 1699 
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
							   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_3);
}

/*
 * Translate the legacy SmuMetrics_legacy_t layout into a v2.2
 * gpu_metrics table (used with older PMFW, see common entry below).
 */
static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
				      void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_2 *gpu_metrics =
		(struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
	SmuMetrics_legacy_t metrics;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);

	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.SocTemperature;
	memcpy(&gpu_metrics->temperature_core[0],
		&metrics.CoreTemperature[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.UvdActivity;

	gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
	gpu_metrics->average_cpu_power = metrics.Power[0];
	gpu_metrics->average_soc_power = metrics.Power[1];
	gpu_metrics->average_gfx_power = metrics.Power[2];
	memcpy(&gpu_metrics->average_core_power[0],
		&metrics.CorePower[0],
		sizeof(uint16_t) * 4);

	gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
		&metrics.CoreFrequency[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
							   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_2);
}

/*
 * Translate the new SmuMetrics_t layout (Current/Average sub-structs)
 * into a v2.3 gpu_metrics table, including average temperatures.
 */
static ssize_t vangogh_get_gpu_metrics_v2_3(struct smu_context *smu,
				      void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_3 *gpu_metrics =
		(struct gpu_metrics_v2_3 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 3);

	gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
	memcpy(&gpu_metrics->temperature_core[0],
		&metrics.Current.CoreTemperature[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0];

	gpu_metrics->average_temperature_gfx = metrics.Average.GfxTemperature;
	gpu_metrics->average_temperature_soc = metrics.Average.SocTemperature;
	memcpy(&gpu_metrics->average_temperature_core[0],
		&metrics.Average.CoreTemperature[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->average_temperature_l3[0] = metrics.Average.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.Current.UvdActivity;

	gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
	gpu_metrics->average_cpu_power = metrics.Current.Power[0];
	gpu_metrics->average_soc_power = metrics.Current.Power[1];
	gpu_metrics->average_gfx_power = metrics.Current.Power[2];
	memcpy(&gpu_metrics->average_core_power[0],
		&metrics.Average.CorePower[0],
		sizeof(uint16_t) * 4);

	gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;

	gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
	gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
	gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
	gpu_metrics->current_dclk = metrics.Current.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
		&metrics.Current.CoreFrequency[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];

	gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus,
							   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_3);
}

/*
 * Translate the new SmuMetrics_t layout (Current/Average sub-structs)
 * into a v2.2 gpu_metrics table (older PMFW, see common entry below).
 */
static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
				      void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_2 *gpu_metrics =
		(struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);

	gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
	memcpy(&gpu_metrics->temperature_core[0],
		&metrics.Current.CoreTemperature[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.Current.UvdActivity;

	gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
	gpu_metrics->average_cpu_power = metrics.Current.Power[0];
	gpu_metrics->average_soc_power = metrics.Current.Power[1];
	gpu_metrics->average_gfx_power = metrics.Current.Power[2];
	memcpy(&gpu_metrics->average_core_power[0],
		&metrics.Average.CorePower[0],
		sizeof(uint16_t) * 4);

	gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;

	gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
	gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
	gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
	gpu_metrics->current_dclk = metrics.Current.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
		&metrics.Current.CoreFrequency[0],
		sizeof(uint16_t) * 4);
gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0]; 1889 1890 gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus; 1891 gpu_metrics->indep_throttle_status = 1892 smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus, 1893 vangogh_throttler_map); 1894 1895 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1896 1897 *table = (void *)gpu_metrics; 1898 1899 return sizeof(struct gpu_metrics_v2_2); 1900 } 1901 1902 static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu, 1903 void **table) 1904 { 1905 uint32_t if_version; 1906 uint32_t smu_version; 1907 int ret = 0; 1908 1909 ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); 1910 if (ret) { 1911 return ret; 1912 } 1913 1914 if (smu_version >= 0x043F3E00) { 1915 if (if_version < 0x3) 1916 ret = vangogh_get_legacy_gpu_metrics_v2_3(smu, table); 1917 else 1918 ret = vangogh_get_gpu_metrics_v2_3(smu, table); 1919 } else { 1920 if (if_version < 0x3) 1921 ret = vangogh_get_legacy_gpu_metrics(smu, table); 1922 else 1923 ret = vangogh_get_gpu_metrics(smu, table); 1924 } 1925 1926 return ret; 1927 } 1928 1929 static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, 1930 long input[], uint32_t size) 1931 { 1932 int ret = 0; 1933 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1934 1935 if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) { 1936 dev_warn(smu->adev->dev, 1937 "pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n"); 1938 return -EINVAL; 1939 } 1940 1941 switch (type) { 1942 case PP_OD_EDIT_CCLK_VDDC_TABLE: 1943 if (size != 3) { 1944 dev_err(smu->adev->dev, "Input parameter number not correct (should be 4 for processor)\n"); 1945 return -EINVAL; 1946 } 1947 if (input[0] >= smu->cpu_core_num) { 1948 dev_err(smu->adev->dev, "core index is overflow, should be less than %d\n", 1949 smu->cpu_core_num); 1950 } 1951 smu->cpu_core_id_select = input[0]; 
1952 if (input[1] == 0) { 1953 if (input[2] < smu->cpu_default_soft_min_freq) { 1954 dev_warn(smu->adev->dev, "Fine grain setting minimum cclk (%ld) MHz is less than the minimum allowed (%d) MHz\n", 1955 input[2], smu->cpu_default_soft_min_freq); 1956 return -EINVAL; 1957 } 1958 smu->cpu_actual_soft_min_freq = input[2]; 1959 } else if (input[1] == 1) { 1960 if (input[2] > smu->cpu_default_soft_max_freq) { 1961 dev_warn(smu->adev->dev, "Fine grain setting maximum cclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n", 1962 input[2], smu->cpu_default_soft_max_freq); 1963 return -EINVAL; 1964 } 1965 smu->cpu_actual_soft_max_freq = input[2]; 1966 } else { 1967 return -EINVAL; 1968 } 1969 break; 1970 case PP_OD_EDIT_SCLK_VDDC_TABLE: 1971 if (size != 2) { 1972 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 1973 return -EINVAL; 1974 } 1975 1976 if (input[0] == 0) { 1977 if (input[1] < smu->gfx_default_hard_min_freq) { 1978 dev_warn(smu->adev->dev, 1979 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n", 1980 input[1], smu->gfx_default_hard_min_freq); 1981 return -EINVAL; 1982 } 1983 smu->gfx_actual_hard_min_freq = input[1]; 1984 } else if (input[0] == 1) { 1985 if (input[1] > smu->gfx_default_soft_max_freq) { 1986 dev_warn(smu->adev->dev, 1987 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n", 1988 input[1], smu->gfx_default_soft_max_freq); 1989 return -EINVAL; 1990 } 1991 smu->gfx_actual_soft_max_freq = input[1]; 1992 } else { 1993 return -EINVAL; 1994 } 1995 break; 1996 case PP_OD_RESTORE_DEFAULT_TABLE: 1997 if (size != 0) { 1998 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 1999 return -EINVAL; 2000 } else { 2001 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 2002 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 2003 smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; 2004 smu->cpu_actual_soft_max_freq = 
smu->cpu_default_soft_max_freq; 2005 } 2006 break; 2007 case PP_OD_COMMIT_DPM_TABLE: 2008 if (size != 0) { 2009 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 2010 return -EINVAL; 2011 } else { 2012 if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) { 2013 dev_err(smu->adev->dev, 2014 "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n", 2015 smu->gfx_actual_hard_min_freq, 2016 smu->gfx_actual_soft_max_freq); 2017 return -EINVAL; 2018 } 2019 2020 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, 2021 smu->gfx_actual_hard_min_freq, NULL); 2022 if (ret) { 2023 dev_err(smu->adev->dev, "Set hard min sclk failed!"); 2024 return ret; 2025 } 2026 2027 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, 2028 smu->gfx_actual_soft_max_freq, NULL); 2029 if (ret) { 2030 dev_err(smu->adev->dev, "Set soft max sclk failed!"); 2031 return ret; 2032 } 2033 2034 if (smu->adev->pm.fw_version < 0x43f1b00) { 2035 dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n"); 2036 break; 2037 } 2038 2039 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk, 2040 ((smu->cpu_core_id_select << 20) 2041 | smu->cpu_actual_soft_min_freq), 2042 NULL); 2043 if (ret) { 2044 dev_err(smu->adev->dev, "Set hard min cclk failed!"); 2045 return ret; 2046 } 2047 2048 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk, 2049 ((smu->cpu_core_id_select << 20) 2050 | smu->cpu_actual_soft_max_freq), 2051 NULL); 2052 if (ret) { 2053 dev_err(smu->adev->dev, "Set soft max cclk failed!"); 2054 return ret; 2055 } 2056 } 2057 break; 2058 default: 2059 return -ENOSYS; 2060 } 2061 2062 return ret; 2063 } 2064 2065 static int vangogh_set_default_dpm_tables(struct smu_context *smu) 2066 { 2067 struct smu_table_context *smu_table = &smu->smu_table; 2068 2069 return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); 2070 } 2071 
2072 static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) 2073 { 2074 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 2075 2076 smu->gfx_default_hard_min_freq = clk_table->MinGfxClk; 2077 smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk; 2078 smu->gfx_actual_hard_min_freq = 0; 2079 smu->gfx_actual_soft_max_freq = 0; 2080 2081 smu->cpu_default_soft_min_freq = 1400; 2082 smu->cpu_default_soft_max_freq = 3500; 2083 smu->cpu_actual_soft_min_freq = 0; 2084 smu->cpu_actual_soft_max_freq = 0; 2085 2086 return 0; 2087 } 2088 2089 static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table) 2090 { 2091 DpmClocks_t *table = smu->smu_table.clocks_table; 2092 int i; 2093 2094 if (!clock_table || !table) 2095 return -EINVAL; 2096 2097 for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) { 2098 clock_table->SocClocks[i].Freq = table->SocClocks[i]; 2099 clock_table->SocClocks[i].Vol = table->SocVoltage[i]; 2100 } 2101 2102 for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) { 2103 clock_table->FClocks[i].Freq = table->DfPstateTable[i].fclk; 2104 clock_table->FClocks[i].Vol = table->DfPstateTable[i].voltage; 2105 } 2106 2107 for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) { 2108 clock_table->MemClocks[i].Freq = table->DfPstateTable[i].memclk; 2109 clock_table->MemClocks[i].Vol = table->DfPstateTable[i].voltage; 2110 } 2111 2112 return 0; 2113 } 2114 2115 2116 static int vangogh_system_features_control(struct smu_context *smu, bool en) 2117 { 2118 struct amdgpu_device *adev = smu->adev; 2119 int ret = 0; 2120 2121 if (adev->pm.fw_version >= 0x43f1700 && !en) 2122 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify, 2123 RLC_STATUS_OFF, NULL); 2124 2125 return ret; 2126 } 2127 2128 static int vangogh_post_smu_init(struct smu_context *smu) 2129 { 2130 struct amdgpu_device *adev = smu->adev; 2131 uint32_t tmp; 2132 int ret = 0; 2133 uint8_t aon_bits = 0; 2134 /* Two CUs in one WGP */ 2135 uint32_t req_active_wgps = 
adev->gfx.cu_info.number/2; 2136 uint32_t total_cu = adev->gfx.config.max_cu_per_sh * 2137 adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines; 2138 2139 /* allow message will be sent after enable message on Vangogh*/ 2140 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) && 2141 (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { 2142 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL); 2143 if (ret) { 2144 dev_err(adev->dev, "Failed to Enable GfxOff!\n"); 2145 return ret; 2146 } 2147 } else { 2148 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 2149 dev_info(adev->dev, "If GFX DPM or power gate disabled, disable GFXOFF\n"); 2150 } 2151 2152 /* if all CUs are active, no need to power off any WGPs */ 2153 if (total_cu == adev->gfx.cu_info.number) 2154 return 0; 2155 2156 /* 2157 * Calculate the total bits number of always on WGPs for all SA/SEs in 2158 * RLC_PG_ALWAYS_ON_WGP_MASK. 2159 */ 2160 tmp = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_ALWAYS_ON_WGP_MASK)); 2161 tmp &= RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK_MASK; 2162 2163 aon_bits = hweight32(tmp) * adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines; 2164 2165 /* Do not request any WGPs less than set in the AON_WGP_MASK */ 2166 if (aon_bits > req_active_wgps) { 2167 dev_info(adev->dev, "Number of always on WGPs greater than active WGPs: WGP power save not requested.\n"); 2168 return 0; 2169 } else { 2170 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestActiveWgp, req_active_wgps, NULL); 2171 } 2172 } 2173 2174 static int vangogh_mode_reset(struct smu_context *smu, int type) 2175 { 2176 int ret = 0, index = 0; 2177 2178 index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, 2179 SMU_MSG_GfxDeviceDriverReset); 2180 if (index < 0) 2181 return index == -EACCES ? 
0 : index; 2182 2183 mutex_lock(&smu->message_lock); 2184 2185 ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type); 2186 2187 mutex_unlock(&smu->message_lock); 2188 2189 mdelay(10); 2190 2191 return ret; 2192 } 2193 2194 static int vangogh_mode2_reset(struct smu_context *smu) 2195 { 2196 return vangogh_mode_reset(smu, SMU_RESET_MODE_2); 2197 } 2198 2199 /** 2200 * vangogh_get_gfxoff_status - Get gfxoff status 2201 * 2202 * @smu: amdgpu_device pointer 2203 * 2204 * Get current gfxoff status 2205 * 2206 * Return: 2207 * * 0 - GFXOFF (default if enabled). 2208 * * 1 - Transition out of GFX State. 2209 * * 2 - Not in GFXOFF. 2210 * * 3 - Transition into GFXOFF. 2211 */ 2212 static u32 vangogh_get_gfxoff_status(struct smu_context *smu) 2213 { 2214 struct amdgpu_device *adev = smu->adev; 2215 u32 reg, gfxoff_status; 2216 2217 reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL); 2218 gfxoff_status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK) 2219 >> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT; 2220 2221 return gfxoff_status; 2222 } 2223 2224 static int vangogh_get_power_limit(struct smu_context *smu, 2225 uint32_t *current_power_limit, 2226 uint32_t *default_power_limit, 2227 uint32_t *max_power_limit) 2228 { 2229 struct smu_11_5_power_context *power_context = 2230 smu->smu_power.power_context; 2231 uint32_t ppt_limit; 2232 int ret = 0; 2233 2234 if (smu->adev->pm.fw_version < 0x43f1e00) 2235 return ret; 2236 2237 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSlowPPTLimit, &ppt_limit); 2238 if (ret) { 2239 dev_err(smu->adev->dev, "Get slow PPT limit failed!\n"); 2240 return ret; 2241 } 2242 /* convert from milliwatt to watt */ 2243 if (current_power_limit) 2244 *current_power_limit = ppt_limit / 1000; 2245 if (default_power_limit) 2246 *default_power_limit = ppt_limit / 1000; 2247 if (max_power_limit) 2248 *max_power_limit = 29; 2249 2250 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit); 2251 if (ret) { 2252 
dev_err(smu->adev->dev, "Get fast PPT limit failed!\n"); 2253 return ret; 2254 } 2255 /* convert from milliwatt to watt */ 2256 power_context->current_fast_ppt_limit = 2257 power_context->default_fast_ppt_limit = ppt_limit / 1000; 2258 power_context->max_fast_ppt_limit = 30; 2259 2260 return ret; 2261 } 2262 2263 static int vangogh_get_ppt_limit(struct smu_context *smu, 2264 uint32_t *ppt_limit, 2265 enum smu_ppt_limit_type type, 2266 enum smu_ppt_limit_level level) 2267 { 2268 struct smu_11_5_power_context *power_context = 2269 smu->smu_power.power_context; 2270 2271 if (!power_context) 2272 return -EOPNOTSUPP; 2273 2274 if (type == SMU_FAST_PPT_LIMIT) { 2275 switch (level) { 2276 case SMU_PPT_LIMIT_MAX: 2277 *ppt_limit = power_context->max_fast_ppt_limit; 2278 break; 2279 case SMU_PPT_LIMIT_CURRENT: 2280 *ppt_limit = power_context->current_fast_ppt_limit; 2281 break; 2282 case SMU_PPT_LIMIT_DEFAULT: 2283 *ppt_limit = power_context->default_fast_ppt_limit; 2284 break; 2285 default: 2286 break; 2287 } 2288 } 2289 2290 return 0; 2291 } 2292 2293 static int vangogh_set_power_limit(struct smu_context *smu, 2294 enum smu_ppt_limit_type limit_type, 2295 uint32_t ppt_limit) 2296 { 2297 struct smu_11_5_power_context *power_context = 2298 smu->smu_power.power_context; 2299 int ret = 0; 2300 2301 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { 2302 dev_err(smu->adev->dev, "Setting new power limit is not supported!\n"); 2303 return -EOPNOTSUPP; 2304 } 2305 2306 switch (limit_type) { 2307 case SMU_DEFAULT_PPT_LIMIT: 2308 ret = smu_cmn_send_smc_msg_with_param(smu, 2309 SMU_MSG_SetSlowPPTLimit, 2310 ppt_limit * 1000, /* convert from watt to milliwatt */ 2311 NULL); 2312 if (ret) 2313 return ret; 2314 2315 smu->current_power_limit = ppt_limit; 2316 break; 2317 case SMU_FAST_PPT_LIMIT: 2318 ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24); 2319 if (ppt_limit > power_context->max_fast_ppt_limit) { 2320 dev_err(smu->adev->dev, 2321 "New power limit (%d) is over the max allowed 
%d\n", 2322 ppt_limit, power_context->max_fast_ppt_limit); 2323 return ret; 2324 } 2325 2326 ret = smu_cmn_send_smc_msg_with_param(smu, 2327 SMU_MSG_SetFastPPTLimit, 2328 ppt_limit * 1000, /* convert from watt to milliwatt */ 2329 NULL); 2330 if (ret) 2331 return ret; 2332 2333 power_context->current_fast_ppt_limit = ppt_limit; 2334 break; 2335 default: 2336 return -EINVAL; 2337 } 2338 2339 return ret; 2340 } 2341 2342 /** 2343 * vangogh_set_gfxoff_residency 2344 * 2345 * @smu: amdgpu_device pointer 2346 * @start: start/stop residency log 2347 * 2348 * This function will be used to log gfxoff residency 2349 * 2350 * 2351 * Returns standard response codes. 2352 */ 2353 static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start) 2354 { 2355 int ret = 0; 2356 u32 residency; 2357 struct amdgpu_device *adev = smu->adev; 2358 2359 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) 2360 return 0; 2361 2362 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LogGfxOffResidency, 2363 start, &residency); 2364 2365 if (!start) 2366 adev->gfx.gfx_off_residency = residency; 2367 2368 return ret; 2369 } 2370 2371 /** 2372 * vangogh_get_gfxoff_residency 2373 * 2374 * @smu: amdgpu_device pointer 2375 * 2376 * This function will be used to get gfxoff residency. 2377 * 2378 * Returns standard response codes. 2379 */ 2380 static u32 vangogh_get_gfxoff_residency(struct smu_context *smu, uint32_t *residency) 2381 { 2382 struct amdgpu_device *adev = smu->adev; 2383 2384 *residency = adev->gfx.gfx_off_residency; 2385 2386 return 0; 2387 } 2388 2389 /** 2390 * vangogh_get_gfxoff_entrycount - get gfxoff entry count 2391 * 2392 * @smu: amdgpu_device pointer 2393 * 2394 * This function will be used to get gfxoff entry count 2395 * 2396 * Returns standard response codes. 
2397 */ 2398 static u32 vangogh_get_gfxoff_entrycount(struct smu_context *smu, uint64_t *entrycount) 2399 { 2400 int ret = 0, value = 0; 2401 struct amdgpu_device *adev = smu->adev; 2402 2403 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) 2404 return 0; 2405 2406 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetGfxOffEntryCount, &value); 2407 *entrycount = value + adev->gfx.gfx_off_entrycount; 2408 2409 return ret; 2410 } 2411 2412 static const struct pptable_funcs vangogh_ppt_funcs = { 2413 2414 .check_fw_status = smu_v11_0_check_fw_status, 2415 .check_fw_version = smu_v11_0_check_fw_version, 2416 .init_smc_tables = vangogh_init_smc_tables, 2417 .fini_smc_tables = smu_v11_0_fini_smc_tables, 2418 .init_power = smu_v11_0_init_power, 2419 .fini_power = smu_v11_0_fini_power, 2420 .register_irq_handler = smu_v11_0_register_irq_handler, 2421 .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, 2422 .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param, 2423 .send_smc_msg = smu_cmn_send_smc_msg, 2424 .dpm_set_vcn_enable = vangogh_dpm_set_vcn_enable, 2425 .dpm_set_jpeg_enable = vangogh_dpm_set_jpeg_enable, 2426 .is_dpm_running = vangogh_is_dpm_running, 2427 .read_sensor = vangogh_read_sensor, 2428 .get_enabled_mask = smu_cmn_get_enabled_mask, 2429 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, 2430 .set_watermarks_table = vangogh_set_watermarks_table, 2431 .set_driver_table_location = smu_v11_0_set_driver_table_location, 2432 .interrupt_work = smu_v11_0_interrupt_work, 2433 .get_gpu_metrics = vangogh_common_get_gpu_metrics, 2434 .od_edit_dpm_table = vangogh_od_edit_dpm_table, 2435 .print_clk_levels = vangogh_common_print_clk_levels, 2436 .set_default_dpm_table = vangogh_set_default_dpm_tables, 2437 .set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters, 2438 .system_features_control = vangogh_system_features_control, 2439 .feature_is_enabled = smu_cmn_feature_is_enabled, 2440 .set_power_profile_mode = 
vangogh_set_power_profile_mode, 2441 .get_power_profile_mode = vangogh_get_power_profile_mode, 2442 .get_dpm_clock_table = vangogh_get_dpm_clock_table, 2443 .force_clk_levels = vangogh_force_clk_levels, 2444 .set_performance_level = vangogh_set_performance_level, 2445 .post_init = vangogh_post_smu_init, 2446 .mode2_reset = vangogh_mode2_reset, 2447 .gfx_off_control = smu_v11_0_gfx_off_control, 2448 .get_gfx_off_status = vangogh_get_gfxoff_status, 2449 .get_gfx_off_entrycount = vangogh_get_gfxoff_entrycount, 2450 .get_gfx_off_residency = vangogh_get_gfxoff_residency, 2451 .set_gfx_off_residency = vangogh_set_gfxoff_residency, 2452 .get_ppt_limit = vangogh_get_ppt_limit, 2453 .get_power_limit = vangogh_get_power_limit, 2454 .set_power_limit = vangogh_set_power_limit, 2455 .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values, 2456 }; 2457 2458 void vangogh_set_ppt_funcs(struct smu_context *smu) 2459 { 2460 smu->ppt_funcs = &vangogh_ppt_funcs; 2461 smu->message_map = vangogh_message_map; 2462 smu->feature_map = vangogh_feature_mask_map; 2463 smu->table_map = vangogh_table_map; 2464 smu->workload_map = vangogh_workload_map; 2465 smu->is_apu = true; 2466 smu_v11_0_set_smu_mailbox_registers(smu); 2467 } 2468