1 /* 2 * Copyright 2020 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #define SWSMU_CODE_LAYER_L2 25 26 #include "amdgpu.h" 27 #include "amdgpu_smu.h" 28 #include "smu_v11_0.h" 29 #include "smu11_driver_if_vangogh.h" 30 #include "vangogh_ppt.h" 31 #include "smu_v11_5_ppsmc.h" 32 #include "smu_v11_5_pmfw.h" 33 #include "smu_cmn.h" 34 #include "soc15_common.h" 35 #include "asic_reg/gc/gc_10_3_0_offset.h" 36 #include "asic_reg/gc/gc_10_3_0_sh_mask.h" 37 #include <asm/processor.h> 38 39 /* 40 * DO NOT use these for err/warn/info/debug messages. 41 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 42 * They are more MGPU friendly. 43 */ 44 #undef pr_err 45 #undef pr_warn 46 #undef pr_info 47 #undef pr_debug 48 49 // Registers related to GFXOFF 50 // addressBlock: smuio_smuio_SmuSmuioDec 51 // base address: 0x5a000 52 #define mmSMUIO_GFX_MISC_CNTL 0x00c5 53 #define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0 54 55 //SMUIO_GFX_MISC_CNTL 56 #define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff__SHIFT 0x0 57 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1 58 #define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff_MASK 0x00000001L 59 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L 60 61 #define FEATURE_MASK(feature) (1ULL << feature) 62 #define SMC_DPM_FEATURE ( \ 63 FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ 64 FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \ 65 FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ 66 FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \ 67 FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \ 68 FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \ 69 FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \ 70 FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \ 71 FEATURE_MASK(FEATURE_GFX_DPM_BIT)) 72 73 static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = { 74 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), 75 MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0), 76 MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 0), 77 MSG_MAP(EnableGfxOff, PPSMC_MSG_EnableGfxOff, 0), 78 MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0), 79 MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0), 80 MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 0), 81 MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 0), 82 MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0), 83 MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0), 84 MSG_MAP(RlcPowerNotify, PPSMC_MSG_RlcPowerNotify, 0), 85 MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 0), 86 MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 0), 87 MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 0), 88 MSG_MAP(SetHardMinIspiclkByFreq, PPSMC_MSG_SetHardMinIspiclkByFreq, 0), 89 MSG_MAP(SetHardMinIspxclkByFreq, PPSMC_MSG_SetHardMinIspxclkByFreq, 0), 90 MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 0), 91 MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 0), 92 MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0), 93 MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), 94 MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset, 0), 95 MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 0), 96 MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 0), 97 MSG_MAP(SetSoftMinFclk, PPSMC_MSG_SetSoftMinFclk, 0), 98 MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 0), 99 MSG_MAP(EnablePostCode, PPSMC_MSG_EnablePostCode, 0), 100 MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency, 0), 101 MSG_MAP(GetFclkFrequency, PPSMC_MSG_GetFclkFrequency, 0), 102 MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0), 103 MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 0), 104 MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 0), 105 MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 0), 106 MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 0), 107 MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage, 0), 108 MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0), 109 MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0), 110 MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 0), 111 MSG_MAP(SetSoftMinSocclkByFreq, PPSMC_MSG_SetSoftMinSocclkByFreq, 0), 112 MSG_MAP(PowerUpCvip, PPSMC_MSG_PowerUpCvip, 0), 113 MSG_MAP(PowerDownCvip, PPSMC_MSG_PowerDownCvip, 0), 114 MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), 115 MSG_MAP(GetThermalLimit, PPSMC_MSG_GetThermalLimit, 0), 116 MSG_MAP(GetCurrentTemperature, PPSMC_MSG_GetCurrentTemperature, 0), 117 MSG_MAP(GetCurrentPower, PPSMC_MSG_GetCurrentPower, 0), 118 MSG_MAP(GetCurrentVoltage, PPSMC_MSG_GetCurrentVoltage, 0), 119 MSG_MAP(GetCurrentCurrent, PPSMC_MSG_GetCurrentCurrent, 0), 120 MSG_MAP(GetAverageCpuActivity, PPSMC_MSG_GetAverageCpuActivity, 0), 121 MSG_MAP(GetAverageGfxActivity, PPSMC_MSG_GetAverageGfxActivity, 0), 122 MSG_MAP(GetAveragePower, PPSMC_MSG_GetAveragePower, 0), 123 MSG_MAP(GetAverageTemperature, PPSMC_MSG_GetAverageTemperature, 0), 124 MSG_MAP(SetAveragePowerTimeConstant, PPSMC_MSG_SetAveragePowerTimeConstant, 0), 125 MSG_MAP(SetAverageActivityTimeConstant, PPSMC_MSG_SetAverageActivityTimeConstant, 0), 126 MSG_MAP(SetAverageTemperatureTimeConstant, PPSMC_MSG_SetAverageTemperatureTimeConstant, 0), 127 MSG_MAP(SetMitigationEndHysteresis, PPSMC_MSG_SetMitigationEndHysteresis, 0), 128 MSG_MAP(GetCurrentFreq, PPSMC_MSG_GetCurrentFreq, 0), 129 MSG_MAP(SetReducedPptLimit, PPSMC_MSG_SetReducedPptLimit, 0), 130 MSG_MAP(SetReducedThermalLimit, PPSMC_MSG_SetReducedThermalLimit, 0), 131 MSG_MAP(DramLogSetDramAddr, PPSMC_MSG_DramLogSetDramAddr, 0), 132 MSG_MAP(StartDramLogging, PPSMC_MSG_StartDramLogging, 0), 133 MSG_MAP(StopDramLogging, PPSMC_MSG_StopDramLogging, 0), 134 MSG_MAP(SetSoftMinCclk, PPSMC_MSG_SetSoftMinCclk, 0), 135 MSG_MAP(SetSoftMaxCclk, PPSMC_MSG_SetSoftMaxCclk, 0), 136 MSG_MAP(RequestActiveWgp, PPSMC_MSG_RequestActiveWgp, 0), 137 MSG_MAP(SetFastPPTLimit, PPSMC_MSG_SetFastPPTLimit, 0), 138 MSG_MAP(SetSlowPPTLimit, PPSMC_MSG_SetSlowPPTLimit, 0), 139 MSG_MAP(GetFastPPTLimit, PPSMC_MSG_GetFastPPTLimit, 0), 140 MSG_MAP(GetSlowPPTLimit, PPSMC_MSG_GetSlowPPTLimit, 0), 141 MSG_MAP(GetGfxOffStatus, PPSMC_MSG_GetGfxOffStatus, 0), 142 MSG_MAP(GetGfxOffEntryCount, PPSMC_MSG_GetGfxOffEntryCount, 0), 143 MSG_MAP(LogGfxOffResidency, PPSMC_MSG_LogGfxOffResidency, 0), 144 }; 145 146 static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = { 147 FEA_MAP(PPT), 148 FEA_MAP(TDC), 149 FEA_MAP(THERMAL), 150 FEA_MAP(DS_GFXCLK), 151 FEA_MAP(DS_SOCCLK), 152 FEA_MAP(DS_LCLK), 153 FEA_MAP(DS_FCLK), 154 FEA_MAP(DS_MP1CLK), 155 FEA_MAP(DS_MP0CLK), 156 FEA_MAP(ATHUB_PG), 157 FEA_MAP(CCLK_DPM), 158 FEA_MAP(FAN_CONTROLLER), 159 FEA_MAP(ULV), 160 FEA_MAP(VCN_DPM), 161 FEA_MAP(LCLK_DPM), 162 FEA_MAP(SHUBCLK_DPM), 163 FEA_MAP(DCFCLK_DPM), 164 FEA_MAP(DS_DCFCLK), 165 FEA_MAP(S0I2), 166 FEA_MAP(SMU_LOW_POWER), 167 FEA_MAP(GFX_DEM), 168 FEA_MAP(PSI), 169 FEA_MAP(PROCHOT), 170 FEA_MAP(CPUOFF), 171 FEA_MAP(STAPM), 172 FEA_MAP(S0I3), 173 FEA_MAP(DF_CSTATES), 174 FEA_MAP(PERF_LIMIT), 175 FEA_MAP(CORE_DLDO), 176 FEA_MAP(RSMU_LOW_POWER), 177 FEA_MAP(SMN_LOW_POWER), 178 FEA_MAP(THM_LOW_POWER), 179 FEA_MAP(SMUIO_LOW_POWER), 180 FEA_MAP(MP1_LOW_POWER), 181 FEA_MAP(DS_VCN), 182 FEA_MAP(CPPC), 183 FEA_MAP(OS_CSTATES), 184 FEA_MAP(ISP_DPM), 185 FEA_MAP(A55_DPM), 186 FEA_MAP(CVIP_DSP_DPM), 187 FEA_MAP(MSMU_LOW_POWER), 188 FEA_MAP_REVERSE(SOCCLK), 189 FEA_MAP_REVERSE(FCLK), 190 FEA_MAP_HALF_REVERSE(GFX), 191 }; 192 193 static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = { 194 TAB_MAP_VALID(WATERMARKS), 195 TAB_MAP_VALID(SMU_METRICS), 196 TAB_MAP_VALID(CUSTOM_DPM), 197 TAB_MAP_VALID(DPMCLOCKS), 198 }; 199 200 static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { 201 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), 202 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), 203 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), 204 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), 205 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), 206 }; 207 208 static const uint8_t vangogh_throttler_map[] = { 209 [THROTTLER_STATUS_BIT_SPL] = (SMU_THROTTLER_SPL_BIT), 210 [THROTTLER_STATUS_BIT_FPPT] = (SMU_THROTTLER_FPPT_BIT), 211 [THROTTLER_STATUS_BIT_SPPT] = (SMU_THROTTLER_SPPT_BIT), 212 [THROTTLER_STATUS_BIT_SPPT_APU] = (SMU_THROTTLER_SPPT_APU_BIT), 213 [THROTTLER_STATUS_BIT_THM_CORE] = (SMU_THROTTLER_TEMP_CORE_BIT), 214 [THROTTLER_STATUS_BIT_THM_GFX] = (SMU_THROTTLER_TEMP_GPU_BIT), 215 [THROTTLER_STATUS_BIT_THM_SOC] = (SMU_THROTTLER_TEMP_SOC_BIT), 216 [THROTTLER_STATUS_BIT_TDC_VDD] = (SMU_THROTTLER_TDC_VDD_BIT), 217 [THROTTLER_STATUS_BIT_TDC_SOC] = (SMU_THROTTLER_TDC_SOC_BIT), 218 [THROTTLER_STATUS_BIT_TDC_GFX] = (SMU_THROTTLER_TDC_GFX_BIT), 219 [THROTTLER_STATUS_BIT_TDC_CVIP] = (SMU_THROTTLER_TDC_CVIP_BIT), 220 }; 221 222 static int vangogh_tables_init(struct smu_context *smu) 223 { 224 struct smu_table_context *smu_table = &smu->smu_table; 225 struct smu_table *tables = smu_table->tables; 226 struct amdgpu_device *adev = smu->adev; 227 uint32_t if_version; 228 uint32_t ret = 0; 229 230 ret = smu_cmn_get_smc_version(smu, &if_version, NULL); 231 if (ret) { 232 dev_err(adev->dev, "Failed to get smu if version!\n"); 233 goto err0_out; 234 } 235 236 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), 237 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 238 SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t), 239 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 240 SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, 241 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 242 SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t), 243 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 244 245 if (if_version < 0x3) { 246 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t), 247 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 248 smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL); 249 } else { 250 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), 251 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 252 smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); 253 } 254 if (!smu_table->metrics_table) 255 goto err0_out; 256 smu_table->metrics_time = 0; 257 258 smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2); 259 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 260 if (!smu_table->gpu_metrics_table) 261 goto err1_out; 262 263 smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); 264 if (!smu_table->watermarks_table) 265 goto err2_out; 266 267 smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL); 268 if (!smu_table->clocks_table) 269 goto err3_out; 270 271 return 0; 272 273 err3_out: 274 kfree(smu_table->watermarks_table); 275 err2_out: 276 kfree(smu_table->gpu_metrics_table); 277 err1_out: 278 kfree(smu_table->metrics_table); 279 err0_out: 280 return -ENOMEM; 281 } 282 283 static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu, 284 MetricsMember_t member, 285 uint32_t *value) 286 { 287 struct smu_table_context *smu_table = &smu->smu_table; 288 SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table; 289 int ret = 0; 290 291 ret = smu_cmn_get_metrics_table(smu, 292 NULL, 293 false); 294 if (ret) 295 return ret; 296 297 switch (member) { 298 case METRICS_CURR_GFXCLK: 299 *value = metrics->GfxclkFrequency; 300 break; 301 case METRICS_AVERAGE_SOCCLK: 302 *value = metrics->SocclkFrequency; 303 break; 304 case METRICS_AVERAGE_VCLK: 305 *value = metrics->VclkFrequency; 306 break; 307 case METRICS_AVERAGE_DCLK: 308 *value = metrics->DclkFrequency; 309 break; 310 case METRICS_CURR_UCLK: 311 *value = metrics->MemclkFrequency; 312 break; 313 case METRICS_AVERAGE_GFXACTIVITY: 314 *value = metrics->GfxActivity / 100; 315 break; 316 case METRICS_AVERAGE_VCNACTIVITY: 317 *value = metrics->UvdActivity; 318 break; 319 case METRICS_AVERAGE_SOCKETPOWER: 320 *value = (metrics->CurrentSocketPower << 8) / 321 1000 ; 322 break; 323 case METRICS_TEMPERATURE_EDGE: 324 *value = metrics->GfxTemperature / 100 * 325 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 326 break; 327 case METRICS_TEMPERATURE_HOTSPOT: 328 *value = metrics->SocTemperature / 100 * 329 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 330 break; 331 case METRICS_THROTTLER_STATUS: 332 *value = metrics->ThrottlerStatus; 333 break; 334 case METRICS_VOLTAGE_VDDGFX: 335 *value = metrics->Voltage[2]; 336 break; 337 case METRICS_VOLTAGE_VDDSOC: 338 *value = metrics->Voltage[1]; 339 break; 340 case METRICS_AVERAGE_CPUCLK: 341 memcpy(value, &metrics->CoreFrequency[0], 342 smu->cpu_core_num * sizeof(uint16_t)); 343 break; 344 default: 345 *value = UINT_MAX; 346 break; 347 } 348 349 return ret; 350 } 351 352 static int vangogh_get_smu_metrics_data(struct smu_context *smu, 353 MetricsMember_t member, 354 uint32_t *value) 355 { 356 struct smu_table_context *smu_table = &smu->smu_table; 357 SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; 358 int ret = 0; 359 360 ret = smu_cmn_get_metrics_table(smu, 361 NULL, 362 false); 363 if (ret) 364 return ret; 365 366 switch (member) { 367 case METRICS_CURR_GFXCLK: 368 *value = metrics->Current.GfxclkFrequency; 369 break; 370 case METRICS_AVERAGE_SOCCLK: 371 *value = metrics->Current.SocclkFrequency; 372 break; 373 case METRICS_AVERAGE_VCLK: 374 *value = metrics->Current.VclkFrequency; 375 break; 376 case METRICS_AVERAGE_DCLK: 377 *value = metrics->Current.DclkFrequency; 378 break; 379 case METRICS_CURR_UCLK: 380 *value = metrics->Current.MemclkFrequency; 381 break; 382 case METRICS_AVERAGE_GFXACTIVITY: 383 *value = metrics->Current.GfxActivity; 384 break; 385 case METRICS_AVERAGE_VCNACTIVITY: 386 *value = metrics->Current.UvdActivity; 387 break; 388 case METRICS_AVERAGE_SOCKETPOWER: 389 *value = (metrics->Current.CurrentSocketPower << 8) / 390 1000; 391 break; 392 case METRICS_TEMPERATURE_EDGE: 393 *value = metrics->Current.GfxTemperature / 100 * 394 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 395 break; 396 case METRICS_TEMPERATURE_HOTSPOT: 397 *value = metrics->Current.SocTemperature / 100 * 398 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 399 break; 400 case METRICS_THROTTLER_STATUS: 401 *value = metrics->Current.ThrottlerStatus; 402 break; 403 case METRICS_VOLTAGE_VDDGFX: 404 *value = metrics->Current.Voltage[2]; 405 break; 406 case METRICS_VOLTAGE_VDDSOC: 407 *value = metrics->Current.Voltage[1]; 408 break; 409 case METRICS_AVERAGE_CPUCLK: 410 memcpy(value, &metrics->Current.CoreFrequency[0], 411 smu->cpu_core_num * sizeof(uint16_t)); 412 break; 413 default: 414 *value = UINT_MAX; 415 break; 416 } 417 418 return ret; 419 } 420 421 static int vangogh_common_get_smu_metrics_data(struct smu_context *smu, 422 MetricsMember_t member, 423 uint32_t *value) 424 { 425 struct amdgpu_device *adev = smu->adev; 426 uint32_t if_version; 427 int ret = 0; 428 429 ret = smu_cmn_get_smc_version(smu, &if_version, NULL); 430 if (ret) { 431 dev_err(adev->dev, "Failed to get smu if version!\n"); 432 return ret; 433 } 434 435 if (if_version < 0x3) 436 ret = vangogh_get_legacy_smu_metrics_data(smu, member, value); 437 else 438 ret = vangogh_get_smu_metrics_data(smu, member, value); 439 440 return ret; 441 } 442 443 static int vangogh_allocate_dpm_context(struct smu_context *smu) 444 { 445 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 446 447 smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context), 448 GFP_KERNEL); 449 if (!smu_dpm->dpm_context) 450 return -ENOMEM; 451 452 smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context); 453 454 return 0; 455 } 456 457 static int vangogh_init_smc_tables(struct smu_context *smu) 458 { 459 int ret = 0; 460 461 ret = vangogh_tables_init(smu); 462 if (ret) 463 return ret; 464 465 ret = vangogh_allocate_dpm_context(smu); 466 if (ret) 467 return ret; 468 469 #ifdef CONFIG_X86 470 /* AMD x86 APU only */ 471 smu->cpu_core_num = boot_cpu_data.x86_max_cores; 472 #else 473 smu->cpu_core_num = 4; 474 #endif 475 476 return smu_v11_0_init_smc_tables(smu); 477 } 478 479 static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable) 480 { 481 int ret = 0; 482 483 if (enable) { 484 /* vcn dpm on is a prerequisite for vcn power gate messages */ 485 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL); 486 if (ret) 487 return ret; 488 } else { 489 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL); 490 if (ret) 491 return ret; 492 } 493 494 return ret; 495 } 496 497 static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) 498 { 499 int ret = 0; 500 501 if (enable) { 502 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL); 503 if (ret) 504 return ret; 505 } else { 506 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL); 507 if (ret) 508 return ret; 509 } 510 511 return ret; 512 } 513 514 static bool vangogh_is_dpm_running(struct smu_context *smu) 515 { 516 struct amdgpu_device *adev = smu->adev; 517 int ret = 0; 518 uint64_t feature_enabled; 519 520 /* we need to re-init after suspend so return false */ 521 if (adev->in_suspend) 522 return false; 523 524 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 525 526 if (ret) 527 return false; 528 529 return !!(feature_enabled & SMC_DPM_FEATURE); 530 } 531 532 static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type, 533 uint32_t dpm_level, uint32_t *freq) 534 { 535 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 536 537 if (!clk_table || clk_type >= SMU_CLK_COUNT) 538 return -EINVAL; 539 540 switch (clk_type) { 541 case SMU_SOCCLK: 542 if (dpm_level >= clk_table->NumSocClkLevelsEnabled) 543 return -EINVAL; 544 *freq = clk_table->SocClocks[dpm_level]; 545 break; 546 case SMU_VCLK: 547 if (dpm_level >= clk_table->VcnClkLevelsEnabled) 548 return -EINVAL; 549 *freq = clk_table->VcnClocks[dpm_level].vclk; 550 break; 551 case SMU_DCLK: 552 if (dpm_level >= clk_table->VcnClkLevelsEnabled) 553 return -EINVAL; 554 *freq = clk_table->VcnClocks[dpm_level].dclk; 555 break; 556 case SMU_UCLK: 557 case SMU_MCLK: 558 if (dpm_level >= clk_table->NumDfPstatesEnabled) 559 return -EINVAL; 560 *freq = clk_table->DfPstateTable[dpm_level].memclk; 561 562 break; 563 case SMU_FCLK: 564 if (dpm_level >= clk_table->NumDfPstatesEnabled) 565 return -EINVAL; 566 *freq = clk_table->DfPstateTable[dpm_level].fclk; 567 break; 568 default: 569 return -EINVAL; 570 } 571 572 return 0; 573 } 574 575 static int vangogh_print_legacy_clk_levels(struct smu_context *smu, 576 enum smu_clk_type clk_type, char *buf) 577 { 578 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 579 SmuMetrics_legacy_t metrics; 580 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 581 int i, size = 0, ret = 0; 582 uint32_t cur_value = 0, value = 0, count = 0; 583 bool cur_value_match_level = false; 584 585 memset(&metrics, 0, sizeof(metrics)); 586 587 ret = smu_cmn_get_metrics_table(smu, &metrics, false); 588 if (ret) 589 return ret; 590 591 smu_cmn_get_sysfs_buf(&buf, &size); 592 593 switch (clk_type) { 594 case SMU_OD_SCLK: 595 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 596 size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); 597 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 598 (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); 599 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 600 (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); 601 } 602 break; 603 case SMU_OD_CCLK: 604 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 605 size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); 606 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 607 (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); 608 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 609 (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); 610 } 611 break; 612 case SMU_OD_RANGE: 613 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 614 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); 615 size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", 616 smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); 617 size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", 618 smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); 619 } 620 break; 621 case SMU_SOCCLK: 622 /* the level 3 ~ 6 of socclk use the same frequency for vangogh */ 623 count = clk_table->NumSocClkLevelsEnabled; 624 cur_value = metrics.SocclkFrequency; 625 break; 626 case SMU_VCLK: 627 count = clk_table->VcnClkLevelsEnabled; 628 cur_value = metrics.VclkFrequency; 629 break; 630 case SMU_DCLK: 631 count = clk_table->VcnClkLevelsEnabled; 632 cur_value = metrics.DclkFrequency; 633 break; 634 case SMU_MCLK: 635 count = clk_table->NumDfPstatesEnabled; 636 cur_value = metrics.MemclkFrequency; 637 break; 638 case SMU_FCLK: 639 count = clk_table->NumDfPstatesEnabled; 640 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value); 641 if (ret) 642 return ret; 643 break; 644 default: 645 break; 646 } 647 648 switch (clk_type) { 649 case SMU_SOCCLK: 650 case SMU_VCLK: 651 case SMU_DCLK: 652 case SMU_MCLK: 653 case SMU_FCLK: 654 for (i = 0; i < count; i++) { 655 ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value); 656 if (ret) 657 return ret; 658 if (!value) 659 continue; 660 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value, 661 cur_value == value ? "*" : ""); 662 if (cur_value == value) 663 cur_value_match_level = true; 664 } 665 666 if (!cur_value_match_level) 667 size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value); 668 break; 669 default: 670 break; 671 } 672 673 return size; 674 } 675 676 static int vangogh_print_clk_levels(struct smu_context *smu, 677 enum smu_clk_type clk_type, char *buf) 678 { 679 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 680 SmuMetrics_t metrics; 681 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 682 int i, size = 0, ret = 0; 683 uint32_t cur_value = 0, value = 0, count = 0; 684 bool cur_value_match_level = false; 685 uint32_t min, max; 686 687 memset(&metrics, 0, sizeof(metrics)); 688 689 ret = smu_cmn_get_metrics_table(smu, &metrics, false); 690 if (ret) 691 return ret; 692 693 smu_cmn_get_sysfs_buf(&buf, &size); 694 695 switch (clk_type) { 696 case SMU_OD_SCLK: 697 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 698 size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); 699 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 700 (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); 701 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 702 (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); 703 } 704 break; 705 case SMU_OD_CCLK: 706 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 707 size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); 708 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 709 (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); 710 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 711 (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); 712 } 713 break; 714 case SMU_OD_RANGE: 715 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 716 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); 717 size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", 718 smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); 719 size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", 720 smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); 721 } 722 break; 723 case SMU_SOCCLK: 724 /* the level 3 ~ 6 of socclk use the same frequency for vangogh */ 725 count = clk_table->NumSocClkLevelsEnabled; 726 cur_value = metrics.Current.SocclkFrequency; 727 break; 728 case SMU_VCLK: 729 count = clk_table->VcnClkLevelsEnabled; 730 cur_value = metrics.Current.VclkFrequency; 731 break; 732 case SMU_DCLK: 733 count = clk_table->VcnClkLevelsEnabled; 734 cur_value = metrics.Current.DclkFrequency; 735 break; 736 case SMU_MCLK: 737 count = clk_table->NumDfPstatesEnabled; 738 cur_value = metrics.Current.MemclkFrequency; 739 break; 740 case SMU_FCLK: 741 count = clk_table->NumDfPstatesEnabled; 742 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value); 743 if (ret) 744 return ret; 745 break; 746 case SMU_GFXCLK: 747 case SMU_SCLK: 748 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetGfxclkFrequency, 0, &cur_value); 749 if (ret) { 750 return ret; 751 } 752 break; 753 default: 754 break; 755 } 756 757 switch (clk_type) { 758 case SMU_SOCCLK: 759 case SMU_VCLK: 760 case SMU_DCLK: 761 case SMU_MCLK: 762 case SMU_FCLK: 763 for (i = 0; i < count; i++) { 764 ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value); 765 if (ret) 766 return ret; 767 if (!value) 768 continue; 769 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value, 770 cur_value == value ? "*" : ""); 771 if (cur_value == value) 772 cur_value_match_level = true; 773 } 774 775 if (!cur_value_match_level) 776 size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value); 777 break; 778 case SMU_GFXCLK: 779 case SMU_SCLK: 780 min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; 781 max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; 782 if (cur_value == max) 783 i = 2; 784 else if (cur_value == min) 785 i = 0; 786 else 787 i = 1; 788 size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min, 789 i == 0 ? "*" : ""); 790 size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", 791 i == 1 ? cur_value : VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, 792 i == 1 ? "*" : ""); 793 size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, 794 i == 2 ? "*" : ""); 795 break; 796 default: 797 break; 798 } 799 800 return size; 801 } 802 803 static int vangogh_common_print_clk_levels(struct smu_context *smu, 804 enum smu_clk_type clk_type, char *buf) 805 { 806 struct amdgpu_device *adev = smu->adev; 807 uint32_t if_version; 808 int ret = 0; 809 810 ret = smu_cmn_get_smc_version(smu, &if_version, NULL); 811 if (ret) { 812 dev_err(adev->dev, "Failed to get smu if version!\n"); 813 return ret; 814 } 815 816 if (if_version < 0x3) 817 ret = vangogh_print_legacy_clk_levels(smu, clk_type, buf); 818 else 819 ret = vangogh_print_clk_levels(smu, clk_type, buf); 820 821 return ret; 822 } 823 824 static int vangogh_get_profiling_clk_mask(struct smu_context *smu, 825 enum amd_dpm_forced_level level, 826 uint32_t *vclk_mask, 827 uint32_t *dclk_mask, 828 uint32_t *mclk_mask, 829 uint32_t *fclk_mask, 830 uint32_t *soc_mask) 831 { 832 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 833 834 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { 835 if (mclk_mask) 836 *mclk_mask = clk_table->NumDfPstatesEnabled - 1; 837 838 if (fclk_mask) 839 *fclk_mask = clk_table->NumDfPstatesEnabled - 1; 840 841 if (soc_mask) 842 *soc_mask = 0; 843 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 844 if (mclk_mask) 845 *mclk_mask = 0; 846 847 if (fclk_mask) 848 *fclk_mask = 0; 849 850 if (soc_mask) 851 *soc_mask = 1; 852 853 if (vclk_mask) 854 *vclk_mask = 1; 855 856 if (dclk_mask) 857 *dclk_mask = 1; 858 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) { 859 if (mclk_mask) 860 *mclk_mask = 0; 861 862 if (fclk_mask) 863 *fclk_mask = 0; 864 865 if (soc_mask) 866 *soc_mask = 1; 867 868 if (vclk_mask) 869 *vclk_mask = 1; 870 871 if (dclk_mask) 872 *dclk_mask = 1; 873 } 874 875 return 0; 876 } 877 878 static bool vangogh_clk_dpm_is_enabled(struct smu_context *smu, 879 enum smu_clk_type clk_type) 880 { 881 enum smu_feature_mask feature_id = 0; 882 883 switch (clk_type) { 884 case SMU_MCLK: 885 case SMU_UCLK: 886 case SMU_FCLK: 887 feature_id = SMU_FEATURE_DPM_FCLK_BIT; 888 break; 889 case SMU_GFXCLK: 890 case SMU_SCLK: 891 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT; 892 break; 893 case SMU_SOCCLK: 894 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT; 895 break; 896 case SMU_VCLK: 897 case SMU_DCLK: 898 feature_id = SMU_FEATURE_VCN_DPM_BIT; 899 break; 900 default: 901 return true; 902 } 903 904 if (!smu_cmn_feature_is_enabled(smu, feature_id)) 905 return false; 906 907 return true; 908 } 909 910 static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu, 911 enum smu_clk_type clk_type, 912 uint32_t *min, 913 uint32_t *max) 914 { 915 int ret = 0; 916 uint32_t soc_mask; 917 uint32_t vclk_mask; 918 uint32_t dclk_mask; 919 uint32_t mclk_mask; 920 uint32_t fclk_mask; 921 uint32_t clock_limit; 922 923 if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) { 924 switch (clk_type) { 925 case SMU_MCLK: 926 case SMU_UCLK: 927 clock_limit = smu->smu_table.boot_values.uclk; 928 break; 929 case SMU_FCLK: 930 clock_limit = smu->smu_table.boot_values.fclk; 931 break; 932 case SMU_GFXCLK: 933 case SMU_SCLK: 934 clock_limit = smu->smu_table.boot_values.gfxclk; 935 break; 936 case SMU_SOCCLK: 937 clock_limit = smu->smu_table.boot_values.socclk; 938 break; 939 case SMU_VCLK: 940 clock_limit = smu->smu_table.boot_values.vclk; 941 break; 942 case SMU_DCLK: 943 clock_limit = smu->smu_table.boot_values.dclk; 944 break; 945 default: 946 clock_limit = 0; 947 break; 948 } 949 950 /* clock in Mhz unit */ 951 if (min) 952 *min = clock_limit / 100; 953 if (max) 954 *max = clock_limit / 100; 955 956 return 0; 957 } 958 if (max) { 959 ret = vangogh_get_profiling_clk_mask(smu, 960 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK, 961 &vclk_mask, 962 &dclk_mask, 963 &mclk_mask, 964 &fclk_mask, 965 &soc_mask); 966 if (ret) 967 goto failed; 968 969 switch (clk_type) { 970 case SMU_UCLK: 971 case SMU_MCLK: 972 ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, max); 973 if (ret) 974 goto failed; 975 break; 976 case SMU_SOCCLK: 977 ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, max); 978 if (ret) 979 goto failed; 980 break; 981 case SMU_FCLK: 982 ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, max); 983 if (ret) 984 goto failed; 985 break; 986 case SMU_VCLK: 987 ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, max); 988 if (ret) 989 goto failed; 990 break; 991 case SMU_DCLK: 992 ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, max); 993 if (ret) 994 goto failed; 995 break; 996 default: 997 ret = -EINVAL; 998 goto failed; 999 } 1000 } 1001 if (min) { 1002 switch (clk_type) { 1003 case SMU_UCLK: 1004 case SMU_MCLK: 1005 ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, min); 1006 if (ret) 1007 goto failed; 1008 break; 1009 case SMU_SOCCLK: 1010 ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, min); 1011 if (ret) 1012 goto failed; 1013 break; 1014 case SMU_FCLK: 1015 ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, min); 1016 if (ret) 1017 goto failed; 1018 break; 1019 case SMU_VCLK: 1020 ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, min); 1021 if (ret) 1022 goto failed; 1023 break; 1024 case SMU_DCLK: 1025 ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, min); 1026 if (ret) 1027 goto failed; 1028 break; 1029 default: 1030 ret = -EINVAL; 1031 goto failed; 1032 } 1033 } 1034 failed: 1035 return ret; 1036 } 1037 1038 static int vangogh_get_power_profile_mode(struct smu_context *smu, 1039 char *buf) 1040 { 1041 uint32_t i, size = 0; 1042 int16_t workload_type = 0; 1043 1044 if (!buf) 1045 return -EINVAL; 1046 1047 for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { 1048 /* 1049 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT 1050 * Not all profile modes are supported on vangogh. 1051 */ 1052 workload_type = smu_cmn_to_asic_specific_index(smu, 1053 CMN2ASIC_MAPPING_WORKLOAD, 1054 i); 1055 1056 if (workload_type < 0) 1057 continue; 1058 1059 size += sysfs_emit_at(buf, size, "%2d %14s%s\n", 1060 i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); 1061 } 1062 1063 return size; 1064 } 1065 1066 static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) 1067 { 1068 int workload_type, ret; 1069 uint32_t profile_mode = input[size]; 1070 1071 if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { 1072 dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); 1073 return -EINVAL; 1074 } 1075 1076 if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT || 1077 profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) 1078 return 0; 1079 1080 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 1081 workload_type = smu_cmn_to_asic_specific_index(smu, 1082 CMN2ASIC_MAPPING_WORKLOAD, 1083 profile_mode); 1084 if (workload_type < 0) { 1085 dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n", 1086 profile_mode); 1087 return -EINVAL; 1088 } 1089 1090 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, 1091 1 << workload_type, 1092 NULL); 1093 if (ret) { 1094 dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", 1095 workload_type); 1096 return ret; 1097 } 1098 1099 smu->power_profile_mode = profile_mode; 1100 1101 return 0; 1102 } 1103 1104 static int vangogh_set_soft_freq_limited_range(struct smu_context *smu, 1105 enum smu_clk_type clk_type, 1106 uint32_t min, 1107 uint32_t max) 1108 { 1109 int ret = 0; 1110 1111 if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) 1112 return 0; 1113 1114 switch (clk_type) { 1115 case SMU_GFXCLK: 1116 case SMU_SCLK: 1117 ret = smu_cmn_send_smc_msg_with_param(smu, 1118 SMU_MSG_SetHardMinGfxClk, 1119 min, NULL); 1120 if (ret) 1121 return ret; 1122 1123 ret = smu_cmn_send_smc_msg_with_param(smu, 1124 SMU_MSG_SetSoftMaxGfxClk, 1125 max, NULL); 1126 if (ret) 1127 return ret; 1128 break; 1129 case SMU_FCLK: 1130 ret = smu_cmn_send_smc_msg_with_param(smu, 1131 SMU_MSG_SetHardMinFclkByFreq, 1132 min, NULL); 1133 if (ret) 1134 return ret; 1135 1136 ret = smu_cmn_send_smc_msg_with_param(smu, 1137 SMU_MSG_SetSoftMaxFclkByFreq, 1138 max, NULL); 1139 if (ret) 1140 return ret; 1141 break; 1142 case SMU_SOCCLK: 1143 ret = smu_cmn_send_smc_msg_with_param(smu, 1144 SMU_MSG_SetHardMinSocclkByFreq, 1145 min, NULL); 1146 if (ret) 1147 return ret; 1148 1149 ret = smu_cmn_send_smc_msg_with_param(smu, 1150 SMU_MSG_SetSoftMaxSocclkByFreq, 1151 max, NULL); 1152 if (ret) 1153 return ret; 1154 break; 1155 case SMU_VCLK: 1156 ret = smu_cmn_send_smc_msg_with_param(smu, 1157 SMU_MSG_SetHardMinVcn, 1158 min << 16, NULL); 1159 if (ret) 1160 return ret; 1161 ret = smu_cmn_send_smc_msg_with_param(smu, 1162 SMU_MSG_SetSoftMaxVcn, 1163 max << 16, NULL); 1164 if (ret) 1165 return ret; 1166 break; 1167 case SMU_DCLK: 1168 ret = smu_cmn_send_smc_msg_with_param(smu, 1169 SMU_MSG_SetHardMinVcn, 1170 min, NULL); 1171 if (ret) 1172 return ret; 1173 ret = smu_cmn_send_smc_msg_with_param(smu, 1174 SMU_MSG_SetSoftMaxVcn, 1175 max, NULL); 1176 if (ret) 1177 return ret; 1178 break; 1179 default: 1180 return -EINVAL; 1181 } 1182 1183 return ret; 1184 } 1185 1186 static int vangogh_force_clk_levels(struct smu_context *smu, 1187 enum smu_clk_type clk_type, uint32_t mask) 1188 { 1189 uint32_t soft_min_level = 0, soft_max_level = 0; 1190 uint32_t min_freq = 0, max_freq = 0; 1191 int ret = 0 ; 1192 1193 soft_min_level = mask ? (ffs(mask) - 1) : 0; 1194 soft_max_level = mask ? (fls(mask) - 1) : 0; 1195 1196 switch (clk_type) { 1197 case SMU_SOCCLK: 1198 ret = vangogh_get_dpm_clk_limited(smu, clk_type, 1199 soft_min_level, &min_freq); 1200 if (ret) 1201 return ret; 1202 ret = vangogh_get_dpm_clk_limited(smu, clk_type, 1203 soft_max_level, &max_freq); 1204 if (ret) 1205 return ret; 1206 ret = smu_cmn_send_smc_msg_with_param(smu, 1207 SMU_MSG_SetSoftMaxSocclkByFreq, 1208 max_freq, NULL); 1209 if (ret) 1210 return ret; 1211 ret = smu_cmn_send_smc_msg_with_param(smu, 1212 SMU_MSG_SetHardMinSocclkByFreq, 1213 min_freq, NULL); 1214 if (ret) 1215 return ret; 1216 break; 1217 case SMU_FCLK: 1218 ret = vangogh_get_dpm_clk_limited(smu, 1219 clk_type, soft_min_level, &min_freq); 1220 if (ret) 1221 return ret; 1222 ret = vangogh_get_dpm_clk_limited(smu, 1223 clk_type, soft_max_level, &max_freq); 1224 if (ret) 1225 return ret; 1226 ret = smu_cmn_send_smc_msg_with_param(smu, 1227 SMU_MSG_SetSoftMaxFclkByFreq, 1228 max_freq, NULL); 1229 if (ret) 1230 return ret; 1231 ret = smu_cmn_send_smc_msg_with_param(smu, 1232 SMU_MSG_SetHardMinFclkByFreq, 1233 min_freq, NULL); 1234 if (ret) 1235 return ret; 1236 break; 1237 case SMU_VCLK: 1238 ret = vangogh_get_dpm_clk_limited(smu, 1239 clk_type, soft_min_level, &min_freq); 1240 if (ret) 1241 return ret; 1242 1243 ret = vangogh_get_dpm_clk_limited(smu, 1244 clk_type, soft_max_level, &max_freq); 1245 if (ret) 1246 return ret; 1247 1248 1249 ret = smu_cmn_send_smc_msg_with_param(smu, 1250 SMU_MSG_SetHardMinVcn, 1251 min_freq << 16, NULL); 1252 if (ret) 1253 return ret; 1254 1255 ret = smu_cmn_send_smc_msg_with_param(smu, 1256 SMU_MSG_SetSoftMaxVcn, 1257 max_freq << 16, NULL); 1258 if (ret) 1259 return ret; 1260 1261 break; 1262 case SMU_DCLK: 1263 ret = vangogh_get_dpm_clk_limited(smu, 1264 clk_type, soft_min_level, &min_freq); 1265 if (ret) 1266 return ret; 1267 1268 ret = vangogh_get_dpm_clk_limited(smu, 1269 clk_type, soft_max_level, &max_freq); 1270 if (ret) 1271 return ret; 1272 1273 ret = smu_cmn_send_smc_msg_with_param(smu, 1274 SMU_MSG_SetHardMinVcn, 1275 min_freq, NULL); 1276 if (ret) 1277 return ret; 1278 1279 ret = smu_cmn_send_smc_msg_with_param(smu, 1280 SMU_MSG_SetSoftMaxVcn, 1281 max_freq, NULL); 1282 if (ret) 1283 return ret; 1284 1285 break; 1286 default: 1287 break; 1288 } 1289 1290 return ret; 1291 } 1292 1293 static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest) 1294 { 1295 int ret = 0, i = 0; 1296 uint32_t min_freq, max_freq, force_freq; 1297 enum smu_clk_type clk_type; 1298 1299 enum smu_clk_type clks[] = { 1300 SMU_SOCCLK, 1301 SMU_VCLK, 1302 SMU_DCLK, 1303 SMU_FCLK, 1304 }; 1305 1306 for (i = 0; i < ARRAY_SIZE(clks); i++) { 1307 clk_type = clks[i]; 1308 ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq); 1309 if (ret) 1310 return ret; 1311 1312 force_freq = highest ? max_freq : min_freq; 1313 ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq); 1314 if (ret) 1315 return ret; 1316 } 1317 1318 return ret; 1319 } 1320 1321 static int vangogh_unforce_dpm_levels(struct smu_context *smu) 1322 { 1323 int ret = 0, i = 0; 1324 uint32_t min_freq, max_freq; 1325 enum smu_clk_type clk_type; 1326 1327 struct clk_feature_map { 1328 enum smu_clk_type clk_type; 1329 uint32_t feature; 1330 } clk_feature_map[] = { 1331 {SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT}, 1332 {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT}, 1333 {SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT}, 1334 {SMU_DCLK, SMU_FEATURE_VCN_DPM_BIT}, 1335 }; 1336 1337 for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) { 1338 1339 if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature)) 1340 continue; 1341 1342 clk_type = clk_feature_map[i].clk_type; 1343 1344 ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq); 1345 1346 if (ret) 1347 return ret; 1348 1349 ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); 1350 1351 if (ret) 1352 return ret; 1353 } 1354 1355 return ret; 1356 } 1357 1358 static int vangogh_set_peak_clock_by_device(struct smu_context *smu) 1359 { 1360 int ret = 0; 1361 uint32_t socclk_freq = 0, fclk_freq = 0; 1362 uint32_t vclk_freq = 0, dclk_freq = 0; 1363 1364 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_freq); 1365 if (ret) 1366 return ret; 1367 1368 ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq); 1369 if (ret) 1370 return ret; 1371 1372 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_freq); 1373 if (ret) 1374 return ret; 1375 1376 ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq); 1377 if (ret) 1378 return ret; 1379 1380 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_freq); 1381 if (ret) 1382 return ret; 1383 1384 ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq); 1385 if (ret) 1386 return ret; 1387 1388 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_freq); 1389 if (ret) 1390 return ret; 1391 1392 ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq); 1393 if (ret) 1394 return ret; 1395 1396 return ret; 1397 } 1398 1399 static int vangogh_set_performance_level(struct smu_context *smu, 1400 enum amd_dpm_forced_level level) 1401 { 1402 int ret = 0, i; 1403 uint32_t soc_mask, mclk_mask, fclk_mask; 1404 uint32_t vclk_mask = 0, dclk_mask = 0; 1405 1406 smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; 1407 smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; 1408 1409 switch (level) { 1410 case AMD_DPM_FORCED_LEVEL_HIGH: 1411 smu->gfx_actual_hard_min_freq = smu->gfx_default_soft_max_freq; 1412 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 1413 1414 1415 ret = vangogh_force_dpm_limit_value(smu, true); 1416 if (ret) 1417 return ret; 1418 break; 1419 case AMD_DPM_FORCED_LEVEL_LOW: 1420 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1421 smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq; 1422 1423 ret = vangogh_force_dpm_limit_value(smu, false); 1424 if (ret) 1425 return ret; 1426 break; 1427 case AMD_DPM_FORCED_LEVEL_AUTO: 1428 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1429 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 1430 1431 ret = vangogh_unforce_dpm_levels(smu); 1432 if (ret) 1433 return ret; 1434 break; 1435 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 1436 smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK; 1437 smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK; 1438 1439 ret = vangogh_get_profiling_clk_mask(smu, level, 1440 &vclk_mask, 1441 &dclk_mask, 1442 &mclk_mask, 1443 &fclk_mask, 1444 &soc_mask); 1445 if (ret) 1446 return ret; 1447 1448 vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); 1449 vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); 1450 vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask); 1451 vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask); 1452 break; 1453 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 1454 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1455 smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq; 1456 break; 1457 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 1458 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1459 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 1460 1461 ret = vangogh_get_profiling_clk_mask(smu, level, 1462 NULL, 1463 NULL, 1464 &mclk_mask, 1465 &fclk_mask, 1466 NULL); 1467 if (ret) 1468 return ret; 1469 1470 vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); 1471 break; 1472 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 1473 smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK; 1474 smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK; 1475 1476 ret = vangogh_set_peak_clock_by_device(smu); 1477 if (ret) 1478 return ret; 1479 break; 1480 case AMD_DPM_FORCED_LEVEL_MANUAL: 1481 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 1482 default: 1483 return 0; 1484 } 1485 1486 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, 1487 smu->gfx_actual_hard_min_freq, NULL); 1488 if (ret) 1489 return ret; 1490 1491 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, 1492 smu->gfx_actual_soft_max_freq, NULL); 1493 if (ret) 1494 return ret; 1495 1496 if (smu->adev->pm.fw_version >= 0x43f1b00) { 1497 for (i = 0; i < smu->cpu_core_num; i++) { 1498 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk, 1499 ((i << 20) 1500 | smu->cpu_actual_soft_min_freq), 1501 NULL); 1502 if (ret) 1503 return ret; 1504 1505 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk, 1506 ((i << 20) 1507 | smu->cpu_actual_soft_max_freq), 1508 NULL); 1509 if (ret) 1510 return ret; 1511 } 1512 } 1513 1514 return ret; 1515 } 1516 1517 static int vangogh_read_sensor(struct smu_context *smu, 1518 enum amd_pp_sensors sensor, 1519 void *data, uint32_t *size) 1520 { 1521 int ret = 0; 1522 1523 if (!data || !size) 1524 return -EINVAL; 1525 1526 switch (sensor) { 1527 case AMDGPU_PP_SENSOR_GPU_LOAD: 1528 ret = vangogh_common_get_smu_metrics_data(smu, 1529 METRICS_AVERAGE_GFXACTIVITY, 1530 (uint32_t *)data); 1531 *size = 4; 1532 break; 1533 case AMDGPU_PP_SENSOR_GPU_POWER: 1534 ret = vangogh_common_get_smu_metrics_data(smu, 1535 METRICS_AVERAGE_SOCKETPOWER, 1536 (uint32_t *)data); 1537 *size = 4; 1538 break; 1539 case AMDGPU_PP_SENSOR_EDGE_TEMP: 1540 ret = vangogh_common_get_smu_metrics_data(smu, 1541 METRICS_TEMPERATURE_EDGE, 1542 (uint32_t *)data); 1543 *size = 4; 1544 break; 1545 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 1546 ret = vangogh_common_get_smu_metrics_data(smu, 1547 METRICS_TEMPERATURE_HOTSPOT, 1548 (uint32_t *)data); 1549 *size = 4; 1550 break; 1551 case AMDGPU_PP_SENSOR_GFX_MCLK: 1552 ret = vangogh_common_get_smu_metrics_data(smu, 1553 METRICS_CURR_UCLK, 1554 (uint32_t *)data); 1555 *(uint32_t *)data *= 100; 1556 *size = 4; 1557 break; 1558 case AMDGPU_PP_SENSOR_GFX_SCLK: 1559 ret = vangogh_common_get_smu_metrics_data(smu, 1560 METRICS_CURR_GFXCLK, 1561 (uint32_t *)data); 1562 *(uint32_t *)data *= 100; 1563 *size = 4; 1564 break; 1565 case AMDGPU_PP_SENSOR_VDDGFX: 1566 ret = vangogh_common_get_smu_metrics_data(smu, 1567 METRICS_VOLTAGE_VDDGFX, 1568 (uint32_t *)data); 1569 *size = 4; 1570 break; 1571 case AMDGPU_PP_SENSOR_VDDNB: 1572 ret = vangogh_common_get_smu_metrics_data(smu, 1573 METRICS_VOLTAGE_VDDSOC, 1574 (uint32_t *)data); 1575 *size = 4; 1576 break; 1577 case AMDGPU_PP_SENSOR_CPU_CLK: 1578 ret = vangogh_common_get_smu_metrics_data(smu, 1579 METRICS_AVERAGE_CPUCLK, 1580 (uint32_t *)data); 1581 *size = smu->cpu_core_num * sizeof(uint16_t); 1582 break; 1583 default: 1584 ret = -EOPNOTSUPP; 1585 break; 1586 } 1587 1588 return ret; 1589 } 1590 1591 static int vangogh_set_watermarks_table(struct smu_context *smu, 1592 struct pp_smu_wm_range_sets *clock_ranges) 1593 { 1594 int i; 1595 int ret = 0; 1596 Watermarks_t *table = smu->smu_table.watermarks_table; 1597 1598 if (!table || !clock_ranges) 1599 return -EINVAL; 1600 1601 if (clock_ranges) { 1602 if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES || 1603 clock_ranges->num_writer_wm_sets > NUM_WM_RANGES) 1604 return -EINVAL; 1605 1606 for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) { 1607 table->WatermarkRow[WM_DCFCLK][i].MinClock = 1608 clock_ranges->reader_wm_sets[i].min_drain_clk_mhz; 1609 table->WatermarkRow[WM_DCFCLK][i].MaxClock = 1610 clock_ranges->reader_wm_sets[i].max_drain_clk_mhz; 1611 table->WatermarkRow[WM_DCFCLK][i].MinMclk = 1612 clock_ranges->reader_wm_sets[i].min_fill_clk_mhz; 1613 table->WatermarkRow[WM_DCFCLK][i].MaxMclk = 1614 clock_ranges->reader_wm_sets[i].max_fill_clk_mhz; 1615 1616 table->WatermarkRow[WM_DCFCLK][i].WmSetting = 1617 clock_ranges->reader_wm_sets[i].wm_inst; 1618 } 1619 1620 for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) { 1621 table->WatermarkRow[WM_SOCCLK][i].MinClock = 1622 clock_ranges->writer_wm_sets[i].min_fill_clk_mhz; 1623 table->WatermarkRow[WM_SOCCLK][i].MaxClock = 1624 clock_ranges->writer_wm_sets[i].max_fill_clk_mhz; 1625 table->WatermarkRow[WM_SOCCLK][i].MinMclk = 1626 clock_ranges->writer_wm_sets[i].min_drain_clk_mhz; 1627 table->WatermarkRow[WM_SOCCLK][i].MaxMclk = 1628 clock_ranges->writer_wm_sets[i].max_drain_clk_mhz; 1629 1630 table->WatermarkRow[WM_SOCCLK][i].WmSetting = 1631 clock_ranges->writer_wm_sets[i].wm_inst; 1632 } 1633 1634 smu->watermarks_bitmap |= WATERMARKS_EXIST; 1635 } 1636 1637 /* pass data to smu controller */ 1638 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && 1639 !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { 1640 ret = smu_cmn_write_watermarks_table(smu); 1641 if (ret) { 1642 dev_err(smu->adev->dev, "Failed to update WMTABLE!"); 1643 return ret; 1644 } 1645 smu->watermarks_bitmap |= WATERMARKS_LOADED; 1646 } 1647 1648 return 0; 1649 } 1650 1651 static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu, 1652 void **table) 1653 { 1654 struct smu_table_context *smu_table = &smu->smu_table; 1655 struct gpu_metrics_v2_2 *gpu_metrics = 1656 (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table; 1657 SmuMetrics_legacy_t metrics; 1658 int ret = 0; 1659 1660 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 1661 if (ret) 1662 return ret; 1663 1664 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2); 1665 1666 gpu_metrics->temperature_gfx = metrics.GfxTemperature; 1667 gpu_metrics->temperature_soc = metrics.SocTemperature; 1668 memcpy(&gpu_metrics->temperature_core[0], 1669 &metrics.CoreTemperature[0], 1670 sizeof(uint16_t) * 4); 1671 gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0]; 1672 1673 gpu_metrics->average_gfx_activity = metrics.GfxActivity; 1674 gpu_metrics->average_mm_activity = metrics.UvdActivity; 1675 1676 gpu_metrics->average_socket_power = metrics.CurrentSocketPower; 1677 gpu_metrics->average_cpu_power = metrics.Power[0]; 1678 gpu_metrics->average_soc_power = metrics.Power[1]; 1679 gpu_metrics->average_gfx_power = metrics.Power[2]; 1680 memcpy(&gpu_metrics->average_core_power[0], 1681 &metrics.CorePower[0], 1682 sizeof(uint16_t) * 4); 1683 1684 gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency; 1685 gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency; 1686 gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency; 1687 gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency; 1688 gpu_metrics->average_vclk_frequency = metrics.VclkFrequency; 1689 gpu_metrics->average_dclk_frequency = metrics.DclkFrequency; 1690 1691 memcpy(&gpu_metrics->current_coreclk[0], 1692 &metrics.CoreFrequency[0], 1693 sizeof(uint16_t) * 4); 1694 gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0]; 1695 1696 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 1697 gpu_metrics->indep_throttle_status = 1698 smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus, 1699 vangogh_throttler_map); 1700 1701 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1702 1703 *table = (void *)gpu_metrics; 1704 1705 return sizeof(struct gpu_metrics_v2_2); 1706 } 1707 1708 static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, 1709 void **table) 1710 { 1711 struct smu_table_context *smu_table = &smu->smu_table; 1712 struct gpu_metrics_v2_2 *gpu_metrics = 1713 (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table; 1714 SmuMetrics_t metrics; 1715 int ret = 0; 1716 1717 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 1718 if (ret) 1719 return ret; 1720 1721 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2); 1722 1723 gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature; 1724 gpu_metrics->temperature_soc = metrics.Current.SocTemperature; 1725 memcpy(&gpu_metrics->temperature_core[0], 1726 &metrics.Current.CoreTemperature[0], 1727 sizeof(uint16_t) * 4); 1728 gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0]; 1729 1730 gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity; 1731 gpu_metrics->average_mm_activity = metrics.Current.UvdActivity; 1732 1733 gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower; 1734 gpu_metrics->average_cpu_power = metrics.Current.Power[0]; 1735 gpu_metrics->average_soc_power = metrics.Current.Power[1]; 1736 gpu_metrics->average_gfx_power = metrics.Current.Power[2]; 1737 memcpy(&gpu_metrics->average_core_power[0], 1738 &metrics.Average.CorePower[0], 1739 sizeof(uint16_t) * 4); 1740 1741 gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency; 1742 gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency; 1743 gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency; 1744 gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency; 1745 gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency; 1746 gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency; 1747 1748 gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency; 1749 gpu_metrics->current_socclk = metrics.Current.SocclkFrequency; 1750 gpu_metrics->current_uclk = metrics.Current.MemclkFrequency; 1751 gpu_metrics->current_fclk = metrics.Current.MemclkFrequency; 1752 gpu_metrics->current_vclk = metrics.Current.VclkFrequency; 1753 gpu_metrics->current_dclk = metrics.Current.DclkFrequency; 1754 1755 memcpy(&gpu_metrics->current_coreclk[0], 1756 &metrics.Current.CoreFrequency[0], 1757 sizeof(uint16_t) * 4); 1758 gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0]; 1759 1760 gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus; 1761 gpu_metrics->indep_throttle_status = 1762 smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus, 1763 vangogh_throttler_map); 1764 1765 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1766 1767 *table = (void *)gpu_metrics; 1768 1769 return sizeof(struct gpu_metrics_v2_2); 1770 } 1771 1772 static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu, 1773 void **table) 1774 { 1775 struct amdgpu_device *adev = smu->adev; 1776 uint32_t if_version; 1777 int ret = 0; 1778 1779 ret = smu_cmn_get_smc_version(smu, &if_version, NULL); 1780 if (ret) { 1781 dev_err(adev->dev, "Failed to get smu if version!\n"); 1782 return ret; 1783 } 1784 1785 if (if_version < 0x3) 1786 ret = vangogh_get_legacy_gpu_metrics(smu, table); 1787 else 1788 ret = vangogh_get_gpu_metrics(smu, table); 1789 1790 return ret; 1791 } 1792 1793 static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, 1794 long input[], uint32_t size) 1795 { 1796 int ret = 0; 1797 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1798 1799 if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) { 1800 dev_warn(smu->adev->dev, 1801 "pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n"); 1802 return -EINVAL; 1803 } 1804 1805 switch (type) { 1806 case PP_OD_EDIT_CCLK_VDDC_TABLE: 1807 if (size != 3) { 1808 dev_err(smu->adev->dev, "Input parameter number not correct (should be 4 for processor)\n"); 1809 return -EINVAL; 1810 } 1811 if (input[0] >= smu->cpu_core_num) { 1812 dev_err(smu->adev->dev, "core index is overflow, should be less than %d\n", 1813 smu->cpu_core_num); 1814 } 1815 smu->cpu_core_id_select = input[0]; 1816 if (input[1] == 0) { 1817 if (input[2] < smu->cpu_default_soft_min_freq) { 1818 dev_warn(smu->adev->dev, "Fine grain setting minimum cclk (%ld) MHz is less than the minimum allowed (%d) MHz\n", 1819 input[2], smu->cpu_default_soft_min_freq); 1820 return -EINVAL; 1821 } 1822 smu->cpu_actual_soft_min_freq = input[2]; 1823 } else if (input[1] == 1) { 1824 if (input[2] > smu->cpu_default_soft_max_freq) { 1825 dev_warn(smu->adev->dev, "Fine grain setting maximum cclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n", 1826 input[2], smu->cpu_default_soft_max_freq); 1827 return -EINVAL; 1828 } 1829 smu->cpu_actual_soft_max_freq = input[2]; 1830 } else { 1831 return -EINVAL; 1832 } 1833 break; 1834 case PP_OD_EDIT_SCLK_VDDC_TABLE: 1835 if (size != 2) { 1836 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 1837 return -EINVAL; 1838 } 1839 1840 if (input[0] == 0) { 1841 if (input[1] < smu->gfx_default_hard_min_freq) { 1842 dev_warn(smu->adev->dev, 1843 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n", 1844 input[1], smu->gfx_default_hard_min_freq); 1845 return -EINVAL; 1846 } 1847 smu->gfx_actual_hard_min_freq = input[1]; 1848 } else if (input[0] == 1) { 1849 if (input[1] > smu->gfx_default_soft_max_freq) { 1850 dev_warn(smu->adev->dev, 1851 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n", 1852 input[1], smu->gfx_default_soft_max_freq); 1853 return -EINVAL; 1854 } 1855 smu->gfx_actual_soft_max_freq = input[1]; 1856 } else { 1857 return -EINVAL; 1858 } 1859 break; 1860 case PP_OD_RESTORE_DEFAULT_TABLE: 1861 if (size != 0) { 1862 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 1863 return -EINVAL; 1864 } else { 1865 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1866 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 1867 smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; 1868 smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; 1869 } 1870 break; 1871 case PP_OD_COMMIT_DPM_TABLE: 1872 if (size != 0) { 1873 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 1874 return -EINVAL; 1875 } else { 1876 if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) { 1877 dev_err(smu->adev->dev, 1878 "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n", 1879 smu->gfx_actual_hard_min_freq, 1880 smu->gfx_actual_soft_max_freq); 1881 return -EINVAL; 1882 } 1883 1884 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, 1885 smu->gfx_actual_hard_min_freq, NULL); 1886 if (ret) { 1887 dev_err(smu->adev->dev, "Set hard min sclk failed!"); 1888 return ret; 1889 } 1890 1891 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, 1892 smu->gfx_actual_soft_max_freq, NULL); 1893 if (ret) { 1894 dev_err(smu->adev->dev, "Set soft max sclk failed!"); 1895 return ret; 1896 } 1897 1898 if (smu->adev->pm.fw_version < 0x43f1b00) { 1899 dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n"); 1900 break; 1901 } 1902 1903 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk, 1904 ((smu->cpu_core_id_select << 20) 1905 | smu->cpu_actual_soft_min_freq), 1906 NULL); 1907 if (ret) { 1908 dev_err(smu->adev->dev, "Set hard min cclk failed!"); 1909 return ret; 1910 } 1911 1912 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk, 1913 ((smu->cpu_core_id_select << 20) 1914 | smu->cpu_actual_soft_max_freq), 1915 NULL); 1916 if (ret) { 1917 dev_err(smu->adev->dev, "Set soft max cclk failed!"); 1918 return ret; 1919 } 1920 } 1921 break; 1922 default: 1923 return -ENOSYS; 1924 } 1925 1926 return ret; 1927 } 1928 1929 static int vangogh_set_default_dpm_tables(struct smu_context *smu) 1930 { 1931 struct smu_table_context *smu_table = &smu->smu_table; 1932 1933 return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); 1934 } 1935 1936 static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) 1937 { 1938 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 1939 1940 smu->gfx_default_hard_min_freq = clk_table->MinGfxClk; 1941 smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk; 1942 smu->gfx_actual_hard_min_freq = 0; 1943 smu->gfx_actual_soft_max_freq = 0; 1944 1945 smu->cpu_default_soft_min_freq = 1400; 1946 smu->cpu_default_soft_max_freq = 3500; 1947 smu->cpu_actual_soft_min_freq = 0; 1948 smu->cpu_actual_soft_max_freq = 0; 1949 1950 return 0; 1951 } 1952 1953 static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table) 1954 { 1955 DpmClocks_t *table = smu->smu_table.clocks_table; 1956 int i; 1957 1958 if (!clock_table || !table) 1959 return -EINVAL; 1960 1961 for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) { 1962 clock_table->SocClocks[i].Freq = table->SocClocks[i]; 1963 clock_table->SocClocks[i].Vol = table->SocVoltage[i]; 1964 } 1965 1966 for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) { 1967 clock_table->FClocks[i].Freq = table->DfPstateTable[i].fclk; 1968 clock_table->FClocks[i].Vol = table->DfPstateTable[i].voltage; 1969 } 1970 1971 for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) { 1972 clock_table->MemClocks[i].Freq = table->DfPstateTable[i].memclk; 1973 clock_table->MemClocks[i].Vol = table->DfPstateTable[i].voltage; 1974 } 1975 1976 return 0; 1977 } 1978 1979 1980 static int vangogh_system_features_control(struct smu_context *smu, bool en) 1981 { 1982 struct amdgpu_device *adev = smu->adev; 1983 int ret = 0; 1984 1985 if (adev->pm.fw_version >= 0x43f1700 && !en) 1986 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify, 1987 RLC_STATUS_OFF, NULL); 1988 1989 return ret; 1990 } 1991 1992 static int vangogh_post_smu_init(struct smu_context *smu) 1993 { 1994 struct amdgpu_device *adev = smu->adev; 1995 uint32_t tmp; 1996 int ret = 0; 1997 uint8_t aon_bits = 0; 1998 /* Two CUs in one WGP */ 1999 uint32_t req_active_wgps = adev->gfx.cu_info.number/2; 2000 uint32_t total_cu = adev->gfx.config.max_cu_per_sh * 2001 adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines; 2002 2003 /* allow message will be sent after enable message on Vangogh*/ 2004 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) && 2005 (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { 2006 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL); 2007 if (ret) { 2008 dev_err(adev->dev, "Failed to Enable GfxOff!\n"); 2009 return ret; 2010 } 2011 } else { 2012 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 2013 dev_info(adev->dev, "If GFX DPM or power gate disabled, disable GFXOFF\n"); 2014 } 2015 2016 /* if all CUs are active, no need to power off any WGPs */ 2017 if (total_cu == adev->gfx.cu_info.number) 2018 return 0; 2019 2020 /* 2021 * Calculate the total bits number of always on WGPs for all SA/SEs in 2022 * RLC_PG_ALWAYS_ON_WGP_MASK. 2023 */ 2024 tmp = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_ALWAYS_ON_WGP_MASK)); 2025 tmp &= RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK_MASK; 2026 2027 aon_bits = hweight32(tmp) * adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines; 2028 2029 /* Do not request any WGPs less than set in the AON_WGP_MASK */ 2030 if (aon_bits > req_active_wgps) { 2031 dev_info(adev->dev, "Number of always on WGPs greater than active WGPs: WGP power save not requested.\n"); 2032 return 0; 2033 } else { 2034 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestActiveWgp, req_active_wgps, NULL); 2035 } 2036 } 2037 2038 static int vangogh_mode_reset(struct smu_context *smu, int type) 2039 { 2040 int ret = 0, index = 0; 2041 2042 index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, 2043 SMU_MSG_GfxDeviceDriverReset); 2044 if (index < 0) 2045 return index == -EACCES ? 0 : index; 2046 2047 mutex_lock(&smu->message_lock); 2048 2049 ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type); 2050 2051 mutex_unlock(&smu->message_lock); 2052 2053 mdelay(10); 2054 2055 return ret; 2056 } 2057 2058 static int vangogh_mode2_reset(struct smu_context *smu) 2059 { 2060 return vangogh_mode_reset(smu, SMU_RESET_MODE_2); 2061 } 2062 2063 /** 2064 * vangogh_get_gfxoff_status - Get gfxoff status 2065 * 2066 * @smu: amdgpu_device pointer 2067 * 2068 * Get current gfxoff status 2069 * 2070 * Return: 2071 * * 0 - GFXOFF (default if enabled). 2072 * * 1 - Transition out of GFX State. 2073 * * 2 - Not in GFXOFF. 2074 * * 3 - Transition into GFXOFF. 2075 */ 2076 static u32 vangogh_get_gfxoff_status(struct smu_context *smu) 2077 { 2078 struct amdgpu_device *adev = smu->adev; 2079 u32 reg, gfxoff_status; 2080 2081 reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL); 2082 gfxoff_status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK) 2083 >> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT; 2084 2085 return gfxoff_status; 2086 } 2087 2088 static int vangogh_get_power_limit(struct smu_context *smu, 2089 uint32_t *current_power_limit, 2090 uint32_t *default_power_limit, 2091 uint32_t *max_power_limit) 2092 { 2093 struct smu_11_5_power_context *power_context = 2094 smu->smu_power.power_context; 2095 uint32_t ppt_limit; 2096 int ret = 0; 2097 2098 if (smu->adev->pm.fw_version < 0x43f1e00) 2099 return ret; 2100 2101 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSlowPPTLimit, &ppt_limit); 2102 if (ret) { 2103 dev_err(smu->adev->dev, "Get slow PPT limit failed!\n"); 2104 return ret; 2105 } 2106 /* convert from milliwatt to watt */ 2107 if (current_power_limit) 2108 *current_power_limit = ppt_limit / 1000; 2109 if (default_power_limit) 2110 *default_power_limit = ppt_limit / 1000; 2111 if (max_power_limit) 2112 *max_power_limit = 29; 2113 2114 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit); 2115 if (ret) { 2116 dev_err(smu->adev->dev, "Get fast PPT limit failed!\n"); 2117 return ret; 2118 } 2119 /* convert from milliwatt to watt */ 2120 power_context->current_fast_ppt_limit = 2121 power_context->default_fast_ppt_limit = ppt_limit / 1000; 2122 power_context->max_fast_ppt_limit = 30; 2123 2124 return ret; 2125 } 2126 2127 static int vangogh_get_ppt_limit(struct smu_context *smu, 2128 uint32_t *ppt_limit, 2129 enum smu_ppt_limit_type type, 2130 enum smu_ppt_limit_level level) 2131 { 2132 struct smu_11_5_power_context *power_context = 2133 smu->smu_power.power_context; 2134 2135 if (!power_context) 2136 return -EOPNOTSUPP; 2137 2138 if (type == SMU_FAST_PPT_LIMIT) { 2139 switch (level) { 2140 case SMU_PPT_LIMIT_MAX: 2141 *ppt_limit = power_context->max_fast_ppt_limit; 2142 break; 2143 case SMU_PPT_LIMIT_CURRENT: 2144 *ppt_limit = power_context->current_fast_ppt_limit; 2145 break; 2146 case SMU_PPT_LIMIT_DEFAULT: 2147 *ppt_limit = power_context->default_fast_ppt_limit; 2148 break; 2149 default: 2150 break; 2151 } 2152 } 2153 2154 return 0; 2155 } 2156 2157 static int vangogh_set_power_limit(struct smu_context *smu, 2158 enum smu_ppt_limit_type limit_type, 2159 uint32_t ppt_limit) 2160 { 2161 struct smu_11_5_power_context *power_context = 2162 smu->smu_power.power_context; 2163 int ret = 0; 2164 2165 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { 2166 dev_err(smu->adev->dev, "Setting new power limit is not supported!\n"); 2167 return -EOPNOTSUPP; 2168 } 2169 2170 switch (limit_type) { 2171 case SMU_DEFAULT_PPT_LIMIT: 2172 ret = smu_cmn_send_smc_msg_with_param(smu, 2173 SMU_MSG_SetSlowPPTLimit, 2174 ppt_limit * 1000, /* convert from watt to milliwatt */ 2175 NULL); 2176 if (ret) 2177 return ret; 2178 2179 smu->current_power_limit = ppt_limit; 2180 break; 2181 case SMU_FAST_PPT_LIMIT: 2182 ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24); 2183 if (ppt_limit > power_context->max_fast_ppt_limit) { 2184 dev_err(smu->adev->dev, 2185 "New power limit (%d) is over the max allowed %d\n", 2186 ppt_limit, power_context->max_fast_ppt_limit); 2187 return ret; 2188 } 2189 2190 ret = smu_cmn_send_smc_msg_with_param(smu, 2191 SMU_MSG_SetFastPPTLimit, 2192 ppt_limit * 1000, /* convert from watt to milliwatt */ 2193 NULL); 2194 if (ret) 2195 return ret; 2196 2197 power_context->current_fast_ppt_limit = ppt_limit; 2198 break; 2199 default: 2200 return -EINVAL; 2201 } 2202 2203 return ret; 2204 } 2205 2206 /** 2207 * vangogh_set_gfxoff_residency 2208 * 2209 * @smu: amdgpu_device pointer 2210 * @start: start/stop residency log 2211 * 2212 * This function will be used to log gfxoff residency 2213 * 2214 * 2215 * Returns standard response codes. 2216 */ 2217 static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start) 2218 { 2219 int ret = 0; 2220 u32 residency; 2221 struct amdgpu_device *adev = smu->adev; 2222 2223 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) 2224 return 0; 2225 2226 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LogGfxOffResidency, 2227 start, &residency); 2228 2229 if (!start) 2230 adev->gfx.gfx_off_residency = residency; 2231 2232 return ret; 2233 } 2234 2235 /** 2236 * vangogh_get_gfxoff_residency 2237 * 2238 * @smu: amdgpu_device pointer 2239 * 2240 * This function will be used to get gfxoff residency. 2241 * 2242 * Returns standard response codes. 2243 */ 2244 static u32 vangogh_get_gfxoff_residency(struct smu_context *smu, uint32_t *residency) 2245 { 2246 struct amdgpu_device *adev = smu->adev; 2247 2248 *residency = adev->gfx.gfx_off_residency; 2249 2250 return 0; 2251 } 2252 2253 /** 2254 * vangogh_get_gfxoff_entrycount - get gfxoff entry count 2255 * 2256 * @smu: amdgpu_device pointer 2257 * 2258 * This function will be used to get gfxoff entry count 2259 * 2260 * Returns standard response codes. 2261 */ 2262 static u32 vangogh_get_gfxoff_entrycount(struct smu_context *smu, uint64_t *entrycount) 2263 { 2264 int ret = 0, value = 0; 2265 struct amdgpu_device *adev = smu->adev; 2266 2267 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) 2268 return 0; 2269 2270 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetGfxOffEntryCount, &value); 2271 *entrycount = value + adev->gfx.gfx_off_entrycount; 2272 2273 return ret; 2274 } 2275 2276 static const struct pptable_funcs vangogh_ppt_funcs = { 2277 2278 .check_fw_status = smu_v11_0_check_fw_status, 2279 .check_fw_version = smu_v11_0_check_fw_version, 2280 .init_smc_tables = vangogh_init_smc_tables, 2281 .fini_smc_tables = smu_v11_0_fini_smc_tables, 2282 .init_power = smu_v11_0_init_power, 2283 .fini_power = smu_v11_0_fini_power, 2284 .register_irq_handler = smu_v11_0_register_irq_handler, 2285 .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, 2286 .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param, 2287 .send_smc_msg = smu_cmn_send_smc_msg, 2288 .dpm_set_vcn_enable = vangogh_dpm_set_vcn_enable, 2289 .dpm_set_jpeg_enable = vangogh_dpm_set_jpeg_enable, 2290 .is_dpm_running = vangogh_is_dpm_running, 2291 .read_sensor = vangogh_read_sensor, 2292 .get_enabled_mask = smu_cmn_get_enabled_mask, 2293 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, 2294 .set_watermarks_table = vangogh_set_watermarks_table, 2295 .set_driver_table_location = smu_v11_0_set_driver_table_location, 2296 .interrupt_work = smu_v11_0_interrupt_work, 2297 .get_gpu_metrics = vangogh_common_get_gpu_metrics, 2298 .od_edit_dpm_table = vangogh_od_edit_dpm_table, 2299 .print_clk_levels = vangogh_common_print_clk_levels, 2300 .set_default_dpm_table = vangogh_set_default_dpm_tables, 2301 .set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters, 2302 .system_features_control = vangogh_system_features_control, 2303 .feature_is_enabled = smu_cmn_feature_is_enabled, 2304 .set_power_profile_mode = vangogh_set_power_profile_mode, 2305 .get_power_profile_mode = vangogh_get_power_profile_mode, 2306 .get_dpm_clock_table = vangogh_get_dpm_clock_table, 2307 .force_clk_levels = vangogh_force_clk_levels, 2308 .set_performance_level = vangogh_set_performance_level, 2309 .post_init = vangogh_post_smu_init, 2310 .mode2_reset = vangogh_mode2_reset, 2311 .gfx_off_control = smu_v11_0_gfx_off_control, 2312 .get_gfx_off_status = vangogh_get_gfxoff_status, 2313 .get_gfx_off_entrycount = vangogh_get_gfxoff_entrycount, 2314 .get_gfx_off_residency = vangogh_get_gfxoff_residency, 2315 .set_gfx_off_residency = vangogh_set_gfxoff_residency, 2316 .get_ppt_limit = vangogh_get_ppt_limit, 2317 .get_power_limit = vangogh_get_power_limit, 2318 .set_power_limit = vangogh_set_power_limit, 2319 .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values, 2320 }; 2321 2322 void vangogh_set_ppt_funcs(struct smu_context *smu) 2323 { 2324 smu->ppt_funcs = &vangogh_ppt_funcs; 2325 smu->message_map = vangogh_message_map; 2326 smu->feature_map = vangogh_feature_mask_map; 2327 smu->table_map = vangogh_table_map; 2328 smu->workload_map = vangogh_workload_map; 2329 smu->is_apu = true; 2330 smu_v11_0_set_smu_mailbox_registers(smu); 2331 } 2332