1 /* 2 * Copyright 2020 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #define SWSMU_CODE_LAYER_L2 25 26 #include "amdgpu.h" 27 #include "amdgpu_smu.h" 28 #include "smu_v11_0.h" 29 #include "smu11_driver_if_vangogh.h" 30 #include "vangogh_ppt.h" 31 #include "smu_v11_5_ppsmc.h" 32 #include "smu_v11_5_pmfw.h" 33 #include "smu_cmn.h" 34 #include "soc15_common.h" 35 #include "asic_reg/gc/gc_10_3_0_offset.h" 36 #include "asic_reg/gc/gc_10_3_0_sh_mask.h" 37 #include <asm/processor.h> 38 39 /* 40 * DO NOT use these for err/warn/info/debug messages. 41 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 42 * They are more MGPU friendly. 
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

// Registers related to GFXOFF
// addressBlock: smuio_smuio_SmuSmuioDec
// base address: 0x5a000
#define mmSMUIO_GFX_MISC_CNTL 0x00c5
#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0

//SMUIO_GFX_MISC_CNTL
#define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff__SHIFT 0x0
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1
#define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff_MASK 0x00000001L
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L

#define FEATURE_MASK(feature) (1ULL << feature)

/*
 * Set of DPM-related feature bits used by vangogh_is_dpm_running() to
 * decide whether any DPM engine is currently active.
 */
#define SMC_DPM_FEATURE ( \
	FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \
	FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \
	FEATURE_MASK(FEATURE_GFX_DPM_BIT))

/* Map generic SMU_MSG_* identifiers to Van Gogh PPSMC message indices. */
static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
	MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0),
	MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 0),
	MSG_MAP(EnableGfxOff, PPSMC_MSG_EnableGfxOff, 0),
	MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0),
	MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0),
	MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 0),
	MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 0),
	MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0),
	MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0),
	MSG_MAP(RlcPowerNotify, PPSMC_MSG_RlcPowerNotify, 0),
	MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 0),
	MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 0),
	MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 0),
	MSG_MAP(SetHardMinIspiclkByFreq, PPSMC_MSG_SetHardMinIspiclkByFreq, 0),
	MSG_MAP(SetHardMinIspxclkByFreq, PPSMC_MSG_SetHardMinIspxclkByFreq, 0),
	MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 0),
	MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 0),
	MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0),
	MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
	MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset, 0),
	MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 0),
	MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 0),
	MSG_MAP(SetSoftMinFclk, PPSMC_MSG_SetSoftMinFclk, 0),
	MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 0),
	MSG_MAP(EnablePostCode, PPSMC_MSG_EnablePostCode, 0),
	MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency, 0),
	MSG_MAP(GetFclkFrequency, PPSMC_MSG_GetFclkFrequency, 0),
	MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0),
	MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 0),
	MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 0),
	MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 0),
	MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 0),
	MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage, 0),
	MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0),
	MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0),
	MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 0),
	MSG_MAP(SetSoftMinSocclkByFreq, PPSMC_MSG_SetSoftMinSocclkByFreq, 0),
	MSG_MAP(PowerUpCvip, PPSMC_MSG_PowerUpCvip, 0),
	MSG_MAP(PowerDownCvip, PPSMC_MSG_PowerDownCvip, 0),
	MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
	MSG_MAP(GetThermalLimit, PPSMC_MSG_GetThermalLimit, 0),
	MSG_MAP(GetCurrentTemperature, PPSMC_MSG_GetCurrentTemperature, 0),
	MSG_MAP(GetCurrentPower, PPSMC_MSG_GetCurrentPower, 0),
	MSG_MAP(GetCurrentVoltage, PPSMC_MSG_GetCurrentVoltage, 0),
	MSG_MAP(GetCurrentCurrent, PPSMC_MSG_GetCurrentCurrent, 0),
	MSG_MAP(GetAverageCpuActivity, PPSMC_MSG_GetAverageCpuActivity, 0),
	MSG_MAP(GetAverageGfxActivity, PPSMC_MSG_GetAverageGfxActivity, 0),
	MSG_MAP(GetAveragePower, PPSMC_MSG_GetAveragePower, 0),
	MSG_MAP(GetAverageTemperature, PPSMC_MSG_GetAverageTemperature, 0),
	MSG_MAP(SetAveragePowerTimeConstant, PPSMC_MSG_SetAveragePowerTimeConstant, 0),
	MSG_MAP(SetAverageActivityTimeConstant, PPSMC_MSG_SetAverageActivityTimeConstant, 0),
	MSG_MAP(SetAverageTemperatureTimeConstant, PPSMC_MSG_SetAverageTemperatureTimeConstant, 0),
	MSG_MAP(SetMitigationEndHysteresis, PPSMC_MSG_SetMitigationEndHysteresis, 0),
	MSG_MAP(GetCurrentFreq, PPSMC_MSG_GetCurrentFreq, 0),
	MSG_MAP(SetReducedPptLimit, PPSMC_MSG_SetReducedPptLimit, 0),
	MSG_MAP(SetReducedThermalLimit, PPSMC_MSG_SetReducedThermalLimit, 0),
	MSG_MAP(DramLogSetDramAddr, PPSMC_MSG_DramLogSetDramAddr, 0),
	MSG_MAP(StartDramLogging, PPSMC_MSG_StartDramLogging, 0),
	MSG_MAP(StopDramLogging, PPSMC_MSG_StopDramLogging, 0),
	MSG_MAP(SetSoftMinCclk, PPSMC_MSG_SetSoftMinCclk, 0),
	MSG_MAP(SetSoftMaxCclk, PPSMC_MSG_SetSoftMaxCclk, 0),
	MSG_MAP(RequestActiveWgp, PPSMC_MSG_RequestActiveWgp, 0),
	MSG_MAP(SetFastPPTLimit, PPSMC_MSG_SetFastPPTLimit, 0),
	MSG_MAP(SetSlowPPTLimit, PPSMC_MSG_SetSlowPPTLimit, 0),
	MSG_MAP(GetFastPPTLimit, PPSMC_MSG_GetFastPPTLimit, 0),
	MSG_MAP(GetSlowPPTLimit, PPSMC_MSG_GetSlowPPTLimit, 0),
};

/* Map generic SMU_FEATURE_* bits to the Van Gogh PMFW feature bits. */
static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(PPT),
	FEA_MAP(TDC),
	FEA_MAP(THERMAL),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_FCLK),
	FEA_MAP(DS_MP1CLK),
	FEA_MAP(DS_MP0CLK),
	FEA_MAP(ATHUB_PG),
	FEA_MAP(CCLK_DPM),
	FEA_MAP(FAN_CONTROLLER),
	FEA_MAP(ULV),
	FEA_MAP(VCN_DPM),
	FEA_MAP(LCLK_DPM),
	FEA_MAP(SHUBCLK_DPM),
	FEA_MAP(DCFCLK_DPM),
	FEA_MAP(DS_DCFCLK),
	FEA_MAP(S0I2),
	FEA_MAP(SMU_LOW_POWER),
	FEA_MAP(GFX_DEM),
	FEA_MAP(PSI),
	FEA_MAP(PROCHOT),
	FEA_MAP(CPUOFF),
	FEA_MAP(STAPM),
	FEA_MAP(S0I3),
	FEA_MAP(DF_CSTATES),
	FEA_MAP(PERF_LIMIT),
	FEA_MAP(CORE_DLDO),
	FEA_MAP(RSMU_LOW_POWER),
	FEA_MAP(SMN_LOW_POWER),
	FEA_MAP(THM_LOW_POWER),
	FEA_MAP(SMUIO_LOW_POWER),
	FEA_MAP(MP1_LOW_POWER),
	FEA_MAP(DS_VCN),
	FEA_MAP(CPPC),
	FEA_MAP(OS_CSTATES),
	FEA_MAP(ISP_DPM),
	FEA_MAP(A55_DPM),
	FEA_MAP(CVIP_DSP_DPM),
	FEA_MAP(MSMU_LOW_POWER),
	FEA_MAP_REVERSE(SOCCLK),
	FEA_MAP_REVERSE(FCLK),
	FEA_MAP_HALF_REVERSE(GFX),
};

/* SMU tables backed by driver-side buffers on Van Gogh. */
static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP_VALID(WATERMARKS),
	TAB_MAP_VALID(SMU_METRICS),
	TAB_MAP_VALID(CUSTOM_DPM),
	TAB_MAP_VALID(DPMCLOCKS),
};

/*
 * Map PP power-profile modes to PPLIB workload bits. Profile modes not
 * listed here are unsupported (smu_cmn_to_asic_specific_index() returns
 * a negative value for them).
 */
static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};

/* Translate PMFW THROTTLER_STATUS_BIT_* positions into generic SMU_THROTTLER_* bits. */
static const uint8_t vangogh_throttler_map[] = {
	[THROTTLER_STATUS_BIT_SPL]	= (SMU_THROTTLER_SPL_BIT),
	[THROTTLER_STATUS_BIT_FPPT]	= (SMU_THROTTLER_FPPT_BIT),
	[THROTTLER_STATUS_BIT_SPPT]	= (SMU_THROTTLER_SPPT_BIT),
	[THROTTLER_STATUS_BIT_SPPT_APU]	= (SMU_THROTTLER_SPPT_APU_BIT),
	[THROTTLER_STATUS_BIT_THM_CORE]	= (SMU_THROTTLER_TEMP_CORE_BIT),
	[THROTTLER_STATUS_BIT_THM_GFX]	= (SMU_THROTTLER_TEMP_GPU_BIT),
	[THROTTLER_STATUS_BIT_THM_SOC]	= (SMU_THROTTLER_TEMP_SOC_BIT),
	[THROTTLER_STATUS_BIT_TDC_VDD]	= (SMU_THROTTLER_TDC_VDD_BIT),
	[THROTTLER_STATUS_BIT_TDC_SOC]	= (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_STATUS_BIT_TDC_GFX]	= (SMU_THROTTLER_TDC_GFX_BIT),
[THROTTLER_STATUS_BIT_TDC_CVIP] = (SMU_THROTTLER_TDC_CVIP_BIT), 217 }; 218 219 static int vangogh_tables_init(struct smu_context *smu) 220 { 221 struct smu_table_context *smu_table = &smu->smu_table; 222 struct smu_table *tables = smu_table->tables; 223 struct amdgpu_device *adev = smu->adev; 224 uint32_t if_version; 225 uint32_t ret = 0; 226 227 ret = smu_cmn_get_smc_version(smu, &if_version, NULL); 228 if (ret) { 229 dev_err(adev->dev, "Failed to get smu if version!\n"); 230 goto err0_out; 231 } 232 233 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), 234 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 235 SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t), 236 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 237 SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, 238 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 239 SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t), 240 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 241 242 if (if_version < 0x3) { 243 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t), 244 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 245 smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL); 246 } else { 247 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), 248 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 249 smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); 250 } 251 if (!smu_table->metrics_table) 252 goto err0_out; 253 smu_table->metrics_time = 0; 254 255 smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2); 256 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 257 if (!smu_table->gpu_metrics_table) 258 goto err1_out; 259 260 smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); 261 if (!smu_table->watermarks_table) 262 goto err2_out; 263 264 smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL); 265 if (!smu_table->clocks_table) 266 goto err3_out; 267 268 
return 0; 269 270 err3_out: 271 kfree(smu_table->watermarks_table); 272 err2_out: 273 kfree(smu_table->gpu_metrics_table); 274 err1_out: 275 kfree(smu_table->metrics_table); 276 err0_out: 277 return -ENOMEM; 278 } 279 280 static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu, 281 MetricsMember_t member, 282 uint32_t *value) 283 { 284 struct smu_table_context *smu_table = &smu->smu_table; 285 SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table; 286 int ret = 0; 287 288 ret = smu_cmn_get_metrics_table(smu, 289 NULL, 290 false); 291 if (ret) 292 return ret; 293 294 switch (member) { 295 case METRICS_CURR_GFXCLK: 296 *value = metrics->GfxclkFrequency; 297 break; 298 case METRICS_AVERAGE_SOCCLK: 299 *value = metrics->SocclkFrequency; 300 break; 301 case METRICS_AVERAGE_VCLK: 302 *value = metrics->VclkFrequency; 303 break; 304 case METRICS_AVERAGE_DCLK: 305 *value = metrics->DclkFrequency; 306 break; 307 case METRICS_CURR_UCLK: 308 *value = metrics->MemclkFrequency; 309 break; 310 case METRICS_AVERAGE_GFXACTIVITY: 311 *value = metrics->GfxActivity / 100; 312 break; 313 case METRICS_AVERAGE_VCNACTIVITY: 314 *value = metrics->UvdActivity; 315 break; 316 case METRICS_AVERAGE_SOCKETPOWER: 317 *value = (metrics->CurrentSocketPower << 8) / 318 1000 ; 319 break; 320 case METRICS_TEMPERATURE_EDGE: 321 *value = metrics->GfxTemperature / 100 * 322 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 323 break; 324 case METRICS_TEMPERATURE_HOTSPOT: 325 *value = metrics->SocTemperature / 100 * 326 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 327 break; 328 case METRICS_THROTTLER_STATUS: 329 *value = metrics->ThrottlerStatus; 330 break; 331 case METRICS_VOLTAGE_VDDGFX: 332 *value = metrics->Voltage[2]; 333 break; 334 case METRICS_VOLTAGE_VDDSOC: 335 *value = metrics->Voltage[1]; 336 break; 337 case METRICS_AVERAGE_CPUCLK: 338 memcpy(value, &metrics->CoreFrequency[0], 339 smu->cpu_core_num * sizeof(uint16_t)); 340 break; 341 default: 342 *value = UINT_MAX; 343 
break; 344 } 345 346 return ret; 347 } 348 349 static int vangogh_get_smu_metrics_data(struct smu_context *smu, 350 MetricsMember_t member, 351 uint32_t *value) 352 { 353 struct smu_table_context *smu_table = &smu->smu_table; 354 SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; 355 int ret = 0; 356 357 ret = smu_cmn_get_metrics_table(smu, 358 NULL, 359 false); 360 if (ret) 361 return ret; 362 363 switch (member) { 364 case METRICS_CURR_GFXCLK: 365 *value = metrics->Current.GfxclkFrequency; 366 break; 367 case METRICS_AVERAGE_SOCCLK: 368 *value = metrics->Current.SocclkFrequency; 369 break; 370 case METRICS_AVERAGE_VCLK: 371 *value = metrics->Current.VclkFrequency; 372 break; 373 case METRICS_AVERAGE_DCLK: 374 *value = metrics->Current.DclkFrequency; 375 break; 376 case METRICS_CURR_UCLK: 377 *value = metrics->Current.MemclkFrequency; 378 break; 379 case METRICS_AVERAGE_GFXACTIVITY: 380 *value = metrics->Current.GfxActivity; 381 break; 382 case METRICS_AVERAGE_VCNACTIVITY: 383 *value = metrics->Current.UvdActivity; 384 break; 385 case METRICS_AVERAGE_SOCKETPOWER: 386 *value = (metrics->Current.CurrentSocketPower << 8) / 387 1000; 388 break; 389 case METRICS_TEMPERATURE_EDGE: 390 *value = metrics->Current.GfxTemperature / 100 * 391 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 392 break; 393 case METRICS_TEMPERATURE_HOTSPOT: 394 *value = metrics->Current.SocTemperature / 100 * 395 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 396 break; 397 case METRICS_THROTTLER_STATUS: 398 *value = metrics->Current.ThrottlerStatus; 399 break; 400 case METRICS_VOLTAGE_VDDGFX: 401 *value = metrics->Current.Voltage[2]; 402 break; 403 case METRICS_VOLTAGE_VDDSOC: 404 *value = metrics->Current.Voltage[1]; 405 break; 406 case METRICS_AVERAGE_CPUCLK: 407 memcpy(value, &metrics->Current.CoreFrequency[0], 408 smu->cpu_core_num * sizeof(uint16_t)); 409 break; 410 default: 411 *value = UINT_MAX; 412 break; 413 } 414 415 return ret; 416 } 417 418 static int 
vangogh_common_get_smu_metrics_data(struct smu_context *smu,
				    MetricsMember_t member,
				    uint32_t *value)
{
	/*
	 * Dispatch a metrics read to the legacy or current implementation
	 * depending on the PMFW interface version (< 0x3 means legacy layout).
	 */
	struct amdgpu_device *adev = smu->adev;
	uint32_t if_version;
	int ret = 0;

	ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
	if (ret) {
		dev_err(adev->dev, "Failed to get smu if version!\n");
		return ret;
	}

	if (if_version < 0x3)
		ret = vangogh_get_legacy_smu_metrics_data(smu, member, value);
	else
		ret = vangogh_get_smu_metrics_data(smu, member, value);

	return ret;
}

/* Allocate the smu_11_0 DPM context attached to smu->smu_dpm. */
static int vangogh_allocate_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
				       GFP_KERNEL);
	if (!smu_dpm->dpm_context)
		return -ENOMEM;

	smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context);

	return 0;
}

/*
 * vangogh_init_smc_tables - allocate tables + DPM context, record the CPU
 * core count used when copying per-core frequencies, then run the common
 * smu_v11_0 table init.
 */
static int vangogh_init_smc_tables(struct smu_context *smu)
{
	int ret = 0;

	ret = vangogh_tables_init(smu);
	if (ret)
		return ret;

	ret = vangogh_allocate_dpm_context(smu);
	if (ret)
		return ret;

#ifdef CONFIG_X86
	/* AMD x86 APU only */
	smu->cpu_core_num = boot_cpu_data.x86_max_cores;
#else
	smu->cpu_core_num = 4;
#endif

	return smu_v11_0_init_smc_tables(smu);
}

/* Power VCN up or down via the corresponding PMFW messages. */
static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (enable) {
		/* vcn dpm on is a prerequisite for vcn power gate messages */
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
		if (ret)
			return ret;
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
		if (ret)
			return ret;
	}

	return ret;
}

/* Power the JPEG engine up or down via the corresponding PMFW messages. */
static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (enable) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
		if (ret)
			return ret;
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * vangogh_is_dpm_running - report whether any of the SMC_DPM_FEATURE bits
 * is enabled in the firmware feature mask.
 */
static bool vangogh_is_dpm_running(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
	uint64_t feature_enabled;

	/* we need to re-init after suspend so return false */
	if (adev->in_suspend)
		return false;

	ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);

	if (ret)
		return false;

	return !!(feature_enabled & SMC_DPM_FEATURE);
}

/*
 * vangogh_get_dpm_clk_limited - look up the frequency of DPM level
 * @dpm_level for @clk_type in the firmware-provided DpmClocks_t table.
 * Returns -EINVAL for unknown clocks or out-of-range levels.
 */
static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
				       uint32_t dpm_level, uint32_t *freq)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	if (!clk_table || clk_type >= SMU_CLK_COUNT)
		return -EINVAL;

	switch (clk_type) {
	case SMU_SOCCLK:
		if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->SocClocks[dpm_level];
		break;
	case SMU_VCLK:
		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->VcnClocks[dpm_level].vclk;
		break;
	case SMU_DCLK:
		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->VcnClocks[dpm_level].dclk;
		break;
	case SMU_UCLK:
	case SMU_MCLK:
		/* UCLK and MCLK share the DF pstate memclk entry */
		if (dpm_level >= clk_table->NumDfPstatesEnabled)
			return -EINVAL;
		*freq = clk_table->DfPstateTable[dpm_level].memclk;

		break;
	case SMU_FCLK:
		if (dpm_level >= clk_table->NumDfPstatesEnabled)
			return -EINVAL;
		*freq = clk_table->DfPstateTable[dpm_level].fclk;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * vangogh_print_legacy_clk_levels - emit the sysfs listing of DPM levels
 * for @clk_type using the legacy metrics layout; returns bytes written.
 */
static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
					   enum smu_clk_type clk_type, char *buf)
{
	DpmClocks_t *clk_table
= smu->smu_table.clocks_table;
	SmuMetrics_legacy_t metrics;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int i, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	bool cur_value_match_level = false;

	memset(&metrics, 0, sizeof(metrics));

	ret = smu_cmn_get_metrics_table(smu, &metrics, false);
	if (ret)
		return ret;

	smu_cmn_get_sysfs_buf(&buf, &size);

	/* First switch: gather the level count and the current frequency. */
	switch (clk_type) {
	case SMU_OD_SCLK:
		/* OD values are only shown in manual performance level */
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
		}
		break;
	case SMU_OD_CCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_OD_RANGE:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
				smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
			size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
				smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_SOCCLK:
		/* the level 3 ~ 6 of socclk use the same frequency for vangogh */
		count = clk_table->NumSocClkLevelsEnabled;
		cur_value = metrics.SocclkFrequency;
		break;
	case SMU_VCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.VclkFrequency;
		break;
	case SMU_DCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.DclkFrequency;
		break;
	case SMU_MCLK:
		count = clk_table->NumDfPstatesEnabled;
		cur_value = metrics.MemclkFrequency;
		break;
	case SMU_FCLK:
		count = clk_table->NumDfPstatesEnabled;
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
		if (ret)
			return ret;
		break;
	default:
		break;
	}

	/* Second switch: print each level, starring the one matching cur_value. */
	switch (clk_type) {
	case SMU_SOCCLK:
	case SMU_VCLK:
	case SMU_DCLK:
	case SMU_MCLK:
	case SMU_FCLK:
		for (i = 0; i < count; i++) {
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
			if (ret)
				return ret;
			if (!value)
				continue;
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
				cur_value == value ? "*" : "");
			if (cur_value == value)
				cur_value_match_level = true;
		}

		/* current frequency sits between table levels: print it separately */
		if (!cur_value_match_level)
			size += sysfs_emit_at(buf, size, "   %uMhz *\n", cur_value);
		break;
	default:
		break;
	}

	return size;
}

/*
 * vangogh_print_clk_levels - sysfs listing of DPM levels for @clk_type
 * using the current (non-legacy) metrics layout; returns bytes written.
 * Also supports SMU_GFXCLK/SMU_SCLK via a firmware frequency query.
 */
static int vangogh_print_clk_levels(struct smu_context *smu,
				    enum smu_clk_type clk_type, char *buf)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
	SmuMetrics_t metrics;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int i, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	bool cur_value_match_level = false;
	uint32_t min, max;

	memset(&metrics, 0, sizeof(metrics));

	ret = smu_cmn_get_metrics_table(smu, &metrics, false);
	if (ret)
		return ret;

	smu_cmn_get_sysfs_buf(&buf, &size);

	switch (clk_type) {
	case SMU_OD_SCLK:
		/* OD values are only shown in manual performance level */
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
		}
		break;
	case SMU_OD_CCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->cpu_actual_soft_max_freq > 0) ?
smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_OD_RANGE:
		/* OD range is only shown in manual performance level */
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
				smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
			size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
				smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_SOCCLK:
		/* the level 3 ~ 6 of socclk use the same frequency for vangogh */
		count = clk_table->NumSocClkLevelsEnabled;
		cur_value = metrics.Current.SocclkFrequency;
		break;
	case SMU_VCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.Current.VclkFrequency;
		break;
	case SMU_DCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.Current.DclkFrequency;
		break;
	case SMU_MCLK:
		count = clk_table->NumDfPstatesEnabled;
		cur_value = metrics.Current.MemclkFrequency;
		break;
	case SMU_FCLK:
		count = clk_table->NumDfPstatesEnabled;
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
		if (ret)
			return ret;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		/* gfxclk has no DPM table here; query the firmware directly */
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetGfxclkFrequency, 0, &cur_value);
		if (ret) {
			return ret;
		}
		break;
	default:
		break;
	}

	switch (clk_type) {
	case SMU_SOCCLK:
	case SMU_VCLK:
	case SMU_DCLK:
	case SMU_MCLK:
	case SMU_FCLK:
		for (i = 0; i < count; i++) {
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
			if (ret)
				return ret;
			if (!value)
				continue;
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
				cur_value == value ? "*" : "");
			if (cur_value == value)
				cur_value_match_level = true;
		}

		/* current frequency sits between table levels: print it separately */
		if (!cur_value_match_level)
			size += sysfs_emit_at(buf, size, "   %uMhz *\n", cur_value);
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		/* three synthetic levels: hard-min, intermediate, soft-max */
		min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
		max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
		if (cur_value == max)
			i = 2;
		else if (cur_value == min)
			i = 0;
		else
			i = 1;
		size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
				i == 0 ? "*" : "");
		size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
				i == 1 ? cur_value : VANGOGH_UMD_PSTATE_STANDARD_GFXCLK,
				i == 1 ? "*" : "");
		size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
				i == 2 ? "*" : "");
		break;
	default:
		break;
	}

	return size;
}

/*
 * vangogh_common_print_clk_levels - dispatch to the legacy or current
 * printer depending on the PMFW interface version (< 0x3 means legacy).
 */
static int vangogh_common_print_clk_levels(struct smu_context *smu,
					   enum smu_clk_type clk_type, char *buf)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t if_version;
	int ret = 0;

	ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
	if (ret) {
		dev_err(adev->dev, "Failed to get smu if version!\n");
		return ret;
	}

	if (if_version < 0x3)
		ret = vangogh_print_legacy_clk_levels(smu, clk_type, buf);
	else
		ret = vangogh_print_clk_levels(smu, clk_type, buf);

	return ret;
}

/*
 * vangogh_get_profiling_clk_mask - pick the DPM level indices to force for
 * the given profiling level. Each output pointer may be NULL if the caller
 * does not need that clock's mask.
 */
static int vangogh_get_profiling_clk_mask(struct smu_context *smu,
					  enum amd_dpm_forced_level level,
					  uint32_t *vclk_mask,
					  uint32_t *dclk_mask,
					  uint32_t *mclk_mask,
					  uint32_t *fclk_mask,
					  uint32_t *soc_mask)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
		/* highest-index DF pstate for min-mclk profiling */
		if (mclk_mask)
			*mclk_mask = clk_table->NumDfPstatesEnabled - 1;

		if (fclk_mask)
			*fclk_mask = clk_table->NumDfPstatesEnabled - 1;

		if
(soc_mask) 839 *soc_mask = 0; 840 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 841 if (mclk_mask) 842 *mclk_mask = 0; 843 844 if (fclk_mask) 845 *fclk_mask = 0; 846 847 if (soc_mask) 848 *soc_mask = 1; 849 850 if (vclk_mask) 851 *vclk_mask = 1; 852 853 if (dclk_mask) 854 *dclk_mask = 1; 855 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) { 856 if (mclk_mask) 857 *mclk_mask = 0; 858 859 if (fclk_mask) 860 *fclk_mask = 0; 861 862 if (soc_mask) 863 *soc_mask = 1; 864 865 if (vclk_mask) 866 *vclk_mask = 1; 867 868 if (dclk_mask) 869 *dclk_mask = 1; 870 } 871 872 return 0; 873 } 874 875 static bool vangogh_clk_dpm_is_enabled(struct smu_context *smu, 876 enum smu_clk_type clk_type) 877 { 878 enum smu_feature_mask feature_id = 0; 879 880 switch (clk_type) { 881 case SMU_MCLK: 882 case SMU_UCLK: 883 case SMU_FCLK: 884 feature_id = SMU_FEATURE_DPM_FCLK_BIT; 885 break; 886 case SMU_GFXCLK: 887 case SMU_SCLK: 888 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT; 889 break; 890 case SMU_SOCCLK: 891 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT; 892 break; 893 case SMU_VCLK: 894 case SMU_DCLK: 895 feature_id = SMU_FEATURE_VCN_DPM_BIT; 896 break; 897 default: 898 return true; 899 } 900 901 if (!smu_cmn_feature_is_enabled(smu, feature_id)) 902 return false; 903 904 return true; 905 } 906 907 static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu, 908 enum smu_clk_type clk_type, 909 uint32_t *min, 910 uint32_t *max) 911 { 912 int ret = 0; 913 uint32_t soc_mask; 914 uint32_t vclk_mask; 915 uint32_t dclk_mask; 916 uint32_t mclk_mask; 917 uint32_t fclk_mask; 918 uint32_t clock_limit; 919 920 if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) { 921 switch (clk_type) { 922 case SMU_MCLK: 923 case SMU_UCLK: 924 clock_limit = smu->smu_table.boot_values.uclk; 925 break; 926 case SMU_FCLK: 927 clock_limit = smu->smu_table.boot_values.fclk; 928 break; 929 case SMU_GFXCLK: 930 case SMU_SCLK: 931 clock_limit = smu->smu_table.boot_values.gfxclk; 932 break; 933 case SMU_SOCCLK: 
934 clock_limit = smu->smu_table.boot_values.socclk; 935 break; 936 case SMU_VCLK: 937 clock_limit = smu->smu_table.boot_values.vclk; 938 break; 939 case SMU_DCLK: 940 clock_limit = smu->smu_table.boot_values.dclk; 941 break; 942 default: 943 clock_limit = 0; 944 break; 945 } 946 947 /* clock in Mhz unit */ 948 if (min) 949 *min = clock_limit / 100; 950 if (max) 951 *max = clock_limit / 100; 952 953 return 0; 954 } 955 if (max) { 956 ret = vangogh_get_profiling_clk_mask(smu, 957 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK, 958 &vclk_mask, 959 &dclk_mask, 960 &mclk_mask, 961 &fclk_mask, 962 &soc_mask); 963 if (ret) 964 goto failed; 965 966 switch (clk_type) { 967 case SMU_UCLK: 968 case SMU_MCLK: 969 ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, max); 970 if (ret) 971 goto failed; 972 break; 973 case SMU_SOCCLK: 974 ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, max); 975 if (ret) 976 goto failed; 977 break; 978 case SMU_FCLK: 979 ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, max); 980 if (ret) 981 goto failed; 982 break; 983 case SMU_VCLK: 984 ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, max); 985 if (ret) 986 goto failed; 987 break; 988 case SMU_DCLK: 989 ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, max); 990 if (ret) 991 goto failed; 992 break; 993 default: 994 ret = -EINVAL; 995 goto failed; 996 } 997 } 998 if (min) { 999 switch (clk_type) { 1000 case SMU_UCLK: 1001 case SMU_MCLK: 1002 ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, min); 1003 if (ret) 1004 goto failed; 1005 break; 1006 case SMU_SOCCLK: 1007 ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, min); 1008 if (ret) 1009 goto failed; 1010 break; 1011 case SMU_FCLK: 1012 ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, min); 1013 if (ret) 1014 goto failed; 1015 break; 1016 case SMU_VCLK: 1017 ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, min); 1018 if (ret) 1019 goto failed; 1020 break; 1021 case 
SMU_DCLK: 1022 ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, min); 1023 if (ret) 1024 goto failed; 1025 break; 1026 default: 1027 ret = -EINVAL; 1028 goto failed; 1029 } 1030 } 1031 failed: 1032 return ret; 1033 } 1034 1035 static int vangogh_get_power_profile_mode(struct smu_context *smu, 1036 char *buf) 1037 { 1038 uint32_t i, size = 0; 1039 int16_t workload_type = 0; 1040 1041 if (!buf) 1042 return -EINVAL; 1043 1044 for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { 1045 /* 1046 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT 1047 * Not all profile modes are supported on vangogh. 1048 */ 1049 workload_type = smu_cmn_to_asic_specific_index(smu, 1050 CMN2ASIC_MAPPING_WORKLOAD, 1051 i); 1052 1053 if (workload_type < 0) 1054 continue; 1055 1056 size += sysfs_emit_at(buf, size, "%2d %14s%s\n", 1057 i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); 1058 } 1059 1060 return size; 1061 } 1062 1063 static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) 1064 { 1065 int workload_type, ret; 1066 uint32_t profile_mode = input[size]; 1067 1068 if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { 1069 dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); 1070 return -EINVAL; 1071 } 1072 1073 if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT || 1074 profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) 1075 return 0; 1076 1077 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 1078 workload_type = smu_cmn_to_asic_specific_index(smu, 1079 CMN2ASIC_MAPPING_WORKLOAD, 1080 profile_mode); 1081 if (workload_type < 0) { 1082 dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n", 1083 profile_mode); 1084 return -EINVAL; 1085 } 1086 1087 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, 1088 1 << workload_type, 1089 NULL); 1090 if (ret) { 1091 dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", 1092 workload_type); 1093 return 
ret; 1094 } 1095 1096 smu->power_profile_mode = profile_mode; 1097 1098 return 0; 1099 } 1100 1101 static int vangogh_set_soft_freq_limited_range(struct smu_context *smu, 1102 enum smu_clk_type clk_type, 1103 uint32_t min, 1104 uint32_t max) 1105 { 1106 int ret = 0; 1107 1108 if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) 1109 return 0; 1110 1111 switch (clk_type) { 1112 case SMU_GFXCLK: 1113 case SMU_SCLK: 1114 ret = smu_cmn_send_smc_msg_with_param(smu, 1115 SMU_MSG_SetHardMinGfxClk, 1116 min, NULL); 1117 if (ret) 1118 return ret; 1119 1120 ret = smu_cmn_send_smc_msg_with_param(smu, 1121 SMU_MSG_SetSoftMaxGfxClk, 1122 max, NULL); 1123 if (ret) 1124 return ret; 1125 break; 1126 case SMU_FCLK: 1127 ret = smu_cmn_send_smc_msg_with_param(smu, 1128 SMU_MSG_SetHardMinFclkByFreq, 1129 min, NULL); 1130 if (ret) 1131 return ret; 1132 1133 ret = smu_cmn_send_smc_msg_with_param(smu, 1134 SMU_MSG_SetSoftMaxFclkByFreq, 1135 max, NULL); 1136 if (ret) 1137 return ret; 1138 break; 1139 case SMU_SOCCLK: 1140 ret = smu_cmn_send_smc_msg_with_param(smu, 1141 SMU_MSG_SetHardMinSocclkByFreq, 1142 min, NULL); 1143 if (ret) 1144 return ret; 1145 1146 ret = smu_cmn_send_smc_msg_with_param(smu, 1147 SMU_MSG_SetSoftMaxSocclkByFreq, 1148 max, NULL); 1149 if (ret) 1150 return ret; 1151 break; 1152 case SMU_VCLK: 1153 ret = smu_cmn_send_smc_msg_with_param(smu, 1154 SMU_MSG_SetHardMinVcn, 1155 min << 16, NULL); 1156 if (ret) 1157 return ret; 1158 ret = smu_cmn_send_smc_msg_with_param(smu, 1159 SMU_MSG_SetSoftMaxVcn, 1160 max << 16, NULL); 1161 if (ret) 1162 return ret; 1163 break; 1164 case SMU_DCLK: 1165 ret = smu_cmn_send_smc_msg_with_param(smu, 1166 SMU_MSG_SetHardMinVcn, 1167 min, NULL); 1168 if (ret) 1169 return ret; 1170 ret = smu_cmn_send_smc_msg_with_param(smu, 1171 SMU_MSG_SetSoftMaxVcn, 1172 max, NULL); 1173 if (ret) 1174 return ret; 1175 break; 1176 default: 1177 return -EINVAL; 1178 } 1179 1180 return ret; 1181 } 1182 1183 static int vangogh_force_clk_levels(struct smu_context 
*smu, 1184 enum smu_clk_type clk_type, uint32_t mask) 1185 { 1186 uint32_t soft_min_level = 0, soft_max_level = 0; 1187 uint32_t min_freq = 0, max_freq = 0; 1188 int ret = 0 ; 1189 1190 soft_min_level = mask ? (ffs(mask) - 1) : 0; 1191 soft_max_level = mask ? (fls(mask) - 1) : 0; 1192 1193 switch (clk_type) { 1194 case SMU_SOCCLK: 1195 ret = vangogh_get_dpm_clk_limited(smu, clk_type, 1196 soft_min_level, &min_freq); 1197 if (ret) 1198 return ret; 1199 ret = vangogh_get_dpm_clk_limited(smu, clk_type, 1200 soft_max_level, &max_freq); 1201 if (ret) 1202 return ret; 1203 ret = smu_cmn_send_smc_msg_with_param(smu, 1204 SMU_MSG_SetSoftMaxSocclkByFreq, 1205 max_freq, NULL); 1206 if (ret) 1207 return ret; 1208 ret = smu_cmn_send_smc_msg_with_param(smu, 1209 SMU_MSG_SetHardMinSocclkByFreq, 1210 min_freq, NULL); 1211 if (ret) 1212 return ret; 1213 break; 1214 case SMU_FCLK: 1215 ret = vangogh_get_dpm_clk_limited(smu, 1216 clk_type, soft_min_level, &min_freq); 1217 if (ret) 1218 return ret; 1219 ret = vangogh_get_dpm_clk_limited(smu, 1220 clk_type, soft_max_level, &max_freq); 1221 if (ret) 1222 return ret; 1223 ret = smu_cmn_send_smc_msg_with_param(smu, 1224 SMU_MSG_SetSoftMaxFclkByFreq, 1225 max_freq, NULL); 1226 if (ret) 1227 return ret; 1228 ret = smu_cmn_send_smc_msg_with_param(smu, 1229 SMU_MSG_SetHardMinFclkByFreq, 1230 min_freq, NULL); 1231 if (ret) 1232 return ret; 1233 break; 1234 case SMU_VCLK: 1235 ret = vangogh_get_dpm_clk_limited(smu, 1236 clk_type, soft_min_level, &min_freq); 1237 if (ret) 1238 return ret; 1239 1240 ret = vangogh_get_dpm_clk_limited(smu, 1241 clk_type, soft_max_level, &max_freq); 1242 if (ret) 1243 return ret; 1244 1245 1246 ret = smu_cmn_send_smc_msg_with_param(smu, 1247 SMU_MSG_SetHardMinVcn, 1248 min_freq << 16, NULL); 1249 if (ret) 1250 return ret; 1251 1252 ret = smu_cmn_send_smc_msg_with_param(smu, 1253 SMU_MSG_SetSoftMaxVcn, 1254 max_freq << 16, NULL); 1255 if (ret) 1256 return ret; 1257 1258 break; 1259 case SMU_DCLK: 1260 ret = 
vangogh_get_dpm_clk_limited(smu, 1261 clk_type, soft_min_level, &min_freq); 1262 if (ret) 1263 return ret; 1264 1265 ret = vangogh_get_dpm_clk_limited(smu, 1266 clk_type, soft_max_level, &max_freq); 1267 if (ret) 1268 return ret; 1269 1270 ret = smu_cmn_send_smc_msg_with_param(smu, 1271 SMU_MSG_SetHardMinVcn, 1272 min_freq, NULL); 1273 if (ret) 1274 return ret; 1275 1276 ret = smu_cmn_send_smc_msg_with_param(smu, 1277 SMU_MSG_SetSoftMaxVcn, 1278 max_freq, NULL); 1279 if (ret) 1280 return ret; 1281 1282 break; 1283 default: 1284 break; 1285 } 1286 1287 return ret; 1288 } 1289 1290 static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest) 1291 { 1292 int ret = 0, i = 0; 1293 uint32_t min_freq, max_freq, force_freq; 1294 enum smu_clk_type clk_type; 1295 1296 enum smu_clk_type clks[] = { 1297 SMU_SOCCLK, 1298 SMU_VCLK, 1299 SMU_DCLK, 1300 SMU_FCLK, 1301 }; 1302 1303 for (i = 0; i < ARRAY_SIZE(clks); i++) { 1304 clk_type = clks[i]; 1305 ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq); 1306 if (ret) 1307 return ret; 1308 1309 force_freq = highest ? 
max_freq : min_freq; 1310 ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq); 1311 if (ret) 1312 return ret; 1313 } 1314 1315 return ret; 1316 } 1317 1318 static int vangogh_unforce_dpm_levels(struct smu_context *smu) 1319 { 1320 int ret = 0, i = 0; 1321 uint32_t min_freq, max_freq; 1322 enum smu_clk_type clk_type; 1323 1324 struct clk_feature_map { 1325 enum smu_clk_type clk_type; 1326 uint32_t feature; 1327 } clk_feature_map[] = { 1328 {SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT}, 1329 {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT}, 1330 {SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT}, 1331 {SMU_DCLK, SMU_FEATURE_VCN_DPM_BIT}, 1332 }; 1333 1334 for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) { 1335 1336 if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature)) 1337 continue; 1338 1339 clk_type = clk_feature_map[i].clk_type; 1340 1341 ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq); 1342 1343 if (ret) 1344 return ret; 1345 1346 ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); 1347 1348 if (ret) 1349 return ret; 1350 } 1351 1352 return ret; 1353 } 1354 1355 static int vangogh_set_peak_clock_by_device(struct smu_context *smu) 1356 { 1357 int ret = 0; 1358 uint32_t socclk_freq = 0, fclk_freq = 0; 1359 uint32_t vclk_freq = 0, dclk_freq = 0; 1360 1361 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_freq); 1362 if (ret) 1363 return ret; 1364 1365 ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq); 1366 if (ret) 1367 return ret; 1368 1369 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_freq); 1370 if (ret) 1371 return ret; 1372 1373 ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq); 1374 if (ret) 1375 return ret; 1376 1377 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_freq); 1378 if (ret) 1379 return ret; 1380 1381 ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, 
vclk_freq); 1382 if (ret) 1383 return ret; 1384 1385 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_freq); 1386 if (ret) 1387 return ret; 1388 1389 ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq); 1390 if (ret) 1391 return ret; 1392 1393 return ret; 1394 } 1395 1396 static int vangogh_set_performance_level(struct smu_context *smu, 1397 enum amd_dpm_forced_level level) 1398 { 1399 int ret = 0, i; 1400 uint32_t soc_mask, mclk_mask, fclk_mask; 1401 uint32_t vclk_mask = 0, dclk_mask = 0; 1402 1403 smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; 1404 smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; 1405 1406 switch (level) { 1407 case AMD_DPM_FORCED_LEVEL_HIGH: 1408 smu->gfx_actual_hard_min_freq = smu->gfx_default_soft_max_freq; 1409 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 1410 1411 1412 ret = vangogh_force_dpm_limit_value(smu, true); 1413 if (ret) 1414 return ret; 1415 break; 1416 case AMD_DPM_FORCED_LEVEL_LOW: 1417 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1418 smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq; 1419 1420 ret = vangogh_force_dpm_limit_value(smu, false); 1421 if (ret) 1422 return ret; 1423 break; 1424 case AMD_DPM_FORCED_LEVEL_AUTO: 1425 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1426 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 1427 1428 ret = vangogh_unforce_dpm_levels(smu); 1429 if (ret) 1430 return ret; 1431 break; 1432 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 1433 smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK; 1434 smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK; 1435 1436 ret = vangogh_get_profiling_clk_mask(smu, level, 1437 &vclk_mask, 1438 &dclk_mask, 1439 &mclk_mask, 1440 &fclk_mask, 1441 &soc_mask); 1442 if (ret) 1443 return ret; 1444 1445 vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); 1446 
vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); 1447 vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask); 1448 vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask); 1449 break; 1450 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 1451 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1452 smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq; 1453 break; 1454 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 1455 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1456 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 1457 1458 ret = vangogh_get_profiling_clk_mask(smu, level, 1459 NULL, 1460 NULL, 1461 &mclk_mask, 1462 &fclk_mask, 1463 NULL); 1464 if (ret) 1465 return ret; 1466 1467 vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); 1468 break; 1469 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 1470 smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK; 1471 smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK; 1472 1473 ret = vangogh_set_peak_clock_by_device(smu); 1474 if (ret) 1475 return ret; 1476 break; 1477 case AMD_DPM_FORCED_LEVEL_MANUAL: 1478 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 1479 default: 1480 return 0; 1481 } 1482 1483 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, 1484 smu->gfx_actual_hard_min_freq, NULL); 1485 if (ret) 1486 return ret; 1487 1488 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, 1489 smu->gfx_actual_soft_max_freq, NULL); 1490 if (ret) 1491 return ret; 1492 1493 if (smu->adev->pm.fw_version >= 0x43f1b00) { 1494 for (i = 0; i < smu->cpu_core_num; i++) { 1495 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk, 1496 ((i << 20) 1497 | smu->cpu_actual_soft_min_freq), 1498 NULL); 1499 if (ret) 1500 return ret; 1501 1502 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk, 1503 ((i << 20) 1504 | smu->cpu_actual_soft_max_freq), 1505 NULL); 1506 if (ret) 1507 return ret; 1508 } 1509 } 1510 1511 
return ret; 1512 } 1513 1514 static int vangogh_read_sensor(struct smu_context *smu, 1515 enum amd_pp_sensors sensor, 1516 void *data, uint32_t *size) 1517 { 1518 int ret = 0; 1519 1520 if (!data || !size) 1521 return -EINVAL; 1522 1523 switch (sensor) { 1524 case AMDGPU_PP_SENSOR_GPU_LOAD: 1525 ret = vangogh_common_get_smu_metrics_data(smu, 1526 METRICS_AVERAGE_GFXACTIVITY, 1527 (uint32_t *)data); 1528 *size = 4; 1529 break; 1530 case AMDGPU_PP_SENSOR_GPU_POWER: 1531 ret = vangogh_common_get_smu_metrics_data(smu, 1532 METRICS_AVERAGE_SOCKETPOWER, 1533 (uint32_t *)data); 1534 *size = 4; 1535 break; 1536 case AMDGPU_PP_SENSOR_EDGE_TEMP: 1537 ret = vangogh_common_get_smu_metrics_data(smu, 1538 METRICS_TEMPERATURE_EDGE, 1539 (uint32_t *)data); 1540 *size = 4; 1541 break; 1542 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 1543 ret = vangogh_common_get_smu_metrics_data(smu, 1544 METRICS_TEMPERATURE_HOTSPOT, 1545 (uint32_t *)data); 1546 *size = 4; 1547 break; 1548 case AMDGPU_PP_SENSOR_GFX_MCLK: 1549 ret = vangogh_common_get_smu_metrics_data(smu, 1550 METRICS_CURR_UCLK, 1551 (uint32_t *)data); 1552 *(uint32_t *)data *= 100; 1553 *size = 4; 1554 break; 1555 case AMDGPU_PP_SENSOR_GFX_SCLK: 1556 ret = vangogh_common_get_smu_metrics_data(smu, 1557 METRICS_CURR_GFXCLK, 1558 (uint32_t *)data); 1559 *(uint32_t *)data *= 100; 1560 *size = 4; 1561 break; 1562 case AMDGPU_PP_SENSOR_VDDGFX: 1563 ret = vangogh_common_get_smu_metrics_data(smu, 1564 METRICS_VOLTAGE_VDDGFX, 1565 (uint32_t *)data); 1566 *size = 4; 1567 break; 1568 case AMDGPU_PP_SENSOR_VDDNB: 1569 ret = vangogh_common_get_smu_metrics_data(smu, 1570 METRICS_VOLTAGE_VDDSOC, 1571 (uint32_t *)data); 1572 *size = 4; 1573 break; 1574 case AMDGPU_PP_SENSOR_CPU_CLK: 1575 ret = vangogh_common_get_smu_metrics_data(smu, 1576 METRICS_AVERAGE_CPUCLK, 1577 (uint32_t *)data); 1578 *size = smu->cpu_core_num * sizeof(uint16_t); 1579 break; 1580 default: 1581 ret = -EOPNOTSUPP; 1582 break; 1583 } 1584 1585 return ret; 1586 } 1587 1588 static int 
vangogh_set_watermarks_table(struct smu_context *smu, 1589 struct pp_smu_wm_range_sets *clock_ranges) 1590 { 1591 int i; 1592 int ret = 0; 1593 Watermarks_t *table = smu->smu_table.watermarks_table; 1594 1595 if (!table || !clock_ranges) 1596 return -EINVAL; 1597 1598 if (clock_ranges) { 1599 if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES || 1600 clock_ranges->num_writer_wm_sets > NUM_WM_RANGES) 1601 return -EINVAL; 1602 1603 for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) { 1604 table->WatermarkRow[WM_DCFCLK][i].MinClock = 1605 clock_ranges->reader_wm_sets[i].min_drain_clk_mhz; 1606 table->WatermarkRow[WM_DCFCLK][i].MaxClock = 1607 clock_ranges->reader_wm_sets[i].max_drain_clk_mhz; 1608 table->WatermarkRow[WM_DCFCLK][i].MinMclk = 1609 clock_ranges->reader_wm_sets[i].min_fill_clk_mhz; 1610 table->WatermarkRow[WM_DCFCLK][i].MaxMclk = 1611 clock_ranges->reader_wm_sets[i].max_fill_clk_mhz; 1612 1613 table->WatermarkRow[WM_DCFCLK][i].WmSetting = 1614 clock_ranges->reader_wm_sets[i].wm_inst; 1615 } 1616 1617 for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) { 1618 table->WatermarkRow[WM_SOCCLK][i].MinClock = 1619 clock_ranges->writer_wm_sets[i].min_fill_clk_mhz; 1620 table->WatermarkRow[WM_SOCCLK][i].MaxClock = 1621 clock_ranges->writer_wm_sets[i].max_fill_clk_mhz; 1622 table->WatermarkRow[WM_SOCCLK][i].MinMclk = 1623 clock_ranges->writer_wm_sets[i].min_drain_clk_mhz; 1624 table->WatermarkRow[WM_SOCCLK][i].MaxMclk = 1625 clock_ranges->writer_wm_sets[i].max_drain_clk_mhz; 1626 1627 table->WatermarkRow[WM_SOCCLK][i].WmSetting = 1628 clock_ranges->writer_wm_sets[i].wm_inst; 1629 } 1630 1631 smu->watermarks_bitmap |= WATERMARKS_EXIST; 1632 } 1633 1634 /* pass data to smu controller */ 1635 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && 1636 !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { 1637 ret = smu_cmn_write_watermarks_table(smu); 1638 if (ret) { 1639 dev_err(smu->adev->dev, "Failed to update WMTABLE!"); 1640 return ret; 1641 } 1642 
smu->watermarks_bitmap |= WATERMARKS_LOADED; 1643 } 1644 1645 return 0; 1646 } 1647 1648 static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu, 1649 void **table) 1650 { 1651 struct smu_table_context *smu_table = &smu->smu_table; 1652 struct gpu_metrics_v2_2 *gpu_metrics = 1653 (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table; 1654 SmuMetrics_legacy_t metrics; 1655 int ret = 0; 1656 1657 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 1658 if (ret) 1659 return ret; 1660 1661 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2); 1662 1663 gpu_metrics->temperature_gfx = metrics.GfxTemperature; 1664 gpu_metrics->temperature_soc = metrics.SocTemperature; 1665 memcpy(&gpu_metrics->temperature_core[0], 1666 &metrics.CoreTemperature[0], 1667 sizeof(uint16_t) * 4); 1668 gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0]; 1669 1670 gpu_metrics->average_gfx_activity = metrics.GfxActivity; 1671 gpu_metrics->average_mm_activity = metrics.UvdActivity; 1672 1673 gpu_metrics->average_socket_power = metrics.CurrentSocketPower; 1674 gpu_metrics->average_cpu_power = metrics.Power[0]; 1675 gpu_metrics->average_soc_power = metrics.Power[1]; 1676 gpu_metrics->average_gfx_power = metrics.Power[2]; 1677 memcpy(&gpu_metrics->average_core_power[0], 1678 &metrics.CorePower[0], 1679 sizeof(uint16_t) * 4); 1680 1681 gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency; 1682 gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency; 1683 gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency; 1684 gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency; 1685 gpu_metrics->average_vclk_frequency = metrics.VclkFrequency; 1686 gpu_metrics->average_dclk_frequency = metrics.DclkFrequency; 1687 1688 memcpy(&gpu_metrics->current_coreclk[0], 1689 &metrics.CoreFrequency[0], 1690 sizeof(uint16_t) * 4); 1691 gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0]; 1692 1693 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 1694 
gpu_metrics->indep_throttle_status = 1695 smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus, 1696 vangogh_throttler_map); 1697 1698 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1699 1700 *table = (void *)gpu_metrics; 1701 1702 return sizeof(struct gpu_metrics_v2_2); 1703 } 1704 1705 static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, 1706 void **table) 1707 { 1708 struct smu_table_context *smu_table = &smu->smu_table; 1709 struct gpu_metrics_v2_2 *gpu_metrics = 1710 (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table; 1711 SmuMetrics_t metrics; 1712 int ret = 0; 1713 1714 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 1715 if (ret) 1716 return ret; 1717 1718 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2); 1719 1720 gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature; 1721 gpu_metrics->temperature_soc = metrics.Current.SocTemperature; 1722 memcpy(&gpu_metrics->temperature_core[0], 1723 &metrics.Current.CoreTemperature[0], 1724 sizeof(uint16_t) * 4); 1725 gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0]; 1726 1727 gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity; 1728 gpu_metrics->average_mm_activity = metrics.Current.UvdActivity; 1729 1730 gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower; 1731 gpu_metrics->average_cpu_power = metrics.Current.Power[0]; 1732 gpu_metrics->average_soc_power = metrics.Current.Power[1]; 1733 gpu_metrics->average_gfx_power = metrics.Current.Power[2]; 1734 memcpy(&gpu_metrics->average_core_power[0], 1735 &metrics.Average.CorePower[0], 1736 sizeof(uint16_t) * 4); 1737 1738 gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency; 1739 gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency; 1740 gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency; 1741 gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency; 1742 gpu_metrics->average_vclk_frequency = 
metrics.Average.VclkFrequency; 1743 gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency; 1744 1745 gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency; 1746 gpu_metrics->current_socclk = metrics.Current.SocclkFrequency; 1747 gpu_metrics->current_uclk = metrics.Current.MemclkFrequency; 1748 gpu_metrics->current_fclk = metrics.Current.MemclkFrequency; 1749 gpu_metrics->current_vclk = metrics.Current.VclkFrequency; 1750 gpu_metrics->current_dclk = metrics.Current.DclkFrequency; 1751 1752 memcpy(&gpu_metrics->current_coreclk[0], 1753 &metrics.Current.CoreFrequency[0], 1754 sizeof(uint16_t) * 4); 1755 gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0]; 1756 1757 gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus; 1758 gpu_metrics->indep_throttle_status = 1759 smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus, 1760 vangogh_throttler_map); 1761 1762 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1763 1764 *table = (void *)gpu_metrics; 1765 1766 return sizeof(struct gpu_metrics_v2_2); 1767 } 1768 1769 static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu, 1770 void **table) 1771 { 1772 struct amdgpu_device *adev = smu->adev; 1773 uint32_t if_version; 1774 int ret = 0; 1775 1776 ret = smu_cmn_get_smc_version(smu, &if_version, NULL); 1777 if (ret) { 1778 dev_err(adev->dev, "Failed to get smu if version!\n"); 1779 return ret; 1780 } 1781 1782 if (if_version < 0x3) 1783 ret = vangogh_get_legacy_gpu_metrics(smu, table); 1784 else 1785 ret = vangogh_get_gpu_metrics(smu, table); 1786 1787 return ret; 1788 } 1789 1790 static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, 1791 long input[], uint32_t size) 1792 { 1793 int ret = 0; 1794 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1795 1796 if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) { 1797 dev_warn(smu->adev->dev, 1798 "pp_od_clk_voltage is not accessible if 
power_dpm_force_performance_level is not in manual mode!\n"); 1799 return -EINVAL; 1800 } 1801 1802 switch (type) { 1803 case PP_OD_EDIT_CCLK_VDDC_TABLE: 1804 if (size != 3) { 1805 dev_err(smu->adev->dev, "Input parameter number not correct (should be 4 for processor)\n"); 1806 return -EINVAL; 1807 } 1808 if (input[0] >= smu->cpu_core_num) { 1809 dev_err(smu->adev->dev, "core index is overflow, should be less than %d\n", 1810 smu->cpu_core_num); 1811 } 1812 smu->cpu_core_id_select = input[0]; 1813 if (input[1] == 0) { 1814 if (input[2] < smu->cpu_default_soft_min_freq) { 1815 dev_warn(smu->adev->dev, "Fine grain setting minimum cclk (%ld) MHz is less than the minimum allowed (%d) MHz\n", 1816 input[2], smu->cpu_default_soft_min_freq); 1817 return -EINVAL; 1818 } 1819 smu->cpu_actual_soft_min_freq = input[2]; 1820 } else if (input[1] == 1) { 1821 if (input[2] > smu->cpu_default_soft_max_freq) { 1822 dev_warn(smu->adev->dev, "Fine grain setting maximum cclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n", 1823 input[2], smu->cpu_default_soft_max_freq); 1824 return -EINVAL; 1825 } 1826 smu->cpu_actual_soft_max_freq = input[2]; 1827 } else { 1828 return -EINVAL; 1829 } 1830 break; 1831 case PP_OD_EDIT_SCLK_VDDC_TABLE: 1832 if (size != 2) { 1833 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 1834 return -EINVAL; 1835 } 1836 1837 if (input[0] == 0) { 1838 if (input[1] < smu->gfx_default_hard_min_freq) { 1839 dev_warn(smu->adev->dev, 1840 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n", 1841 input[1], smu->gfx_default_hard_min_freq); 1842 return -EINVAL; 1843 } 1844 smu->gfx_actual_hard_min_freq = input[1]; 1845 } else if (input[0] == 1) { 1846 if (input[1] > smu->gfx_default_soft_max_freq) { 1847 dev_warn(smu->adev->dev, 1848 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n", 1849 input[1], smu->gfx_default_soft_max_freq); 1850 return -EINVAL; 1851 } 1852 
smu->gfx_actual_soft_max_freq = input[1]; 1853 } else { 1854 return -EINVAL; 1855 } 1856 break; 1857 case PP_OD_RESTORE_DEFAULT_TABLE: 1858 if (size != 0) { 1859 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 1860 return -EINVAL; 1861 } else { 1862 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1863 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 1864 smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; 1865 smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; 1866 } 1867 break; 1868 case PP_OD_COMMIT_DPM_TABLE: 1869 if (size != 0) { 1870 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 1871 return -EINVAL; 1872 } else { 1873 if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) { 1874 dev_err(smu->adev->dev, 1875 "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n", 1876 smu->gfx_actual_hard_min_freq, 1877 smu->gfx_actual_soft_max_freq); 1878 return -EINVAL; 1879 } 1880 1881 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, 1882 smu->gfx_actual_hard_min_freq, NULL); 1883 if (ret) { 1884 dev_err(smu->adev->dev, "Set hard min sclk failed!"); 1885 return ret; 1886 } 1887 1888 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, 1889 smu->gfx_actual_soft_max_freq, NULL); 1890 if (ret) { 1891 dev_err(smu->adev->dev, "Set soft max sclk failed!"); 1892 return ret; 1893 } 1894 1895 if (smu->adev->pm.fw_version < 0x43f1b00) { 1896 dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n"); 1897 break; 1898 } 1899 1900 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk, 1901 ((smu->cpu_core_id_select << 20) 1902 | smu->cpu_actual_soft_min_freq), 1903 NULL); 1904 if (ret) { 1905 dev_err(smu->adev->dev, "Set hard min cclk failed!"); 1906 return ret; 1907 } 1908 1909 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk, 1910 
((smu->cpu_core_id_select << 20) 1911 | smu->cpu_actual_soft_max_freq), 1912 NULL); 1913 if (ret) { 1914 dev_err(smu->adev->dev, "Set soft max cclk failed!"); 1915 return ret; 1916 } 1917 } 1918 break; 1919 default: 1920 return -ENOSYS; 1921 } 1922 1923 return ret; 1924 } 1925 1926 static int vangogh_set_default_dpm_tables(struct smu_context *smu) 1927 { 1928 struct smu_table_context *smu_table = &smu->smu_table; 1929 1930 return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); 1931 } 1932 1933 static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) 1934 { 1935 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 1936 1937 smu->gfx_default_hard_min_freq = clk_table->MinGfxClk; 1938 smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk; 1939 smu->gfx_actual_hard_min_freq = 0; 1940 smu->gfx_actual_soft_max_freq = 0; 1941 1942 smu->cpu_default_soft_min_freq = 1400; 1943 smu->cpu_default_soft_max_freq = 3500; 1944 smu->cpu_actual_soft_min_freq = 0; 1945 smu->cpu_actual_soft_max_freq = 0; 1946 1947 return 0; 1948 } 1949 1950 static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table) 1951 { 1952 DpmClocks_t *table = smu->smu_table.clocks_table; 1953 int i; 1954 1955 if (!clock_table || !table) 1956 return -EINVAL; 1957 1958 for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) { 1959 clock_table->SocClocks[i].Freq = table->SocClocks[i]; 1960 clock_table->SocClocks[i].Vol = table->SocVoltage[i]; 1961 } 1962 1963 for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) { 1964 clock_table->FClocks[i].Freq = table->DfPstateTable[i].fclk; 1965 clock_table->FClocks[i].Vol = table->DfPstateTable[i].voltage; 1966 } 1967 1968 for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) { 1969 clock_table->MemClocks[i].Freq = table->DfPstateTable[i].memclk; 1970 clock_table->MemClocks[i].Vol = table->DfPstateTable[i].voltage; 1971 } 1972 1973 return 0; 1974 } 1975 1976 1977 static int vangogh_system_features_control(struct 
smu_context *smu, bool en) 1978 { 1979 struct amdgpu_device *adev = smu->adev; 1980 int ret = 0; 1981 1982 if (adev->pm.fw_version >= 0x43f1700 && !en) 1983 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify, 1984 RLC_STATUS_OFF, NULL); 1985 1986 return ret; 1987 } 1988 1989 static int vangogh_post_smu_init(struct smu_context *smu) 1990 { 1991 struct amdgpu_device *adev = smu->adev; 1992 uint32_t tmp; 1993 int ret = 0; 1994 uint8_t aon_bits = 0; 1995 /* Two CUs in one WGP */ 1996 uint32_t req_active_wgps = adev->gfx.cu_info.number/2; 1997 uint32_t total_cu = adev->gfx.config.max_cu_per_sh * 1998 adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines; 1999 2000 /* allow message will be sent after enable message on Vangogh*/ 2001 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) && 2002 (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { 2003 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL); 2004 if (ret) { 2005 dev_err(adev->dev, "Failed to Enable GfxOff!\n"); 2006 return ret; 2007 } 2008 } else { 2009 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 2010 dev_info(adev->dev, "If GFX DPM or power gate disabled, disable GFXOFF\n"); 2011 } 2012 2013 /* if all CUs are active, no need to power off any WGPs */ 2014 if (total_cu == adev->gfx.cu_info.number) 2015 return 0; 2016 2017 /* 2018 * Calculate the total bits number of always on WGPs for all SA/SEs in 2019 * RLC_PG_ALWAYS_ON_WGP_MASK. 
2020 */ 2021 tmp = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_ALWAYS_ON_WGP_MASK)); 2022 tmp &= RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK_MASK; 2023 2024 aon_bits = hweight32(tmp) * adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines; 2025 2026 /* Do not request any WGPs less than set in the AON_WGP_MASK */ 2027 if (aon_bits > req_active_wgps) { 2028 dev_info(adev->dev, "Number of always on WGPs greater than active WGPs: WGP power save not requested.\n"); 2029 return 0; 2030 } else { 2031 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestActiveWgp, req_active_wgps, NULL); 2032 } 2033 } 2034 2035 static int vangogh_mode_reset(struct smu_context *smu, int type) 2036 { 2037 int ret = 0, index = 0; 2038 2039 index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, 2040 SMU_MSG_GfxDeviceDriverReset); 2041 if (index < 0) 2042 return index == -EACCES ? 0 : index; 2043 2044 mutex_lock(&smu->message_lock); 2045 2046 ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type); 2047 2048 mutex_unlock(&smu->message_lock); 2049 2050 mdelay(10); 2051 2052 return ret; 2053 } 2054 2055 static int vangogh_mode2_reset(struct smu_context *smu) 2056 { 2057 return vangogh_mode_reset(smu, SMU_RESET_MODE_2); 2058 } 2059 2060 /** 2061 * vangogh_get_gfxoff_status - Get gfxoff status 2062 * 2063 * @smu: amdgpu_device pointer 2064 * 2065 * Get current gfxoff status 2066 * 2067 * Return: 2068 * * 0 - GFXOFF (default if enabled). 2069 * * 1 - Transition out of GFX State. 2070 * * 2 - Not in GFXOFF. 2071 * * 3 - Transition into GFXOFF. 
2072 */ 2073 static u32 vangogh_get_gfxoff_status(struct smu_context *smu) 2074 { 2075 struct amdgpu_device *adev = smu->adev; 2076 u32 reg, gfxoff_status; 2077 2078 reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL); 2079 gfxoff_status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK) 2080 >> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT; 2081 2082 return gfxoff_status; 2083 } 2084 2085 static int vangogh_get_power_limit(struct smu_context *smu, 2086 uint32_t *current_power_limit, 2087 uint32_t *default_power_limit, 2088 uint32_t *max_power_limit) 2089 { 2090 struct smu_11_5_power_context *power_context = 2091 smu->smu_power.power_context; 2092 uint32_t ppt_limit; 2093 int ret = 0; 2094 2095 if (smu->adev->pm.fw_version < 0x43f1e00) 2096 return ret; 2097 2098 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSlowPPTLimit, &ppt_limit); 2099 if (ret) { 2100 dev_err(smu->adev->dev, "Get slow PPT limit failed!\n"); 2101 return ret; 2102 } 2103 /* convert from milliwatt to watt */ 2104 if (current_power_limit) 2105 *current_power_limit = ppt_limit / 1000; 2106 if (default_power_limit) 2107 *default_power_limit = ppt_limit / 1000; 2108 if (max_power_limit) 2109 *max_power_limit = 29; 2110 2111 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit); 2112 if (ret) { 2113 dev_err(smu->adev->dev, "Get fast PPT limit failed!\n"); 2114 return ret; 2115 } 2116 /* convert from milliwatt to watt */ 2117 power_context->current_fast_ppt_limit = 2118 power_context->default_fast_ppt_limit = ppt_limit / 1000; 2119 power_context->max_fast_ppt_limit = 30; 2120 2121 return ret; 2122 } 2123 2124 static int vangogh_get_ppt_limit(struct smu_context *smu, 2125 uint32_t *ppt_limit, 2126 enum smu_ppt_limit_type type, 2127 enum smu_ppt_limit_level level) 2128 { 2129 struct smu_11_5_power_context *power_context = 2130 smu->smu_power.power_context; 2131 2132 if (!power_context) 2133 return -EOPNOTSUPP; 2134 2135 if (type == SMU_FAST_PPT_LIMIT) { 2136 switch (level) { 2137 case 
SMU_PPT_LIMIT_MAX: 2138 *ppt_limit = power_context->max_fast_ppt_limit; 2139 break; 2140 case SMU_PPT_LIMIT_CURRENT: 2141 *ppt_limit = power_context->current_fast_ppt_limit; 2142 break; 2143 case SMU_PPT_LIMIT_DEFAULT: 2144 *ppt_limit = power_context->default_fast_ppt_limit; 2145 break; 2146 default: 2147 break; 2148 } 2149 } 2150 2151 return 0; 2152 } 2153 2154 static int vangogh_set_power_limit(struct smu_context *smu, 2155 enum smu_ppt_limit_type limit_type, 2156 uint32_t ppt_limit) 2157 { 2158 struct smu_11_5_power_context *power_context = 2159 smu->smu_power.power_context; 2160 int ret = 0; 2161 2162 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { 2163 dev_err(smu->adev->dev, "Setting new power limit is not supported!\n"); 2164 return -EOPNOTSUPP; 2165 } 2166 2167 switch (limit_type) { 2168 case SMU_DEFAULT_PPT_LIMIT: 2169 ret = smu_cmn_send_smc_msg_with_param(smu, 2170 SMU_MSG_SetSlowPPTLimit, 2171 ppt_limit * 1000, /* convert from watt to milliwatt */ 2172 NULL); 2173 if (ret) 2174 return ret; 2175 2176 smu->current_power_limit = ppt_limit; 2177 break; 2178 case SMU_FAST_PPT_LIMIT: 2179 ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24); 2180 if (ppt_limit > power_context->max_fast_ppt_limit) { 2181 dev_err(smu->adev->dev, 2182 "New power limit (%d) is over the max allowed %d\n", 2183 ppt_limit, power_context->max_fast_ppt_limit); 2184 return ret; 2185 } 2186 2187 ret = smu_cmn_send_smc_msg_with_param(smu, 2188 SMU_MSG_SetFastPPTLimit, 2189 ppt_limit * 1000, /* convert from watt to milliwatt */ 2190 NULL); 2191 if (ret) 2192 return ret; 2193 2194 power_context->current_fast_ppt_limit = ppt_limit; 2195 break; 2196 default: 2197 return -EINVAL; 2198 } 2199 2200 return ret; 2201 } 2202 2203 static const struct pptable_funcs vangogh_ppt_funcs = { 2204 2205 .check_fw_status = smu_v11_0_check_fw_status, 2206 .check_fw_version = smu_v11_0_check_fw_version, 2207 .init_smc_tables = vangogh_init_smc_tables, 2208 .fini_smc_tables = smu_v11_0_fini_smc_tables, 2209 
	/* Power/interrupt bring-up shared with other smu_v11_0 parts */
	.init_power = smu_v11_0_init_power,
	.fini_power = smu_v11_0_fini_power,
	.register_irq_handler = smu_v11_0_register_irq_handler,
	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
	.send_smc_msg = smu_cmn_send_smc_msg,
	/* Media engine power gating */
	.dpm_set_vcn_enable = vangogh_dpm_set_vcn_enable,
	.dpm_set_jpeg_enable = vangogh_dpm_set_jpeg_enable,
	.is_dpm_running = vangogh_is_dpm_running,
	.read_sensor = vangogh_read_sensor,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_watermarks_table = vangogh_set_watermarks_table,
	.set_driver_table_location = smu_v11_0_set_driver_table_location,
	.interrupt_work = smu_v11_0_interrupt_work,
	.get_gpu_metrics = vangogh_common_get_gpu_metrics,
	/* Clock/DPM table handling and user clock controls */
	.od_edit_dpm_table = vangogh_od_edit_dpm_table,
	.print_clk_levels = vangogh_common_print_clk_levels,
	.set_default_dpm_table = vangogh_set_default_dpm_tables,
	.set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
	.system_features_control = vangogh_system_features_control,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.set_power_profile_mode = vangogh_set_power_profile_mode,
	.get_power_profile_mode = vangogh_get_power_profile_mode,
	.get_dpm_clock_table = vangogh_get_dpm_clock_table,
	.force_clk_levels = vangogh_force_clk_levels,
	.set_performance_level = vangogh_set_performance_level,
	.post_init = vangogh_post_smu_init,
	/* Reset, GFXOFF and power-limit plumbing */
	.mode2_reset = vangogh_mode2_reset,
	.gfx_off_control = smu_v11_0_gfx_off_control,
	.get_gfx_off_status = vangogh_get_gfxoff_status,
	.get_ppt_limit = vangogh_get_ppt_limit,
	.get_power_limit = vangogh_get_power_limit,
	.set_power_limit = vangogh_set_power_limit,
	.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
};

/*
 * vangogh_set_ppt_funcs - install the Van Gogh swsmu callback tables.
 * @smu: smu_context pointer
 *
 * Wires up the ppt function table and the message/feature/table/workload
 * mappings, marks the device as an APU, and points the common code at the
 * v11.0 SMU mailbox registers.
 */
void vangogh_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &vangogh_ppt_funcs;
	smu->message_map = vangogh_message_map;
	smu->feature_map = vangogh_feature_mask_map;
	smu->table_map = vangogh_table_map;
	smu->workload_map = vangogh_workload_map;
	smu->is_apu = true;
	smu_v11_0_set_smu_mailbox_registers(smu);
}