/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define SWSMU_CODE_LAYER_L2

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_v11_0.h"
#include "smu11_driver_if_vangogh.h"
#include "vangogh_ppt.h"
#include "smu_v11_5_ppsmc.h"
#include "smu_v11_5_pmfw.h"
#include "smu_cmn.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
        FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
        FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \
        FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
        FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \
        FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \
        FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \
        FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \
        FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT) | \
        FEATURE_MASK(FEATURE_GFX_DPM_BIT))

static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
        MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
        MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0),
        MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 0),
        MSG_MAP(EnableGfxOff, PPSMC_MSG_EnableGfxOff, 0),
        MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisableGfxOff, 0),
        MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 0),
        MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 0),
        MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0),
        MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0),
        MSG_MAP(Spare, PPSMC_MSG_spare, 0),
        MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 0),
        MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 0),
        MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 0),
        MSG_MAP(SetHardMinIspiclkByFreq, PPSMC_MSG_SetHardMinIspiclkByFreq, 0),
        MSG_MAP(SetHardMinIspxclkByFreq, PPSMC_MSG_SetHardMinIspxclkByFreq, 0),
        MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 0),
        MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 0),
        MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0),
        MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
        MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset, 0),
        MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 0),
        MSG_MAP(Spare1, PPSMC_MSG_spare1, 0),
        MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 0),
        MSG_MAP(SetSoftMinFclk, PPSMC_MSG_SetSoftMinFclk, 0),
        MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 0),
        MSG_MAP(EnablePostCode, PPSMC_MSG_EnablePostCode, 0),
        MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency, 0),
        MSG_MAP(GetFclkFrequency, PPSMC_MSG_GetFclkFrequency, 0),
        MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0),
        MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 0),
        MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 0),
        MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 0),
        MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 0),
        MSG_MAP(Spare2, PPSMC_MSG_spare2, 0),
        MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage, 0),
        MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0),
        MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0),
        MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 0),
        MSG_MAP(SetSoftMinSocclkByFreq, PPSMC_MSG_SetSoftMinSocclkByFreq, 0),
        MSG_MAP(PowerUpCvip, PPSMC_MSG_PowerUpCvip, 0),
        MSG_MAP(PowerDownCvip, PPSMC_MSG_PowerDownCvip, 0),
        MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
        MSG_MAP(GetThermalLimit, PPSMC_MSG_GetThermalLimit, 0),
        MSG_MAP(GetCurrentTemperature, PPSMC_MSG_GetCurrentTemperature, 0),
        MSG_MAP(GetCurrentPower, PPSMC_MSG_GetCurrentPower, 0),
        MSG_MAP(GetCurrentVoltage, PPSMC_MSG_GetCurrentVoltage, 0),
        MSG_MAP(GetCurrentCurrent, PPSMC_MSG_GetCurrentCurrent, 0),
        MSG_MAP(GetAverageCpuActivity, PPSMC_MSG_GetAverageCpuActivity, 0),
        MSG_MAP(GetAverageGfxActivity, PPSMC_MSG_GetAverageGfxActivity, 0),
        MSG_MAP(GetAveragePower, PPSMC_MSG_GetAveragePower, 0),
        MSG_MAP(GetAverageTemperature, PPSMC_MSG_GetAverageTemperature, 0),
        MSG_MAP(SetAveragePowerTimeConstant, PPSMC_MSG_SetAveragePowerTimeConstant, 0),
        MSG_MAP(SetAverageActivityTimeConstant, PPSMC_MSG_SetAverageActivityTimeConstant, 0),
        MSG_MAP(SetAverageTemperatureTimeConstant, PPSMC_MSG_SetAverageTemperatureTimeConstant, 0),
        MSG_MAP(SetMitigationEndHysteresis, PPSMC_MSG_SetMitigationEndHysteresis, 0),
        MSG_MAP(GetCurrentFreq, PPSMC_MSG_GetCurrentFreq, 0),
        MSG_MAP(SetReducedPptLimit, PPSMC_MSG_SetReducedPptLimit, 0),
        MSG_MAP(SetReducedThermalLimit, PPSMC_MSG_SetReducedThermalLimit, 0),
        MSG_MAP(DramLogSetDramAddr, PPSMC_MSG_DramLogSetDramAddr, 0),
        MSG_MAP(StartDramLogging, PPSMC_MSG_StartDramLogging, 0),
        MSG_MAP(StopDramLogging, PPSMC_MSG_StopDramLogging, 0),
        MSG_MAP(SetSoftMinCclk, PPSMC_MSG_SetSoftMinCclk, 0),
        MSG_MAP(SetSoftMaxCclk, PPSMC_MSG_SetSoftMaxCclk, 0),
};

static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = {
        FEA_MAP(PPT),
        FEA_MAP(TDC),
        FEA_MAP(THERMAL),
        FEA_MAP(DS_GFXCLK),
        FEA_MAP(DS_SOCCLK),
        FEA_MAP(DS_LCLK),
        FEA_MAP(DS_FCLK),
        FEA_MAP(DS_MP1CLK),
        FEA_MAP(DS_MP0CLK),
        FEA_MAP(ATHUB_PG),
        FEA_MAP(CCLK_DPM),
        FEA_MAP(FAN_CONTROLLER),
        FEA_MAP(ULV),
        FEA_MAP(VCN_DPM),
        FEA_MAP(LCLK_DPM),
        FEA_MAP(SHUBCLK_DPM),
        FEA_MAP(DCFCLK_DPM),
        FEA_MAP(DS_DCFCLK),
        FEA_MAP(S0I2),
        FEA_MAP(SMU_LOW_POWER),
        FEA_MAP(GFX_DEM),
        FEA_MAP(PSI),
        FEA_MAP(PROCHOT),
        FEA_MAP(CPUOFF),
        FEA_MAP(STAPM),
        FEA_MAP(S0I3),
        FEA_MAP(DF_CSTATES),
        FEA_MAP(PERF_LIMIT),
        FEA_MAP(CORE_DLDO),
        FEA_MAP(RSMU_LOW_POWER),
        FEA_MAP(SMN_LOW_POWER),
        FEA_MAP(THM_LOW_POWER),
        FEA_MAP(SMUIO_LOW_POWER),
        FEA_MAP(MP1_LOW_POWER),
        FEA_MAP(DS_VCN),
        FEA_MAP(CPPC),
        FEA_MAP(OS_CSTATES),
        FEA_MAP(ISP_DPM),
        FEA_MAP(A55_DPM),
        FEA_MAP(CVIP_DSP_DPM),
        FEA_MAP(MSMU_LOW_POWER),
};

static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = {
        TAB_MAP_VALID(WATERMARKS),
        TAB_MAP_VALID(SMU_METRICS),
        TAB_MAP_VALID(CUSTOM_DPM),
        TAB_MAP_VALID(DPMCLOCKS),
};

static int vangogh_tables_init(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = smu_table->tables;

        SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
        SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
        SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
        SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
        SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t),
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

        smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
        if (!smu_table->metrics_table)
                goto err0_out;
        smu_table->metrics_time = 0;

        smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_0);
        smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
        if (!smu_table->gpu_metrics_table)
                goto err1_out;

        smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
        if (!smu_table->watermarks_table)
                goto err2_out;

        return 0;

err2_out:
        kfree(smu_table->gpu_metrics_table);
err1_out:
        kfree(smu_table->metrics_table);
err0_out:
        return -ENOMEM;
}

static int vangogh_get_smu_metrics_data(struct smu_context *smu,
                                        MetricsMember_t member,
                                        uint32_t *value)
{
        struct smu_table_context *smu_table = &smu->smu_table;

        SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
        int ret = 0;

        mutex_lock(&smu->metrics_lock);

        ret = smu_cmn_get_metrics_table_locked(smu,
                                               NULL,
                                               false);
        if (ret) {
                mutex_unlock(&smu->metrics_lock);
                return ret;
        }

        switch (member) {
        case METRICS_AVERAGE_GFXCLK:
                *value = metrics->GfxclkFrequency;
                break;
        case METRICS_AVERAGE_SOCCLK:
                *value = metrics->SocclkFrequency;
                break;
        case METRICS_AVERAGE_UCLK:
                *value = metrics->MemclkFrequency;
                break;
        case METRICS_AVERAGE_GFXACTIVITY:
                *value = metrics->GfxActivity / 100;
                break;
        case METRICS_AVERAGE_VCNACTIVITY:
                *value = metrics->UvdActivity;
                break;
        case METRICS_AVERAGE_SOCKETPOWER:
                *value = metrics->CurrentSocketPower;
                break;
        case METRICS_TEMPERATURE_EDGE:
                *value = metrics->GfxTemperature / 100 *
                         SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
                break;
        case METRICS_TEMPERATURE_HOTSPOT:
                *value = metrics->SocTemperature / 100 *
                         SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
                break;
        case METRICS_THROTTLER_STATUS:
                *value = metrics->ThrottlerStatus;
                break;
        default:
                *value = UINT_MAX;
                break;
        }

        mutex_unlock(&smu->metrics_lock);

        return ret;
}

static int vangogh_allocate_dpm_context(struct smu_context *smu)
{
        struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

        smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
                                       GFP_KERNEL);
        if (!smu_dpm->dpm_context)
                return -ENOMEM;

        smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context);

        return 0;
}

static int vangogh_init_smc_tables(struct smu_context *smu)
{
        int ret = 0;

        ret = vangogh_tables_init(smu);
        if (ret)
                return ret;

        ret = vangogh_allocate_dpm_context(smu);
        if (ret)
                return ret;

        return smu_v11_0_init_smc_tables(smu);
}

static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
{
        int ret = 0;

        if (enable) {
                /* vcn dpm on is a prerequisite for vcn power gate messages */
                if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
                        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
                        if (ret)
                                return ret;
                }
        } else {
                if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
                        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
                        if (ret)
                                return ret;
                }
        }

        return ret;
}

static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
{
        int ret = 0;

        if (enable) {
                if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
                        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
                        if (ret)
                                return ret;
                }
        } else {
                if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
                        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
                        if (ret)
                                return ret;
                }
        }

        return ret;
}

static int vangogh_get_allowed_feature_mask(struct smu_context *smu,
                                            uint32_t *feature_mask,
                                            uint32_t num)
{
        struct amdgpu_device *adev = smu->adev;

        if (num > 2)
                return -EINVAL;

        memset(feature_mask, 0, sizeof(uint32_t) * num);

        *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_DPM_BIT)
                                | FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT)
                                | FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
                                | FEATURE_MASK(FEATURE_PPT_BIT)
                                | FEATURE_MASK(FEATURE_TDC_BIT)
                                | FEATURE_MASK(FEATURE_FAN_CONTROLLER_BIT)
                                | FEATURE_MASK(FEATURE_DS_LCLK_BIT)
                                | FEATURE_MASK(FEATURE_DS_DCFCLK_BIT);

        if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
                *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT);

        if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
                *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT);

        if (smu->adev->pg_flags & AMD_PG_SUPPORT_ATHUB)
                *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT);

        return 0;
}

static bool vangogh_is_dpm_running(struct smu_context *smu)
{
        int ret = 0;
        uint32_t feature_mask[2];
        uint64_t feature_enabled;

        ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
        if (ret)
                return false;

        feature_enabled = (uint64_t)feature_mask[0] |
                          ((uint64_t)feature_mask[1] << 32);

        return !!(feature_enabled & SMC_DPM_FEATURE);
}

static int vangogh_get_current_activity_percent(struct smu_context *smu,
                                                enum amd_pp_sensors sensor,
                                                uint32_t *value)
{
        int ret = 0;

        if (!value)
                return -EINVAL;

        switch (sensor) {
        case AMDGPU_PP_SENSOR_GPU_LOAD:
                ret = vangogh_get_smu_metrics_data(smu,
                                                   METRICS_AVERAGE_GFXACTIVITY,
                                                   value);
                break;
        default:
                dev_err(smu->adev->dev, "Invalid sensor for retrieving clock activity\n");
                return -EINVAL;
        }

        return ret;
}

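/* Report the socket power published in the cached SMU metrics table */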
static int vangogh_get_gpu_power(struct smu_context *smu, uint32_t *value)
{
        if (!value)
                return -EINVAL;

        return vangogh_get_smu_metrics_data(smu,
                                            METRICS_AVERAGE_SOCKETPOWER,
                                            value);
}

static int vangogh_thermal_get_temperature(struct smu_context *smu,
                                           enum amd_pp_sensors sensor,
                                           uint32_t *value)
{
        int ret = 0;

        if (!value)
                return -EINVAL;

        switch (sensor) {
        case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
                ret = vangogh_get_smu_metrics_data(smu,
                                                   METRICS_TEMPERATURE_HOTSPOT,
                                                   value);
                break;
        case AMDGPU_PP_SENSOR_EDGE_TEMP:
                ret = vangogh_get_smu_metrics_data(smu,
                                                   METRICS_TEMPERATURE_EDGE,
                                                   value);
                break;
        default:
                dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n");
                return -EINVAL;
        }

        return ret;
}

static int vangogh_get_current_clk_freq_by_table(struct smu_context *smu,
                                                 enum smu_clk_type clk_type,
                                                 uint32_t *value)
{
        MetricsMember_t member_type;

        switch (clk_type) {
        case SMU_GFXCLK:
                member_type = METRICS_AVERAGE_GFXCLK;
                break;
        case SMU_MCLK:
        case SMU_UCLK:
                member_type = METRICS_AVERAGE_UCLK;
                break;
        case SMU_SOCCLK:
                member_type = METRICS_AVERAGE_SOCCLK;
                break;
        default:
                return -EINVAL;
        }

        return vangogh_get_smu_metrics_data(smu,
                                            member_type,
                                            value);
}

static int vangogh_read_sensor(struct smu_context *smu,
                               enum amd_pp_sensors sensor,
                               void *data, uint32_t *size)
{
        int ret = 0;

        if (!data || !size)
                return -EINVAL;

        mutex_lock(&smu->sensor_lock);
        switch (sensor) {
        case AMDGPU_PP_SENSOR_GPU_LOAD:
                ret = vangogh_get_current_activity_percent(smu, sensor, (uint32_t *)data);
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_GPU_POWER:
                ret = vangogh_get_gpu_power(smu, (uint32_t *)data);
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_EDGE_TEMP:
        case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
                ret = vangogh_thermal_get_temperature(smu, sensor, (uint32_t *)data);
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_GFX_MCLK:
                ret = vangogh_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data);
                *(uint32_t *)data *= 100;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_GFX_SCLK:
                ret = vangogh_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data);
                *(uint32_t *)data *= 100;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_VDDGFX:
                ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
                *size = 4;
                break;
        default:
                ret = -EOPNOTSUPP;
                break;
        }
        mutex_unlock(&smu->sensor_lock);

        return ret;
}

static int vangogh_set_watermarks_table(struct smu_context *smu,
                                        struct pp_smu_wm_range_sets *clock_ranges)
{
        int i;
        int ret = 0;
        Watermarks_t *table = smu->smu_table.watermarks_table;

        if (!table || !clock_ranges)
                return -EINVAL;

        if (clock_ranges) {
                if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
                    clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
                        return -EINVAL;

                for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
                        table->WatermarkRow[WM_DCFCLK][i].MinClock =
                                clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
                        table->WatermarkRow[WM_DCFCLK][i].MaxClock =
                                clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
                        table->WatermarkRow[WM_DCFCLK][i].MinMclk =
                                clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
                        table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
                                clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;

                        table->WatermarkRow[WM_DCFCLK][i].WmSetting =
                                clock_ranges->reader_wm_sets[i].wm_inst;
                }

                for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
                        table->WatermarkRow[WM_SOCCLK][i].MinClock =
                                clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
                        table->WatermarkRow[WM_SOCCLK][i].MaxClock =
                                clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
                        table->WatermarkRow[WM_SOCCLK][i].MinMclk =
                                clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
                        table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
                                clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;

                        table->WatermarkRow[WM_SOCCLK][i].WmSetting =
                                clock_ranges->writer_wm_sets[i].wm_inst;
                }

                smu->watermarks_bitmap |= WATERMARKS_EXIST;
        }

        /* pass data to smu controller */
        if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
            !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
                ret = smu_cmn_write_watermarks_table(smu);
                if (ret) {
                        dev_err(smu->adev->dev, "Failed to update WMTABLE!");
                        return ret;
                }
                smu->watermarks_bitmap |= WATERMARKS_LOADED;
        }

        return 0;
}

static const struct pptable_funcs vangogh_ppt_funcs = {
        .check_fw_status = smu_v11_0_check_fw_status,
        .check_fw_version = smu_v11_0_check_fw_version,
        .init_smc_tables = vangogh_init_smc_tables,
        .fini_smc_tables = smu_v11_0_fini_smc_tables,
        .init_power = smu_v11_0_init_power,
        .fini_power = smu_v11_0_fini_power,
        .register_irq_handler = smu_v11_0_register_irq_handler,
        .get_allowed_feature_mask = vangogh_get_allowed_feature_mask,
        .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
        .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
        .send_smc_msg = smu_cmn_send_smc_msg,
        .dpm_set_vcn_enable = vangogh_dpm_set_vcn_enable,
        .dpm_set_jpeg_enable = vangogh_dpm_set_jpeg_enable,
        .is_dpm_running = vangogh_is_dpm_running,
        .read_sensor = vangogh_read_sensor,
        .get_enabled_mask = smu_cmn_get_enabled_32_bits_mask,
        .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
        .set_watermarks_table = vangogh_set_watermarks_table,
        .set_driver_table_location = smu_v11_0_set_driver_table_location,
        .disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
        .interrupt_work = smu_v11_0_interrupt_work,
};

void vangogh_set_ppt_funcs(struct smu_context *smu)
{
        smu->ppt_funcs = &vangogh_ppt_funcs;
        smu->message_map = vangogh_message_map;
        smu->feature_map = vangogh_feature_mask_map;
        smu->table_map = vangogh_table_map;
        smu->is_apu = true;
}