1 /* 2 * Copyright 2022 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include "smu_types.h" 25 #define SWSMU_CODE_LAYER_L2 26 27 #include "amdgpu.h" 28 #include "amdgpu_smu.h" 29 #include "smu_v13_0.h" 30 #include "smu13_driver_if_v13_0_4.h" 31 #include "smu_v13_0_4_ppt.h" 32 #include "smu_v13_0_4_ppsmc.h" 33 #include "smu_v13_0_4_pmfw.h" 34 #include "smu_cmn.h" 35 36 /* 37 * DO NOT use these for err/warn/info/debug messages. 38 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 39 * They are more MGPU friendly. 
40 */ 41 #undef pr_err 42 #undef pr_warn 43 #undef pr_info 44 #undef pr_debug 45 46 #define mmMP1_SMN_C2PMSG_66 0x0282 47 #define mmMP1_SMN_C2PMSG_66_BASE_IDX 1 48 49 #define mmMP1_SMN_C2PMSG_82 0x0292 50 #define mmMP1_SMN_C2PMSG_82_BASE_IDX 1 51 52 #define mmMP1_SMN_C2PMSG_90 0x029a 53 #define mmMP1_SMN_C2PMSG_90_BASE_IDX 1 54 55 #define FEATURE_MASK(feature) (1ULL << feature) 56 57 #define SMU_13_0_4_UMD_PSTATE_GFXCLK 938 58 #define SMU_13_0_4_UMD_PSTATE_SOCCLK 938 59 #define SMU_13_0_4_UMD_PSTATE_FCLK 1875 60 61 #define SMC_DPM_FEATURE ( \ 62 FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ 63 FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \ 64 FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ 65 FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \ 66 FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \ 67 FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \ 68 FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \ 69 FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT) | \ 70 FEATURE_MASK(FEATURE_ISP_DPM_BIT) | \ 71 FEATURE_MASK(FEATURE_IPU_DPM_BIT) | \ 72 FEATURE_MASK(FEATURE_GFX_DPM_BIT)) 73 74 static struct cmn2asic_msg_mapping smu_v13_0_4_message_map[SMU_MSG_MAX_COUNT] = { 75 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), 76 MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1), 77 MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1), 78 MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1), 79 MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1), 80 MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1), 81 MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1), 82 MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1), 83 MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1), 84 MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), 85 MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), 86 MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1), 87 MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 1), 88 MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset, 1), 89 MSG_MAP(GetEnabledSmuFeatures, 
PPSMC_MSG_GetEnabledSmuFeatures, 1), 90 MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 1), 91 MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 1), 92 MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency, 1), 93 MSG_MAP(GetFclkFrequency, PPSMC_MSG_GetFclkFrequency, 1), 94 MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1), 95 MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 1), 96 MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 1), 97 MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 1), 98 MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 1), 99 MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage, 1), 100 MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 1), 101 MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 1), 102 MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 1), 103 MSG_MAP(SetSoftMinSocclkByFreq, PPSMC_MSG_SetSoftMinSocclkByFreq, 1), 104 MSG_MAP(EnableGfxImu, PPSMC_MSG_EnableGfxImu, 1), 105 MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 1), 106 MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 1), 107 }; 108 109 static struct cmn2asic_mapping smu_v13_0_4_feature_mask_map[SMU_FEATURE_COUNT] = { 110 FEA_MAP(CCLK_DPM), 111 FEA_MAP(FAN_CONTROLLER), 112 FEA_MAP(PPT), 113 FEA_MAP(TDC), 114 FEA_MAP(THERMAL), 115 FEA_MAP(VCN_DPM), 116 FEA_MAP_REVERSE(FCLK), 117 FEA_MAP_REVERSE(SOCCLK), 118 FEA_MAP(LCLK_DPM), 119 FEA_MAP(SHUBCLK_DPM), 120 FEA_MAP(DCFCLK_DPM), 121 FEA_MAP_HALF_REVERSE(GFX), 122 FEA_MAP(DS_GFXCLK), 123 FEA_MAP(DS_SOCCLK), 124 FEA_MAP(DS_LCLK), 125 FEA_MAP(DS_DCFCLK), 126 FEA_MAP(DS_FCLK), 127 FEA_MAP(DS_MP1CLK), 128 FEA_MAP(DS_MP0CLK), 129 FEA_MAP(GFX_DEM), 130 FEA_MAP(PSI), 131 FEA_MAP(PROCHOT), 132 FEA_MAP(CPUOFF), 133 FEA_MAP(STAPM), 134 FEA_MAP(S0I3), 135 FEA_MAP(PERF_LIMIT), 136 FEA_MAP(CORE_DLDO), 137 FEA_MAP(DS_VCN), 138 FEA_MAP(CPPC), 139 FEA_MAP(DF_CSTATES), 140 FEA_MAP(ATHUB_PG), 141 }; 142 143 static struct cmn2asic_mapping 
smu_v13_0_4_table_map[SMU_TABLE_COUNT] = { 144 TAB_MAP_VALID(WATERMARKS), 145 TAB_MAP_VALID(SMU_METRICS), 146 TAB_MAP_VALID(CUSTOM_DPM), 147 TAB_MAP_VALID(DPMCLOCKS), 148 }; 149 150 static int smu_v13_0_4_init_smc_tables(struct smu_context *smu) 151 { 152 struct smu_table_context *smu_table = &smu->smu_table; 153 struct smu_table *tables = smu_table->tables; 154 155 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), 156 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 157 SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t), 158 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 159 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), 160 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 161 162 smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL); 163 if (!smu_table->clocks_table) 164 goto err0_out; 165 166 smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); 167 if (!smu_table->metrics_table) 168 goto err1_out; 169 smu_table->metrics_time = 0; 170 171 smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); 172 if (!smu_table->watermarks_table) 173 goto err2_out; 174 175 smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1); 176 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 177 if (!smu_table->gpu_metrics_table) 178 goto err3_out; 179 180 return 0; 181 182 err3_out: 183 kfree(smu_table->watermarks_table); 184 err2_out: 185 kfree(smu_table->metrics_table); 186 err1_out: 187 kfree(smu_table->clocks_table); 188 err0_out: 189 return -ENOMEM; 190 } 191 192 static int smu_v13_0_4_fini_smc_tables(struct smu_context *smu) 193 { 194 struct smu_table_context *smu_table = &smu->smu_table; 195 196 kfree(smu_table->clocks_table); 197 smu_table->clocks_table = NULL; 198 199 kfree(smu_table->metrics_table); 200 smu_table->metrics_table = NULL; 201 202 kfree(smu_table->watermarks_table); 203 smu_table->watermarks_table = NULL; 204 205 kfree(smu_table->gpu_metrics_table); 206 
smu_table->gpu_metrics_table = NULL; 207 208 return 0; 209 } 210 211 static bool smu_v13_0_4_is_dpm_running(struct smu_context *smu) 212 { 213 int ret = 0; 214 uint64_t feature_enabled; 215 216 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 217 218 if (ret) 219 return false; 220 221 return !!(feature_enabled & SMC_DPM_FEATURE); 222 } 223 224 static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en) 225 { 226 struct amdgpu_device *adev = smu->adev; 227 int ret = 0; 228 229 if (!en && !adev->in_s0ix) 230 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL); 231 232 return ret; 233 } 234 235 static ssize_t smu_v13_0_4_get_gpu_metrics(struct smu_context *smu, 236 void **table) 237 { 238 struct smu_table_context *smu_table = &smu->smu_table; 239 struct gpu_metrics_v2_1 *gpu_metrics = 240 (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table; 241 SmuMetrics_t metrics; 242 int ret = 0; 243 244 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 245 if (ret) 246 return ret; 247 248 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1); 249 250 gpu_metrics->temperature_gfx = metrics.GfxTemperature; 251 gpu_metrics->temperature_soc = metrics.SocTemperature; 252 memcpy(&gpu_metrics->temperature_core[0], 253 &metrics.CoreTemperature[0], 254 sizeof(uint16_t) * 8); 255 gpu_metrics->temperature_l3[0] = metrics.L3Temperature; 256 257 gpu_metrics->average_gfx_activity = metrics.GfxActivity; 258 gpu_metrics->average_mm_activity = metrics.UvdActivity; 259 260 gpu_metrics->average_socket_power = metrics.AverageSocketPower; 261 gpu_metrics->average_gfx_power = metrics.Power[0]; 262 gpu_metrics->average_soc_power = metrics.Power[1]; 263 memcpy(&gpu_metrics->average_core_power[0], 264 &metrics.CorePower[0], 265 sizeof(uint16_t) * 8); 266 267 gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency; 268 gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency; 269 gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency; 270 
gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency; 271 gpu_metrics->average_vclk_frequency = metrics.VclkFrequency; 272 gpu_metrics->average_dclk_frequency = metrics.DclkFrequency; 273 274 memcpy(&gpu_metrics->current_coreclk[0], 275 &metrics.CoreFrequency[0], 276 sizeof(uint16_t) * 8); 277 gpu_metrics->current_l3clk[0] = metrics.L3Frequency; 278 279 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 280 281 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 282 283 *table = (void *)gpu_metrics; 284 285 return sizeof(struct gpu_metrics_v2_1); 286 } 287 288 static int smu_v13_0_4_get_smu_metrics_data(struct smu_context *smu, 289 MetricsMember_t member, 290 uint32_t *value) 291 { 292 struct smu_table_context *smu_table = &smu->smu_table; 293 294 SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; 295 int ret = 0; 296 297 ret = smu_cmn_get_metrics_table(smu, NULL, false); 298 if (ret) 299 return ret; 300 301 switch (member) { 302 case METRICS_AVERAGE_GFXCLK: 303 *value = metrics->GfxclkFrequency; 304 break; 305 case METRICS_AVERAGE_SOCCLK: 306 *value = metrics->SocclkFrequency; 307 break; 308 case METRICS_AVERAGE_VCLK: 309 *value = metrics->VclkFrequency; 310 break; 311 case METRICS_AVERAGE_DCLK: 312 *value = metrics->DclkFrequency; 313 break; 314 case METRICS_AVERAGE_UCLK: 315 *value = metrics->MemclkFrequency; 316 break; 317 case METRICS_AVERAGE_GFXACTIVITY: 318 *value = metrics->GfxActivity / 100; 319 break; 320 case METRICS_AVERAGE_VCNACTIVITY: 321 *value = metrics->UvdActivity; 322 break; 323 case METRICS_AVERAGE_SOCKETPOWER: 324 *value = (metrics->AverageSocketPower << 8) / 1000; 325 break; 326 case METRICS_CURR_SOCKETPOWER: 327 *value = (metrics->CurrentSocketPower << 8) / 1000; 328 break; 329 case METRICS_TEMPERATURE_EDGE: 330 *value = metrics->GfxTemperature / 100 * 331 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 332 break; 333 case METRICS_TEMPERATURE_HOTSPOT: 334 *value = metrics->SocTemperature / 100 * 335 
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 336 break; 337 case METRICS_THROTTLER_STATUS: 338 *value = metrics->ThrottlerStatus; 339 break; 340 case METRICS_VOLTAGE_VDDGFX: 341 *value = metrics->Voltage[0]; 342 break; 343 case METRICS_VOLTAGE_VDDSOC: 344 *value = metrics->Voltage[1]; 345 break; 346 case METRICS_SS_APU_SHARE: 347 /* return the percentage of APU power with respect to APU's power limit. 348 * percentage is reported, this isn't boost value. Smartshift power 349 * boost/shift is only when the percentage is more than 100. 350 */ 351 if (metrics->StapmOpnLimit > 0) 352 *value = (metrics->ApuPower * 100) / metrics->StapmOpnLimit; 353 else 354 *value = 0; 355 break; 356 case METRICS_SS_DGPU_SHARE: 357 /* return the percentage of dGPU power with respect to dGPU's power limit. 358 * percentage is reported, this isn't boost value. Smartshift power 359 * boost/shift is only when the percentage is more than 100. 360 */ 361 if ((metrics->dGpuPower > 0) && 362 (metrics->StapmCurrentLimit > metrics->StapmOpnLimit)) 363 *value = (metrics->dGpuPower * 100) / 364 (metrics->StapmCurrentLimit - metrics->StapmOpnLimit); 365 else 366 *value = 0; 367 break; 368 default: 369 *value = UINT_MAX; 370 break; 371 } 372 373 return ret; 374 } 375 376 static int smu_v13_0_4_get_current_clk_freq(struct smu_context *smu, 377 enum smu_clk_type clk_type, 378 uint32_t *value) 379 { 380 MetricsMember_t member_type; 381 382 switch (clk_type) { 383 case SMU_SOCCLK: 384 member_type = METRICS_AVERAGE_SOCCLK; 385 break; 386 case SMU_VCLK: 387 member_type = METRICS_AVERAGE_VCLK; 388 break; 389 case SMU_DCLK: 390 member_type = METRICS_AVERAGE_DCLK; 391 break; 392 case SMU_MCLK: 393 member_type = METRICS_AVERAGE_UCLK; 394 break; 395 case SMU_FCLK: 396 return smu_cmn_send_smc_msg_with_param(smu, 397 SMU_MSG_GetFclkFrequency, 398 0, value); 399 case SMU_GFXCLK: 400 case SMU_SCLK: 401 return smu_cmn_send_smc_msg_with_param(smu, 402 SMU_MSG_GetGfxclkFrequency, 403 0, value); 404 break; 405 default: 406 
return -EINVAL; 407 } 408 409 return smu_v13_0_4_get_smu_metrics_data(smu, member_type, value); 410 } 411 412 static int smu_v13_0_4_get_dpm_freq_by_index(struct smu_context *smu, 413 enum smu_clk_type clk_type, 414 uint32_t dpm_level, 415 uint32_t *freq) 416 { 417 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 418 419 if (!clk_table || clk_type >= SMU_CLK_COUNT) 420 return -EINVAL; 421 422 switch (clk_type) { 423 case SMU_SOCCLK: 424 if (dpm_level >= clk_table->NumSocClkLevelsEnabled) 425 return -EINVAL; 426 *freq = clk_table->SocClocks[dpm_level]; 427 break; 428 case SMU_VCLK: 429 if (dpm_level >= clk_table->VcnClkLevelsEnabled) 430 return -EINVAL; 431 *freq = clk_table->VClocks[dpm_level]; 432 break; 433 case SMU_DCLK: 434 if (dpm_level >= clk_table->VcnClkLevelsEnabled) 435 return -EINVAL; 436 *freq = clk_table->DClocks[dpm_level]; 437 break; 438 case SMU_UCLK: 439 case SMU_MCLK: 440 if (dpm_level >= clk_table->NumDfPstatesEnabled) 441 return -EINVAL; 442 *freq = clk_table->DfPstateTable[dpm_level].MemClk; 443 break; 444 case SMU_FCLK: 445 if (dpm_level >= clk_table->NumDfPstatesEnabled) 446 return -EINVAL; 447 *freq = clk_table->DfPstateTable[dpm_level].FClk; 448 break; 449 default: 450 return -EINVAL; 451 } 452 453 return 0; 454 } 455 456 static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu, 457 enum smu_clk_type clk_type, 458 uint32_t *count) 459 { 460 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 461 462 switch (clk_type) { 463 case SMU_SOCCLK: 464 *count = clk_table->NumSocClkLevelsEnabled; 465 break; 466 case SMU_VCLK: 467 *count = clk_table->VcnClkLevelsEnabled; 468 break; 469 case SMU_DCLK: 470 *count = clk_table->VcnClkLevelsEnabled; 471 break; 472 case SMU_MCLK: 473 *count = clk_table->NumDfPstatesEnabled; 474 break; 475 case SMU_FCLK: 476 *count = clk_table->NumDfPstatesEnabled; 477 break; 478 default: 479 break; 480 } 481 482 return 0; 483 } 484 485 static int smu_v13_0_4_print_clk_levels(struct smu_context *smu, 486 
enum smu_clk_type clk_type, char *buf) 487 { 488 int i, idx, size = 0, ret = 0; 489 uint32_t cur_value = 0, value = 0, count = 0; 490 uint32_t min, max; 491 492 smu_cmn_get_sysfs_buf(&buf, &size); 493 494 switch (clk_type) { 495 case SMU_OD_SCLK: 496 size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); 497 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 498 (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); 499 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 500 (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); 501 break; 502 case SMU_OD_RANGE: 503 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); 504 size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", 505 smu->gfx_default_hard_min_freq, 506 smu->gfx_default_soft_max_freq); 507 break; 508 case SMU_SOCCLK: 509 case SMU_VCLK: 510 case SMU_DCLK: 511 case SMU_MCLK: 512 case SMU_FCLK: 513 ret = smu_v13_0_4_get_current_clk_freq(smu, clk_type, &cur_value); 514 if (ret) 515 break; 516 517 ret = smu_v13_0_4_get_dpm_level_count(smu, clk_type, &count); 518 if (ret) 519 break; 520 521 for (i = 0; i < count; i++) { 522 idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; 523 ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, idx, &value); 524 if (ret) 525 break; 526 527 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value, 528 cur_value == value ? "*" : ""); 529 } 530 break; 531 case SMU_GFXCLK: 532 case SMU_SCLK: 533 ret = smu_v13_0_4_get_current_clk_freq(smu, clk_type, &cur_value); 534 if (ret) 535 break; 536 min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; 537 max = (smu->gfx_actual_soft_max_freq > 0) ? 
smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; 538 if (cur_value == max) 539 i = 2; 540 else if (cur_value == min) 541 i = 0; 542 else 543 i = 1; 544 size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min, 545 i == 0 ? "*" : ""); 546 size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", 547 i == 1 ? cur_value : 1100, /* UMD PSTATE GFXCLK 1100 */ 548 i == 1 ? "*" : ""); 549 size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, 550 i == 2 ? "*" : ""); 551 break; 552 default: 553 break; 554 } 555 556 return size; 557 } 558 559 static int smu_v13_0_4_read_sensor(struct smu_context *smu, 560 enum amd_pp_sensors sensor, 561 void *data, uint32_t *size) 562 { 563 int ret = 0; 564 565 if (!data || !size) 566 return -EINVAL; 567 568 switch (sensor) { 569 case AMDGPU_PP_SENSOR_GPU_LOAD: 570 ret = smu_v13_0_4_get_smu_metrics_data(smu, 571 METRICS_AVERAGE_GFXACTIVITY, 572 (uint32_t *)data); 573 *size = 4; 574 break; 575 case AMDGPU_PP_SENSOR_GPU_AVG_POWER: 576 ret = smu_v13_0_4_get_smu_metrics_data(smu, 577 METRICS_AVERAGE_SOCKETPOWER, 578 (uint32_t *)data); 579 *size = 4; 580 break; 581 case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: 582 ret = smu_v13_0_4_get_smu_metrics_data(smu, 583 METRICS_CURR_SOCKETPOWER, 584 (uint32_t *)data); 585 *size = 4; 586 break; 587 case AMDGPU_PP_SENSOR_EDGE_TEMP: 588 ret = smu_v13_0_4_get_smu_metrics_data(smu, 589 METRICS_TEMPERATURE_EDGE, 590 (uint32_t *)data); 591 *size = 4; 592 break; 593 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 594 ret = smu_v13_0_4_get_smu_metrics_data(smu, 595 METRICS_TEMPERATURE_HOTSPOT, 596 (uint32_t *)data); 597 *size = 4; 598 break; 599 case AMDGPU_PP_SENSOR_GFX_MCLK: 600 ret = smu_v13_0_4_get_smu_metrics_data(smu, 601 METRICS_AVERAGE_UCLK, 602 (uint32_t *)data); 603 *(uint32_t *)data *= 100; 604 *size = 4; 605 break; 606 case AMDGPU_PP_SENSOR_GFX_SCLK: 607 ret = smu_v13_0_4_get_smu_metrics_data(smu, 608 METRICS_AVERAGE_GFXCLK, 609 (uint32_t *)data); 610 *(uint32_t *)data *= 100; 611 *size = 4; 612 break; 613 case 
AMDGPU_PP_SENSOR_VDDGFX: 614 ret = smu_v13_0_4_get_smu_metrics_data(smu, 615 METRICS_VOLTAGE_VDDGFX, 616 (uint32_t *)data); 617 *size = 4; 618 break; 619 case AMDGPU_PP_SENSOR_VDDNB: 620 ret = smu_v13_0_4_get_smu_metrics_data(smu, 621 METRICS_VOLTAGE_VDDSOC, 622 (uint32_t *)data); 623 *size = 4; 624 break; 625 case AMDGPU_PP_SENSOR_SS_APU_SHARE: 626 ret = smu_v13_0_4_get_smu_metrics_data(smu, 627 METRICS_SS_APU_SHARE, 628 (uint32_t *)data); 629 *size = 4; 630 break; 631 case AMDGPU_PP_SENSOR_SS_DGPU_SHARE: 632 ret = smu_v13_0_4_get_smu_metrics_data(smu, 633 METRICS_SS_DGPU_SHARE, 634 (uint32_t *)data); 635 *size = 4; 636 break; 637 default: 638 ret = -EOPNOTSUPP; 639 break; 640 } 641 642 return ret; 643 } 644 645 static int smu_v13_0_4_set_watermarks_table(struct smu_context *smu, 646 struct pp_smu_wm_range_sets *clock_ranges) 647 { 648 int i; 649 int ret = 0; 650 Watermarks_t *table = smu->smu_table.watermarks_table; 651 652 if (!table || !clock_ranges) 653 return -EINVAL; 654 655 if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES || 656 clock_ranges->num_writer_wm_sets > NUM_WM_RANGES) 657 return -EINVAL; 658 659 for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) { 660 table->WatermarkRow[WM_DCFCLK][i].MinClock = 661 clock_ranges->reader_wm_sets[i].min_drain_clk_mhz; 662 table->WatermarkRow[WM_DCFCLK][i].MaxClock = 663 clock_ranges->reader_wm_sets[i].max_drain_clk_mhz; 664 table->WatermarkRow[WM_DCFCLK][i].MinMclk = 665 clock_ranges->reader_wm_sets[i].min_fill_clk_mhz; 666 table->WatermarkRow[WM_DCFCLK][i].MaxMclk = 667 clock_ranges->reader_wm_sets[i].max_fill_clk_mhz; 668 669 table->WatermarkRow[WM_DCFCLK][i].WmSetting = 670 clock_ranges->reader_wm_sets[i].wm_inst; 671 } 672 673 for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) { 674 table->WatermarkRow[WM_SOCCLK][i].MinClock = 675 clock_ranges->writer_wm_sets[i].min_fill_clk_mhz; 676 table->WatermarkRow[WM_SOCCLK][i].MaxClock = 677 clock_ranges->writer_wm_sets[i].max_fill_clk_mhz; 678 
table->WatermarkRow[WM_SOCCLK][i].MinMclk = 679 clock_ranges->writer_wm_sets[i].min_drain_clk_mhz; 680 table->WatermarkRow[WM_SOCCLK][i].MaxMclk = 681 clock_ranges->writer_wm_sets[i].max_drain_clk_mhz; 682 683 table->WatermarkRow[WM_SOCCLK][i].WmSetting = 684 clock_ranges->writer_wm_sets[i].wm_inst; 685 } 686 687 smu->watermarks_bitmap |= WATERMARKS_EXIST; 688 689 /* pass data to smu controller */ 690 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && 691 !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { 692 ret = smu_cmn_write_watermarks_table(smu); 693 if (ret) { 694 dev_err(smu->adev->dev, "Failed to update WMTABLE!"); 695 return ret; 696 } 697 smu->watermarks_bitmap |= WATERMARKS_LOADED; 698 } 699 700 return 0; 701 } 702 703 static bool smu_v13_0_4_clk_dpm_is_enabled(struct smu_context *smu, 704 enum smu_clk_type clk_type) 705 { 706 enum smu_feature_mask feature_id = 0; 707 708 switch (clk_type) { 709 case SMU_MCLK: 710 case SMU_UCLK: 711 case SMU_FCLK: 712 feature_id = SMU_FEATURE_DPM_FCLK_BIT; 713 break; 714 case SMU_GFXCLK: 715 case SMU_SCLK: 716 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT; 717 break; 718 case SMU_SOCCLK: 719 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT; 720 break; 721 case SMU_VCLK: 722 case SMU_DCLK: 723 feature_id = SMU_FEATURE_VCN_DPM_BIT; 724 break; 725 default: 726 return true; 727 } 728 729 return smu_cmn_feature_is_enabled(smu, feature_id); 730 } 731 732 static int smu_v13_0_4_get_dpm_ultimate_freq(struct smu_context *smu, 733 enum smu_clk_type clk_type, 734 uint32_t *min, 735 uint32_t *max) 736 { 737 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 738 uint32_t clock_limit; 739 uint32_t max_dpm_level, min_dpm_level; 740 int ret = 0; 741 742 if (!smu_v13_0_4_clk_dpm_is_enabled(smu, clk_type)) { 743 switch (clk_type) { 744 case SMU_MCLK: 745 case SMU_UCLK: 746 clock_limit = smu->smu_table.boot_values.uclk; 747 break; 748 case SMU_FCLK: 749 clock_limit = smu->smu_table.boot_values.fclk; 750 break; 751 case SMU_GFXCLK: 752 case SMU_SCLK: 753 
clock_limit = smu->smu_table.boot_values.gfxclk; 754 break; 755 case SMU_SOCCLK: 756 clock_limit = smu->smu_table.boot_values.socclk; 757 break; 758 case SMU_VCLK: 759 clock_limit = smu->smu_table.boot_values.vclk; 760 break; 761 case SMU_DCLK: 762 clock_limit = smu->smu_table.boot_values.dclk; 763 break; 764 default: 765 clock_limit = 0; 766 break; 767 } 768 769 /* clock in Mhz unit */ 770 if (min) 771 *min = clock_limit / 100; 772 if (max) 773 *max = clock_limit / 100; 774 775 return 0; 776 } 777 778 if (max) { 779 switch (clk_type) { 780 case SMU_GFXCLK: 781 case SMU_SCLK: 782 *max = clk_table->MaxGfxClk; 783 break; 784 case SMU_MCLK: 785 case SMU_UCLK: 786 case SMU_FCLK: 787 max_dpm_level = 0; 788 break; 789 case SMU_SOCCLK: 790 max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1; 791 break; 792 case SMU_VCLK: 793 case SMU_DCLK: 794 max_dpm_level = clk_table->VcnClkLevelsEnabled - 1; 795 break; 796 default: 797 return -EINVAL; 798 } 799 800 if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) { 801 ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, 802 max_dpm_level, 803 max); 804 if (ret) 805 return ret; 806 } 807 } 808 809 if (min) { 810 switch (clk_type) { 811 case SMU_GFXCLK: 812 case SMU_SCLK: 813 *min = clk_table->MinGfxClk; 814 break; 815 case SMU_MCLK: 816 case SMU_UCLK: 817 case SMU_FCLK: 818 min_dpm_level = clk_table->NumDfPstatesEnabled - 1; 819 break; 820 case SMU_SOCCLK: 821 min_dpm_level = 0; 822 break; 823 case SMU_VCLK: 824 case SMU_DCLK: 825 min_dpm_level = 0; 826 break; 827 default: 828 return -EINVAL; 829 } 830 831 if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) { 832 ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, 833 min_dpm_level, 834 min); 835 } 836 } 837 838 return ret; 839 } 840 841 static int smu_v13_0_4_set_soft_freq_limited_range(struct smu_context *smu, 842 enum smu_clk_type clk_type, 843 uint32_t min, 844 uint32_t max) 845 { 846 enum smu_message_type msg_set_min, msg_set_max; 847 uint32_t min_clk = min; 848 uint32_t 
max_clk = max; 849 int ret = 0; 850 851 if (!smu_v13_0_4_clk_dpm_is_enabled(smu, clk_type)) 852 return -EINVAL; 853 854 switch (clk_type) { 855 case SMU_GFXCLK: 856 case SMU_SCLK: 857 msg_set_min = SMU_MSG_SetHardMinGfxClk; 858 msg_set_max = SMU_MSG_SetSoftMaxGfxClk; 859 break; 860 case SMU_FCLK: 861 msg_set_min = SMU_MSG_SetHardMinFclkByFreq; 862 msg_set_max = SMU_MSG_SetSoftMaxFclkByFreq; 863 break; 864 case SMU_SOCCLK: 865 msg_set_min = SMU_MSG_SetHardMinSocclkByFreq; 866 msg_set_max = SMU_MSG_SetSoftMaxSocclkByFreq; 867 break; 868 case SMU_VCLK: 869 case SMU_DCLK: 870 msg_set_min = SMU_MSG_SetHardMinVcn; 871 msg_set_max = SMU_MSG_SetSoftMaxVcn; 872 break; 873 default: 874 return -EINVAL; 875 } 876 877 if (clk_type == SMU_VCLK) { 878 min_clk = min << SMU_13_VCLK_SHIFT; 879 max_clk = max << SMU_13_VCLK_SHIFT; 880 } 881 882 ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min_clk, NULL); 883 if (ret) 884 return ret; 885 886 return smu_cmn_send_smc_msg_with_param(smu, msg_set_max, 887 max_clk, NULL); 888 } 889 890 static int smu_v13_0_4_force_clk_levels(struct smu_context *smu, 891 enum smu_clk_type clk_type, 892 uint32_t mask) 893 { 894 uint32_t soft_min_level = 0, soft_max_level = 0; 895 uint32_t min_freq = 0, max_freq = 0; 896 int ret = 0; 897 898 soft_min_level = mask ? (ffs(mask) - 1) : 0; 899 soft_max_level = mask ? 
(fls(mask) - 1) : 0; 900 901 switch (clk_type) { 902 case SMU_SOCCLK: 903 case SMU_FCLK: 904 case SMU_VCLK: 905 case SMU_DCLK: 906 ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq); 907 if (ret) 908 break; 909 910 ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq); 911 if (ret) 912 break; 913 914 ret = smu_v13_0_4_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); 915 break; 916 default: 917 ret = -EINVAL; 918 break; 919 } 920 921 return ret; 922 } 923 924 static int smu_v13_0_4_get_dpm_profile_freq(struct smu_context *smu, 925 enum amd_dpm_forced_level level, 926 enum smu_clk_type clk_type, 927 uint32_t *min_clk, 928 uint32_t *max_clk) 929 { 930 int ret = 0; 931 uint32_t clk_limit = 0; 932 933 switch (clk_type) { 934 case SMU_GFXCLK: 935 case SMU_SCLK: 936 clk_limit = SMU_13_0_4_UMD_PSTATE_GFXCLK; 937 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 938 smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &clk_limit); 939 else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) 940 smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, &clk_limit, NULL); 941 break; 942 case SMU_SOCCLK: 943 clk_limit = SMU_13_0_4_UMD_PSTATE_SOCCLK; 944 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 945 smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &clk_limit); 946 break; 947 case SMU_FCLK: 948 clk_limit = SMU_13_0_4_UMD_PSTATE_FCLK; 949 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 950 smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &clk_limit); 951 else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) 952 smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, &clk_limit, NULL); 953 break; 954 case SMU_VCLK: 955 smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &clk_limit); 956 break; 957 case SMU_DCLK: 958 smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &clk_limit); 959 break; 960 default: 961 ret = -EINVAL; 962 break; 963 } 964 *min_clk = *max_clk = clk_limit; 965 return ret; 966 
} 967 968 static int smu_v13_0_4_set_performance_level(struct smu_context *smu, 969 enum amd_dpm_forced_level level) 970 { 971 struct amdgpu_device *adev = smu->adev; 972 uint32_t sclk_min = 0, sclk_max = 0; 973 uint32_t fclk_min = 0, fclk_max = 0; 974 uint32_t socclk_min = 0, socclk_max = 0; 975 uint32_t vclk_min = 0, vclk_max = 0; 976 uint32_t dclk_min = 0, dclk_max = 0; 977 int ret = 0; 978 979 switch (level) { 980 case AMD_DPM_FORCED_LEVEL_HIGH: 981 smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max); 982 smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max); 983 smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max); 984 smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_max); 985 smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_max); 986 sclk_min = sclk_max; 987 fclk_min = fclk_max; 988 socclk_min = socclk_max; 989 vclk_min = vclk_max; 990 dclk_min = dclk_max; 991 break; 992 case AMD_DPM_FORCED_LEVEL_LOW: 993 smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL); 994 smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL); 995 smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL); 996 smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, NULL); 997 smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, NULL); 998 sclk_max = sclk_min; 999 fclk_max = fclk_min; 1000 socclk_max = socclk_min; 1001 vclk_max = vclk_min; 1002 dclk_max = dclk_min; 1003 break; 1004 case AMD_DPM_FORCED_LEVEL_AUTO: 1005 smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max); 1006 smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max); 1007 smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max); 1008 smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, &vclk_max); 1009 smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, &dclk_max); 1010 break; 1011 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 1012 case 
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 1013 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 1014 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 1015 smu_v13_0_4_get_dpm_profile_freq(smu, level, SMU_SCLK, &sclk_min, &sclk_max); 1016 smu_v13_0_4_get_dpm_profile_freq(smu, level, SMU_FCLK, &fclk_min, &fclk_max); 1017 smu_v13_0_4_get_dpm_profile_freq(smu, level, SMU_SOCCLK, &socclk_min, &socclk_max); 1018 smu_v13_0_4_get_dpm_profile_freq(smu, level, SMU_VCLK, &vclk_min, &vclk_max); 1019 smu_v13_0_4_get_dpm_profile_freq(smu, level, SMU_DCLK, &dclk_min, &dclk_max); 1020 break; 1021 case AMD_DPM_FORCED_LEVEL_MANUAL: 1022 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 1023 return 0; 1024 default: 1025 dev_err(adev->dev, "Invalid performance level %d\n", level); 1026 return -EINVAL; 1027 } 1028 1029 if (sclk_min && sclk_max) { 1030 ret = smu_v13_0_4_set_soft_freq_limited_range(smu, 1031 SMU_SCLK, 1032 sclk_min, 1033 sclk_max); 1034 if (ret) 1035 return ret; 1036 1037 smu->gfx_actual_hard_min_freq = sclk_min; 1038 smu->gfx_actual_soft_max_freq = sclk_max; 1039 } 1040 1041 if (fclk_min && fclk_max) { 1042 ret = smu_v13_0_4_set_soft_freq_limited_range(smu, 1043 SMU_FCLK, 1044 fclk_min, 1045 fclk_max); 1046 if (ret) 1047 return ret; 1048 } 1049 1050 if (socclk_min && socclk_max) { 1051 ret = smu_v13_0_4_set_soft_freq_limited_range(smu, 1052 SMU_SOCCLK, 1053 socclk_min, 1054 socclk_max); 1055 if (ret) 1056 return ret; 1057 } 1058 1059 if (vclk_min && vclk_max) { 1060 ret = smu_v13_0_4_set_soft_freq_limited_range(smu, 1061 SMU_VCLK, 1062 vclk_min, 1063 vclk_max); 1064 if (ret) 1065 return ret; 1066 } 1067 1068 if (dclk_min && dclk_max) { 1069 ret = smu_v13_0_4_set_soft_freq_limited_range(smu, 1070 SMU_DCLK, 1071 dclk_min, 1072 dclk_max); 1073 if (ret) 1074 return ret; 1075 } 1076 return ret; 1077 } 1078 1079 static int smu_v13_0_4_mode2_reset(struct smu_context *smu) 1080 { 1081 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, 1082 SMU_RESET_MODE_2, NULL); 1083 } 1084 1085 
/*
 * Seed the fine-grain gfx clock bounds from the DpmClocks table:
 * defaults come from the table's Min/MaxGfxClk, and the "actual"
 * values start at 0 meaning "no user override applied yet".
 */
static int smu_v13_0_4_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
	smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
	smu->gfx_actual_hard_min_freq = 0;
	smu->gfx_actual_soft_max_freq = 0;

	return 0;
}

/* ASIC-specific powerplay table ops for SMU v13.0.4. */
static const struct pptable_funcs smu_v13_0_4_ppt_funcs = {
	.check_fw_status = smu_v13_0_check_fw_status,
	.check_fw_version = smu_v13_0_check_fw_version,
	.init_smc_tables = smu_v13_0_4_init_smc_tables,
	.fini_smc_tables = smu_v13_0_4_fini_smc_tables,
	.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
	.system_features_control = smu_v13_0_4_system_features_control,
	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
	.send_smc_msg = smu_cmn_send_smc_msg,
	.dpm_set_vcn_enable = smu_v13_0_set_vcn_enable,
	.dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
	.set_default_dpm_table = smu_v13_0_set_default_dpm_tables,
	.read_sensor = smu_v13_0_4_read_sensor,
	.is_dpm_running = smu_v13_0_4_is_dpm_running,
	.set_watermarks_table = smu_v13_0_4_set_watermarks_table,
	.get_gpu_metrics = smu_v13_0_4_get_gpu_metrics,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_driver_table_location = smu_v13_0_set_driver_table_location,
	.gfx_off_control = smu_v13_0_gfx_off_control,
	.mode2_reset = smu_v13_0_4_mode2_reset,
	.get_dpm_ultimate_freq = smu_v13_0_4_get_dpm_ultimate_freq,
	.od_edit_dpm_table = smu_v13_0_od_edit_dpm_table,
	.print_clk_levels = smu_v13_0_4_print_clk_levels,
	.force_clk_levels = smu_v13_0_4_force_clk_levels,
	.set_performance_level = smu_v13_0_4_set_performance_level,
	.set_fine_grain_gfx_freq_parameters = smu_v13_0_4_set_fine_grain_gfx_freq_parameters,
	.set_gfx_power_up_by_imu = smu_v13_0_set_gfx_power_up_by_imu,
};

/*
 * Program the MP1 C2P mailbox register offsets used for the
 * driver <-> PMFW message protocol (param, message, response).
 */
static void smu_v13_0_4_set_smu_mailbox_registers(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
	smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
	smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
}

/*
 * Public entry point: wire this ASIC's ppt ops, maps and driver-if
 * version into the smu context. IP v13.0.4 uses the local mailbox
 * offsets; other revisions fall back to the generic v13.0 ones.
 */
void smu_v13_0_4_set_ppt_funcs(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->ppt_funcs = &smu_v13_0_4_ppt_funcs;
	smu->message_map = smu_v13_0_4_message_map;
	smu->feature_map = smu_v13_0_4_feature_mask_map;
	smu->table_map = smu_v13_0_4_table_map;
	smu->smc_driver_if_version = SMU13_0_4_DRIVER_IF_VERSION;
	smu->is_apu = true;

	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 4))
		smu_v13_0_4_set_smu_mailbox_registers(smu);
	else
		smu_v13_0_set_smu_mailbox_registers(smu);
}