1 /* 2 * Copyright 2020 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #define SWSMU_CODE_LAYER_L2 25 26 #include "amdgpu.h" 27 #include "amdgpu_smu.h" 28 #include "smu_v13_0.h" 29 #include "smu13_driver_if_yellow_carp.h" 30 #include "yellow_carp_ppt.h" 31 #include "smu_v13_0_1_ppsmc.h" 32 #include "smu_v13_0_1_pmfw.h" 33 #include "smu_cmn.h" 34 35 /* 36 * DO NOT use these for err/warn/info/debug messages. 37 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 38 * They are more MGPU friendly. 
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

/* SMUIO register used by yellow_carp_get_gfxoff_status() below. */
#define regSMUIO_GFX_MISC_CNTL				0x00c5
#define regSMUIO_GFX_MISC_CNTL_BASE_IDX			0
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK	0x00000006L
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT	0x1L

/* UMD pstate clocks in MHz, chosen per MP1 IP version in
 * yellow_carp_get_umd_pstate_clk_default().
 */
#define SMU_13_0_8_UMD_PSTATE_GFXCLK	533
#define SMU_13_0_8_UMD_PSTATE_SOCCLK	533
#define SMU_13_0_8_UMD_PSTATE_FCLK	800

#define SMU_13_0_1_UMD_PSTATE_GFXCLK	700
#define SMU_13_0_1_UMD_PSTATE_SOCCLK	678
#define SMU_13_0_1_UMD_PSTATE_FCLK	1800

#define FEATURE_MASK(feature) (1ULL << feature)

/* Mask of the DPM-related feature bits: any of them enabled is treated as
 * "DPM is running" by yellow_carp_is_dpm_running().
 */
#define SMC_DPM_FEATURE ( \
	FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \
	FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \
	FEATURE_MASK(FEATURE_GFX_DPM_BIT))

/* Common SMU message index -> yellow carp PMFW message index.
 * The trailing 1 marks each mapping as valid.
 */
static struct cmn2asic_msg_mapping yellow_carp_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,			PPSMC_MSG_TestMessage,			1),
	MSG_MAP(GetSmuVersion,			PPSMC_MSG_GetSmuVersion,		1),
	MSG_MAP(GetDriverIfVersion,		PPSMC_MSG_GetDriverIfVersion,		1),
	MSG_MAP(EnableGfxOff,			PPSMC_MSG_EnableGfxOff,			1),
	MSG_MAP(AllowGfxOff,			PPSMC_MSG_AllowGfxOff,			1),
	MSG_MAP(DisallowGfxOff,			PPSMC_MSG_DisallowGfxOff,		1),
	MSG_MAP(PowerDownVcn,			PPSMC_MSG_PowerDownVcn,			1),
	MSG_MAP(PowerUpVcn,			PPSMC_MSG_PowerUpVcn,			1),
	MSG_MAP(SetHardMinVcn,			PPSMC_MSG_SetHardMinVcn,		1),
	MSG_MAP(PrepareMp1ForUnload,		PPSMC_MSG_PrepareMp1ForUnload,		1),
	MSG_MAP(SetDriverDramAddrHigh,		PPSMC_MSG_SetDriverDramAddrHigh,	1),
	MSG_MAP(SetDriverDramAddrLow,		PPSMC_MSG_SetDriverDramAddrLow,		1),
	MSG_MAP(TransferTableSmu2Dram,		PPSMC_MSG_TransferTableSmu2Dram,	1),
	MSG_MAP(TransferTableDram2Smu,		PPSMC_MSG_TransferTableDram2Smu,	1),
	MSG_MAP(GfxDeviceDriverReset,		PPSMC_MSG_GfxDeviceDriverReset,		1),
	MSG_MAP(GetEnabledSmuFeatures,		PPSMC_MSG_GetEnabledSmuFeatures,	1),
	MSG_MAP(SetHardMinSocclkByFreq,		PPSMC_MSG_SetHardMinSocclkByFreq,	1),
	MSG_MAP(SetSoftMinVcn,			PPSMC_MSG_SetSoftMinVcn,		1),
	MSG_MAP(GetGfxclkFrequency,		PPSMC_MSG_GetGfxclkFrequency,		1),
	MSG_MAP(GetFclkFrequency,		PPSMC_MSG_GetFclkFrequency,		1),
	MSG_MAP(SetSoftMaxGfxClk,		PPSMC_MSG_SetSoftMaxGfxClk,		1),
	MSG_MAP(SetHardMinGfxClk,		PPSMC_MSG_SetHardMinGfxClk,		1),
	MSG_MAP(SetSoftMaxSocclkByFreq,		PPSMC_MSG_SetSoftMaxSocclkByFreq,	1),
	MSG_MAP(SetSoftMaxFclkByFreq,		PPSMC_MSG_SetSoftMaxFclkByFreq,		1),
	MSG_MAP(SetSoftMaxVcn,			PPSMC_MSG_SetSoftMaxVcn,		1),
	MSG_MAP(SetPowerLimitPercentage,	PPSMC_MSG_SetPowerLimitPercentage,	1),
	MSG_MAP(PowerDownJpeg,			PPSMC_MSG_PowerDownJpeg,		1),
	MSG_MAP(PowerUpJpeg,			PPSMC_MSG_PowerUpJpeg,			1),
	MSG_MAP(SetHardMinFclkByFreq,		PPSMC_MSG_SetHardMinFclkByFreq,		1),
	MSG_MAP(SetSoftMinSocclkByFreq,		PPSMC_MSG_SetSoftMinSocclkByFreq,	1),
};

/* Common SMU feature bit -> yellow carp PMFW feature bit. */
static struct cmn2asic_mapping yellow_carp_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(CCLK_DPM),
	FEA_MAP(FAN_CONTROLLER),
	FEA_MAP(PPT),
	FEA_MAP(TDC),
	FEA_MAP(THERMAL),
	FEA_MAP(ULV),
	FEA_MAP(VCN_DPM),
	FEA_MAP_REVERSE(FCLK),
	FEA_MAP_REVERSE(SOCCLK),
	FEA_MAP(LCLK_DPM),
	FEA_MAP(SHUBCLK_DPM),
	FEA_MAP(DCFCLK_DPM),
	FEA_MAP_HALF_REVERSE(GFX),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_DCFCLK),
	FEA_MAP(DS_FCLK),
	FEA_MAP(DS_MP1CLK),
	FEA_MAP(DS_MP0CLK),
	FEA_MAP(GFX_DEM),
	FEA_MAP(PSI),
	FEA_MAP(PROCHOT),
	FEA_MAP(CPUOFF),
	FEA_MAP(STAPM),
	FEA_MAP(S0I3),
	FEA_MAP(PERF_LIMIT),
	FEA_MAP(CORE_DLDO),
	FEA_MAP(RSMU_LOW_POWER),
	FEA_MAP(SMN_LOW_POWER),
	FEA_MAP(THM_LOW_POWER),
	FEA_MAP(SMUIO_LOW_POWER),
	FEA_MAP(MP1_LOW_POWER),
	FEA_MAP(DS_VCN),
	FEA_MAP(CPPC),
	FEA_MAP(DF_CSTATES),
	FEA_MAP(MSMU_LOW_POWER),
	FEA_MAP(ATHUB_PG),
};

/* Driver table id -> PMFW table id (only the ids valid on yellow carp). */
static struct cmn2asic_mapping yellow_carp_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP_VALID(WATERMARKS),
	TAB_MAP_VALID(SMU_METRICS),
	TAB_MAP_VALID(CUSTOM_DPM),
	TAB_MAP_VALID(DPMCLOCKS),
};

/*
 * Allocate the driver-side shadow copies of the PMFW tables (DPM clocks,
 * metrics, watermarks) plus the gpu_metrics buffer exposed to userspace.
 * On any allocation failure all previously allocated buffers are released
 * via the goto unwind chain and -ENOMEM is returned.
 */
static int yellow_carp_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
		PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
		PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
		PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
	if (!smu_table->clocks_table)
		goto err0_out;

	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
	if (!smu_table->metrics_table)
		goto err1_out;
	/* force a metrics refresh on first use */
	smu_table->metrics_time = 0;

	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
	if (!smu_table->watermarks_table)
		goto err2_out;

	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table)
		goto err3_out;

	return 0;

err3_out:
	kfree(smu_table->watermarks_table);
err2_out:
	kfree(smu_table->metrics_table);
err1_out:
	kfree(smu_table->clocks_table);
err0_out:
	return -ENOMEM;
}

/*
 * Release everything allocated by yellow_carp_init_smc_tables(). Pointers
 * are NULLed afterwards so a double fini is harmless.
 */
static int yellow_carp_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	kfree(smu_table->clocks_table);
	smu_table->clocks_table = NULL;

	kfree(smu_table->metrics_table);
	smu_table->metrics_table = NULL;

	kfree(smu_table->watermarks_table);
	smu_table->watermarks_table = NULL;

	kfree(smu_table->gpu_metrics_table);
	smu_table->gpu_metrics_table = NULL;

	return 0;
}

/*
 * On feature disable (driver teardown), tell MP1 to prepare for unload —
 * except during S0ix entry, where the firmware stays live. Enabling is a
 * no-op here.
 */
static int yellow_carp_system_features_control(struct smu_context *smu, bool en)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (!en && !adev->in_s0ix)
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);

	return ret;
}

/* Power up/down the VCN block through the PMFW. */
static int yellow_carp_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
{
	int ret = 0;

	/* vcn dpm on is a prerequisite for vcn power gate messages */
	if (enable)
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn,
						      0, NULL);
	else
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn,
						      0, NULL);

	return ret;
}

/* Power up/down the JPEG block through the PMFW. */
static int yellow_carp_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (enable)
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg,
						      0, NULL);
	else
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_PowerDownJpeg, 0,
						      NULL);

	return ret;
}


/*
 * Report whether any DPM feature (see SMC_DPM_FEATURE) is currently
 * enabled in the PMFW. Returns false if the enabled-feature query fails.
 */
static bool yellow_carp_is_dpm_running(struct smu_context *smu)
{
	int ret = 0;
	uint64_t feature_enabled;

	ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);

	if (ret)
		return false;

	return !!(feature_enabled & SMC_DPM_FEATURE);
}

/* Post-init hook: enable GFXOFF (allow must come after enable on YC). */
static int yellow_carp_post_smu_init(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	/* allow message will be sent after enable message on Yellow Carp*/
	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
	if (ret)
		dev_err(adev->dev, "Failed to Enable GfxOff!\n");
	return ret;
}

/* Ask the PMFW for a GPU reset of the given mode. */
static int yellow_carp_mode_reset(struct smu_context *smu, int type)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GfxDeviceDriverReset, type, NULL); 284 if (ret) 285 dev_err(smu->adev->dev, "Failed to mode reset!\n"); 286 287 return ret; 288 } 289 290 static int yellow_carp_mode2_reset(struct smu_context *smu) 291 { 292 return yellow_carp_mode_reset(smu, SMU_RESET_MODE_2); 293 } 294 295 296 static void yellow_carp_get_ss_power_percent(SmuMetrics_t *metrics, 297 uint32_t *apu_percent, uint32_t *dgpu_percent) 298 { 299 uint32_t apu_boost = 0; 300 uint32_t dgpu_boost = 0; 301 uint16_t apu_limit = 0; 302 uint16_t dgpu_limit = 0; 303 uint16_t apu_power = 0; 304 uint16_t dgpu_power = 0; 305 306 /* APU and dGPU power values are reported in milli Watts 307 * and STAPM power limits are in Watts */ 308 apu_power = metrics->ApuPower/1000; 309 apu_limit = metrics->StapmOpnLimit; 310 if (apu_power > apu_limit && apu_limit != 0) 311 apu_boost = ((apu_power - apu_limit) * 100) / apu_limit; 312 apu_boost = (apu_boost > 100) ? 100 : apu_boost; 313 314 dgpu_power = metrics->dGpuPower/1000; 315 if (metrics->StapmCurrentLimit > metrics->StapmOpnLimit) 316 dgpu_limit = metrics->StapmCurrentLimit - metrics->StapmOpnLimit; 317 if (dgpu_power > dgpu_limit && dgpu_limit != 0) 318 dgpu_boost = ((dgpu_power - dgpu_limit) * 100) / dgpu_limit; 319 dgpu_boost = (dgpu_boost > 100) ? 
100 : dgpu_boost; 320 321 if (dgpu_boost >= apu_boost) 322 apu_boost = 0; 323 else 324 dgpu_boost = 0; 325 326 *apu_percent = apu_boost; 327 *dgpu_percent = dgpu_boost; 328 329 } 330 331 static int yellow_carp_get_smu_metrics_data(struct smu_context *smu, 332 MetricsMember_t member, 333 uint32_t *value) 334 { 335 struct smu_table_context *smu_table = &smu->smu_table; 336 337 SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; 338 int ret = 0; 339 uint32_t apu_percent = 0; 340 uint32_t dgpu_percent = 0; 341 342 ret = smu_cmn_get_metrics_table(smu, NULL, false); 343 if (ret) 344 return ret; 345 346 switch (member) { 347 case METRICS_AVERAGE_GFXCLK: 348 *value = metrics->GfxclkFrequency; 349 break; 350 case METRICS_AVERAGE_SOCCLK: 351 *value = metrics->SocclkFrequency; 352 break; 353 case METRICS_AVERAGE_VCLK: 354 *value = metrics->VclkFrequency; 355 break; 356 case METRICS_AVERAGE_DCLK: 357 *value = metrics->DclkFrequency; 358 break; 359 case METRICS_AVERAGE_UCLK: 360 *value = metrics->MemclkFrequency; 361 break; 362 case METRICS_AVERAGE_GFXACTIVITY: 363 *value = metrics->GfxActivity / 100; 364 break; 365 case METRICS_AVERAGE_VCNACTIVITY: 366 *value = metrics->UvdActivity; 367 break; 368 case METRICS_AVERAGE_SOCKETPOWER: 369 *value = (metrics->CurrentSocketPower << 8) / 1000; 370 break; 371 case METRICS_TEMPERATURE_EDGE: 372 *value = metrics->GfxTemperature / 100 * 373 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 374 break; 375 case METRICS_TEMPERATURE_HOTSPOT: 376 *value = metrics->SocTemperature / 100 * 377 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 378 break; 379 case METRICS_THROTTLER_STATUS: 380 *value = metrics->ThrottlerStatus; 381 break; 382 case METRICS_VOLTAGE_VDDGFX: 383 *value = metrics->Voltage[0]; 384 break; 385 case METRICS_VOLTAGE_VDDSOC: 386 *value = metrics->Voltage[1]; 387 break; 388 case METRICS_SS_APU_SHARE: 389 /* return the percentage of APU power boost 390 * with respect to APU's power limit. 
391 */ 392 yellow_carp_get_ss_power_percent(metrics, &apu_percent, &dgpu_percent); 393 *value = apu_percent; 394 break; 395 case METRICS_SS_DGPU_SHARE: 396 /* return the percentage of dGPU power boost 397 * with respect to dGPU's power limit. 398 */ 399 yellow_carp_get_ss_power_percent(metrics, &apu_percent, &dgpu_percent); 400 *value = dgpu_percent; 401 break; 402 default: 403 *value = UINT_MAX; 404 break; 405 } 406 407 return ret; 408 } 409 410 static int yellow_carp_read_sensor(struct smu_context *smu, 411 enum amd_pp_sensors sensor, 412 void *data, uint32_t *size) 413 { 414 int ret = 0; 415 416 if (!data || !size) 417 return -EINVAL; 418 419 switch (sensor) { 420 case AMDGPU_PP_SENSOR_GPU_LOAD: 421 ret = yellow_carp_get_smu_metrics_data(smu, 422 METRICS_AVERAGE_GFXACTIVITY, 423 (uint32_t *)data); 424 *size = 4; 425 break; 426 case AMDGPU_PP_SENSOR_GPU_POWER: 427 ret = yellow_carp_get_smu_metrics_data(smu, 428 METRICS_AVERAGE_SOCKETPOWER, 429 (uint32_t *)data); 430 *size = 4; 431 break; 432 case AMDGPU_PP_SENSOR_EDGE_TEMP: 433 ret = yellow_carp_get_smu_metrics_data(smu, 434 METRICS_TEMPERATURE_EDGE, 435 (uint32_t *)data); 436 *size = 4; 437 break; 438 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 439 ret = yellow_carp_get_smu_metrics_data(smu, 440 METRICS_TEMPERATURE_HOTSPOT, 441 (uint32_t *)data); 442 *size = 4; 443 break; 444 case AMDGPU_PP_SENSOR_GFX_MCLK: 445 ret = yellow_carp_get_smu_metrics_data(smu, 446 METRICS_AVERAGE_UCLK, 447 (uint32_t *)data); 448 *(uint32_t *)data *= 100; 449 *size = 4; 450 break; 451 case AMDGPU_PP_SENSOR_GFX_SCLK: 452 ret = yellow_carp_get_smu_metrics_data(smu, 453 METRICS_AVERAGE_GFXCLK, 454 (uint32_t *)data); 455 *(uint32_t *)data *= 100; 456 *size = 4; 457 break; 458 case AMDGPU_PP_SENSOR_VDDGFX: 459 ret = yellow_carp_get_smu_metrics_data(smu, 460 METRICS_VOLTAGE_VDDGFX, 461 (uint32_t *)data); 462 *size = 4; 463 break; 464 case AMDGPU_PP_SENSOR_VDDNB: 465 ret = yellow_carp_get_smu_metrics_data(smu, 466 METRICS_VOLTAGE_VDDSOC, 467 
(uint32_t *)data); 468 *size = 4; 469 break; 470 case AMDGPU_PP_SENSOR_SS_APU_SHARE: 471 ret = yellow_carp_get_smu_metrics_data(smu, 472 METRICS_SS_APU_SHARE, 473 (uint32_t *)data); 474 *size = 4; 475 break; 476 case AMDGPU_PP_SENSOR_SS_DGPU_SHARE: 477 ret = yellow_carp_get_smu_metrics_data(smu, 478 METRICS_SS_DGPU_SHARE, 479 (uint32_t *)data); 480 *size = 4; 481 break; 482 default: 483 ret = -EOPNOTSUPP; 484 break; 485 } 486 487 return ret; 488 } 489 490 static int yellow_carp_set_watermarks_table(struct smu_context *smu, 491 struct pp_smu_wm_range_sets *clock_ranges) 492 { 493 int i; 494 int ret = 0; 495 Watermarks_t *table = smu->smu_table.watermarks_table; 496 497 if (!table || !clock_ranges) 498 return -EINVAL; 499 500 if (clock_ranges) { 501 if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES || 502 clock_ranges->num_writer_wm_sets > NUM_WM_RANGES) 503 return -EINVAL; 504 505 for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) { 506 table->WatermarkRow[WM_DCFCLK][i].MinClock = 507 clock_ranges->reader_wm_sets[i].min_drain_clk_mhz; 508 table->WatermarkRow[WM_DCFCLK][i].MaxClock = 509 clock_ranges->reader_wm_sets[i].max_drain_clk_mhz; 510 table->WatermarkRow[WM_DCFCLK][i].MinMclk = 511 clock_ranges->reader_wm_sets[i].min_fill_clk_mhz; 512 table->WatermarkRow[WM_DCFCLK][i].MaxMclk = 513 clock_ranges->reader_wm_sets[i].max_fill_clk_mhz; 514 515 table->WatermarkRow[WM_DCFCLK][i].WmSetting = 516 clock_ranges->reader_wm_sets[i].wm_inst; 517 } 518 519 for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) { 520 table->WatermarkRow[WM_SOCCLK][i].MinClock = 521 clock_ranges->writer_wm_sets[i].min_fill_clk_mhz; 522 table->WatermarkRow[WM_SOCCLK][i].MaxClock = 523 clock_ranges->writer_wm_sets[i].max_fill_clk_mhz; 524 table->WatermarkRow[WM_SOCCLK][i].MinMclk = 525 clock_ranges->writer_wm_sets[i].min_drain_clk_mhz; 526 table->WatermarkRow[WM_SOCCLK][i].MaxMclk = 527 clock_ranges->writer_wm_sets[i].max_drain_clk_mhz; 528 529 table->WatermarkRow[WM_SOCCLK][i].WmSetting 
= 530 clock_ranges->writer_wm_sets[i].wm_inst; 531 } 532 533 smu->watermarks_bitmap |= WATERMARKS_EXIST; 534 } 535 536 /* pass data to smu controller */ 537 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && 538 !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { 539 ret = smu_cmn_write_watermarks_table(smu); 540 if (ret) { 541 dev_err(smu->adev->dev, "Failed to update WMTABLE!"); 542 return ret; 543 } 544 smu->watermarks_bitmap |= WATERMARKS_LOADED; 545 } 546 547 return 0; 548 } 549 550 static ssize_t yellow_carp_get_gpu_metrics(struct smu_context *smu, 551 void **table) 552 { 553 struct smu_table_context *smu_table = &smu->smu_table; 554 struct gpu_metrics_v2_1 *gpu_metrics = 555 (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table; 556 SmuMetrics_t metrics; 557 int ret = 0; 558 559 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 560 if (ret) 561 return ret; 562 563 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1); 564 565 gpu_metrics->temperature_gfx = metrics.GfxTemperature; 566 gpu_metrics->temperature_soc = metrics.SocTemperature; 567 memcpy(&gpu_metrics->temperature_core[0], 568 &metrics.CoreTemperature[0], 569 sizeof(uint16_t) * 8); 570 gpu_metrics->temperature_l3[0] = metrics.L3Temperature; 571 572 gpu_metrics->average_gfx_activity = metrics.GfxActivity; 573 gpu_metrics->average_mm_activity = metrics.UvdActivity; 574 575 gpu_metrics->average_socket_power = metrics.CurrentSocketPower; 576 gpu_metrics->average_gfx_power = metrics.Power[0]; 577 gpu_metrics->average_soc_power = metrics.Power[1]; 578 memcpy(&gpu_metrics->average_core_power[0], 579 &metrics.CorePower[0], 580 sizeof(uint16_t) * 8); 581 582 gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency; 583 gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency; 584 gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency; 585 gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency; 586 gpu_metrics->average_vclk_frequency = metrics.VclkFrequency; 587 
gpu_metrics->average_dclk_frequency = metrics.DclkFrequency; 588 589 memcpy(&gpu_metrics->current_coreclk[0], 590 &metrics.CoreFrequency[0], 591 sizeof(uint16_t) * 8); 592 gpu_metrics->current_l3clk[0] = metrics.L3Frequency; 593 594 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 595 596 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 597 598 *table = (void *)gpu_metrics; 599 600 return sizeof(struct gpu_metrics_v2_1); 601 } 602 603 /** 604 * yellow_carp_get_gfxoff_status - get gfxoff status 605 * 606 * @smu: smu_context pointer 607 * 608 * This function will be used to get gfxoff status 609 * 610 * Returns 0=GFXOFF(default). 611 * Returns 1=Transition out of GFX State. 612 * Returns 2=Not in GFXOFF. 613 * Returns 3=Transition into GFXOFF. 614 */ 615 static uint32_t yellow_carp_get_gfxoff_status(struct smu_context *smu) 616 { 617 uint32_t reg; 618 uint32_t gfxoff_status = 0; 619 struct amdgpu_device *adev = smu->adev; 620 621 reg = RREG32_SOC15(SMUIO, 0, regSMUIO_GFX_MISC_CNTL); 622 gfxoff_status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK) 623 >> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT; 624 625 return gfxoff_status; 626 } 627 628 static int yellow_carp_set_default_dpm_tables(struct smu_context *smu) 629 { 630 struct smu_table_context *smu_table = &smu->smu_table; 631 632 return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); 633 } 634 635 static int yellow_carp_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, 636 long input[], uint32_t size) 637 { 638 struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); 639 int ret = 0; 640 641 /* Only allowed in manual mode */ 642 if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) 643 return -EINVAL; 644 645 switch (type) { 646 case PP_OD_EDIT_SCLK_VDDC_TABLE: 647 if (size != 2) { 648 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 649 return -EINVAL; 650 } 651 652 if (input[0] == 0) { 653 if (input[1] < 
smu->gfx_default_hard_min_freq) { 654 dev_warn(smu->adev->dev, 655 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n", 656 input[1], smu->gfx_default_hard_min_freq); 657 return -EINVAL; 658 } 659 smu->gfx_actual_hard_min_freq = input[1]; 660 } else if (input[0] == 1) { 661 if (input[1] > smu->gfx_default_soft_max_freq) { 662 dev_warn(smu->adev->dev, 663 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n", 664 input[1], smu->gfx_default_soft_max_freq); 665 return -EINVAL; 666 } 667 smu->gfx_actual_soft_max_freq = input[1]; 668 } else { 669 return -EINVAL; 670 } 671 break; 672 case PP_OD_RESTORE_DEFAULT_TABLE: 673 if (size != 0) { 674 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 675 return -EINVAL; 676 } else { 677 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 678 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 679 } 680 break; 681 case PP_OD_COMMIT_DPM_TABLE: 682 if (size != 0) { 683 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 684 return -EINVAL; 685 } else { 686 if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) { 687 dev_err(smu->adev->dev, 688 "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n", 689 smu->gfx_actual_hard_min_freq, 690 smu->gfx_actual_soft_max_freq); 691 return -EINVAL; 692 } 693 694 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, 695 smu->gfx_actual_hard_min_freq, NULL); 696 if (ret) { 697 dev_err(smu->adev->dev, "Set hard min sclk failed!"); 698 return ret; 699 } 700 701 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, 702 smu->gfx_actual_soft_max_freq, NULL); 703 if (ret) { 704 dev_err(smu->adev->dev, "Set soft max sclk failed!"); 705 return ret; 706 } 707 } 708 break; 709 default: 710 return -ENOSYS; 711 } 712 713 return ret; 714 } 715 716 static int yellow_carp_get_current_clk_freq(struct smu_context 
*smu, 717 enum smu_clk_type clk_type, 718 uint32_t *value) 719 { 720 MetricsMember_t member_type; 721 722 switch (clk_type) { 723 case SMU_SOCCLK: 724 member_type = METRICS_AVERAGE_SOCCLK; 725 break; 726 case SMU_VCLK: 727 member_type = METRICS_AVERAGE_VCLK; 728 break; 729 case SMU_DCLK: 730 member_type = METRICS_AVERAGE_DCLK; 731 break; 732 case SMU_MCLK: 733 member_type = METRICS_AVERAGE_UCLK; 734 break; 735 case SMU_FCLK: 736 return smu_cmn_send_smc_msg_with_param(smu, 737 SMU_MSG_GetFclkFrequency, 0, value); 738 case SMU_GFXCLK: 739 case SMU_SCLK: 740 return smu_cmn_send_smc_msg_with_param(smu, 741 SMU_MSG_GetGfxclkFrequency, 0, value); 742 break; 743 default: 744 return -EINVAL; 745 } 746 747 return yellow_carp_get_smu_metrics_data(smu, member_type, value); 748 } 749 750 static int yellow_carp_get_dpm_level_count(struct smu_context *smu, 751 enum smu_clk_type clk_type, 752 uint32_t *count) 753 { 754 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 755 756 switch (clk_type) { 757 case SMU_SOCCLK: 758 *count = clk_table->NumSocClkLevelsEnabled; 759 break; 760 case SMU_VCLK: 761 *count = clk_table->VcnClkLevelsEnabled; 762 break; 763 case SMU_DCLK: 764 *count = clk_table->VcnClkLevelsEnabled; 765 break; 766 case SMU_MCLK: 767 *count = clk_table->NumDfPstatesEnabled; 768 break; 769 case SMU_FCLK: 770 *count = clk_table->NumDfPstatesEnabled; 771 break; 772 default: 773 break; 774 } 775 776 return 0; 777 } 778 779 static int yellow_carp_get_dpm_freq_by_index(struct smu_context *smu, 780 enum smu_clk_type clk_type, 781 uint32_t dpm_level, 782 uint32_t *freq) 783 { 784 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 785 786 if (!clk_table || clk_type >= SMU_CLK_COUNT) 787 return -EINVAL; 788 789 switch (clk_type) { 790 case SMU_SOCCLK: 791 if (dpm_level >= clk_table->NumSocClkLevelsEnabled) 792 return -EINVAL; 793 *freq = clk_table->SocClocks[dpm_level]; 794 break; 795 case SMU_VCLK: 796 if (dpm_level >= clk_table->VcnClkLevelsEnabled) 797 return -EINVAL; 
798 *freq = clk_table->VClocks[dpm_level]; 799 break; 800 case SMU_DCLK: 801 if (dpm_level >= clk_table->VcnClkLevelsEnabled) 802 return -EINVAL; 803 *freq = clk_table->DClocks[dpm_level]; 804 break; 805 case SMU_UCLK: 806 case SMU_MCLK: 807 if (dpm_level >= clk_table->NumDfPstatesEnabled) 808 return -EINVAL; 809 *freq = clk_table->DfPstateTable[dpm_level].MemClk; 810 break; 811 case SMU_FCLK: 812 if (dpm_level >= clk_table->NumDfPstatesEnabled) 813 return -EINVAL; 814 *freq = clk_table->DfPstateTable[dpm_level].FClk; 815 break; 816 default: 817 return -EINVAL; 818 } 819 820 return 0; 821 } 822 823 static bool yellow_carp_clk_dpm_is_enabled(struct smu_context *smu, 824 enum smu_clk_type clk_type) 825 { 826 enum smu_feature_mask feature_id = 0; 827 828 switch (clk_type) { 829 case SMU_MCLK: 830 case SMU_UCLK: 831 case SMU_FCLK: 832 feature_id = SMU_FEATURE_DPM_FCLK_BIT; 833 break; 834 case SMU_GFXCLK: 835 case SMU_SCLK: 836 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT; 837 break; 838 case SMU_SOCCLK: 839 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT; 840 break; 841 case SMU_VCLK: 842 case SMU_DCLK: 843 feature_id = SMU_FEATURE_VCN_DPM_BIT; 844 break; 845 default: 846 return true; 847 } 848 849 return smu_cmn_feature_is_enabled(smu, feature_id); 850 } 851 852 static int yellow_carp_get_dpm_ultimate_freq(struct smu_context *smu, 853 enum smu_clk_type clk_type, 854 uint32_t *min, 855 uint32_t *max) 856 { 857 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 858 uint32_t clock_limit; 859 uint32_t max_dpm_level, min_dpm_level; 860 int ret = 0; 861 862 if (!yellow_carp_clk_dpm_is_enabled(smu, clk_type)) { 863 switch (clk_type) { 864 case SMU_MCLK: 865 case SMU_UCLK: 866 clock_limit = smu->smu_table.boot_values.uclk; 867 break; 868 case SMU_FCLK: 869 clock_limit = smu->smu_table.boot_values.fclk; 870 break; 871 case SMU_GFXCLK: 872 case SMU_SCLK: 873 clock_limit = smu->smu_table.boot_values.gfxclk; 874 break; 875 case SMU_SOCCLK: 876 clock_limit = 
smu->smu_table.boot_values.socclk; 877 break; 878 case SMU_VCLK: 879 clock_limit = smu->smu_table.boot_values.vclk; 880 break; 881 case SMU_DCLK: 882 clock_limit = smu->smu_table.boot_values.dclk; 883 break; 884 default: 885 clock_limit = 0; 886 break; 887 } 888 889 /* clock in Mhz unit */ 890 if (min) 891 *min = clock_limit / 100; 892 if (max) 893 *max = clock_limit / 100; 894 895 return 0; 896 } 897 898 if (max) { 899 switch (clk_type) { 900 case SMU_GFXCLK: 901 case SMU_SCLK: 902 *max = clk_table->MaxGfxClk; 903 break; 904 case SMU_MCLK: 905 case SMU_UCLK: 906 case SMU_FCLK: 907 max_dpm_level = 0; 908 break; 909 case SMU_SOCCLK: 910 max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1; 911 break; 912 case SMU_VCLK: 913 case SMU_DCLK: 914 max_dpm_level = clk_table->VcnClkLevelsEnabled - 1; 915 break; 916 default: 917 ret = -EINVAL; 918 goto failed; 919 } 920 921 if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) { 922 ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max); 923 if (ret) 924 goto failed; 925 } 926 } 927 928 if (min) { 929 switch (clk_type) { 930 case SMU_GFXCLK: 931 case SMU_SCLK: 932 *min = clk_table->MinGfxClk; 933 break; 934 case SMU_MCLK: 935 case SMU_UCLK: 936 case SMU_FCLK: 937 min_dpm_level = clk_table->NumDfPstatesEnabled - 1; 938 break; 939 case SMU_SOCCLK: 940 min_dpm_level = 0; 941 break; 942 case SMU_VCLK: 943 case SMU_DCLK: 944 min_dpm_level = 0; 945 break; 946 default: 947 ret = -EINVAL; 948 goto failed; 949 } 950 951 if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) { 952 ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min); 953 if (ret) 954 goto failed; 955 } 956 } 957 958 failed: 959 return ret; 960 } 961 962 static int yellow_carp_set_soft_freq_limited_range(struct smu_context *smu, 963 enum smu_clk_type clk_type, 964 uint32_t min, 965 uint32_t max) 966 { 967 enum smu_message_type msg_set_min, msg_set_max; 968 uint32_t min_clk = min; 969 uint32_t max_clk = max; 970 971 int ret = 0; 972 
973 if (!yellow_carp_clk_dpm_is_enabled(smu, clk_type)) 974 return -EINVAL; 975 976 switch (clk_type) { 977 case SMU_GFXCLK: 978 case SMU_SCLK: 979 msg_set_min = SMU_MSG_SetHardMinGfxClk; 980 msg_set_max = SMU_MSG_SetSoftMaxGfxClk; 981 break; 982 case SMU_FCLK: 983 msg_set_min = SMU_MSG_SetHardMinFclkByFreq; 984 msg_set_max = SMU_MSG_SetSoftMaxFclkByFreq; 985 break; 986 case SMU_SOCCLK: 987 msg_set_min = SMU_MSG_SetHardMinSocclkByFreq; 988 msg_set_max = SMU_MSG_SetSoftMaxSocclkByFreq; 989 break; 990 case SMU_VCLK: 991 case SMU_DCLK: 992 msg_set_min = SMU_MSG_SetHardMinVcn; 993 msg_set_max = SMU_MSG_SetSoftMaxVcn; 994 break; 995 default: 996 return -EINVAL; 997 } 998 999 if (clk_type == SMU_VCLK) { 1000 min_clk = min << SMU_13_VCLK_SHIFT; 1001 max_clk = max << SMU_13_VCLK_SHIFT; 1002 } 1003 1004 ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min_clk, NULL); 1005 1006 if (ret) 1007 goto out; 1008 1009 ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_max, max_clk, NULL); 1010 if (ret) 1011 goto out; 1012 1013 out: 1014 return ret; 1015 } 1016 1017 static uint32_t yellow_carp_get_umd_pstate_clk_default(struct smu_context *smu, 1018 enum smu_clk_type clk_type) 1019 { 1020 uint32_t clk_limit = 0; 1021 struct amdgpu_device *adev = smu->adev; 1022 1023 switch (clk_type) { 1024 case SMU_GFXCLK: 1025 case SMU_SCLK: 1026 if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8)) 1027 clk_limit = SMU_13_0_8_UMD_PSTATE_GFXCLK; 1028 if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) || 1029 (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3)) 1030 clk_limit = SMU_13_0_1_UMD_PSTATE_GFXCLK; 1031 break; 1032 case SMU_SOCCLK: 1033 if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8)) 1034 clk_limit = SMU_13_0_8_UMD_PSTATE_SOCCLK; 1035 if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) || 1036 (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3)) 1037 clk_limit = SMU_13_0_1_UMD_PSTATE_SOCCLK; 1038 break; 1039 case SMU_FCLK: 1040 if 
((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8)) 1041 clk_limit = SMU_13_0_8_UMD_PSTATE_FCLK; 1042 if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) || 1043 (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3)) 1044 clk_limit = SMU_13_0_1_UMD_PSTATE_FCLK; 1045 break; 1046 default: 1047 break; 1048 } 1049 1050 return clk_limit; 1051 } 1052 1053 static int yellow_carp_print_clk_levels(struct smu_context *smu, 1054 enum smu_clk_type clk_type, char *buf) 1055 { 1056 int i, idx, size = 0, ret = 0; 1057 uint32_t cur_value = 0, value = 0, count = 0; 1058 uint32_t min, max; 1059 uint32_t clk_limit = 0; 1060 1061 smu_cmn_get_sysfs_buf(&buf, &size); 1062 1063 switch (clk_type) { 1064 case SMU_OD_SCLK: 1065 size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); 1066 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 1067 (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); 1068 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 1069 (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); 1070 break; 1071 case SMU_OD_RANGE: 1072 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); 1073 size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", 1074 smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); 1075 break; 1076 case SMU_SOCCLK: 1077 case SMU_VCLK: 1078 case SMU_DCLK: 1079 case SMU_MCLK: 1080 case SMU_FCLK: 1081 ret = yellow_carp_get_current_clk_freq(smu, clk_type, &cur_value); 1082 if (ret) 1083 goto print_clk_out; 1084 1085 ret = yellow_carp_get_dpm_level_count(smu, clk_type, &count); 1086 if (ret) 1087 goto print_clk_out; 1088 1089 for (i = 0; i < count; i++) { 1090 idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? 
(count - i - 1) : i; 1091 ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, idx, &value); 1092 if (ret) 1093 goto print_clk_out; 1094 1095 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value, 1096 cur_value == value ? "*" : ""); 1097 } 1098 break; 1099 case SMU_GFXCLK: 1100 case SMU_SCLK: 1101 clk_limit = yellow_carp_get_umd_pstate_clk_default(smu, clk_type); 1102 ret = yellow_carp_get_current_clk_freq(smu, clk_type, &cur_value); 1103 if (ret) 1104 goto print_clk_out; 1105 min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; 1106 max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; 1107 if (cur_value == max) 1108 i = 2; 1109 else if (cur_value == min) 1110 i = 0; 1111 else 1112 i = 1; 1113 size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min, 1114 i == 0 ? "*" : ""); 1115 size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", 1116 i == 1 ? cur_value : clk_limit, 1117 i == 1 ? "*" : ""); 1118 size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, 1119 i == 2 ? "*" : ""); 1120 break; 1121 default: 1122 break; 1123 } 1124 1125 print_clk_out: 1126 return size; 1127 } 1128 1129 static int yellow_carp_force_clk_levels(struct smu_context *smu, 1130 enum smu_clk_type clk_type, uint32_t mask) 1131 { 1132 uint32_t soft_min_level = 0, soft_max_level = 0; 1133 uint32_t min_freq = 0, max_freq = 0; 1134 int ret = 0; 1135 1136 soft_min_level = mask ? (ffs(mask) - 1) : 0; 1137 soft_max_level = mask ? 
(fls(mask) - 1) : 0; 1138 1139 switch (clk_type) { 1140 case SMU_SOCCLK: 1141 case SMU_FCLK: 1142 case SMU_VCLK: 1143 case SMU_DCLK: 1144 ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq); 1145 if (ret) 1146 goto force_level_out; 1147 1148 ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq); 1149 if (ret) 1150 goto force_level_out; 1151 1152 ret = yellow_carp_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); 1153 if (ret) 1154 goto force_level_out; 1155 break; 1156 default: 1157 ret = -EINVAL; 1158 break; 1159 } 1160 1161 force_level_out: 1162 return ret; 1163 } 1164 1165 static int yellow_carp_get_dpm_profile_freq(struct smu_context *smu, 1166 enum amd_dpm_forced_level level, 1167 enum smu_clk_type clk_type, 1168 uint32_t *min_clk, 1169 uint32_t *max_clk) 1170 { 1171 int ret = 0; 1172 uint32_t clk_limit = 0; 1173 1174 clk_limit = yellow_carp_get_umd_pstate_clk_default(smu, clk_type); 1175 1176 switch (clk_type) { 1177 case SMU_GFXCLK: 1178 case SMU_SCLK: 1179 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 1180 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &clk_limit); 1181 else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) 1182 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, &clk_limit, NULL); 1183 break; 1184 case SMU_SOCCLK: 1185 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 1186 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &clk_limit); 1187 break; 1188 case SMU_FCLK: 1189 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 1190 yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &clk_limit); 1191 else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) 1192 yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, &clk_limit, NULL); 1193 break; 1194 case SMU_VCLK: 1195 yellow_carp_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &clk_limit); 1196 break; 1197 case SMU_DCLK: 1198 yellow_carp_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &clk_limit); 1199 break; 1200 default: 
1201 ret = -EINVAL; 1202 break; 1203 } 1204 *min_clk = *max_clk = clk_limit; 1205 return ret; 1206 } 1207 1208 static int yellow_carp_set_performance_level(struct smu_context *smu, 1209 enum amd_dpm_forced_level level) 1210 { 1211 struct amdgpu_device *adev = smu->adev; 1212 uint32_t sclk_min = 0, sclk_max = 0; 1213 uint32_t fclk_min = 0, fclk_max = 0; 1214 uint32_t socclk_min = 0, socclk_max = 0; 1215 uint32_t vclk_min = 0, vclk_max = 0; 1216 uint32_t dclk_min = 0, dclk_max = 0; 1217 1218 int ret = 0; 1219 1220 switch (level) { 1221 case AMD_DPM_FORCED_LEVEL_HIGH: 1222 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max); 1223 yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max); 1224 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max); 1225 yellow_carp_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_max); 1226 yellow_carp_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_max); 1227 sclk_min = sclk_max; 1228 fclk_min = fclk_max; 1229 socclk_min = socclk_max; 1230 vclk_min = vclk_max; 1231 dclk_min = dclk_max; 1232 break; 1233 case AMD_DPM_FORCED_LEVEL_LOW: 1234 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL); 1235 yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL); 1236 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL); 1237 yellow_carp_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, NULL); 1238 yellow_carp_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, NULL); 1239 sclk_max = sclk_min; 1240 fclk_max = fclk_min; 1241 socclk_max = socclk_min; 1242 vclk_max = vclk_min; 1243 dclk_max = dclk_min; 1244 break; 1245 case AMD_DPM_FORCED_LEVEL_AUTO: 1246 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max); 1247 yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max); 1248 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max); 1249 yellow_carp_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, &vclk_max); 1250 
yellow_carp_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, &dclk_max); 1251 break; 1252 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 1253 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 1254 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 1255 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 1256 yellow_carp_get_dpm_profile_freq(smu, level, SMU_SCLK, &sclk_min, &sclk_max); 1257 yellow_carp_get_dpm_profile_freq(smu, level, SMU_FCLK, &fclk_min, &fclk_max); 1258 yellow_carp_get_dpm_profile_freq(smu, level, SMU_SOCCLK, &socclk_min, &socclk_max); 1259 yellow_carp_get_dpm_profile_freq(smu, level, SMU_VCLK, &vclk_min, &vclk_max); 1260 yellow_carp_get_dpm_profile_freq(smu, level, SMU_DCLK, &dclk_min, &dclk_max); 1261 break; 1262 case AMD_DPM_FORCED_LEVEL_MANUAL: 1263 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 1264 return 0; 1265 default: 1266 dev_err(adev->dev, "Invalid performance level %d\n", level); 1267 return -EINVAL; 1268 } 1269 1270 if (sclk_min && sclk_max) { 1271 ret = yellow_carp_set_soft_freq_limited_range(smu, 1272 SMU_SCLK, 1273 sclk_min, 1274 sclk_max); 1275 if (ret) 1276 return ret; 1277 1278 smu->gfx_actual_hard_min_freq = sclk_min; 1279 smu->gfx_actual_soft_max_freq = sclk_max; 1280 } 1281 1282 if (fclk_min && fclk_max) { 1283 ret = yellow_carp_set_soft_freq_limited_range(smu, 1284 SMU_FCLK, 1285 fclk_min, 1286 fclk_max); 1287 if (ret) 1288 return ret; 1289 } 1290 1291 if (socclk_min && socclk_max) { 1292 ret = yellow_carp_set_soft_freq_limited_range(smu, 1293 SMU_SOCCLK, 1294 socclk_min, 1295 socclk_max); 1296 if (ret) 1297 return ret; 1298 } 1299 1300 if (vclk_min && vclk_max) { 1301 ret = yellow_carp_set_soft_freq_limited_range(smu, 1302 SMU_VCLK, 1303 vclk_min, 1304 vclk_max); 1305 if (ret) 1306 return ret; 1307 } 1308 1309 if (dclk_min && dclk_max) { 1310 ret = yellow_carp_set_soft_freq_limited_range(smu, 1311 SMU_DCLK, 1312 dclk_min, 1313 dclk_max); 1314 if (ret) 1315 return ret; 1316 } 1317 1318 return ret; 1319 } 1320 1321 static int 
yellow_carp_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) 1322 { 1323 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 1324 1325 smu->gfx_default_hard_min_freq = clk_table->MinGfxClk; 1326 smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk; 1327 smu->gfx_actual_hard_min_freq = 0; 1328 smu->gfx_actual_soft_max_freq = 0; 1329 1330 return 0; 1331 } 1332 1333 static const struct pptable_funcs yellow_carp_ppt_funcs = { 1334 .check_fw_status = smu_v13_0_check_fw_status, 1335 .check_fw_version = smu_v13_0_check_fw_version, 1336 .init_smc_tables = yellow_carp_init_smc_tables, 1337 .fini_smc_tables = yellow_carp_fini_smc_tables, 1338 .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values, 1339 .system_features_control = yellow_carp_system_features_control, 1340 .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param, 1341 .send_smc_msg = smu_cmn_send_smc_msg, 1342 .dpm_set_vcn_enable = yellow_carp_dpm_set_vcn_enable, 1343 .dpm_set_jpeg_enable = yellow_carp_dpm_set_jpeg_enable, 1344 .set_default_dpm_table = yellow_carp_set_default_dpm_tables, 1345 .read_sensor = yellow_carp_read_sensor, 1346 .is_dpm_running = yellow_carp_is_dpm_running, 1347 .set_watermarks_table = yellow_carp_set_watermarks_table, 1348 .get_gpu_metrics = yellow_carp_get_gpu_metrics, 1349 .get_enabled_mask = smu_cmn_get_enabled_mask, 1350 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, 1351 .set_driver_table_location = smu_v13_0_set_driver_table_location, 1352 .gfx_off_control = smu_v13_0_gfx_off_control, 1353 .get_gfx_off_status = yellow_carp_get_gfxoff_status, 1354 .post_init = yellow_carp_post_smu_init, 1355 .mode2_reset = yellow_carp_mode2_reset, 1356 .get_dpm_ultimate_freq = yellow_carp_get_dpm_ultimate_freq, 1357 .od_edit_dpm_table = yellow_carp_od_edit_dpm_table, 1358 .print_clk_levels = yellow_carp_print_clk_levels, 1359 .force_clk_levels = yellow_carp_force_clk_levels, 1360 .set_performance_level = yellow_carp_set_performance_level, 1361 
.set_fine_grain_gfx_freq_parameters = yellow_carp_set_fine_grain_gfx_freq_parameters, 1362 }; 1363 1364 void yellow_carp_set_ppt_funcs(struct smu_context *smu) 1365 { 1366 smu->ppt_funcs = &yellow_carp_ppt_funcs; 1367 smu->message_map = yellow_carp_message_map; 1368 smu->feature_map = yellow_carp_feature_mask_map; 1369 smu->table_map = yellow_carp_table_map; 1370 smu->is_apu = true; 1371 smu->smc_driver_if_version = SMU13_YELLOW_CARP_DRIVER_IF_VERSION; 1372 smu_v13_0_set_smu_mailbox_registers(smu); 1373 } 1374