1 /* 2 * Copyright 2020 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #define SWSMU_CODE_LAYER_L2 25 26 #include "amdgpu.h" 27 #include "amdgpu_smu.h" 28 #include "smu_v13_0.h" 29 #include "smu13_driver_if_yellow_carp.h" 30 #include "yellow_carp_ppt.h" 31 #include "smu_v13_0_1_ppsmc.h" 32 #include "smu_v13_0_1_pmfw.h" 33 #include "smu_cmn.h" 34 35 /* 36 * DO NOT use these for err/warn/info/debug messages. 37 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 38 * They are more MGPU friendly. 39 */ 40 #undef pr_err 41 #undef pr_warn 42 #undef pr_info 43 #undef pr_debug 44 45 #define FEATURE_MASK(feature) (1ULL << feature) 46 #define SMC_DPM_FEATURE ( \ 47 FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ 48 FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \ 49 FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ 50 FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \ 51 FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \ 52 FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \ 53 FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \ 54 FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \ 55 FEATURE_MASK(FEATURE_GFX_DPM_BIT)) 56 57 static struct cmn2asic_msg_mapping yellow_carp_message_map[SMU_MSG_MAX_COUNT] = { 58 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), 59 MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), 60 MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1), 61 MSG_MAP(EnableGfxOff, PPSMC_MSG_EnableGfxOff, 1), 62 MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1), 63 MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1), 64 MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1), 65 MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1), 66 MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1), 67 MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1), 68 MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), 69 MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), 70 MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1), 71 MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 1), 72 MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset, 1), 73 MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 1), 74 MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 1), 75 MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 1), 76 MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency, 1), 77 MSG_MAP(GetFclkFrequency, PPSMC_MSG_GetFclkFrequency, 1), 78 MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1), 79 MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 1), 80 MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 1), 81 MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 1), 82 MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 1), 83 MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage, 1), 84 MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 1), 85 MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 1), 86 MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 1), 87 MSG_MAP(SetSoftMinSocclkByFreq, PPSMC_MSG_SetSoftMinSocclkByFreq, 1), 88 }; 89 90 static struct cmn2asic_mapping yellow_carp_feature_mask_map[SMU_FEATURE_COUNT] = { 91 FEA_MAP(CCLK_DPM), 92 FEA_MAP(FAN_CONTROLLER), 93 FEA_MAP(PPT), 94 FEA_MAP(TDC), 95 FEA_MAP(THERMAL), 96 FEA_MAP(ULV), 97 FEA_MAP(VCN_DPM), 98 FEA_MAP_REVERSE(FCLK), 99 FEA_MAP_REVERSE(SOCCLK), 100 FEA_MAP(LCLK_DPM), 101 FEA_MAP(SHUBCLK_DPM), 102 FEA_MAP(DCFCLK_DPM), 103 FEA_MAP_HALF_REVERSE(GFX), 104 FEA_MAP(DS_GFXCLK), 105 FEA_MAP(DS_SOCCLK), 106 FEA_MAP(DS_LCLK), 107 FEA_MAP(DS_DCFCLK), 108 FEA_MAP(DS_FCLK), 109 FEA_MAP(DS_MP1CLK), 110 FEA_MAP(DS_MP0CLK), 111 FEA_MAP(GFX_DEM), 112 FEA_MAP(PSI), 113 FEA_MAP(PROCHOT), 114 FEA_MAP(CPUOFF), 115 FEA_MAP(STAPM), 116 FEA_MAP(S0I3), 117 FEA_MAP(PERF_LIMIT), 118 FEA_MAP(CORE_DLDO), 119 FEA_MAP(RSMU_LOW_POWER), 120 FEA_MAP(SMN_LOW_POWER), 121 FEA_MAP(THM_LOW_POWER), 122 FEA_MAP(SMUIO_LOW_POWER), 123 FEA_MAP(MP1_LOW_POWER), 124 FEA_MAP(DS_VCN), 125 FEA_MAP(CPPC), 126 FEA_MAP(DF_CSTATES), 127 FEA_MAP(MSMU_LOW_POWER), 128 FEA_MAP(ATHUB_PG), 129 }; 130 131 static struct cmn2asic_mapping yellow_carp_table_map[SMU_TABLE_COUNT] = { 132 TAB_MAP_VALID(WATERMARKS), 133 TAB_MAP_VALID(SMU_METRICS), 134 TAB_MAP_VALID(CUSTOM_DPM), 135 TAB_MAP_VALID(DPMCLOCKS), 136 }; 137 138 static int yellow_carp_init_smc_tables(struct smu_context *smu) 139 { 140 struct smu_table_context *smu_table = &smu->smu_table; 141 struct smu_table *tables = smu_table->tables; 142 143 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), 144 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 145 SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t), 146 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 147 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), 148 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 149 150 smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL); 151 if (!smu_table->clocks_table) 152 goto err0_out; 153 154 smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); 155 if (!smu_table->metrics_table) 156 goto err1_out; 157 smu_table->metrics_time = 0; 158 159 smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); 160 if (!smu_table->watermarks_table) 161 goto err2_out; 162 163 smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1); 164 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 165 if (!smu_table->gpu_metrics_table) 166 goto err3_out; 167 168 return 0; 169 170 err3_out: 171 kfree(smu_table->watermarks_table); 172 err2_out: 173 kfree(smu_table->metrics_table); 174 err1_out: 175 kfree(smu_table->clocks_table); 176 err0_out: 177 return -ENOMEM; 178 } 179 180 static int yellow_carp_fini_smc_tables(struct smu_context *smu) 181 { 182 struct smu_table_context *smu_table = &smu->smu_table; 183 184 kfree(smu_table->clocks_table); 185 smu_table->clocks_table = NULL; 186 187 kfree(smu_table->metrics_table); 188 smu_table->metrics_table = NULL; 189 190 kfree(smu_table->watermarks_table); 191 smu_table->watermarks_table = NULL; 192 193 return 0; 194 } 195 196 static int yellow_carp_system_features_control(struct smu_context *smu, bool en) 197 { 198 struct amdgpu_device *adev = smu->adev; 199 int ret = 0; 200 201 if (!en && !adev->in_s0ix) 202 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL); 203 204 return ret; 205 } 206 207 static int yellow_carp_dpm_set_vcn_enable(struct smu_context *smu, bool enable) 208 { 209 int ret = 0; 210 211 /* vcn dpm on is a prerequisite for vcn power gate messages */ 212 if (enable) 213 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 214 0, NULL); 215 else 216 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 217 0, NULL); 218 219 return ret; 220 } 221 222 static int yellow_carp_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) 223 { 224 int ret = 0; 225 226 if (enable) 227 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 228 0, NULL); 229 else 230 ret = smu_cmn_send_smc_msg_with_param(smu, 231 SMU_MSG_PowerDownJpeg, 0, 232 NULL); 233 234 return ret; 235 } 236 237 238 static bool yellow_carp_is_dpm_running(struct smu_context *smu) 239 { 240 int ret = 0; 241 uint64_t feature_enabled; 242 243 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 244 245 if (ret) 246 return false; 247 248 return !!(feature_enabled & SMC_DPM_FEATURE); 249 } 250 251 static int yellow_carp_post_smu_init(struct smu_context *smu) 252 { 253 struct amdgpu_device *adev = smu->adev; 254 int ret = 0; 255 256 /* allow message will be sent after enable message on Yellow Carp*/ 257 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL); 258 if (ret) 259 dev_err(adev->dev, "Failed to Enable GfxOff!\n"); 260 return ret; 261 } 262 263 static int yellow_carp_mode_reset(struct smu_context *smu, int type) 264 { 265 int ret = 0; 266 267 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, type, NULL); 268 if (ret) 269 dev_err(smu->adev->dev, "Failed to mode reset!\n"); 270 271 return ret; 272 } 273 274 static int yellow_carp_mode2_reset(struct smu_context *smu) 275 { 276 return yellow_carp_mode_reset(smu, SMU_RESET_MODE_2); 277 } 278 279 static int yellow_carp_get_smu_metrics_data(struct smu_context *smu, 280 MetricsMember_t member, 281 uint32_t *value) 282 { 283 struct smu_table_context *smu_table = &smu->smu_table; 284 285 SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; 286 int ret = 0; 287 288 ret = smu_cmn_get_metrics_table(smu, NULL, false); 289 if (ret) 290 return ret; 291 292 switch (member) { 293 case METRICS_AVERAGE_GFXCLK: 294 *value = metrics->GfxclkFrequency; 295 break; 296 case METRICS_AVERAGE_SOCCLK: 297 *value = metrics->SocclkFrequency; 298 break; 299 case METRICS_AVERAGE_VCLK: 300 *value = metrics->VclkFrequency; 301 break; 302 case METRICS_AVERAGE_DCLK: 303 *value = metrics->DclkFrequency; 304 break; 305 case METRICS_AVERAGE_UCLK: 306 *value = metrics->MemclkFrequency; 307 break; 308 case METRICS_AVERAGE_GFXACTIVITY: 309 *value = metrics->GfxActivity / 100; 310 break; 311 case METRICS_AVERAGE_VCNACTIVITY: 312 *value = metrics->UvdActivity; 313 break; 314 case METRICS_AVERAGE_SOCKETPOWER: 315 *value = (metrics->CurrentSocketPower << 8) / 1000; 316 break; 317 case METRICS_TEMPERATURE_EDGE: 318 *value = metrics->GfxTemperature / 100 * 319 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 320 break; 321 case METRICS_TEMPERATURE_HOTSPOT: 322 *value = metrics->SocTemperature / 100 * 323 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 324 break; 325 case METRICS_THROTTLER_STATUS: 326 *value = metrics->ThrottlerStatus; 327 break; 328 case METRICS_VOLTAGE_VDDGFX: 329 *value = metrics->Voltage[0]; 330 break; 331 case METRICS_VOLTAGE_VDDSOC: 332 *value = metrics->Voltage[1]; 333 break; 334 case METRICS_SS_APU_SHARE: 335 /* return the percentage of APU power with respect to APU's power limit. 336 * percentage is reported, this isn't boost value. Smartshift power 337 * boost/shift is only when the percentage is more than 100. 338 */ 339 if (metrics->StapmOpnLimit > 0) 340 *value = (metrics->ApuPower * 100) / metrics->StapmOpnLimit; 341 else 342 *value = 0; 343 break; 344 case METRICS_SS_DGPU_SHARE: 345 /* return the percentage of dGPU power with respect to dGPU's power limit. 346 * percentage is reported, this isn't boost value. Smartshift power 347 * boost/shift is only when the percentage is more than 100. 348 */ 349 if ((metrics->dGpuPower > 0) && 350 (metrics->StapmCurrentLimit > metrics->StapmOpnLimit)) 351 *value = (metrics->dGpuPower * 100) / 352 (metrics->StapmCurrentLimit - metrics->StapmOpnLimit); 353 else 354 *value = 0; 355 break; 356 default: 357 *value = UINT_MAX; 358 break; 359 } 360 361 return ret; 362 } 363 364 static int yellow_carp_read_sensor(struct smu_context *smu, 365 enum amd_pp_sensors sensor, 366 void *data, uint32_t *size) 367 { 368 int ret = 0; 369 370 if (!data || !size) 371 return -EINVAL; 372 373 switch (sensor) { 374 case AMDGPU_PP_SENSOR_GPU_LOAD: 375 ret = yellow_carp_get_smu_metrics_data(smu, 376 METRICS_AVERAGE_GFXACTIVITY, 377 (uint32_t *)data); 378 *size = 4; 379 break; 380 case AMDGPU_PP_SENSOR_GPU_POWER: 381 ret = yellow_carp_get_smu_metrics_data(smu, 382 METRICS_AVERAGE_SOCKETPOWER, 383 (uint32_t *)data); 384 *size = 4; 385 break; 386 case AMDGPU_PP_SENSOR_EDGE_TEMP: 387 ret = yellow_carp_get_smu_metrics_data(smu, 388 METRICS_TEMPERATURE_EDGE, 389 (uint32_t *)data); 390 *size = 4; 391 break; 392 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 393 ret = yellow_carp_get_smu_metrics_data(smu, 394 METRICS_TEMPERATURE_HOTSPOT, 395 (uint32_t *)data); 396 *size = 4; 397 break; 398 case AMDGPU_PP_SENSOR_GFX_MCLK: 399 ret = yellow_carp_get_smu_metrics_data(smu, 400 METRICS_AVERAGE_UCLK, 401 (uint32_t *)data); 402 *(uint32_t *)data *= 100; 403 *size = 4; 404 break; 405 case AMDGPU_PP_SENSOR_GFX_SCLK: 406 ret = yellow_carp_get_smu_metrics_data(smu, 407 METRICS_AVERAGE_GFXCLK, 408 (uint32_t *)data); 409 *(uint32_t *)data *= 100; 410 *size = 4; 411 break; 412 case AMDGPU_PP_SENSOR_VDDGFX: 413 ret = yellow_carp_get_smu_metrics_data(smu, 414 METRICS_VOLTAGE_VDDGFX, 415 (uint32_t *)data); 416 *size = 4; 417 break; 418 case AMDGPU_PP_SENSOR_VDDNB: 419 ret = yellow_carp_get_smu_metrics_data(smu, 420 METRICS_VOLTAGE_VDDSOC, 421 (uint32_t *)data); 422 *size = 4; 423 break; 424 case AMDGPU_PP_SENSOR_SS_APU_SHARE: 425 ret = yellow_carp_get_smu_metrics_data(smu, 426 METRICS_SS_APU_SHARE, 427 (uint32_t *)data); 428 *size = 4; 429 break; 430 case AMDGPU_PP_SENSOR_SS_DGPU_SHARE: 431 ret = yellow_carp_get_smu_metrics_data(smu, 432 METRICS_SS_DGPU_SHARE, 433 (uint32_t *)data); 434 *size = 4; 435 break; 436 default: 437 ret = -EOPNOTSUPP; 438 break; 439 } 440 441 return ret; 442 } 443 444 static int yellow_carp_set_watermarks_table(struct smu_context *smu, 445 struct pp_smu_wm_range_sets *clock_ranges) 446 { 447 int i; 448 int ret = 0; 449 Watermarks_t *table = smu->smu_table.watermarks_table; 450 451 if (!table || !clock_ranges) 452 return -EINVAL; 453 454 if (clock_ranges) { 455 if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES || 456 clock_ranges->num_writer_wm_sets > NUM_WM_RANGES) 457 return -EINVAL; 458 459 for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) { 460 table->WatermarkRow[WM_DCFCLK][i].MinClock = 461 clock_ranges->reader_wm_sets[i].min_drain_clk_mhz; 462 table->WatermarkRow[WM_DCFCLK][i].MaxClock = 463 clock_ranges->reader_wm_sets[i].max_drain_clk_mhz; 464 table->WatermarkRow[WM_DCFCLK][i].MinMclk = 465 clock_ranges->reader_wm_sets[i].min_fill_clk_mhz; 466 table->WatermarkRow[WM_DCFCLK][i].MaxMclk = 467 clock_ranges->reader_wm_sets[i].max_fill_clk_mhz; 468 469 table->WatermarkRow[WM_DCFCLK][i].WmSetting = 470 clock_ranges->reader_wm_sets[i].wm_inst; 471 } 472 473 for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) { 474 table->WatermarkRow[WM_SOCCLK][i].MinClock = 475 clock_ranges->writer_wm_sets[i].min_fill_clk_mhz; 476 table->WatermarkRow[WM_SOCCLK][i].MaxClock = 477 clock_ranges->writer_wm_sets[i].max_fill_clk_mhz; 478 table->WatermarkRow[WM_SOCCLK][i].MinMclk = 479 clock_ranges->writer_wm_sets[i].min_drain_clk_mhz; 480 table->WatermarkRow[WM_SOCCLK][i].MaxMclk = 481 clock_ranges->writer_wm_sets[i].max_drain_clk_mhz; 482 483 table->WatermarkRow[WM_SOCCLK][i].WmSetting = 484 clock_ranges->writer_wm_sets[i].wm_inst; 485 } 486 487 smu->watermarks_bitmap |= WATERMARKS_EXIST; 488 } 489 490 /* pass data to smu controller */ 491 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && 492 !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { 493 ret = smu_cmn_write_watermarks_table(smu); 494 if (ret) { 495 dev_err(smu->adev->dev, "Failed to update WMTABLE!"); 496 return ret; 497 } 498 smu->watermarks_bitmap |= WATERMARKS_LOADED; 499 } 500 501 return 0; 502 } 503 504 static ssize_t yellow_carp_get_gpu_metrics(struct smu_context *smu, 505 void **table) 506 { 507 struct smu_table_context *smu_table = &smu->smu_table; 508 struct gpu_metrics_v2_1 *gpu_metrics = 509 (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table; 510 SmuMetrics_t metrics; 511 int ret = 0; 512 513 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 514 if (ret) 515 return ret; 516 517 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1); 518 519 gpu_metrics->temperature_gfx = metrics.GfxTemperature; 520 gpu_metrics->temperature_soc = metrics.SocTemperature; 521 memcpy(&gpu_metrics->temperature_core[0], 522 &metrics.CoreTemperature[0], 523 sizeof(uint16_t) * 8); 524 gpu_metrics->temperature_l3[0] = metrics.L3Temperature; 525 526 gpu_metrics->average_gfx_activity = metrics.GfxActivity; 527 gpu_metrics->average_mm_activity = metrics.UvdActivity; 528 529 gpu_metrics->average_socket_power = metrics.CurrentSocketPower; 530 gpu_metrics->average_gfx_power = metrics.Power[0]; 531 gpu_metrics->average_soc_power = metrics.Power[1]; 532 memcpy(&gpu_metrics->average_core_power[0], 533 &metrics.CorePower[0], 534 sizeof(uint16_t) * 8); 535 536 gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency; 537 gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency; 538 gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency; 539 gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency; 540 gpu_metrics->average_vclk_frequency = metrics.VclkFrequency; 541 gpu_metrics->average_dclk_frequency = metrics.DclkFrequency; 542 543 memcpy(&gpu_metrics->current_coreclk[0], 544 &metrics.CoreFrequency[0], 545 sizeof(uint16_t) * 8); 546 gpu_metrics->current_l3clk[0] = metrics.L3Frequency; 547 548 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 549 550 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 551 552 *table = (void *)gpu_metrics; 553 554 return sizeof(struct gpu_metrics_v2_1); 555 } 556 557 static int yellow_carp_set_default_dpm_tables(struct smu_context *smu) 558 { 559 struct smu_table_context *smu_table = &smu->smu_table; 560 561 return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); 562 } 563 564 static int yellow_carp_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, 565 long input[], uint32_t size) 566 { 567 struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); 568 int ret = 0; 569 570 /* Only allowed in manual mode */ 571 if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) 572 return -EINVAL; 573 574 switch (type) { 575 case PP_OD_EDIT_SCLK_VDDC_TABLE: 576 if (size != 2) { 577 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 578 return -EINVAL; 579 } 580 581 if (input[0] == 0) { 582 if (input[1] < smu->gfx_default_hard_min_freq) { 583 dev_warn(smu->adev->dev, 584 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n", 585 input[1], smu->gfx_default_hard_min_freq); 586 return -EINVAL; 587 } 588 smu->gfx_actual_hard_min_freq = input[1]; 589 } else if (input[0] == 1) { 590 if (input[1] > smu->gfx_default_soft_max_freq) { 591 dev_warn(smu->adev->dev, 592 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n", 593 input[1], smu->gfx_default_soft_max_freq); 594 return -EINVAL; 595 } 596 smu->gfx_actual_soft_max_freq = input[1]; 597 } else { 598 return -EINVAL; 599 } 600 break; 601 case PP_OD_RESTORE_DEFAULT_TABLE: 602 if (size != 0) { 603 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 604 return -EINVAL; 605 } else { 606 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 607 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 608 } 609 break; 610 case PP_OD_COMMIT_DPM_TABLE: 611 if (size != 0) { 612 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 613 return -EINVAL; 614 } else { 615 if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) { 616 dev_err(smu->adev->dev, 617 "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n", 618 smu->gfx_actual_hard_min_freq, 619 smu->gfx_actual_soft_max_freq); 620 return -EINVAL; 621 } 622 623 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, 624 smu->gfx_actual_hard_min_freq, NULL); 625 if (ret) { 626 dev_err(smu->adev->dev, "Set hard min sclk failed!"); 627 return ret; 628 } 629 630 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, 631 smu->gfx_actual_soft_max_freq, NULL); 632 if (ret) { 633 dev_err(smu->adev->dev, "Set soft max sclk failed!"); 634 return ret; 635 } 636 } 637 break; 638 default: 639 return -ENOSYS; 640 } 641 642 return ret; 643 } 644 645 static int yellow_carp_get_current_clk_freq(struct smu_context *smu, 646 enum smu_clk_type clk_type, 647 uint32_t *value) 648 { 649 MetricsMember_t member_type; 650 651 switch (clk_type) { 652 case SMU_SOCCLK: 653 member_type = METRICS_AVERAGE_SOCCLK; 654 break; 655 case SMU_VCLK: 656 member_type = METRICS_AVERAGE_VCLK; 657 break; 658 case SMU_DCLK: 659 member_type = METRICS_AVERAGE_DCLK; 660 break; 661 case SMU_MCLK: 662 member_type = METRICS_AVERAGE_UCLK; 663 break; 664 case SMU_FCLK: 665 return smu_cmn_send_smc_msg_with_param(smu, 666 SMU_MSG_GetFclkFrequency, 0, value); 667 case SMU_GFXCLK: 668 case SMU_SCLK: 669 return smu_cmn_send_smc_msg_with_param(smu, 670 SMU_MSG_GetGfxclkFrequency, 0, value); 671 break; 672 default: 673 return -EINVAL; 674 } 675 676 return yellow_carp_get_smu_metrics_data(smu, member_type, value); 677 } 678 679 static int yellow_carp_get_dpm_level_count(struct smu_context *smu, 680 enum smu_clk_type clk_type, 681 uint32_t *count) 682 { 683 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 684 685 switch (clk_type) { 686 case SMU_SOCCLK: 687 *count = clk_table->NumSocClkLevelsEnabled; 688 break; 689 case SMU_VCLK: 690 *count = clk_table->VcnClkLevelsEnabled; 691 break; 692 case SMU_DCLK: 693 *count = clk_table->VcnClkLevelsEnabled; 694 break; 695 case SMU_MCLK: 696 *count = clk_table->NumDfPstatesEnabled; 697 break; 698 case SMU_FCLK: 699 *count = clk_table->NumDfPstatesEnabled; 700 break; 701 default: 702 break; 703 } 704 705 return 0; 706 } 707 708 static int yellow_carp_get_dpm_freq_by_index(struct smu_context *smu, 709 enum smu_clk_type clk_type, 710 uint32_t dpm_level, 711 uint32_t *freq) 712 { 713 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 714 715 if (!clk_table || clk_type >= SMU_CLK_COUNT) 716 return -EINVAL; 717 718 switch (clk_type) { 719 case SMU_SOCCLK: 720 if (dpm_level >= clk_table->NumSocClkLevelsEnabled) 721 return -EINVAL; 722 *freq = clk_table->SocClocks[dpm_level]; 723 break; 724 case SMU_VCLK: 725 if (dpm_level >= clk_table->VcnClkLevelsEnabled) 726 return -EINVAL; 727 *freq = clk_table->VClocks[dpm_level]; 728 break; 729 case SMU_DCLK: 730 if (dpm_level >= clk_table->VcnClkLevelsEnabled) 731 return -EINVAL; 732 *freq = clk_table->DClocks[dpm_level]; 733 break; 734 case SMU_UCLK: 735 case SMU_MCLK: 736 if (dpm_level >= clk_table->NumDfPstatesEnabled) 737 return -EINVAL; 738 *freq = clk_table->DfPstateTable[dpm_level].MemClk; 739 break; 740 case SMU_FCLK: 741 if (dpm_level >= clk_table->NumDfPstatesEnabled) 742 return -EINVAL; 743 *freq = clk_table->DfPstateTable[dpm_level].FClk; 744 break; 745 default: 746 return -EINVAL; 747 } 748 749 return 0; 750 } 751 752 static bool yellow_carp_clk_dpm_is_enabled(struct smu_context *smu, 753 enum smu_clk_type clk_type) 754 { 755 enum smu_feature_mask feature_id = 0; 756 757 switch (clk_type) { 758 case SMU_MCLK: 759 case SMU_UCLK: 760 case SMU_FCLK: 761 feature_id = SMU_FEATURE_DPM_FCLK_BIT; 762 break; 763 case SMU_GFXCLK: 764 case SMU_SCLK: 765 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT; 766 break; 767 case SMU_SOCCLK: 768 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT; 769 break; 770 case SMU_VCLK: 771 case SMU_DCLK: 772 feature_id = SMU_FEATURE_VCN_DPM_BIT; 773 break; 774 default: 775 return true; 776 } 777 778 return smu_cmn_feature_is_enabled(smu, feature_id); 779 } 780 781 static int yellow_carp_get_dpm_ultimate_freq(struct smu_context *smu, 782 enum smu_clk_type clk_type, 783 uint32_t *min, 784 uint32_t *max) 785 { 786 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 787 uint32_t clock_limit; 788 uint32_t max_dpm_level, min_dpm_level; 789 int ret = 0; 790 791 if (!yellow_carp_clk_dpm_is_enabled(smu, clk_type)) { 792 switch (clk_type) { 793 case SMU_MCLK: 794 case SMU_UCLK: 795 clock_limit = smu->smu_table.boot_values.uclk; 796 break; 797 case SMU_FCLK: 798 clock_limit = smu->smu_table.boot_values.fclk; 799 break; 800 case SMU_GFXCLK: 801 case SMU_SCLK: 802 clock_limit = smu->smu_table.boot_values.gfxclk; 803 break; 804 case SMU_SOCCLK: 805 clock_limit = smu->smu_table.boot_values.socclk; 806 break; 807 case SMU_VCLK: 808 clock_limit = smu->smu_table.boot_values.vclk; 809 break; 810 case SMU_DCLK: 811 clock_limit = smu->smu_table.boot_values.dclk; 812 break; 813 default: 814 clock_limit = 0; 815 break; 816 } 817 818 /* clock in Mhz unit */ 819 if (min) 820 *min = clock_limit / 100; 821 if (max) 822 *max = clock_limit / 100; 823 824 return 0; 825 } 826 827 if (max) { 828 switch (clk_type) { 829 case SMU_GFXCLK: 830 case SMU_SCLK: 831 *max = clk_table->MaxGfxClk; 832 break; 833 case SMU_MCLK: 834 case SMU_UCLK: 835 case SMU_FCLK: 836 max_dpm_level = 0; 837 break; 838 case SMU_SOCCLK: 839 max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1; 840 break; 841 case SMU_VCLK: 842 case SMU_DCLK: 843 max_dpm_level = clk_table->VcnClkLevelsEnabled - 1; 844 break; 845 default: 846 ret = -EINVAL; 847 goto failed; 848 } 849 850 if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) { 851 ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max); 852 if (ret) 853 goto failed; 854 } 855 } 856 857 if (min) { 858 switch (clk_type) { 859 case SMU_GFXCLK: 860 case SMU_SCLK: 861 *min = clk_table->MinGfxClk; 862 break; 863 case SMU_MCLK: 864 case SMU_UCLK: 865 case SMU_FCLK: 866 min_dpm_level = clk_table->NumDfPstatesEnabled - 1; 867 break; 868 case SMU_SOCCLK: 869 min_dpm_level = 0; 870 break; 871 case SMU_VCLK: 872 case SMU_DCLK: 873 min_dpm_level = 0; 874 break; 875 default: 876 ret = -EINVAL; 877 goto failed; 878 } 879 880 if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) { 881 ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min); 882 if (ret) 883 goto failed; 884 } 885 } 886 887 failed: 888 return ret; 889 } 890 891 static int yellow_carp_set_soft_freq_limited_range(struct smu_context *smu, 892 enum smu_clk_type clk_type, 893 uint32_t min, 894 uint32_t max) 895 { 896 enum smu_message_type msg_set_min, msg_set_max; 897 int ret = 0; 898 899 if (!yellow_carp_clk_dpm_is_enabled(smu, clk_type)) 900 return -EINVAL; 901 902 switch (clk_type) { 903 case SMU_GFXCLK: 904 case SMU_SCLK: 905 msg_set_min = SMU_MSG_SetHardMinGfxClk; 906 msg_set_max = SMU_MSG_SetSoftMaxGfxClk; 907 break; 908 case SMU_FCLK: 909 msg_set_min = SMU_MSG_SetHardMinFclkByFreq; 910 msg_set_max = SMU_MSG_SetSoftMaxFclkByFreq; 911 break; 912 case SMU_SOCCLK: 913 msg_set_min = SMU_MSG_SetHardMinSocclkByFreq; 914 msg_set_max = SMU_MSG_SetSoftMaxSocclkByFreq; 915 break; 916 case SMU_VCLK: 917 case SMU_DCLK: 918 msg_set_min = SMU_MSG_SetHardMinVcn; 919 msg_set_max = SMU_MSG_SetSoftMaxVcn; 920 break; 921 default: 922 return -EINVAL; 923 } 924 925 ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL); 926 if (ret) 927 goto out; 928 929 ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_max, max, NULL); 930 if (ret) 931 goto out; 932 933 out: 934 return ret; 935 } 936 937 static int yellow_carp_print_clk_levels(struct smu_context *smu, 938 enum smu_clk_type clk_type, char *buf) 939 { 940 int i, size = 0, ret = 0; 941 uint32_t cur_value = 0, value = 0, count = 0; 942 uint32_t min, max; 943 944 smu_cmn_get_sysfs_buf(&buf, &size); 945 946 switch (clk_type) { 947 case SMU_OD_SCLK: 948 size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); 949 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 950 (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); 951 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 952 (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); 953 break; 954 case SMU_OD_RANGE: 955 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); 956 size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", 957 smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); 958 break; 959 case SMU_SOCCLK: 960 case SMU_VCLK: 961 case SMU_DCLK: 962 case SMU_MCLK: 963 case SMU_FCLK: 964 ret = yellow_carp_get_current_clk_freq(smu, clk_type, &cur_value); 965 if (ret) 966 goto print_clk_out; 967 968 ret = yellow_carp_get_dpm_level_count(smu, clk_type, &count); 969 if (ret) 970 goto print_clk_out; 971 972 for (i = 0; i < count; i++) { 973 ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, i, &value); 974 if (ret) 975 goto print_clk_out; 976 977 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value, 978 cur_value == value ? "*" : ""); 979 } 980 break; 981 case SMU_GFXCLK: 982 case SMU_SCLK: 983 ret = yellow_carp_get_current_clk_freq(smu, clk_type, &cur_value); 984 if (ret) 985 goto print_clk_out; 986 min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; 987 max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; 988 if (cur_value == max) 989 i = 2; 990 else if (cur_value == min) 991 i = 0; 992 else 993 i = 1; 994 size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min, 995 i == 0 ? "*" : ""); 996 size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", 997 i == 1 ? cur_value : YELLOW_CARP_UMD_PSTATE_GFXCLK, 998 i == 1 ? "*" : ""); 999 size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, 1000 i == 2 ? "*" : ""); 1001 break; 1002 default: 1003 break; 1004 } 1005 1006 print_clk_out: 1007 return size; 1008 } 1009 1010 static int yellow_carp_force_clk_levels(struct smu_context *smu, 1011 enum smu_clk_type clk_type, uint32_t mask) 1012 { 1013 uint32_t soft_min_level = 0, soft_max_level = 0; 1014 uint32_t min_freq = 0, max_freq = 0; 1015 int ret = 0; 1016 1017 soft_min_level = mask ? (ffs(mask) - 1) : 0; 1018 soft_max_level = mask ? (fls(mask) - 1) : 0; 1019 1020 switch (clk_type) { 1021 case SMU_SOCCLK: 1022 case SMU_FCLK: 1023 case SMU_VCLK: 1024 case SMU_DCLK: 1025 ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq); 1026 if (ret) 1027 goto force_level_out; 1028 1029 ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq); 1030 if (ret) 1031 goto force_level_out; 1032 1033 ret = yellow_carp_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); 1034 if (ret) 1035 goto force_level_out; 1036 break; 1037 default: 1038 ret = -EINVAL; 1039 break; 1040 } 1041 1042 force_level_out: 1043 return ret; 1044 } 1045 1046 static int yellow_carp_set_performance_level(struct smu_context *smu, 1047 enum amd_dpm_forced_level level) 1048 { 1049 struct amdgpu_device *adev = smu->adev; 1050 uint32_t sclk_min = 0, sclk_max = 0; 1051 uint32_t fclk_min = 0, fclk_max = 0; 1052 uint32_t socclk_min = 0, socclk_max = 0; 1053 int ret = 0; 1054 1055 switch (level) { 1056 case AMD_DPM_FORCED_LEVEL_HIGH: 1057 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max); 1058 yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max); 1059 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max); 1060 sclk_min = sclk_max; 1061 fclk_min = fclk_max; 1062 socclk_min = socclk_max; 1063 break; 1064 case AMD_DPM_FORCED_LEVEL_LOW: 1065 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL); 1066 yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL); 1067 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL); 1068 sclk_max = sclk_min; 1069 fclk_max = fclk_min; 1070 socclk_max = socclk_min; 1071 break; 1072 case AMD_DPM_FORCED_LEVEL_AUTO: 1073 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max); 1074 yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max); 1075 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max); 1076 break; 1077 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 1078 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 1079 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 1080 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 1081 /* Temporarily do nothing since the optimal clocks haven't been provided yet */ 1082 break; 1083 case AMD_DPM_FORCED_LEVEL_MANUAL: 1084 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 1085 return 0; 1086 default: 1087 dev_err(adev->dev, "Invalid performance level %d\n", level); 1088 return -EINVAL; 1089 } 1090 1091 if (sclk_min && sclk_max) { 1092 ret = yellow_carp_set_soft_freq_limited_range(smu, 1093 SMU_SCLK, 1094 sclk_min, 1095 sclk_max); 1096 if (ret) 1097 return ret; 1098 1099 smu->gfx_actual_hard_min_freq = sclk_min; 1100 smu->gfx_actual_soft_max_freq = sclk_max; 1101 } 1102 1103 if (fclk_min && fclk_max) { 1104 ret = yellow_carp_set_soft_freq_limited_range(smu, 1105 SMU_FCLK, 1106 fclk_min, 1107 fclk_max); 1108 if (ret) 1109 return ret; 1110 } 1111 1112 if (socclk_min && socclk_max) { 1113 ret = yellow_carp_set_soft_freq_limited_range(smu, 1114 SMU_SOCCLK, 1115 socclk_min, 1116 socclk_max); 1117 if (ret) 1118 return ret; 1119 } 1120 1121 return ret; 1122 } 1123 1124 static int yellow_carp_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) 1125 { 1126 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 1127 1128 smu->gfx_default_hard_min_freq = clk_table->MinGfxClk; 1129 smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk; 1130 smu->gfx_actual_hard_min_freq = 0; 1131 smu->gfx_actual_soft_max_freq = 0; 1132 1133 return 0; 1134 } 1135 1136 static const struct pptable_funcs yellow_carp_ppt_funcs = { 1137 .check_fw_status = smu_v13_0_check_fw_status, 1138 .check_fw_version = smu_v13_0_check_fw_version, 1139 .init_smc_tables = yellow_carp_init_smc_tables, 1140 .fini_smc_tables = yellow_carp_fini_smc_tables, 1141 .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values, 1142 .system_features_control = yellow_carp_system_features_control, 1143 .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param, 1144 .send_smc_msg = smu_cmn_send_smc_msg, 1145 .dpm_set_vcn_enable = yellow_carp_dpm_set_vcn_enable, 1146 .dpm_set_jpeg_enable = yellow_carp_dpm_set_jpeg_enable, 1147 .set_default_dpm_table = yellow_carp_set_default_dpm_tables, 1148 .read_sensor = yellow_carp_read_sensor, 1149 .is_dpm_running = yellow_carp_is_dpm_running, 1150 .set_watermarks_table = yellow_carp_set_watermarks_table, 1151 .get_gpu_metrics = yellow_carp_get_gpu_metrics, 1152 .get_enabled_mask = smu_cmn_get_enabled_mask, 1153 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, 1154 .set_driver_table_location = smu_v13_0_set_driver_table_location, 1155 .gfx_off_control = smu_v13_0_gfx_off_control, 1156 .post_init = yellow_carp_post_smu_init, 1157 .mode2_reset = yellow_carp_mode2_reset, 1158 .get_dpm_ultimate_freq = yellow_carp_get_dpm_ultimate_freq, 1159 .od_edit_dpm_table = yellow_carp_od_edit_dpm_table, 1160 .print_clk_levels = yellow_carp_print_clk_levels, 1161 .force_clk_levels = yellow_carp_force_clk_levels, 1162 .set_performance_level = yellow_carp_set_performance_level, 1163 .set_fine_grain_gfx_freq_parameters = yellow_carp_set_fine_grain_gfx_freq_parameters, 1164 }; 1165 1166 void yellow_carp_set_ppt_funcs(struct smu_context *smu) 1167 { 1168 smu->ppt_funcs = &yellow_carp_ppt_funcs; 1169 smu->message_map = yellow_carp_message_map; 1170 smu->feature_map = yellow_carp_feature_mask_map; 1171 smu->table_map = yellow_carp_table_map; 1172 smu->is_apu = true; 1173 } 1174