/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "atom-types.h"
#include "atombios.h"
#include "processpptables.h"
#include "cgs_common.h"
#include "smumgr.h"
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "rv_ppsmc.h"
#include "smu10_hwmgr.h"
#include "power_state.h"
#include "soc15_common.h"
#include "smu10.h"
#include "asic_reg/pwr/pwr_10_0_offset.h"
#include "asic_reg/pwr/pwr_10_0_sh_mask.h"

#define SMU10_MAX_DEEPSLEEP_DIVIDER_ID	5
#define SMU10_MINIMUM_ENGINE_CLOCK	800	/* 8 MHz, the low boundary of engine clock allowed on this chip */
#define SCLK_MIN_DIV_INTV_SHIFT		12
#define SMU10_DISPCLK_BYPASS_THRESHOLD	10000	/* 100 MHz */
#define SMC_RAM_END			0x40000

static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;


static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
		struct pp_display_clock_request *clock_req)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
	PPSMC_Msg msg;

	switch (clk_type) {
	case amd_pp_dcf_clock:
		if (clk_freq == smu10_data->dcf_actual_hard_min_freq)
			return 0;
		msg = PPSMC_MSG_SetHardMinDcefclkByFreq;
		smu10_data->dcf_actual_hard_min_freq = clk_freq;
		break;
	case amd_pp_soc_clock:
		msg = PPSMC_MSG_SetHardMinSocclkByFreq;
		break;
	case amd_pp_f_clock:
		if (clk_freq == smu10_data->f_actual_hard_min_freq)
			return 0;
		smu10_data->f_actual_hard_min_freq = clk_freq;
		msg = PPSMC_MSG_SetHardMinFclkByFreq;
		break;
	default:
		pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!\n");
		return -EINVAL;
	}
	smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq, NULL);

	return 0;
}

static struct smu10_power_state *cast_smu10_ps(struct pp_hw_power_state *hw_ps)
{
	if (SMU10_Magic != hw_ps->magic)
		return NULL;

	return (struct smu10_power_state *)hw_ps;
}

static const struct smu10_power_state *cast_const_smu10_ps(
				const struct pp_hw_power_state *hw_ps)
{
	if (SMU10_Magic != hw_ps->magic)
		return NULL;

	return (struct smu10_power_state *)hw_ps;
}
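/*
 * Note on clock units in this file (inferred from the conversions in the
 * code, not from SMU documentation): display code hands us frequencies in
 * kHz (hence clock_freq_in_khz / 1000 above before messaging the SMU), the
 * SMU messages themselves take MHz, and the powerplay dependency tables and
 * sysfs-facing paths store 10 kHz units (hence "* 100" when reading MHz out
 * of the SMU clock table and "/ 100" when printing MHz).
 */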
static int smu10_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	smu10_data->dce_slow_sclk_threshold = 30000;
	smu10_data->thermal_auto_throttling_treshold = 0;
	smu10_data->is_nb_dpm_enabled = 1;
	smu10_data->dpm_flags = 1;
	smu10_data->need_min_deep_sleep_dcefclk = true;
	smu10_data->num_active_display = 0;
	smu10_data->deep_sleep_dcefclk = 0;

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkThrottleLowNotification);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerPlaySupport);
	return 0;
}

static int smu10_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
			struct phm_clock_and_voltage_limits *table)
{
	return 0;
}

static int smu10_init_dynamic_state_adjustment_rule_settings(
							struct pp_hwmgr *hwmgr)
{
	struct phm_clock_voltage_dependency_table *table_clk_vlt;

	/* eight entries are populated below, so reserve room for all eight */
	table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 8),
				GFP_KERNEL);

	if (NULL == table_clk_vlt) {
		pr_err("Can not allocate memory!\n");
		return -ENOMEM;
	}

	table_clk_vlt->count = 8;
	table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
	table_clk_vlt->entries[0].v = 0;
	table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
	table_clk_vlt->entries[1].v = 1;
	table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
	table_clk_vlt->entries[2].v = 2;
	table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
	table_clk_vlt->entries[3].v = 3;
	table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
	table_clk_vlt->entries[4].v = 4;
	table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
	table_clk_vlt->entries[5].v = 5;
	table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
	table_clk_vlt->entries[6].v = 6;
	table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
	table_clk_vlt->entries[7].v = 7;
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;

	return 0;
}

static int smu10_get_system_info_data(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)hwmgr->backend;

	smu10_data->sys_info.htc_hyst_lmt = 5;
	smu10_data->sys_info.htc_tmp_lmt = 203;

	if (smu10_data->thermal_auto_throttling_treshold == 0)
		smu10_data->thermal_auto_throttling_treshold = 203;

	smu10_construct_max_power_limits_table(hwmgr,
				    &hwmgr->dyn_state.max_clock_voltage_on_ac);

	smu10_init_dynamic_state_adjustment_rule_settings(hwmgr);

	return 0;
}

static int smu10_construct_boot_state(struct pp_hwmgr *hwmgr)
{
	return 0;
}

static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
{
	struct PP_Clocks clocks = {0};
	struct pp_display_clock_request clock_req;

	clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
	clock_req.clock_type = amd_pp_dcf_clock;
	clock_req.clock_freq_in_khz = clocks.dcefClock * 10;

	PP_ASSERT_WITH_CODE(!smu10_display_clock_voltage_request(hwmgr, &clock_req),
				"Attempt to set DCF Clock Failed!", return -EINVAL);

	return 0;
}
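/*
 * The setters below follow a common pattern: cache the last value that was
 * sent to the SMU and skip the SMC message entirely when the requested
 * value has not changed, so redundant display requests do not cost an SMU
 * round trip.
 */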
static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (clock && smu10_data->deep_sleep_dcefclk != clock) {
		smu10_data->deep_sleep_dcefclk = clock;
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetMinDeepSleepDcefclk,
					smu10_data->deep_sleep_dcefclk,
					NULL);
	}
	return 0;
}

static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (clock && smu10_data->dcf_actual_hard_min_freq != clock) {
		smu10_data->dcf_actual_hard_min_freq = clock;
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetHardMinDcefclkByFreq,
					smu10_data->dcf_actual_hard_min_freq,
					NULL);
	}
	return 0;
}

static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (clock && smu10_data->f_actual_hard_min_freq != clock) {
		smu10_data->f_actual_hard_min_freq = clock;
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetHardMinFclkByFreq,
					smu10_data->f_actual_hard_min_freq,
					NULL);
	}
	return 0;
}

static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (smu10_data->num_active_display != count) {
		smu10_data->num_active_display = count;
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetDisplayCount,
				smu10_data->num_active_display,
				NULL);
	}

	return 0;
}

static int smu10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	return smu10_set_clock_limit(hwmgr, input);
}

static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;

	smu10_data->vcn_power_gated = true;
	smu10_data->isp_tileA_power_gated = true;
	smu10_data->isp_tileB_power_gated = true;

	if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
		return smum_send_msg_to_smc_with_parameter(hwmgr,
							   PPSMC_MSG_SetGfxCGPG,
							   true,
							   NULL);
	else
		return 0;
}


static int smu10_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	return smu10_init_power_gate_state(hwmgr);
}

static int smu10_reset_cc6_data(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	smu10_data->separation_time = 0;
	smu10_data->cc6_disable = false;
	smu10_data->pstate_disable = false;
	smu10_data->cc6_setting_changed = false;

	return 0;
}

static int smu10_power_off_asic(struct pp_hwmgr *hwmgr)
{
	return smu10_reset_cc6_data(hwmgr);
}

static bool smu10_is_gfx_on(struct pp_hwmgr *hwmgr)
{
	uint32_t reg;
	struct amdgpu_device *adev = hwmgr->adev;

	reg = RREG32_SOC15(PWR, 0, mmPWR_MISC_CNTL_STATUS);
	if ((reg & PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK) ==
	    (0x2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT))
		return true;

	return false;
}

static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff, NULL);

		/* confirm gfx is back to "on" state */
		while (!smu10_is_gfx_on(hwmgr))
			msleep(1);
	}

	return 0;
}
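/*
 * GFXOFF handshake: after PPSMC_MSG_DisableGfxOff is sent, the GFX power
 * status is polled via PWR_MISC_CNTL_STATUS until the PWR_GFXOFF_STATUS
 * field reads 0x2, which smu10_is_gfx_on() above treats as "GFX fully on" --
 * presumably so that GFX registers can be accessed safely once
 * smu10_disable_gfx_off() returns.
 */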
static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	return 0;
}

static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	if (adev->pm.pp_feature & PP_GFXOFF_MASK)
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff, NULL);

	return 0;
}

static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	return 0;
}

static int smu10_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
{
	if (enable)
		return smu10_enable_gfx_off(hwmgr);
	else
		return smu10_disable_gfx_off(hwmgr);
}

static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				struct pp_power_state *prequest_ps,
				const struct pp_power_state *pcurrent_ps)
{
	return 0;
}

/* temporary hardcoded clock voltage breakdown tables */
static const DpmClock_t VddDcfClk[] = {
	{ 300, 2600},
	{ 600, 3200},
	{ 600, 3600},
};

static const DpmClock_t VddSocClk[] = {
	{ 478, 2600},
	{ 722, 3200},
	{ 722, 3600},
};

static const DpmClock_t VddFClk[] = {
	{ 400, 2600},
	{1200, 3200},
	{1200, 3600},
};

static const DpmClock_t VddDispClk[] = {
	{ 435, 2600},
	{ 661, 3200},
	{1086, 3600},
};

static const DpmClock_t VddDppClk[] = {
	{ 435, 2600},
	{ 661, 3200},
	{ 661, 3600},
};

static const DpmClock_t VddPhyClk[] = {
	{ 540, 2600},
	{ 810, 3200},
	{ 810, 3600},
};

static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
			struct smu10_voltage_dependency_table **pptable,
			uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
{
	uint32_t i;
	struct smu10_voltage_dependency_table *ptable;

	ptable = kzalloc(struct_size(ptable, entries, num_entry), GFP_KERNEL);
	if (NULL == ptable)
		return -ENOMEM;

	ptable->count = num_entry;

	for (i = 0; i < ptable->count; i++) {
		ptable->entries[i].clk = pclk_dependency_table->Freq * 100;
		ptable->entries[i].vol = pclk_dependency_table->Vol;
		pclk_dependency_table++;
	}

	*pptable = ptable;

	return 0;
}
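/*
 * Worked example of the conversion above: the hardcoded fallback entry
 * { 300, 2600 } in VddDcfClk yields entries[i].clk = 300 * 100 = 30000
 * (10 kHz units, i.e. 300 MHz), with entries[i].vol = 2600 copied through
 * unchanged (it is later reported verbatim as voltage_in_mv).
 */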
static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
{
	uint32_t result;

	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	DpmClocks_t *table = &(smu10_data->clock_table);
	struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);

	result = smum_smc_table_manager(hwmgr, (uint8_t *)table, SMU10_CLOCKTABLE, true);

	PP_ASSERT_WITH_CODE((0 == result),
			"Attempt to copy clock table from smc failed",
			return result);

	if (0 == result && table->DcefClocks[0].Freq != 0) {
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
						NUM_DCEFCLK_DPM_LEVELS,
						&smu10_data->clock_table.DcefClocks[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
						NUM_SOCCLK_DPM_LEVELS,
						&smu10_data->clock_table.SocClocks[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
						NUM_FCLK_DPM_LEVELS,
						&smu10_data->clock_table.FClocks[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_mclk,
						NUM_MEMCLK_DPM_LEVELS,
						&smu10_data->clock_table.MemClocks[0]);
	} else {
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
						ARRAY_SIZE(VddDcfClk),
						&VddDcfClk[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
						ARRAY_SIZE(VddSocClk),
						&VddSocClk[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
						ARRAY_SIZE(VddFClk),
						&VddFClk[0]);
	}
	smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dispclk,
					ARRAY_SIZE(VddDispClk),
					&VddDispClk[0]);
	smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dppclk,
					ARRAY_SIZE(VddDppClk), &VddDppClk[0]);
	smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
					ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &result);
	smu10_data->gfx_min_freq_limit = result / 10 * 1000;

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &result);
	smu10_data->gfx_max_freq_limit = result / 10 * 1000;

	return 0;
}

static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	struct smu10_hwmgr *data;

	data = kzalloc(sizeof(struct smu10_hwmgr), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	hwmgr->backend = data;

	result = smu10_initialize_dpm_defaults(hwmgr);
	if (result != 0) {
		pr_err("smu10_initialize_dpm_defaults failed\n");
		return result;
	}

	smu10_populate_clock_table(hwmgr);

	result = smu10_get_system_info_data(hwmgr);
	if (result != 0) {
		pr_err("smu10_get_system_info_data failed\n");
		return result;
	}

	smu10_construct_boot_state(hwmgr);

	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
						SMU10_MAX_HARDWARE_POWERLEVELS;

	hwmgr->platform_descriptor.hardwarePerformanceLevels =
						SMU10_MAX_HARDWARE_POWERLEVELS;

	hwmgr->platform_descriptor.vbiosInterruptId = 0;

	hwmgr->platform_descriptor.clockStep.engineClock = 500;

	hwmgr->platform_descriptor.clockStep.memoryClock = 500;

	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

	hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100;
	hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100;

	return result;
}

static int smu10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);

	kfree(pinfo->vdd_dep_on_dcefclk);
	pinfo->vdd_dep_on_dcefclk = NULL;
	kfree(pinfo->vdd_dep_on_socclk);
	pinfo->vdd_dep_on_socclk = NULL;
	kfree(pinfo->vdd_dep_on_fclk);
	pinfo->vdd_dep_on_fclk = NULL;
	kfree(pinfo->vdd_dep_on_dispclk);
	pinfo->vdd_dep_on_dispclk = NULL;
	kfree(pinfo->vdd_dep_on_dppclk);
	pinfo->vdd_dep_on_dppclk = NULL;
	kfree(pinfo->vdd_dep_on_phyclk);
	pinfo->vdd_dep_on_phyclk = NULL;

	kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;

	kfree(hwmgr->backend);
	hwmgr->backend = NULL;

	return 0;
}
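/*
 * Everything allocated by smu10_hwmgr_backend_init() and
 * smu10_populate_clock_table() above is released in
 * smu10_hwmgr_backend_fini(); each dependency-table pointer is NULLed
 * after kfree() so a repeated fini is harmless.
 */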
static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
				enum amd_dpm_forced_level level)
{
	struct smu10_hwmgr *data = hwmgr->backend;
	uint32_t min_sclk = hwmgr->display_config->min_core_set_clock;
	uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;

	if (hwmgr->smu_version < 0x1E3700) {
		pr_info("smu firmware version too old, can not set dpm level\n");
		return 0;
	}

	if (min_sclk < data->gfx_min_freq_limit)
		min_sclk = data->gfx_min_freq_limit;

	min_sclk /= 100; /* convert from 10 kHz units to MHz */
	if (min_mclk < data->clock_table.FClocks[0].Freq)
		min_mclk = data->clock_table.FClocks[0].Freq;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						data->gfx_max_freq_limit/100,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						SMU10_UMD_PSTATE_PEAK_FCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinSocclkByFreq,
						SMU10_UMD_PSTATE_PEAK_SOCCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinVcn,
						SMU10_UMD_PSTATE_VCE,
						NULL);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						data->gfx_max_freq_limit/100,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						SMU10_UMD_PSTATE_PEAK_FCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxSocclkByFreq,
						SMU10_UMD_PSTATE_PEAK_SOCCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxVcn,
						SMU10_UMD_PSTATE_VCE,
						NULL);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						min_sclk,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						min_sclk,
						NULL);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						min_mclk,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						min_mclk,
						NULL);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						SMU10_UMD_PSTATE_GFXCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						SMU10_UMD_PSTATE_FCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinSocclkByFreq,
						SMU10_UMD_PSTATE_SOCCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinVcn,
						SMU10_UMD_PSTATE_VCE,
						NULL);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						SMU10_UMD_PSTATE_GFXCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						SMU10_UMD_PSTATE_FCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxSocclkByFreq,
						SMU10_UMD_PSTATE_SOCCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxVcn,
						SMU10_UMD_PSTATE_VCE,
						NULL);
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						min_sclk,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						hwmgr->display_config->num_display > 3 ?
						SMU10_UMD_PSTATE_PEAK_FCLK :
						min_mclk,
						NULL);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinSocclkByFreq,
						SMU10_UMD_PSTATE_MIN_SOCCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinVcn,
						SMU10_UMD_PSTATE_MIN_VCE,
						NULL);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						data->gfx_max_freq_limit/100,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						SMU10_UMD_PSTATE_PEAK_FCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxSocclkByFreq,
						SMU10_UMD_PSTATE_PEAK_SOCCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxVcn,
						SMU10_UMD_PSTATE_VCE,
						NULL);
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						data->gfx_min_freq_limit/100,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						data->gfx_min_freq_limit/100,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						min_mclk,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						min_mclk,
						NULL);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}
	return 0;
}
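/*
 * The DPM-level cases above pin a clock range by pairing messages: the
 * SetHardMin* messages raise the floor and the SetSoftMax* messages cap
 * the ceiling, so HIGH/PROFILE_PEAK effectively pins min == max at the
 * peak values, while AUTO leaves the full [min_sclk, gfx_max_freq_limit]
 * span for the SMU to manage.
 */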
static uint32_t smu10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct smu10_hwmgr *data;

	if (hwmgr == NULL)
		return -EINVAL;

	data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (low)
		return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
	else
		return data->clock_vol_info.vdd_dep_on_fclk->entries[
			data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
}

static uint32_t smu10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct smu10_hwmgr *data;

	if (hwmgr == NULL)
		return -EINVAL;

	data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (low)
		return data->gfx_min_freq_limit;
	else
		return data->gfx_max_freq_limit;
}

static int smu10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *hw_ps)
{
	return 0;
}

static int smu10_dpm_get_pp_table_entry_callback(
						     struct pp_hwmgr *hwmgr,
					   struct pp_hw_power_state *hw_ps,
							  unsigned int index,
						     const void *clock_info)
{
	struct smu10_power_state *smu10_ps = cast_smu10_ps(hw_ps);

	smu10_ps->levels[index].engine_clock = 0;

	smu10_ps->levels[index].vddc_index = 0;
	smu10_ps->level = index + 1;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_SclkDeepSleep)) {
		smu10_ps->levels[index].ds_divider_index = 5;
		smu10_ps->levels[index].ss_divider_index = 5;
	}

	return 0;
}

static int smu10_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
{
	int result;
	unsigned long ret = 0;

	result = pp_tables_get_num_of_entries(hwmgr, &ret);

	return result ? 0 : ret;
}
static int smu10_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
		    unsigned long entry, struct pp_power_state *ps)
{
	int result;
	struct smu10_power_state *smu10_ps;

	ps->hardware.magic = SMU10_Magic;

	smu10_ps = cast_smu10_ps(&(ps->hardware));

	result = pp_tables_get_entry(hwmgr, entry, ps,
			smu10_dpm_get_pp_table_entry_callback);

	smu10_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
	smu10_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;

	return result;
}

static int smu10_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct smu10_power_state);
}

static int smu10_set_cpu_power_state(struct pp_hwmgr *hwmgr)
{
	return 0;
}


static int smu10_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
			bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
{
	struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (separation_time != data->separation_time ||
			cc6_disable != data->cc6_disable ||
			pstate_disable != data->pstate_disable) {
		data->separation_time = separation_time;
		data->cc6_disable = cc6_disable;
		data->pstate_disable = pstate_disable;
		data->cc6_setting_changed = true;
	}
	return 0;
}

static int smu10_get_dal_power_level(struct pp_hwmgr *hwmgr,
		struct amd_pp_simple_clock_info *info)
{
	return -EINVAL;
}

static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask)
{
	struct smu10_hwmgr *data = hwmgr->backend;
	struct smu10_voltage_dependency_table *mclk_table =
					data->clock_vol_info.vdd_dep_on_fclk;
	uint32_t low, high;

	low = mask ? (ffs(mask) - 1) : 0;
	high = mask ? (fls(mask) - 1) : 0;

	switch (type) {
	case PP_SCLK:
		if (low > 2 || high > 2) {
			pr_info("Currently sclk only supports 3 levels on RV\n");
			return -EINVAL;
		}

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						low == 2 ? data->gfx_max_freq_limit/100 :
						low == 1 ? SMU10_UMD_PSTATE_GFXCLK :
						data->gfx_min_freq_limit/100,
						NULL);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						high == 0 ? data->gfx_min_freq_limit/100 :
						high == 1 ? SMU10_UMD_PSTATE_GFXCLK :
						data->gfx_max_freq_limit/100,
						NULL);
		break;

	case PP_MCLK:
		if (low > mclk_table->count - 1 || high > mclk_table->count - 1)
			return -EINVAL;

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						mclk_table->entries[low].clk/100,
						NULL);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						mclk_table->entries[high].clk/100,
						NULL);
		break;

	case PP_PCIE:
	default:
		break;
	}
	return 0;
}
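/*
 * Mask decoding above: low = ffs(mask) - 1 is the lowest requested level
 * and high = fls(mask) - 1 the highest. E.g. a request for sclk levels
 * 1 and 2 arrives as mask = 0x6, so low = 1 and high = 2: the hard
 * minimum becomes SMU10_UMD_PSTATE_GFXCLK and the soft maximum becomes
 * gfx_max_freq_limit / 100.
 */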
static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct smu10_voltage_dependency_table *mclk_table =
			data->clock_vol_info.vdd_dep_on_fclk;
	uint32_t i, now, size = 0;

	switch (type) {
	case PP_SCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);

		/* driver only knows min/max gfx_clk; add level 1 for all other gfx clks */
		if (now == data->gfx_max_freq_limit/100)
			i = 2;
		else if (now == data->gfx_min_freq_limit/100)
			i = 0;
		else
			i = 1;

		size += sprintf(buf + size, "0: %uMhz %s\n",
					data->gfx_min_freq_limit/100,
					i == 0 ? "*" : "");
		size += sprintf(buf + size, "1: %uMhz %s\n",
					i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK,
					i == 1 ? "*" : "");
		size += sprintf(buf + size, "2: %uMhz %s\n",
					data->gfx_max_freq_limit/100,
					i == 2 ? "*" : "");
		break;
	case PP_MCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i,
					mclk_table->entries[i].clk / 100,
					((mclk_table->entries[i].clk / 100)
					 == now) ? "*" : "");
		break;
	default:
		break;
	}

	return size;
}
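/*
 * Sample pp_dpm_sclk output from the function above, with hypothetical
 * min/max limits of 400 MHz and 1500 MHz (the "*" marks the level that
 * matches the current gfxclk reading):
 *
 *   0: 400Mhz *
 *   1: 1100Mhz
 *   2: 1500Mhz
 */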
"*" : ""); 949 break; 950 default: 951 break; 952 } 953 954 return size; 955 } 956 957 static int smu10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, 958 PHM_PerformanceLevelDesignation designation, uint32_t index, 959 PHM_PerformanceLevel *level) 960 { 961 struct smu10_hwmgr *data; 962 963 if (level == NULL || hwmgr == NULL || state == NULL) 964 return -EINVAL; 965 966 data = (struct smu10_hwmgr *)(hwmgr->backend); 967 968 if (index == 0) { 969 level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk; 970 level->coreClock = data->gfx_min_freq_limit; 971 } else { 972 level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[ 973 data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk; 974 level->coreClock = data->gfx_max_freq_limit; 975 } 976 977 level->nonLocalMemoryFreq = 0; 978 level->nonLocalMemoryWidth = 0; 979 980 return 0; 981 } 982 983 static int smu10_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, 984 const struct pp_hw_power_state *state, struct pp_clock_info *clock_info) 985 { 986 const struct smu10_power_state *ps = cast_const_smu10_ps(state); 987 988 clock_info->min_eng_clk = ps->levels[0].engine_clock / (1 << (ps->levels[0].ss_divider_index)); 989 clock_info->max_eng_clk = ps->levels[ps->level - 1].engine_clock / (1 << (ps->levels[ps->level - 1].ss_divider_index)); 990 991 return 0; 992 } 993 994 #define MEM_FREQ_LOW_LATENCY 25000 995 #define MEM_FREQ_HIGH_LATENCY 80000 996 #define MEM_LATENCY_HIGH 245 997 #define MEM_LATENCY_LOW 35 998 #define MEM_LATENCY_ERR 0xFFFF 999 1000 1001 static uint32_t smu10_get_mem_latency(struct pp_hwmgr *hwmgr, 1002 uint32_t clock) 1003 { 1004 if (clock >= MEM_FREQ_LOW_LATENCY && 1005 clock < MEM_FREQ_HIGH_LATENCY) 1006 return MEM_LATENCY_HIGH; 1007 else if (clock >= MEM_FREQ_HIGH_LATENCY) 1008 return MEM_LATENCY_LOW; 1009 else 1010 return MEM_LATENCY_ERR; 1011 } 1012 1013 static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, 1014 enum amd_pp_clock_type type, 1015 struct pp_clock_levels_with_latency *clocks) 1016 { 1017 uint32_t i; 1018 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); 1019 struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info); 1020 struct smu10_voltage_dependency_table *pclk_vol_table; 1021 bool latency_required = false; 1022 1023 if (pinfo == NULL) 1024 return -EINVAL; 1025 1026 switch (type) { 1027 case amd_pp_mem_clock: 1028 pclk_vol_table = pinfo->vdd_dep_on_mclk; 1029 latency_required = true; 1030 break; 1031 case amd_pp_f_clock: 1032 pclk_vol_table = pinfo->vdd_dep_on_fclk; 1033 latency_required = true; 1034 break; 1035 case amd_pp_dcf_clock: 1036 pclk_vol_table = pinfo->vdd_dep_on_dcefclk; 1037 break; 1038 case amd_pp_disp_clock: 1039 pclk_vol_table = pinfo->vdd_dep_on_dispclk; 1040 break; 1041 case amd_pp_phy_clock: 1042 pclk_vol_table = pinfo->vdd_dep_on_phyclk; 1043 break; 1044 case amd_pp_dpp_clock: 1045 pclk_vol_table = pinfo->vdd_dep_on_dppclk; 1046 break; 1047 default: 1048 return -EINVAL; 1049 } 1050 1051 if (pclk_vol_table == NULL || pclk_vol_table->count == 0) 1052 return -EINVAL; 1053 1054 clocks->num_levels = 0; 1055 for (i = 0; i < pclk_vol_table->count; i++) { 1056 if (pclk_vol_table->entries[i].clk) { 1057 clocks->data[clocks->num_levels].clocks_in_khz = 1058 pclk_vol_table->entries[i].clk * 10; 1059 clocks->data[clocks->num_levels].latency_in_us = latency_required ? 
static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t reg_value = RREG32_SOC15(THM, 0, mmTHM_TCON_CUR_TMP);
	int cur_temp =
		(reg_value & THM_TCON_CUR_TMP__CUR_TEMP_MASK) >> THM_TCON_CUR_TMP__CUR_TEMP__SHIFT;

	if (cur_temp & THM_TCON_CUR_TMP__CUR_TEMP_RANGE_SEL_MASK)
		cur_temp = ((cur_temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	else
		cur_temp = (cur_temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return cur_temp;
}

static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
			  void *value, int *size)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	uint32_t sclk, mclk;
	int ret = 0;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &sclk);
		/* in units of 10KHZ */
		*((uint32_t *)value) = sclk * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &mclk);
		/* in units of 10KHZ */
		*((uint32_t *)value) = mclk * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr);
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)value = smu10_data->vcn_power_gated ? 0 : 1;
		*size = 4;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
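/*
 * Watermark programming below mirrors the clock-table read in
 * smu10_populate_clock_table() but in the other direction: the final
 * smum_smc_table_manager() argument appears to select the transfer
 * direction (true = read from the SMU, false = write to it), so the
 * filled Watermarks_t is pushed down into SMU10_WMTABLE.
 */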
static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
		void *clock_ranges)
{
	struct smu10_hwmgr *data = hwmgr->backend;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
	Watermarks_t *table = &(data->water_marks_table);

	smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
	smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false);
	data->water_marks_exist = true;
	return 0;
}

static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister, NULL);
}

static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub, NULL);
}

static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
{
	if (gate)
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma, NULL);
	else
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma, NULL);
}

static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (bgate) {
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_VCN,
						AMD_PG_STATE_GATE);
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_PowerDownVcn, 0, NULL);
		smu10_data->vcn_power_gated = true;
	} else {
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_PowerUpVcn, 0, NULL);
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_VCN,
						AMD_PG_STATE_UNGATE);
		smu10_data->vcn_power_gated = false;
	}
}

static int conv_power_profile_to_pplib_workload(int power_profile)
{
	int pplib_workload = 0;

	switch (power_profile) {
	case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
		pplib_workload = WORKLOAD_DEFAULT_BIT;
		break;
	case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
		pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
		break;
	case PP_SMC_POWER_PROFILE_POWERSAVING:
		pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
		break;
	case PP_SMC_POWER_PROFILE_VIDEO:
		pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
		break;
	case PP_SMC_POWER_PROFILE_VR:
		pplib_workload = WORKLOAD_PPLIB_VR_BIT;
		break;
	case PP_SMC_POWER_PROFILE_COMPUTE:
		pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
		break;
	}

	return pplib_workload;
}

static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
	uint32_t i, size = 0;
	static const uint8_t
		profile_mode_setting[6][4] = {{70, 60, 0, 0,},
						{70, 60, 1, 3,},
						{90, 60, 0, 0,},
						{70, 60, 0, 0,},
						{70, 90, 0, 0,},
						{30, 60, 0, 6,},
						};
	static const char *profile_name[6] = {
					"BOOTUP_DEFAULT",
					"3D_FULL_SCREEN",
					"POWER_SAVING",
					"VIDEO",
					"VR",
					"COMPUTE"};
	static const char *title[6] = {"NUM",
			"MODE_NAME",
			"BUSY_SET_POINT",
			"FPS",
			"USE_RLC_BUSY",
			"MIN_ACTIVE_LEVEL"};

	if (!buf)
		return -EINVAL;

	size += sprintf(buf + size, "%s %16s %s %s %s %s\n", title[0],
			title[1], title[2], title[3], title[4], title[5]);

	for (i = 0; i <= PP_SMC_POWER_PROFILE_COMPUTE; i++)
		size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n",
			i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
			profile_mode_setting[i][0], profile_mode_setting[i][1],
			profile_mode_setting[i][2], profile_mode_setting[i][3]);

	return size;
}
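/*
 * Profile selection below: a PP_SMC_POWER_PROFILE_* index is first mapped
 * to its WORKLOAD_PPLIB_*_BIT via conv_power_profile_to_pplib_workload()
 * and then sent to the SMU as a bitmask, e.g. PP_SMC_POWER_PROFILE_VIDEO
 * becomes 1 << WORKLOAD_PPLIB_VIDEO_BIT in PPSMC_MSG_ActiveProcessNotify.
 */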
static bool smu10_is_raven1_refresh(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
	    (hwmgr->smu_version >= 0x41e2b))
		return true;
	else
		return false;
}

static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
	int workload_type = 0;
	int result = 0;

	if (input[size] > PP_SMC_POWER_PROFILE_COMPUTE) {
		pr_err("Invalid power profile mode %ld\n", input[size]);
		return -EINVAL;
	}
	if (hwmgr->power_profile_mode == input[size])
		return 0;

	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
	workload_type =
		conv_power_profile_to_pplib_workload(input[size]);
	if (workload_type &&
	    smu10_is_raven1_refresh(hwmgr) &&
	    !hwmgr->gfxoff_state_changed_by_workload) {
		smu10_gfx_off_control(hwmgr, false);
		hwmgr->gfxoff_state_changed_by_workload = true;
	}
	result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify,
						     1 << workload_type,
						     NULL);
	if (!result)
		hwmgr->power_profile_mode = input[size];
	if (workload_type && hwmgr->gfxoff_state_changed_by_workload) {
		smu10_gfx_off_control(hwmgr, true);
		hwmgr->gfxoff_state_changed_by_workload = false;
	}

	return 0;
}

static int smu10_asic_reset(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode)
{
	return smum_send_msg_to_smc_with_parameter(hwmgr,
						   PPSMC_MSG_DeviceDriverReset,
						   mode,
						   NULL);
}

static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
	.backend_init = smu10_hwmgr_backend_init,
	.backend_fini = smu10_hwmgr_backend_fini,
	.apply_state_adjust_rules = smu10_apply_state_adjust_rules,
	.force_dpm_level = smu10_dpm_force_dpm_level,
	.get_power_state_size = smu10_get_power_state_size,
	.powerdown_uvd = NULL,
	.powergate_uvd = smu10_powergate_vcn,
	.powergate_vce = NULL,
	.get_mclk = smu10_dpm_get_mclk,
	.get_sclk = smu10_dpm_get_sclk,
	.patch_boot_state = smu10_dpm_patch_boot_state,
	.get_pp_table_entry = smu10_dpm_get_pp_table_entry,
	.get_num_of_pp_table_entries = smu10_dpm_get_num_of_pp_table_entries,
	.set_cpu_power_state = smu10_set_cpu_power_state,
	.store_cc6_data = smu10_store_cc6_data,
	.force_clock_level = smu10_force_clock_level,
	.print_clock_levels = smu10_print_clock_levels,
	.get_dal_power_level = smu10_get_dal_power_level,
	.get_performance_level = smu10_get_performance_level,
	.get_current_shallow_sleep_clocks = smu10_get_current_shallow_sleep_clocks,
	.get_clock_by_type_with_latency = smu10_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = smu10_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = smu10_set_watermarks_for_clocks_ranges,
	.get_max_high_clocks = smu10_get_max_high_clocks,
	.read_sensor = smu10_read_sensor,
	.set_active_display_count = smu10_set_active_display_count,
	.set_min_deep_sleep_dcefclk = smu10_set_min_deep_sleep_dcefclk,
	.dynamic_state_management_enable = smu10_enable_dpm_tasks,
	.power_off_asic = smu10_power_off_asic,
	.asic_setup = smu10_setup_asic_task,
	.power_state_set = smu10_set_power_state_tasks,
	.dynamic_state_management_disable = smu10_disable_dpm_tasks,
	.powergate_mmhub = smu10_powergate_mmhub,
	.smus_notify_pwe = smu10_smus_notify_pwe,
	.display_clock_voltage_request = smu10_display_clock_voltage_request,
	.powergate_gfx = smu10_gfx_off_control,
	.powergate_sdma = smu10_powergate_sdma,
	.set_hard_min_dcefclk_by_freq = smu10_set_hard_min_dcefclk_by_freq,
	.set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq,
	.get_power_profile_mode = smu10_get_power_profile_mode,
	.set_power_profile_mode = smu10_set_power_profile_mode,
	.asic_reset = smu10_asic_reset,
};
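/*
 * Entry point: the powerplay core is expected to call this during early
 * hwmgr initialization for SMU10 parts (Raven-class APUs) to wire up the
 * callback table above.
 */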
int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &smu10_hwmgr_funcs;
	hwmgr->pptable_func = &pptable_funcs;
	return 0;
}