1 /* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 #include "pp_debug.h" 24 #include <linux/types.h> 25 #include <linux/kernel.h> 26 #include <linux/slab.h> 27 #include "atom-types.h" 28 #include "atombios.h" 29 #include "processpptables.h" 30 #include "cgs_common.h" 31 #include "smu/smu_8_0_d.h" 32 #include "smu8_fusion.h" 33 #include "smu/smu_8_0_sh_mask.h" 34 #include "smumgr.h" 35 #include "hwmgr.h" 36 #include "hardwaremanager.h" 37 #include "cz_ppsmc.h" 38 #include "smu8_hwmgr.h" 39 #include "power_state.h" 40 #include "pp_thermal.h" 41 42 #define ixSMUSVI_NB_CURRENTVID 0xD8230044 43 #define CURRENT_NB_VID_MASK 0xff000000 44 #define CURRENT_NB_VID__SHIFT 24 45 #define ixSMUSVI_GFX_CURRENTVID 0xD8230048 46 #define CURRENT_GFX_VID_MASK 0xff000000 47 #define CURRENT_GFX_VID__SHIFT 24 48 49 static const unsigned long smu8_magic = (unsigned long) PHM_Cz_Magic; 50 51 static struct smu8_power_state *cast_smu8_power_state(struct pp_hw_power_state *hw_ps) 52 { 53 if (smu8_magic != hw_ps->magic) 54 return NULL; 55 56 return (struct smu8_power_state *)hw_ps; 57 } 58 59 static const struct smu8_power_state *cast_const_smu8_power_state( 60 const struct pp_hw_power_state *hw_ps) 61 { 62 if (smu8_magic != hw_ps->magic) 63 return NULL; 64 65 return (struct smu8_power_state *)hw_ps; 66 } 67 68 static uint32_t smu8_get_eclk_level(struct pp_hwmgr *hwmgr, 69 uint32_t clock, uint32_t msg) 70 { 71 int i = 0; 72 struct phm_vce_clock_voltage_dependency_table *ptable = 73 hwmgr->dyn_state.vce_clock_voltage_dependency_table; 74 75 switch (msg) { 76 case PPSMC_MSG_SetEclkSoftMin: 77 case PPSMC_MSG_SetEclkHardMin: 78 for (i = 0; i < (int)ptable->count; i++) { 79 if (clock <= ptable->entries[i].ecclk) 80 break; 81 } 82 break; 83 84 case PPSMC_MSG_SetEclkSoftMax: 85 case PPSMC_MSG_SetEclkHardMax: 86 for (i = ptable->count - 1; i >= 0; i--) { 87 if (clock >= ptable->entries[i].ecclk) 88 break; 89 } 90 break; 91 92 default: 93 break; 94 } 95 96 return i; 97 } 98 99 static uint32_t smu8_get_sclk_level(struct pp_hwmgr *hwmgr, 100 uint32_t clock, uint32_t msg) 101 { 102 int i = 0; 103 struct phm_clock_voltage_dependency_table *table = 104 hwmgr->dyn_state.vddc_dependency_on_sclk; 105 106 switch (msg) { 107 case PPSMC_MSG_SetSclkSoftMin: 108 case PPSMC_MSG_SetSclkHardMin: 109 for (i = 0; i < (int)table->count; i++) { 110 if (clock <= table->entries[i].clk) 111 break; 112 } 113 break; 114 115 case PPSMC_MSG_SetSclkSoftMax: 116 case PPSMC_MSG_SetSclkHardMax: 117 for (i = table->count - 1; i >= 0; i--) { 118 if (clock >= table->entries[i].clk) 119 break; 120 } 121 break; 122 123 default: 124 break; 125 } 126 return i; 127 } 128 129 static uint32_t smu8_get_uvd_level(struct pp_hwmgr *hwmgr, 130 uint32_t clock, uint32_t msg) 131 { 132 int i = 0; 133 struct phm_uvd_clock_voltage_dependency_table *ptable = 134 hwmgr->dyn_state.uvd_clock_voltage_dependency_table; 135 136 switch (msg) { 137 case PPSMC_MSG_SetUvdSoftMin: 138 case PPSMC_MSG_SetUvdHardMin: 139 for (i = 0; i < (int)ptable->count; i++) { 140 if (clock <= ptable->entries[i].vclk) 141 break; 142 } 143 break; 144 145 case PPSMC_MSG_SetUvdSoftMax: 146 case PPSMC_MSG_SetUvdHardMax: 147 for (i = ptable->count - 1; i >= 0; i--) { 148 if (clock >= ptable->entries[i].vclk) 149 break; 150 } 151 break; 152 153 default: 154 break; 155 } 156 157 return i; 158 } 159 160 static uint32_t smu8_get_max_sclk_level(struct pp_hwmgr *hwmgr) 161 { 162 struct smu8_hwmgr *data = hwmgr->backend; 163 164 if (data->max_sclk_level == 0) { 165 smum_send_msg_to_smc(hwmgr, 166 PPSMC_MSG_GetMaxSclkLevel, 167 &data->max_sclk_level); 168 data->max_sclk_level += 1; 169 } 170 171 return data->max_sclk_level; 172 } 173 174 static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) 175 { 176 struct smu8_hwmgr *data = hwmgr->backend; 177 struct amdgpu_device *adev = hwmgr->adev; 178 179 data->gfx_ramp_step = 256*25/100; 180 data->gfx_ramp_delay = 1; /* by default, we delay 1us */ 181 182 data->mgcg_cgtt_local0 = 0x00000000; 183 data->mgcg_cgtt_local1 = 0x00000000; 184 data->clock_slow_down_freq = 25000; 185 data->skip_clock_slow_down = 1; 186 data->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */ 187 data->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */ 188 data->voting_rights_clients = 0x00C00033; 189 data->static_screen_threshold = 8; 190 data->ddi_power_gating_disabled = 0; 191 data->bapm_enabled = 1; 192 data->voltage_drop_threshold = 0; 193 data->gfx_power_gating_threshold = 500; 194 data->vce_slow_sclk_threshold = 20000; 195 data->dce_slow_sclk_threshold = 30000; 196 data->disable_driver_thermal_policy = 1; 197 data->disable_nb_ps3_in_battery = 0; 198 199 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 200 PHM_PlatformCaps_ABM); 201 202 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 203 PHM_PlatformCaps_NonABMSupportInPPLib); 204 205 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 206 PHM_PlatformCaps_DynamicM3Arbiter); 207 208 data->override_dynamic_mgpg = 1; 209 210 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 211 PHM_PlatformCaps_DynamicPatchPowerState); 212 213 data->thermal_auto_throttling_treshold = 0; 214 data->tdr_clock = 0; 215 data->disable_gfx_power_gating_in_uvd = 0; 216 217 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 218 PHM_PlatformCaps_DynamicUVDState); 219 220 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 221 PHM_PlatformCaps_UVDDPM); 222 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 223 PHM_PlatformCaps_VCEDPM); 224 225 data->cc6_settings.cpu_cc6_disable = false; 226 data->cc6_settings.cpu_pstate_disable = false; 227 data->cc6_settings.nb_pstate_switch_disable = false; 228 data->cc6_settings.cpu_pstate_separation_time = 0; 229 230 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 231 PHM_PlatformCaps_DisableVoltageIsland); 232 233 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 234 PHM_PlatformCaps_UVDPowerGating); 235 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 236 PHM_PlatformCaps_VCEPowerGating); 237 238 if (adev->pg_flags & AMD_PG_SUPPORT_UVD) 239 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 240 PHM_PlatformCaps_UVDPowerGating); 241 if (adev->pg_flags & AMD_PG_SUPPORT_VCE) 242 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 243 PHM_PlatformCaps_VCEPowerGating); 244 245 246 return 0; 247 } 248 249 /* convert form 8bit vid to real voltage in mV*4 */ 250 static uint32_t smu8_convert_8Bit_index_to_voltage( 251 struct pp_hwmgr *hwmgr, uint16_t voltage) 252 { 253 return 6200 - (voltage * 25); 254 } 255 256 static int smu8_construct_max_power_limits_table(struct pp_hwmgr *hwmgr, 257 struct phm_clock_and_voltage_limits *table) 258 { 259 struct smu8_hwmgr *data = hwmgr->backend; 260 struct smu8_sys_info *sys_info = &data->sys_info; 261 struct phm_clock_voltage_dependency_table *dep_table = 262 hwmgr->dyn_state.vddc_dependency_on_sclk; 263 264 if (dep_table->count > 0) { 265 table->sclk = dep_table->entries[dep_table->count-1].clk; 266 table->vddc = smu8_convert_8Bit_index_to_voltage(hwmgr, 267 (uint16_t)dep_table->entries[dep_table->count-1].v); 268 } 269 table->mclk = sys_info->nbp_memory_clock[0]; 270 return 0; 271 } 272 273 static int smu8_init_dynamic_state_adjustment_rule_settings( 274 struct pp_hwmgr *hwmgr, 275 ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table) 276 { 277 struct phm_clock_voltage_dependency_table *table_clk_vlt; 278 279 table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 8), 280 GFP_KERNEL); 281 282 if (NULL == table_clk_vlt) { 283 pr_err("Can not allocate memory!\n"); 284 return -ENOMEM; 285 } 286 287 table_clk_vlt->count = 8; 288 table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0; 289 table_clk_vlt->entries[0].v = 0; 290 table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1; 291 table_clk_vlt->entries[1].v = 1; 292 table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2; 293 table_clk_vlt->entries[2].v = 2; 294 table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3; 295 table_clk_vlt->entries[3].v = 3; 296 table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4; 297 table_clk_vlt->entries[4].v = 4; 298 table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5; 299 table_clk_vlt->entries[5].v = 5; 300 table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6; 301 table_clk_vlt->entries[6].v = 6; 302 table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7; 303 table_clk_vlt->entries[7].v = 7; 304 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt; 305 306 return 0; 307 } 308 309 static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr) 310 { 311 struct smu8_hwmgr *data = hwmgr->backend; 312 ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info = NULL; 313 uint32_t i; 314 int result = 0; 315 uint8_t frev, crev; 316 uint16_t size; 317 318 info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *)smu_atom_get_data_table(hwmgr->adev, 319 GetIndexIntoMasterTable(DATA, IntegratedSystemInfo), 320 &size, &frev, &crev); 321 322 if (info == NULL) { 323 pr_err("Could not retrieve the Integrated System Info Table!\n"); 324 return -EINVAL; 325 } 326 327 if (crev != 9) { 328 pr_err("Unsupported IGP table: %d %d\n", frev, crev); 329 return -EINVAL; 330 } 331 332 data->sys_info.bootup_uma_clock = 333 le32_to_cpu(info->ulBootUpUMAClock); 334 335 data->sys_info.bootup_engine_clock = 336 le32_to_cpu(info->ulBootUpEngineClock); 337 338 data->sys_info.dentist_vco_freq = 339 le32_to_cpu(info->ulDentistVCOFreq); 340 341 data->sys_info.system_config = 342 le32_to_cpu(info->ulSystemConfig); 343 344 data->sys_info.bootup_nb_voltage_index = 345 le16_to_cpu(info->usBootUpNBVoltage); 346 347 data->sys_info.htc_hyst_lmt = 348 (info->ucHtcHystLmt == 0) ? 5 : info->ucHtcHystLmt; 349 350 data->sys_info.htc_tmp_lmt = 351 (info->ucHtcTmpLmt == 0) ? 203 : info->ucHtcTmpLmt; 352 353 if (data->sys_info.htc_tmp_lmt <= 354 data->sys_info.htc_hyst_lmt) { 355 pr_err("The htcTmpLmt should be larger than htcHystLmt.\n"); 356 return -EINVAL; 357 } 358 359 data->sys_info.nb_dpm_enable = 360 data->enable_nb_ps_policy && 361 (le32_to_cpu(info->ulSystemConfig) >> 3 & 0x1); 362 363 for (i = 0; i < SMU8_NUM_NBPSTATES; i++) { 364 if (i < SMU8_NUM_NBPMEMORYCLOCK) { 365 data->sys_info.nbp_memory_clock[i] = 366 le32_to_cpu(info->ulNbpStateMemclkFreq[i]); 367 } 368 data->sys_info.nbp_n_clock[i] = 369 le32_to_cpu(info->ulNbpStateNClkFreq[i]); 370 } 371 372 for (i = 0; i < MAX_DISPLAY_CLOCK_LEVEL; i++) { 373 data->sys_info.display_clock[i] = 374 le32_to_cpu(info->sDispClkVoltageMapping[i].ulMaximumSupportedCLK); 375 } 376 377 /* Here use 4 levels, make sure not exceed */ 378 for (i = 0; i < SMU8_NUM_NBPSTATES; i++) { 379 data->sys_info.nbp_voltage_index[i] = 380 le16_to_cpu(info->usNBPStateVoltage[i]); 381 } 382 383 if (!data->sys_info.nb_dpm_enable) { 384 for (i = 1; i < SMU8_NUM_NBPSTATES; i++) { 385 if (i < SMU8_NUM_NBPMEMORYCLOCK) { 386 data->sys_info.nbp_memory_clock[i] = 387 data->sys_info.nbp_memory_clock[0]; 388 } 389 data->sys_info.nbp_n_clock[i] = 390 data->sys_info.nbp_n_clock[0]; 391 data->sys_info.nbp_voltage_index[i] = 392 data->sys_info.nbp_voltage_index[0]; 393 } 394 } 395 396 if (le32_to_cpu(info->ulGPUCapInfo) & 397 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) { 398 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 399 PHM_PlatformCaps_EnableDFSBypass); 400 } 401 402 data->sys_info.uma_channel_number = info->ucUMAChannelNumber; 403 404 smu8_construct_max_power_limits_table (hwmgr, 405 &hwmgr->dyn_state.max_clock_voltage_on_ac); 406 407 smu8_init_dynamic_state_adjustment_rule_settings(hwmgr, 408 &info->sDISPCLK_Voltage[0]); 409 410 return result; 411 } 412 413 static int smu8_construct_boot_state(struct pp_hwmgr *hwmgr) 414 { 415 struct smu8_hwmgr *data = hwmgr->backend; 416 417 data->boot_power_level.engineClock = 418 data->sys_info.bootup_engine_clock; 419 420 data->boot_power_level.vddcIndex = 421 (uint8_t)data->sys_info.bootup_nb_voltage_index; 422 423 data->boot_power_level.dsDividerIndex = 0; 424 data->boot_power_level.ssDividerIndex = 0; 425 data->boot_power_level.allowGnbSlow = 1; 426 data->boot_power_level.forceNBPstate = 0; 427 data->boot_power_level.hysteresis_up = 0; 428 data->boot_power_level.numSIMDToPowerDown = 0; 429 data->boot_power_level.display_wm = 0; 430 data->boot_power_level.vce_wm = 0; 431 432 return 0; 433 } 434 435 static int smu8_upload_pptable_to_smu(struct pp_hwmgr *hwmgr) 436 { 437 struct SMU8_Fusion_ClkTable *clock_table; 438 int ret; 439 uint32_t i; 440 void *table = NULL; 441 pp_atomctrl_clock_dividers_kong dividers; 442 443 struct phm_clock_voltage_dependency_table *vddc_table = 444 hwmgr->dyn_state.vddc_dependency_on_sclk; 445 struct phm_clock_voltage_dependency_table *vdd_gfx_table = 446 hwmgr->dyn_state.vdd_gfx_dependency_on_sclk; 447 struct phm_acp_clock_voltage_dependency_table *acp_table = 448 hwmgr->dyn_state.acp_clock_voltage_dependency_table; 449 struct phm_uvd_clock_voltage_dependency_table *uvd_table = 450 hwmgr->dyn_state.uvd_clock_voltage_dependency_table; 451 struct phm_vce_clock_voltage_dependency_table *vce_table = 452 hwmgr->dyn_state.vce_clock_voltage_dependency_table; 453 454 if (!hwmgr->need_pp_table_upload) 455 return 0; 456 457 ret = smum_download_powerplay_table(hwmgr, &table); 458 459 PP_ASSERT_WITH_CODE((0 == ret && NULL != table), 460 "Fail to get clock table from SMU!", return -EINVAL;); 461 462 clock_table = (struct SMU8_Fusion_ClkTable *)table; 463 464 /* patch clock table */ 465 PP_ASSERT_WITH_CODE((vddc_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS), 466 "Dependency table entry exceeds max limit!", return -EINVAL;); 467 PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS), 468 "Dependency table entry exceeds max limit!", return -EINVAL;); 469 PP_ASSERT_WITH_CODE((acp_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS), 470 "Dependency table entry exceeds max limit!", return -EINVAL;); 471 PP_ASSERT_WITH_CODE((uvd_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS), 472 "Dependency table entry exceeds max limit!", return -EINVAL;); 473 PP_ASSERT_WITH_CODE((vce_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS), 474 "Dependency table entry exceeds max limit!", return -EINVAL;); 475 476 for (i = 0; i < SMU8_MAX_HARDWARE_POWERLEVELS; i++) { 477 478 /* vddc_sclk */ 479 clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid = 480 (i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0; 481 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency = 482 (i < vddc_table->count) ? vddc_table->entries[i].clk : 0; 483 484 atomctrl_get_engine_pll_dividers_kong(hwmgr, 485 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency, 486 ÷rs); 487 488 clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid = 489 (uint8_t)dividers.pll_post_divider; 490 491 /* vddgfx_sclk */ 492 clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid = 493 (i < vdd_gfx_table->count) ? (uint8_t)vdd_gfx_table->entries[i].v : 0; 494 495 /* acp breakdown */ 496 clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid = 497 (i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0; 498 clock_table->AclkBreakdownTable.ClkLevel[i].Frequency = 499 (i < acp_table->count) ? acp_table->entries[i].acpclk : 0; 500 501 atomctrl_get_engine_pll_dividers_kong(hwmgr, 502 clock_table->AclkBreakdownTable.ClkLevel[i].Frequency, 503 ÷rs); 504 505 clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid = 506 (uint8_t)dividers.pll_post_divider; 507 508 509 /* uvd breakdown */ 510 clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid = 511 (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0; 512 clock_table->VclkBreakdownTable.ClkLevel[i].Frequency = 513 (i < uvd_table->count) ? uvd_table->entries[i].vclk : 0; 514 515 atomctrl_get_engine_pll_dividers_kong(hwmgr, 516 clock_table->VclkBreakdownTable.ClkLevel[i].Frequency, 517 ÷rs); 518 519 clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid = 520 (uint8_t)dividers.pll_post_divider; 521 522 clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid = 523 (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0; 524 clock_table->DclkBreakdownTable.ClkLevel[i].Frequency = 525 (i < uvd_table->count) ? uvd_table->entries[i].dclk : 0; 526 527 atomctrl_get_engine_pll_dividers_kong(hwmgr, 528 clock_table->DclkBreakdownTable.ClkLevel[i].Frequency, 529 ÷rs); 530 531 clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid = 532 (uint8_t)dividers.pll_post_divider; 533 534 /* vce breakdown */ 535 clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid = 536 (i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0; 537 clock_table->EclkBreakdownTable.ClkLevel[i].Frequency = 538 (i < vce_table->count) ? vce_table->entries[i].ecclk : 0; 539 540 541 atomctrl_get_engine_pll_dividers_kong(hwmgr, 542 clock_table->EclkBreakdownTable.ClkLevel[i].Frequency, 543 ÷rs); 544 545 clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid = 546 (uint8_t)dividers.pll_post_divider; 547 548 } 549 ret = smum_upload_powerplay_table(hwmgr); 550 551 return ret; 552 } 553 554 static int smu8_init_sclk_limit(struct pp_hwmgr *hwmgr) 555 { 556 struct smu8_hwmgr *data = hwmgr->backend; 557 struct phm_clock_voltage_dependency_table *table = 558 hwmgr->dyn_state.vddc_dependency_on_sclk; 559 unsigned long clock = 0, level; 560 561 if (NULL == table || table->count <= 0) 562 return -EINVAL; 563 564 data->sclk_dpm.soft_min_clk = table->entries[0].clk; 565 data->sclk_dpm.hard_min_clk = table->entries[0].clk; 566 567 level = smu8_get_max_sclk_level(hwmgr) - 1; 568 569 if (level < table->count) 570 clock = table->entries[level].clk; 571 else 572 clock = table->entries[table->count - 1].clk; 573 574 data->sclk_dpm.soft_max_clk = clock; 575 data->sclk_dpm.hard_max_clk = clock; 576 577 return 0; 578 } 579 580 static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr) 581 { 582 struct smu8_hwmgr *data = hwmgr->backend; 583 struct phm_uvd_clock_voltage_dependency_table *table = 584 hwmgr->dyn_state.uvd_clock_voltage_dependency_table; 585 unsigned long clock = 0; 586 uint32_t level; 587 int ret; 588 589 if (NULL == table || table->count <= 0) 590 return -EINVAL; 591 592 data->uvd_dpm.soft_min_clk = 0; 593 data->uvd_dpm.hard_min_clk = 0; 594 595 ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level); 596 if (ret) 597 return ret; 598 599 if (level < table->count) 600 clock = table->entries[level].vclk; 601 else 602 clock = table->entries[table->count - 1].vclk; 603 604 data->uvd_dpm.soft_max_clk = clock; 605 data->uvd_dpm.hard_max_clk = clock; 606 607 return 0; 608 } 609 610 static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr) 611 { 612 struct smu8_hwmgr *data = hwmgr->backend; 613 struct phm_vce_clock_voltage_dependency_table *table = 614 hwmgr->dyn_state.vce_clock_voltage_dependency_table; 615 unsigned long clock = 0; 616 uint32_t level; 617 int ret; 618 619 if (NULL == table || table->count <= 0) 620 return -EINVAL; 621 622 data->vce_dpm.soft_min_clk = 0; 623 data->vce_dpm.hard_min_clk = 0; 624 625 ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level); 626 if (ret) 627 return ret; 628 629 if (level < table->count) 630 clock = table->entries[level].ecclk; 631 else 632 clock = table->entries[table->count - 1].ecclk; 633 634 data->vce_dpm.soft_max_clk = clock; 635 data->vce_dpm.hard_max_clk = clock; 636 637 return 0; 638 } 639 640 static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr) 641 { 642 struct smu8_hwmgr *data = hwmgr->backend; 643 struct phm_acp_clock_voltage_dependency_table *table = 644 hwmgr->dyn_state.acp_clock_voltage_dependency_table; 645 unsigned long clock = 0; 646 uint32_t level; 647 int ret; 648 649 if (NULL == table || table->count <= 0) 650 return -EINVAL; 651 652 data->acp_dpm.soft_min_clk = 0; 653 data->acp_dpm.hard_min_clk = 0; 654 655 ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level); 656 if (ret) 657 return ret; 658 659 if (level < table->count) 660 clock = table->entries[level].acpclk; 661 else 662 clock = table->entries[table->count - 1].acpclk; 663 664 data->acp_dpm.soft_max_clk = clock; 665 data->acp_dpm.hard_max_clk = clock; 666 return 0; 667 } 668 669 static void smu8_init_power_gate_state(struct pp_hwmgr *hwmgr) 670 { 671 struct smu8_hwmgr *data = hwmgr->backend; 672 673 data->uvd_power_gated = false; 674 data->vce_power_gated = false; 675 data->samu_power_gated = false; 676 #ifdef CONFIG_DRM_AMD_ACP 677 data->acp_power_gated = false; 678 #else 679 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL); 680 data->acp_power_gated = true; 681 #endif 682 683 } 684 685 static void smu8_init_sclk_threshold(struct pp_hwmgr *hwmgr) 686 { 687 struct smu8_hwmgr *data = hwmgr->backend; 688 689 data->low_sclk_interrupt_threshold = 0; 690 } 691 692 static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr) 693 { 694 struct smu8_hwmgr *data = hwmgr->backend; 695 struct phm_clock_voltage_dependency_table *table = 696 hwmgr->dyn_state.vddc_dependency_on_sclk; 697 698 unsigned long clock = 0; 699 unsigned long level; 700 unsigned long stable_pstate_sclk; 701 unsigned long percentage; 702 703 data->sclk_dpm.soft_min_clk = table->entries[0].clk; 704 level = smu8_get_max_sclk_level(hwmgr) - 1; 705 706 if (level < table->count) 707 data->sclk_dpm.soft_max_clk = table->entries[level].clk; 708 else 709 data->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk; 710 711 clock = hwmgr->display_config->min_core_set_clock; 712 if (clock == 0) 713 pr_debug("min_core_set_clock not set\n"); 714 715 if (data->sclk_dpm.hard_min_clk != clock) { 716 data->sclk_dpm.hard_min_clk = clock; 717 718 smum_send_msg_to_smc_with_parameter(hwmgr, 719 PPSMC_MSG_SetSclkHardMin, 720 smu8_get_sclk_level(hwmgr, 721 data->sclk_dpm.hard_min_clk, 722 PPSMC_MSG_SetSclkHardMin), 723 NULL); 724 } 725 726 clock = data->sclk_dpm.soft_min_clk; 727 728 /* update minimum clocks for Stable P-State feature */ 729 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 730 PHM_PlatformCaps_StablePState)) { 731 percentage = 75; 732 /*Sclk - calculate sclk value based on percentage and find FLOOR sclk from VddcDependencyOnSCLK table */ 733 stable_pstate_sclk = (hwmgr->dyn_state.max_clock_voltage_on_ac.mclk * 734 percentage) / 100; 735 736 if (clock < stable_pstate_sclk) 737 clock = stable_pstate_sclk; 738 } 739 740 if (data->sclk_dpm.soft_min_clk != clock) { 741 data->sclk_dpm.soft_min_clk = clock; 742 smum_send_msg_to_smc_with_parameter(hwmgr, 743 PPSMC_MSG_SetSclkSoftMin, 744 smu8_get_sclk_level(hwmgr, 745 data->sclk_dpm.soft_min_clk, 746 PPSMC_MSG_SetSclkSoftMin), 747 NULL); 748 } 749 750 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 751 PHM_PlatformCaps_StablePState) && 752 data->sclk_dpm.soft_max_clk != clock) { 753 data->sclk_dpm.soft_max_clk = clock; 754 smum_send_msg_to_smc_with_parameter(hwmgr, 755 PPSMC_MSG_SetSclkSoftMax, 756 smu8_get_sclk_level(hwmgr, 757 data->sclk_dpm.soft_max_clk, 758 PPSMC_MSG_SetSclkSoftMax), 759 NULL); 760 } 761 762 return 0; 763 } 764 765 static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr) 766 { 767 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 768 PHM_PlatformCaps_SclkDeepSleep)) { 769 uint32_t clks = hwmgr->display_config->min_core_set_clock_in_sr; 770 if (clks == 0) 771 clks = SMU8_MIN_DEEP_SLEEP_SCLK; 772 773 PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks); 774 775 smum_send_msg_to_smc_with_parameter(hwmgr, 776 PPSMC_MSG_SetMinDeepSleepSclk, 777 clks, 778 NULL); 779 } 780 781 return 0; 782 } 783 784 static int smu8_set_watermark_threshold(struct pp_hwmgr *hwmgr) 785 { 786 struct smu8_hwmgr *data = 787 hwmgr->backend; 788 789 smum_send_msg_to_smc_with_parameter(hwmgr, 790 PPSMC_MSG_SetWatermarkFrequency, 791 data->sclk_dpm.soft_max_clk, 792 NULL); 793 794 return 0; 795 } 796 797 static int smu8_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock) 798 { 799 struct smu8_hwmgr *hw_data = hwmgr->backend; 800 801 if (hw_data->is_nb_dpm_enabled) { 802 if (enable) { 803 PP_DBG_LOG("enable Low Memory PState.\n"); 804 805 return smum_send_msg_to_smc_with_parameter(hwmgr, 806 PPSMC_MSG_EnableLowMemoryPstate, 807 (lock ? 1 : 0), 808 NULL); 809 } else { 810 PP_DBG_LOG("disable Low Memory PState.\n"); 811 812 return smum_send_msg_to_smc_with_parameter(hwmgr, 813 PPSMC_MSG_DisableLowMemoryPstate, 814 (lock ? 1 : 0), 815 NULL); 816 } 817 } 818 819 return 0; 820 } 821 822 static int smu8_disable_nb_dpm(struct pp_hwmgr *hwmgr) 823 { 824 int ret = 0; 825 826 struct smu8_hwmgr *data = hwmgr->backend; 827 unsigned long dpm_features = 0; 828 829 if (data->is_nb_dpm_enabled) { 830 smu8_nbdpm_pstate_enable_disable(hwmgr, true, true); 831 dpm_features |= NB_DPM_MASK; 832 ret = smum_send_msg_to_smc_with_parameter( 833 hwmgr, 834 PPSMC_MSG_DisableAllSmuFeatures, 835 dpm_features, 836 NULL); 837 if (ret == 0) 838 data->is_nb_dpm_enabled = false; 839 } 840 841 return ret; 842 } 843 844 static int smu8_enable_nb_dpm(struct pp_hwmgr *hwmgr) 845 { 846 int ret = 0; 847 848 struct smu8_hwmgr *data = hwmgr->backend; 849 unsigned long dpm_features = 0; 850 851 if (!data->is_nb_dpm_enabled) { 852 PP_DBG_LOG("enabling ALL SMU features.\n"); 853 dpm_features |= NB_DPM_MASK; 854 ret = smum_send_msg_to_smc_with_parameter( 855 hwmgr, 856 PPSMC_MSG_EnableAllSmuFeatures, 857 dpm_features, 858 NULL); 859 if (ret == 0) 860 data->is_nb_dpm_enabled = true; 861 } 862 863 return ret; 864 } 865 866 static int smu8_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input) 867 { 868 bool disable_switch; 869 bool enable_low_mem_state; 870 struct smu8_hwmgr *hw_data = hwmgr->backend; 871 const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input; 872 const struct smu8_power_state *pnew_state = cast_const_smu8_power_state(states->pnew_state); 873 874 if (hw_data->sys_info.nb_dpm_enable) { 875 disable_switch = hw_data->cc6_settings.nb_pstate_switch_disable ? true : false; 876 enable_low_mem_state = hw_data->cc6_settings.nb_pstate_switch_disable ? false : true; 877 878 if (pnew_state->action == FORCE_HIGH) 879 smu8_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch); 880 else if (pnew_state->action == CANCEL_FORCE_HIGH) 881 smu8_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch); 882 else 883 smu8_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch); 884 } 885 return 0; 886 } 887 888 static int smu8_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) 889 { 890 int ret = 0; 891 892 smu8_update_sclk_limit(hwmgr); 893 smu8_set_deep_sleep_sclk_threshold(hwmgr); 894 smu8_set_watermark_threshold(hwmgr); 895 ret = smu8_enable_nb_dpm(hwmgr); 896 if (ret) 897 return ret; 898 smu8_update_low_mem_pstate(hwmgr, input); 899 900 return 0; 901 } 902 903 904 static int smu8_setup_asic_task(struct pp_hwmgr *hwmgr) 905 { 906 int ret; 907 908 ret = smu8_upload_pptable_to_smu(hwmgr); 909 if (ret) 910 return ret; 911 ret = smu8_init_sclk_limit(hwmgr); 912 if (ret) 913 return ret; 914 ret = smu8_init_uvd_limit(hwmgr); 915 if (ret) 916 return ret; 917 ret = smu8_init_vce_limit(hwmgr); 918 if (ret) 919 return ret; 920 ret = smu8_init_acp_limit(hwmgr); 921 if (ret) 922 return ret; 923 924 smu8_init_power_gate_state(hwmgr); 925 smu8_init_sclk_threshold(hwmgr); 926 927 return 0; 928 } 929 930 static void smu8_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr) 931 { 932 struct smu8_hwmgr *hw_data = hwmgr->backend; 933 934 hw_data->disp_clk_bypass_pending = false; 935 hw_data->disp_clk_bypass = false; 936 } 937 938 static void smu8_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr) 939 { 940 struct smu8_hwmgr *hw_data = hwmgr->backend; 941 942 hw_data->is_nb_dpm_enabled = false; 943 } 944 945 static void smu8_reset_cc6_data(struct pp_hwmgr *hwmgr) 946 { 947 struct smu8_hwmgr *hw_data = hwmgr->backend; 948 949 hw_data->cc6_settings.cc6_setting_changed = false; 950 hw_data->cc6_settings.cpu_pstate_separation_time = 0; 951 hw_data->cc6_settings.cpu_cc6_disable = false; 952 hw_data->cc6_settings.cpu_pstate_disable = false; 953 } 954 955 static void smu8_program_voting_clients(struct pp_hwmgr *hwmgr) 956 { 957 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 958 ixCG_FREQ_TRAN_VOTING_0, 959 SMU8_VOTINGRIGHTSCLIENTS_DFLT0); 960 } 961 962 static void smu8_clear_voting_clients(struct pp_hwmgr *hwmgr) 963 { 964 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 965 ixCG_FREQ_TRAN_VOTING_0, 0); 966 } 967 968 static int smu8_start_dpm(struct pp_hwmgr *hwmgr) 969 { 970 struct smu8_hwmgr *data = hwmgr->backend; 971 972 data->dpm_flags |= DPMFlags_SCLK_Enabled; 973 974 return smum_send_msg_to_smc_with_parameter(hwmgr, 975 PPSMC_MSG_EnableAllSmuFeatures, 976 SCLK_DPM_MASK, 977 NULL); 978 } 979 980 static int smu8_stop_dpm(struct pp_hwmgr *hwmgr) 981 { 982 int ret = 0; 983 struct smu8_hwmgr *data = hwmgr->backend; 984 unsigned long dpm_features = 0; 985 986 if (data->dpm_flags & DPMFlags_SCLK_Enabled) { 987 dpm_features |= SCLK_DPM_MASK; 988 data->dpm_flags &= ~DPMFlags_SCLK_Enabled; 989 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 990 PPSMC_MSG_DisableAllSmuFeatures, 991 dpm_features, 992 NULL); 993 } 994 return ret; 995 } 996 997 static int smu8_program_bootup_state(struct pp_hwmgr *hwmgr) 998 { 999 struct smu8_hwmgr *data = hwmgr->backend; 1000 1001 data->sclk_dpm.soft_min_clk = data->sys_info.bootup_engine_clock; 1002 data->sclk_dpm.soft_max_clk = data->sys_info.bootup_engine_clock; 1003 1004 smum_send_msg_to_smc_with_parameter(hwmgr, 1005 PPSMC_MSG_SetSclkSoftMin, 1006 smu8_get_sclk_level(hwmgr, 1007 data->sclk_dpm.soft_min_clk, 1008 PPSMC_MSG_SetSclkSoftMin), 1009 NULL); 1010 1011 smum_send_msg_to_smc_with_parameter(hwmgr, 1012 PPSMC_MSG_SetSclkSoftMax, 1013 smu8_get_sclk_level(hwmgr, 1014 data->sclk_dpm.soft_max_clk, 1015 PPSMC_MSG_SetSclkSoftMax), 1016 NULL); 1017 1018 return 0; 1019 } 1020 1021 static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr) 1022 { 1023 struct smu8_hwmgr *data = hwmgr->backend; 1024 1025 data->acp_boot_level = 0xff; 1026 } 1027 1028 static void smu8_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr) 1029 { 1030 struct phm_clock_voltage_dependency_table *table = 1031 hwmgr->dyn_state.vddc_dependency_on_sclk; 1032 1033 hwmgr->pstate_sclk = table->entries[0].clk / 100; 1034 hwmgr->pstate_mclk = 0; 1035 1036 hwmgr->pstate_sclk_peak = table->entries[table->count - 1].clk / 100; 1037 hwmgr->pstate_mclk_peak = 0; 1038 } 1039 1040 static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr) 1041 { 1042 smu8_program_voting_clients(hwmgr); 1043 if (smu8_start_dpm(hwmgr)) 1044 return -EINVAL; 1045 smu8_program_bootup_state(hwmgr); 1046 smu8_reset_acp_boot_level(hwmgr); 1047 1048 smu8_populate_umdpstate_clocks(hwmgr); 1049 1050 return 0; 1051 } 1052 1053 static int smu8_disable_dpm_tasks(struct pp_hwmgr *hwmgr) 1054 { 1055 smu8_disable_nb_dpm(hwmgr); 1056 1057 smu8_clear_voting_clients(hwmgr); 1058 if (smu8_stop_dpm(hwmgr)) 1059 return -EINVAL; 1060 1061 return 0; 1062 } 1063 1064 static int smu8_power_off_asic(struct pp_hwmgr *hwmgr) 1065 { 1066 smu8_disable_dpm_tasks(hwmgr); 1067 smu8_power_up_display_clock_sys_pll(hwmgr); 1068 smu8_clear_nb_dpm_flag(hwmgr); 1069 smu8_reset_cc6_data(hwmgr); 1070 return 0; 1071 } 1072 1073 static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, 1074 struct pp_power_state *prequest_ps, 1075 const struct pp_power_state *pcurrent_ps) 1076 { 1077 struct smu8_power_state *smu8_ps; 1078 const struct smu8_power_state *smu8_current_ps; 1079 struct smu8_hwmgr *data = hwmgr->backend; 1080 struct PP_Clocks clocks = {0, 0, 0, 0}; 1081 bool force_high; 1082 1083 smu8_ps = cast_smu8_power_state(&prequest_ps->hardware); 1084 smu8_current_ps = cast_const_smu8_power_state(&pcurrent_ps->hardware); 1085 1086 if (!smu8_ps || !smu8_current_ps) 1087 return -EINVAL; 1088 1089 smu8_ps->need_dfs_bypass = true; 1090 1091 data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label); 1092 1093 clocks.memoryClock = hwmgr->display_config->min_mem_set_clock != 0 ? 1094 hwmgr->display_config->min_mem_set_clock : 1095 data->sys_info.nbp_memory_clock[1]; 1096 1097 1098 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) 1099 clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk; 1100 1101 force_high = (clocks.memoryClock > data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1]) 1102 || (hwmgr->display_config->num_display >= 3); 1103 1104 smu8_ps->action = smu8_current_ps->action; 1105 1106 if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 1107 smu8_nbdpm_pstate_enable_disable(hwmgr, false, false); 1108 else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) 1109 smu8_nbdpm_pstate_enable_disable(hwmgr, false, true); 1110 else if (!force_high && (smu8_ps->action == FORCE_HIGH)) 1111 smu8_ps->action = CANCEL_FORCE_HIGH; 1112 else if (force_high && (smu8_ps->action != FORCE_HIGH)) 1113 smu8_ps->action = FORCE_HIGH; 1114 else 1115 smu8_ps->action = DO_NOTHING; 1116 1117 return 0; 1118 } 1119 1120 static int smu8_hwmgr_backend_init(struct pp_hwmgr *hwmgr) 1121 { 1122 int result = 0; 1123 struct smu8_hwmgr *data; 1124 1125 data = kzalloc(sizeof(struct smu8_hwmgr), GFP_KERNEL); 1126 if (data == NULL) 1127 return -ENOMEM; 1128 1129 hwmgr->backend = data; 1130 1131 result = smu8_initialize_dpm_defaults(hwmgr); 1132 if (result != 0) { 1133 pr_err("smu8_initialize_dpm_defaults failed\n"); 1134 return result; 1135 } 1136 1137 result = smu8_get_system_info_data(hwmgr); 1138 if (result != 0) { 1139 pr_err("smu8_get_system_info_data failed\n"); 1140 return result; 1141 } 1142 1143 smu8_construct_boot_state(hwmgr); 1144 1145 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = SMU8_MAX_HARDWARE_POWERLEVELS; 1146 1147 return result; 1148 } 1149 1150 static int smu8_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) 1151 { 1152 if (hwmgr != NULL) { 1153 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); 1154 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; 1155 1156 kfree(hwmgr->backend); 1157 hwmgr->backend = NULL; 1158 } 1159 return 0; 1160 } 1161 1162 static int smu8_phm_force_dpm_highest(struct pp_hwmgr *hwmgr) 1163 { 1164 struct smu8_hwmgr *data = hwmgr->backend; 1165 1166 smum_send_msg_to_smc_with_parameter(hwmgr, 1167 PPSMC_MSG_SetSclkSoftMin, 1168 smu8_get_sclk_level(hwmgr, 1169 data->sclk_dpm.soft_max_clk, 1170 PPSMC_MSG_SetSclkSoftMin), 1171 NULL); 1172 1173 smum_send_msg_to_smc_with_parameter(hwmgr, 1174 PPSMC_MSG_SetSclkSoftMax, 1175 smu8_get_sclk_level(hwmgr, 1176 data->sclk_dpm.soft_max_clk, 1177 PPSMC_MSG_SetSclkSoftMax), 1178 NULL); 1179 1180 return 0; 1181 } 1182 1183 static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr) 1184 { 1185 struct smu8_hwmgr *data = hwmgr->backend; 1186 struct phm_clock_voltage_dependency_table *table = 1187 hwmgr->dyn_state.vddc_dependency_on_sclk; 1188 unsigned long clock = 0, level; 1189 1190 if (NULL == table || table->count <= 0) 1191 return -EINVAL; 1192 1193 data->sclk_dpm.soft_min_clk = table->entries[0].clk; 1194 data->sclk_dpm.hard_min_clk = table->entries[0].clk; 1195 1196 level = smu8_get_max_sclk_level(hwmgr) - 1; 1197 1198 if (level < table->count) 1199 clock = table->entries[level].clk; 1200 else 1201 clock = table->entries[table->count - 1].clk; 1202 1203 data->sclk_dpm.soft_max_clk = clock; 1204 data->sclk_dpm.hard_max_clk = clock; 1205 1206 smum_send_msg_to_smc_with_parameter(hwmgr, 1207 PPSMC_MSG_SetSclkSoftMin, 1208 smu8_get_sclk_level(hwmgr, 1209 data->sclk_dpm.soft_min_clk, 1210 PPSMC_MSG_SetSclkSoftMin), 1211 NULL); 1212 1213 smum_send_msg_to_smc_with_parameter(hwmgr, 1214 PPSMC_MSG_SetSclkSoftMax, 1215 smu8_get_sclk_level(hwmgr, 1216 data->sclk_dpm.soft_max_clk, 1217 PPSMC_MSG_SetSclkSoftMax), 1218 NULL); 1219 1220 return 0; 1221 } 1222 1223 static int smu8_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr) 1224 { 1225 struct smu8_hwmgr *data = hwmgr->backend; 1226 1227 smum_send_msg_to_smc_with_parameter(hwmgr, 1228 PPSMC_MSG_SetSclkSoftMax, 1229 smu8_get_sclk_level(hwmgr, 1230 data->sclk_dpm.soft_min_clk, 1231 PPSMC_MSG_SetSclkSoftMax), 1232 NULL); 1233 1234 smum_send_msg_to_smc_with_parameter(hwmgr, 1235 PPSMC_MSG_SetSclkSoftMin, 1236 smu8_get_sclk_level(hwmgr, 1237 data->sclk_dpm.soft_min_clk, 1238 PPSMC_MSG_SetSclkSoftMin), 1239 NULL); 1240 1241 return 0; 1242 } 1243 1244 static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, 1245 enum amd_dpm_forced_level level) 1246 { 1247 int ret = 0; 1248 1249 switch (level) { 1250 case AMD_DPM_FORCED_LEVEL_HIGH: 1251 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 1252 ret = smu8_phm_force_dpm_highest(hwmgr); 1253 break; 1254 case AMD_DPM_FORCED_LEVEL_LOW: 1255 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 1256 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 1257 ret = smu8_phm_force_dpm_lowest(hwmgr); 1258 break; 1259 case AMD_DPM_FORCED_LEVEL_AUTO: 1260 ret = smu8_phm_unforce_dpm_levels(hwmgr); 1261 break; 1262 case AMD_DPM_FORCED_LEVEL_MANUAL: 1263 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 1264 default: 1265 break; 1266 } 1267 1268 return ret; 1269 } 1270 1271 static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr) 1272 { 1273 if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) 1274 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF, NULL); 1275 return 0; 1276 } 1277 1278 static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr) 1279 { 1280 if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) { 1281 return smum_send_msg_to_smc_with_parameter( 1282 hwmgr, 1283 PPSMC_MSG_UVDPowerON, 1284 PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0, 1285 NULL); 1286 } 1287 1288 return 0; 1289 } 1290 1291 static int smu8_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr) 1292 { 1293 struct smu8_hwmgr *data = hwmgr->backend; 1294 struct phm_vce_clock_voltage_dependency_table *ptable = 1295 hwmgr->dyn_state.vce_clock_voltage_dependency_table; 1296 1297 /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */ 1298 if (PP_CAP(PHM_PlatformCaps_StablePState) || 1299 hwmgr->en_umd_pstate) { 1300 data->vce_dpm.hard_min_clk = 1301 ptable->entries[ptable->count - 1].ecclk; 1302 1303 smum_send_msg_to_smc_with_parameter(hwmgr, 1304 PPSMC_MSG_SetEclkHardMin, 1305 smu8_get_eclk_level(hwmgr, 1306 data->vce_dpm.hard_min_clk, 1307 PPSMC_MSG_SetEclkHardMin), 1308 NULL); 1309 } else { 1310 1311 smum_send_msg_to_smc_with_parameter(hwmgr, 1312 PPSMC_MSG_SetEclkHardMin, 1313 0, 1314 NULL); 1315 /* disable ECLK DPM 0. Otherwise VCE could hang if 1316 * switching SCLK from DPM 0 to 6/7 */ 1317 smum_send_msg_to_smc_with_parameter(hwmgr, 1318 PPSMC_MSG_SetEclkSoftMin, 1319 1, 1320 NULL); 1321 } 1322 return 0; 1323 } 1324 1325 static int smu8_dpm_powerdown_vce(struct pp_hwmgr *hwmgr) 1326 { 1327 if (PP_CAP(PHM_PlatformCaps_VCEPowerGating)) 1328 return smum_send_msg_to_smc(hwmgr, 1329 PPSMC_MSG_VCEPowerOFF, 1330 NULL); 1331 return 0; 1332 } 1333 1334 static int smu8_dpm_powerup_vce(struct pp_hwmgr *hwmgr) 1335 { 1336 if (PP_CAP(PHM_PlatformCaps_VCEPowerGating)) 1337 return smum_send_msg_to_smc(hwmgr, 1338 PPSMC_MSG_VCEPowerON, 1339 NULL); 1340 return 0; 1341 } 1342 1343 static uint32_t smu8_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) 1344 { 1345 struct smu8_hwmgr *data = hwmgr->backend; 1346 1347 return data->sys_info.bootup_uma_clock; 1348 } 1349 1350 static uint32_t smu8_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) 1351 { 1352 struct pp_power_state *ps; 1353 struct smu8_power_state *smu8_ps; 1354 1355 if (hwmgr == NULL) 1356 return -EINVAL; 1357 1358 ps = hwmgr->request_ps; 1359 1360 if (ps == NULL) 1361 return -EINVAL; 1362 1363 smu8_ps = cast_smu8_power_state(&ps->hardware); 1364 1365 if (low) 1366 return smu8_ps->levels[0].engineClock; 1367 else 1368 return smu8_ps->levels[smu8_ps->level-1].engineClock; 1369 } 1370 1371 static int smu8_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, 1372 struct pp_hw_power_state *hw_ps) 1373 { 1374 struct smu8_hwmgr *data = hwmgr->backend; 1375 struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps); 1376 1377 smu8_ps->level = 1; 1378 smu8_ps->nbps_flags = 0; 1379 smu8_ps->bapm_flags = 0; 1380 smu8_ps->levels[0] = data->boot_power_level; 1381 1382 return 0; 1383 } 1384 1385 static int smu8_dpm_get_pp_table_entry_callback( 1386 struct pp_hwmgr *hwmgr, 1387 struct pp_hw_power_state *hw_ps, 1388 unsigned int index, 1389 const void *clock_info) 1390 { 1391 struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps); 1392 1393 const ATOM_PPLIB_CZ_CLOCK_INFO *smu8_clock_info = clock_info; 1394 1395 struct phm_clock_voltage_dependency_table *table = 1396 hwmgr->dyn_state.vddc_dependency_on_sclk; 1397 uint8_t clock_info_index = smu8_clock_info->index; 1398 1399 if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1)) 1400 clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1); 1401 1402 smu8_ps->levels[index].engineClock = table->entries[clock_info_index].clk; 1403 smu8_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v; 1404 1405 smu8_ps->level = index + 1; 1406 1407 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { 1408 smu8_ps->levels[index].dsDividerIndex = 5; 1409 smu8_ps->levels[index].ssDividerIndex = 5; 1410 } 1411 1412 return 0; 1413 } 1414 1415 static int smu8_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr) 1416 { 1417 int result; 1418 unsigned long ret = 0; 1419 1420 result = pp_tables_get_num_of_entries(hwmgr, &ret); 1421 1422 return result ? 0 : ret; 1423 } 1424 1425 static int smu8_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr, 1426 unsigned long entry, struct pp_power_state *ps) 1427 { 1428 int result; 1429 struct smu8_power_state *smu8_ps; 1430 1431 ps->hardware.magic = smu8_magic; 1432 1433 smu8_ps = cast_smu8_power_state(&(ps->hardware)); 1434 1435 result = pp_tables_get_entry(hwmgr, entry, ps, 1436 smu8_dpm_get_pp_table_entry_callback); 1437 1438 smu8_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK; 1439 smu8_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK; 1440 1441 return result; 1442 } 1443 1444 static int smu8_get_power_state_size(struct pp_hwmgr *hwmgr) 1445 { 1446 return sizeof(struct smu8_power_state); 1447 } 1448 1449 static void smu8_hw_print_display_cfg( 1450 const struct cc6_settings *cc6_settings) 1451 { 1452 PP_DBG_LOG("New Display Configuration:\n"); 1453 1454 PP_DBG_LOG(" cpu_cc6_disable: %d\n", 1455 cc6_settings->cpu_cc6_disable); 1456 PP_DBG_LOG(" cpu_pstate_disable: %d\n", 1457 cc6_settings->cpu_pstate_disable); 1458 PP_DBG_LOG(" nb_pstate_switch_disable: %d\n", 1459 cc6_settings->nb_pstate_switch_disable); 1460 PP_DBG_LOG(" cpu_pstate_separation_time: %d\n\n", 1461 cc6_settings->cpu_pstate_separation_time); 1462 } 1463 1464 static int smu8_set_cpu_power_state(struct pp_hwmgr *hwmgr) 1465 { 1466 struct smu8_hwmgr *hw_data = hwmgr->backend; 1467 uint32_t data = 0; 1468 1469 if (hw_data->cc6_settings.cc6_setting_changed) { 1470 1471 hw_data->cc6_settings.cc6_setting_changed = false; 1472 1473 smu8_hw_print_display_cfg(&hw_data->cc6_settings); 1474 1475 data |= (hw_data->cc6_settings.cpu_pstate_separation_time 1476 & PWRMGT_SEPARATION_TIME_MASK) 1477 << PWRMGT_SEPARATION_TIME_SHIFT; 1478 1479 data |= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0) 1480 << PWRMGT_DISABLE_CPU_CSTATES_SHIFT; 1481 1482 data |= (hw_data->cc6_settings.cpu_pstate_disable ? 0x1 : 0x0) 1483 << PWRMGT_DISABLE_CPU_PSTATES_SHIFT; 1484 1485 PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n", 1486 data); 1487 1488 smum_send_msg_to_smc_with_parameter(hwmgr, 1489 PPSMC_MSG_SetDisplaySizePowerParams, 1490 data, 1491 NULL); 1492 } 1493 1494 return 0; 1495 } 1496 1497 1498 static int smu8_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time, 1499 bool cc6_disable, bool pstate_disable, bool pstate_switch_disable) 1500 { 1501 struct smu8_hwmgr *hw_data = hwmgr->backend; 1502 1503 if (separation_time != 1504 hw_data->cc6_settings.cpu_pstate_separation_time || 1505 cc6_disable != hw_data->cc6_settings.cpu_cc6_disable || 1506 pstate_disable != hw_data->cc6_settings.cpu_pstate_disable || 1507 pstate_switch_disable != hw_data->cc6_settings.nb_pstate_switch_disable) { 1508 1509 hw_data->cc6_settings.cc6_setting_changed = true; 1510 1511 hw_data->cc6_settings.cpu_pstate_separation_time = 1512 separation_time; 1513 hw_data->cc6_settings.cpu_cc6_disable = 1514 cc6_disable; 1515 hw_data->cc6_settings.cpu_pstate_disable = 1516 pstate_disable; 1517 hw_data->cc6_settings.nb_pstate_switch_disable = 1518 pstate_switch_disable; 1519 1520 } 1521 1522 return 0; 1523 } 1524 1525 static int smu8_get_dal_power_level(struct pp_hwmgr *hwmgr, 1526 struct amd_pp_simple_clock_info *info) 1527 { 1528 uint32_t i; 1529 const struct phm_clock_voltage_dependency_table *table = 1530 hwmgr->dyn_state.vddc_dep_on_dal_pwrl; 1531 const struct phm_clock_and_voltage_limits *limits = 1532 &hwmgr->dyn_state.max_clock_voltage_on_ac; 1533 1534 info->engine_max_clock = limits->sclk; 1535 info->memory_max_clock = limits->mclk; 1536 1537 for (i = table->count - 1; i > 0; i--) { 1538 if (limits->vddc >= table->entries[i].v) { 1539 info->level = table->entries[i].clk; 1540 return 0; 1541 } 1542 } 1543 return -EINVAL; 1544 } 1545 1546 static int smu8_force_clock_level(struct pp_hwmgr *hwmgr, 1547 enum pp_clock_type type, uint32_t mask) 1548 { 1549 switch (type) { 1550 case PP_SCLK: 1551 smum_send_msg_to_smc_with_parameter(hwmgr, 1552 PPSMC_MSG_SetSclkSoftMin, 1553 mask, 1554 NULL); 1555 smum_send_msg_to_smc_with_parameter(hwmgr, 1556 PPSMC_MSG_SetSclkSoftMax, 1557 mask, 1558 NULL); 1559 break; 1560 default: 1561 break; 1562 } 1563 1564 return 0; 1565 } 1566 1567 static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr, 1568 enum pp_clock_type type, char *buf) 1569 { 1570 struct smu8_hwmgr *data = hwmgr->backend; 1571 struct phm_clock_voltage_dependency_table *sclk_table = 1572 hwmgr->dyn_state.vddc_dependency_on_sclk; 1573 uint32_t i, now; 1574 int size = 0; 1575 1576 switch (type) { 1577 case PP_SCLK: 1578 now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, 1579 CGS_IND_REG__SMC, 1580 ixTARGET_AND_CURRENT_PROFILE_INDEX), 1581 TARGET_AND_CURRENT_PROFILE_INDEX, 1582 CURR_SCLK_INDEX); 1583 1584 for (i = 0; i < sclk_table->count; i++) 1585 size += sprintf(buf + size, "%d: %uMhz %s\n", 1586 i, sclk_table->entries[i].clk / 100, 1587 (i == now) ? "*" : ""); 1588 break; 1589 case PP_MCLK: 1590 now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, 1591 CGS_IND_REG__SMC, 1592 ixTARGET_AND_CURRENT_PROFILE_INDEX), 1593 TARGET_AND_CURRENT_PROFILE_INDEX, 1594 CURR_MCLK_INDEX); 1595 1596 for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--) 1597 size += sprintf(buf + size, "%d: %uMhz %s\n", 1598 SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100, 1599 (SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : ""); 1600 break; 1601 default: 1602 break; 1603 } 1604 return size; 1605 } 1606 1607 static int smu8_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, 1608 PHM_PerformanceLevelDesignation designation, uint32_t index, 1609 PHM_PerformanceLevel *level) 1610 { 1611 const struct smu8_power_state *ps; 1612 struct smu8_hwmgr *data; 1613 uint32_t level_index; 1614 uint32_t i; 1615 1616 if (level == NULL || hwmgr == NULL || state == NULL) 1617 return -EINVAL; 1618 1619 data = hwmgr->backend; 1620 ps = cast_const_smu8_power_state(state); 1621 1622 level_index = index > ps->level - 1 ? ps->level - 1 : index; 1623 level->coreClock = ps->levels[level_index].engineClock; 1624 1625 if (designation == PHM_PerformanceLevelDesignation_PowerContainment) { 1626 for (i = 1; i < ps->level; i++) { 1627 if (ps->levels[i].engineClock > data->dce_slow_sclk_threshold) { 1628 level->coreClock = ps->levels[i].engineClock; 1629 break; 1630 } 1631 } 1632 } 1633 1634 if (level_index == 0) 1635 level->memory_clock = data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1]; 1636 else 1637 level->memory_clock = data->sys_info.nbp_memory_clock[0]; 1638 1639 level->vddc = (smu8_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4; 1640 level->nonLocalMemoryFreq = 0; 1641 level->nonLocalMemoryWidth = 0; 1642 1643 return 0; 1644 } 1645 1646 static int smu8_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, 1647 const struct pp_hw_power_state *state, struct pp_clock_info *clock_info) 1648 { 1649 const struct smu8_power_state *ps = cast_const_smu8_power_state(state); 1650 1651 clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex)); 1652 clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex)); 1653 1654 return 0; 1655 } 1656 1657 static int smu8_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, 1658 struct amd_pp_clocks *clocks) 1659 { 1660 struct smu8_hwmgr *data = hwmgr->backend; 1661 int i; 1662 struct phm_clock_voltage_dependency_table *table; 1663 1664 clocks->count = smu8_get_max_sclk_level(hwmgr); 1665 switch (type) { 1666 case amd_pp_disp_clock: 1667 for (i = 0; i < clocks->count; i++) 1668 clocks->clock[i] = data->sys_info.display_clock[i] * 10; 1669 break; 1670 case amd_pp_sys_clock: 1671 table = hwmgr->dyn_state.vddc_dependency_on_sclk; 1672 for (i = 0; i < clocks->count; i++) 1673 clocks->clock[i] = table->entries[i].clk * 10; 1674 break; 1675 case amd_pp_mem_clock: 1676 clocks->count = SMU8_NUM_NBPMEMORYCLOCK; 1677 for (i = 0; i < clocks->count; i++) 1678 clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i] * 10; 1679 break; 1680 default: 1681 return -1; 1682 } 1683 1684 return 0; 1685 } 1686 1687 static int smu8_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks) 1688 { 1689 struct phm_clock_voltage_dependency_table *table = 1690 hwmgr->dyn_state.vddc_dependency_on_sclk; 1691 unsigned long level; 1692 const struct phm_clock_and_voltage_limits *limits = 1693 &hwmgr->dyn_state.max_clock_voltage_on_ac; 1694 1695 if ((NULL == table) || (table->count <= 0) || (clocks == NULL)) 1696 return -EINVAL; 1697 1698 level = smu8_get_max_sclk_level(hwmgr) - 1; 1699 1700 if (level < table->count) 1701 clocks->engine_max_clock = table->entries[level].clk; 1702 else 1703 clocks->engine_max_clock = table->entries[table->count - 1].clk; 1704 1705 clocks->memory_max_clock = limits->mclk; 1706 1707 return 0; 1708 } 1709 1710 static int smu8_thermal_get_temperature(struct pp_hwmgr *hwmgr) 1711 { 1712 int actual_temp = 0; 1713 uint32_t val = cgs_read_ind_register(hwmgr->device, 1714 CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP); 1715 uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP); 1716 1717 if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL)) 1718 actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 1719 else 1720 actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 1721 1722 return actual_temp; 1723 } 1724 1725 static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx, 1726 void *value, int *size) 1727 { 1728 struct smu8_hwmgr *data = hwmgr->backend; 1729 1730 struct phm_clock_voltage_dependency_table *table = 1731 hwmgr->dyn_state.vddc_dependency_on_sclk; 1732 1733 struct phm_vce_clock_voltage_dependency_table *vce_table = 1734 hwmgr->dyn_state.vce_clock_voltage_dependency_table; 1735 1736 struct phm_uvd_clock_voltage_dependency_table *uvd_table = 1737 hwmgr->dyn_state.uvd_clock_voltage_dependency_table; 1738 1739 uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX), 1740 TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX); 1741 uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2), 1742 TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX); 1743 uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2), 1744 TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX); 1745 1746 uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent; 1747 uint16_t vddnb, vddgfx; 1748 int result; 1749 1750 /* size must be at least 4 bytes for all sensors */ 1751 if (*size < 4) 1752 return -EINVAL; 1753 *size = 4; 1754 1755 switch (idx) { 1756 case AMDGPU_PP_SENSOR_GFX_SCLK: 1757 if (sclk_index < NUM_SCLK_LEVELS) { 1758 sclk = table->entries[sclk_index].clk; 1759 *((uint32_t *)value) = sclk; 1760 return 0; 1761 } 1762 return -EINVAL; 1763 case AMDGPU_PP_SENSOR_VDDNB: 1764 tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) & 1765 CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT; 1766 vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp) / 4; 1767 *((uint32_t *)value) = vddnb; 1768 return 0; 1769 case AMDGPU_PP_SENSOR_VDDGFX: 1770 tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) & 1771 CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT; 1772 vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp) / 4; 1773 *((uint32_t *)value) = vddgfx; 1774 return 0; 1775 case AMDGPU_PP_SENSOR_UVD_VCLK: 1776 if (!data->uvd_power_gated) { 1777 if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) { 1778 return -EINVAL; 1779 } else { 1780 vclk = uvd_table->entries[uvd_index].vclk; 1781 *((uint32_t *)value) = vclk; 1782 return 0; 1783 } 1784 } 1785 *((uint32_t *)value) = 0; 1786 return 0; 1787 case AMDGPU_PP_SENSOR_UVD_DCLK: 1788 if (!data->uvd_power_gated) { 1789 if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) { 1790 return -EINVAL; 1791 } else { 1792 dclk = uvd_table->entries[uvd_index].dclk; 1793 *((uint32_t *)value) = dclk; 1794 return 0; 1795 } 1796 } 1797 *((uint32_t *)value) = 0; 1798 return 0; 1799 case AMDGPU_PP_SENSOR_VCE_ECCLK: 1800 if (!data->vce_power_gated) { 1801 if (vce_index >= SMU8_MAX_HARDWARE_POWERLEVELS) { 1802 return -EINVAL; 1803 } else { 1804 ecclk = vce_table->entries[vce_index].ecclk; 1805 *((uint32_t *)value) = ecclk; 1806 return 0; 1807 } 1808 } 1809 *((uint32_t *)value) = 0; 1810 return 0; 1811 case AMDGPU_PP_SENSOR_GPU_LOAD: 1812 result = smum_send_msg_to_smc(hwmgr, 1813 PPSMC_MSG_GetAverageGraphicsActivity, 1814 &activity_percent); 1815 if (0 == result) 1816 activity_percent = activity_percent > 100 ? 100 : activity_percent; 1817 else 1818 return -EIO; 1819 *((uint32_t *)value) = activity_percent; 1820 return 0; 1821 case AMDGPU_PP_SENSOR_UVD_POWER: 1822 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1; 1823 return 0; 1824 case AMDGPU_PP_SENSOR_VCE_POWER: 1825 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1; 1826 return 0; 1827 case AMDGPU_PP_SENSOR_GPU_TEMP: 1828 *((uint32_t *)value) = smu8_thermal_get_temperature(hwmgr); 1829 return 0; 1830 default: 1831 return -EOPNOTSUPP; 1832 } 1833 } 1834 1835 static int smu8_notify_cac_buffer_info(struct pp_hwmgr *hwmgr, 1836 uint32_t virtual_addr_low, 1837 uint32_t virtual_addr_hi, 1838 uint32_t mc_addr_low, 1839 uint32_t mc_addr_hi, 1840 uint32_t size) 1841 { 1842 smum_send_msg_to_smc_with_parameter(hwmgr, 1843 PPSMC_MSG_DramAddrHiVirtual, 1844 mc_addr_hi, 1845 NULL); 1846 smum_send_msg_to_smc_with_parameter(hwmgr, 1847 PPSMC_MSG_DramAddrLoVirtual, 1848 mc_addr_low, 1849 NULL); 1850 smum_send_msg_to_smc_with_parameter(hwmgr, 1851 PPSMC_MSG_DramAddrHiPhysical, 1852 virtual_addr_hi, 1853 NULL); 1854 smum_send_msg_to_smc_with_parameter(hwmgr, 1855 PPSMC_MSG_DramAddrLoPhysical, 1856 virtual_addr_low, 1857 NULL); 1858 1859 smum_send_msg_to_smc_with_parameter(hwmgr, 1860 PPSMC_MSG_DramBufferSize, 1861 size, 1862 NULL); 1863 return 0; 1864 } 1865 1866 static int smu8_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, 1867 struct PP_TemperatureRange *thermal_data) 1868 { 1869 struct smu8_hwmgr *data = hwmgr->backend; 1870 1871 memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange)); 1872 1873 thermal_data->max = (data->thermal_auto_throttling_treshold + 1874 data->sys_info.htc_hyst_lmt) * 1875 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 1876 1877 return 0; 1878 } 1879 1880 static int smu8_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) 1881 { 1882 struct smu8_hwmgr *data = hwmgr->backend; 1883 uint32_t dpm_features = 0; 1884 1885 if (enable && 1886 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1887 PHM_PlatformCaps_UVDDPM)) { 1888 data->dpm_flags |= DPMFlags_UVD_Enabled; 1889 dpm_features |= UVD_DPM_MASK; 1890 smum_send_msg_to_smc_with_parameter(hwmgr, 1891 PPSMC_MSG_EnableAllSmuFeatures, 1892 dpm_features, 1893 NULL); 1894 } else { 1895 dpm_features |= UVD_DPM_MASK; 1896 data->dpm_flags &= ~DPMFlags_UVD_Enabled; 1897 smum_send_msg_to_smc_with_parameter(hwmgr, 1898 PPSMC_MSG_DisableAllSmuFeatures, 1899 dpm_features, 1900 NULL); 1901 } 1902 return 0; 1903 } 1904 1905 static int smu8_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) 1906 { 1907 struct smu8_hwmgr *data = hwmgr->backend; 1908 struct phm_uvd_clock_voltage_dependency_table *ptable = 1909 hwmgr->dyn_state.uvd_clock_voltage_dependency_table; 1910 1911 if (!bgate) { 1912 /* Stable Pstate is enabled and we need to set the UVD DPM to highest level */ 1913 if (PP_CAP(PHM_PlatformCaps_StablePState) || 1914 hwmgr->en_umd_pstate) { 1915 data->uvd_dpm.hard_min_clk = 1916 ptable->entries[ptable->count - 1].vclk; 1917 1918 smum_send_msg_to_smc_with_parameter(hwmgr, 1919 PPSMC_MSG_SetUvdHardMin, 1920 smu8_get_uvd_level(hwmgr, 1921 data->uvd_dpm.hard_min_clk, 1922 PPSMC_MSG_SetUvdHardMin), 1923 NULL); 1924 1925 smu8_enable_disable_uvd_dpm(hwmgr, true); 1926 } else { 1927 smu8_enable_disable_uvd_dpm(hwmgr, true); 1928 } 1929 } else { 1930 smu8_enable_disable_uvd_dpm(hwmgr, false); 1931 } 1932 1933 return 0; 1934 } 1935 1936 static int smu8_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) 1937 { 1938 struct smu8_hwmgr *data = hwmgr->backend; 1939 uint32_t dpm_features = 0; 1940 1941 if (enable && phm_cap_enabled( 1942 hwmgr->platform_descriptor.platformCaps, 1943 PHM_PlatformCaps_VCEDPM)) { 1944 data->dpm_flags |= DPMFlags_VCE_Enabled; 1945 dpm_features |= VCE_DPM_MASK; 1946 smum_send_msg_to_smc_with_parameter(hwmgr, 1947 PPSMC_MSG_EnableAllSmuFeatures, 1948 dpm_features, 1949 NULL); 1950 } else { 1951 dpm_features |= VCE_DPM_MASK; 1952 data->dpm_flags &= ~DPMFlags_VCE_Enabled; 1953 smum_send_msg_to_smc_with_parameter(hwmgr, 1954 PPSMC_MSG_DisableAllSmuFeatures, 1955 dpm_features, 1956 NULL); 1957 } 1958 1959 return 0; 1960 } 1961 1962 1963 static void smu8_dpm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate) 1964 { 1965 struct smu8_hwmgr *data = hwmgr->backend; 1966 1967 if (data->acp_power_gated == bgate) 1968 return; 1969 1970 if (bgate) 1971 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL); 1972 else 1973 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON, NULL); 1974 } 1975 1976 #define WIDTH_4K 3840 1977 1978 static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) 1979 { 1980 struct smu8_hwmgr *data = hwmgr->backend; 1981 struct amdgpu_device *adev = hwmgr->adev; 1982 1983 data->uvd_power_gated = bgate; 1984 1985 if (bgate) { 1986 amdgpu_device_ip_set_powergating_state(hwmgr->adev, 1987 AMD_IP_BLOCK_TYPE_UVD, 1988 AMD_PG_STATE_GATE); 1989 amdgpu_device_ip_set_clockgating_state(hwmgr->adev, 1990 AMD_IP_BLOCK_TYPE_UVD, 1991 AMD_CG_STATE_GATE); 1992 smu8_dpm_update_uvd_dpm(hwmgr, true); 1993 smu8_dpm_powerdown_uvd(hwmgr); 1994 } else { 1995 smu8_dpm_powerup_uvd(hwmgr); 1996 amdgpu_device_ip_set_clockgating_state(hwmgr->adev, 1997 AMD_IP_BLOCK_TYPE_UVD, 1998 AMD_CG_STATE_UNGATE); 1999 amdgpu_device_ip_set_powergating_state(hwmgr->adev, 2000 AMD_IP_BLOCK_TYPE_UVD, 2001 AMD_PG_STATE_UNGATE); 2002 smu8_dpm_update_uvd_dpm(hwmgr, false); 2003 } 2004 2005 /* enable/disable Low Memory PState for UVD (4k videos) */ 2006 if (adev->asic_type == CHIP_STONEY && 2007 adev->uvd.decode_image_width >= WIDTH_4K) 2008 smu8_nbdpm_pstate_enable_disable(hwmgr, 2009 bgate, 2010 true); 2011 } 2012 2013 static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) 2014 { 2015 struct smu8_hwmgr *data = hwmgr->backend; 2016 2017 if (bgate) { 2018 amdgpu_device_ip_set_powergating_state(hwmgr->adev, 2019 AMD_IP_BLOCK_TYPE_VCE, 2020 AMD_PG_STATE_GATE); 2021 amdgpu_device_ip_set_clockgating_state(hwmgr->adev, 2022 AMD_IP_BLOCK_TYPE_VCE, 2023 AMD_CG_STATE_GATE); 2024 smu8_enable_disable_vce_dpm(hwmgr, false); 2025 smu8_dpm_powerdown_vce(hwmgr); 2026 data->vce_power_gated = true; 2027 } else { 2028 smu8_dpm_powerup_vce(hwmgr); 2029 data->vce_power_gated = false; 2030 amdgpu_device_ip_set_clockgating_state(hwmgr->adev, 2031 AMD_IP_BLOCK_TYPE_VCE, 2032 AMD_CG_STATE_UNGATE); 2033 amdgpu_device_ip_set_powergating_state(hwmgr->adev, 2034 AMD_IP_BLOCK_TYPE_VCE, 2035 AMD_PG_STATE_UNGATE); 2036 smu8_dpm_update_vce_dpm(hwmgr); 2037 smu8_enable_disable_vce_dpm(hwmgr, true); 2038 } 2039 } 2040 2041 static const struct pp_hwmgr_func smu8_hwmgr_funcs = { 2042 .backend_init = smu8_hwmgr_backend_init, 2043 .backend_fini = smu8_hwmgr_backend_fini, 2044 .apply_state_adjust_rules = smu8_apply_state_adjust_rules, 2045 .force_dpm_level = smu8_dpm_force_dpm_level, 2046 .get_power_state_size = smu8_get_power_state_size, 2047 .powerdown_uvd = smu8_dpm_powerdown_uvd, 2048 .powergate_uvd = smu8_dpm_powergate_uvd, 2049 .powergate_vce = smu8_dpm_powergate_vce, 2050 .powergate_acp = smu8_dpm_powergate_acp, 2051 .get_mclk = smu8_dpm_get_mclk, 2052 .get_sclk = smu8_dpm_get_sclk, 2053 .patch_boot_state = smu8_dpm_patch_boot_state, 2054 .get_pp_table_entry = smu8_dpm_get_pp_table_entry, 2055 .get_num_of_pp_table_entries = smu8_dpm_get_num_of_pp_table_entries, 2056 .set_cpu_power_state = smu8_set_cpu_power_state, 2057 .store_cc6_data = smu8_store_cc6_data, 2058 .force_clock_level = smu8_force_clock_level, 2059 .print_clock_levels = smu8_print_clock_levels, 2060 .get_dal_power_level = smu8_get_dal_power_level, 2061 .get_performance_level = smu8_get_performance_level, 2062 .get_current_shallow_sleep_clocks = smu8_get_current_shallow_sleep_clocks, 2063 .get_clock_by_type = smu8_get_clock_by_type, 2064 .get_max_high_clocks = smu8_get_max_high_clocks, 2065 .read_sensor = smu8_read_sensor, 2066 .power_off_asic = smu8_power_off_asic, 2067 .asic_setup = smu8_setup_asic_task, 2068 .dynamic_state_management_enable = smu8_enable_dpm_tasks, 2069 .power_state_set = smu8_set_power_state_tasks, 2070 .dynamic_state_management_disable = smu8_disable_dpm_tasks, 2071 .notify_cac_buffer_info = smu8_notify_cac_buffer_info, 2072 .get_thermal_temperature_range = smu8_get_thermal_temperature_range, 2073 }; 2074 2075 int smu8_init_function_pointers(struct pp_hwmgr *hwmgr) 2076 { 2077 hwmgr->hwmgr_func = &smu8_hwmgr_funcs; 2078 hwmgr->pptable_func = &pptable_funcs; 2079 return 0; 2080 } 2081