/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "pp_debug.h"
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <drm/amdgpu_drm.h>
#include "power_state.h"
#include "hwmgr.h"
#include "ppsmc.h"
#include "amd_acpi.h"
#include "pp_psm.h"

extern const struct pp_smumgr_func ci_smu_funcs;
extern const struct pp_smumgr_func smu8_smu_funcs;
extern const struct pp_smumgr_func iceland_smu_funcs;
extern const struct pp_smumgr_func tonga_smu_funcs;
extern const struct pp_smumgr_func fiji_smu_funcs;
extern const struct pp_smumgr_func polaris10_smu_funcs;
extern const struct pp_smumgr_func vegam_smu_funcs;
extern const struct pp_smumgr_func vega10_smu_funcs;
extern const struct pp_smumgr_func vega12_smu_funcs;
extern const struct pp_smumgr_func smu10_smu_funcs;
extern const struct pp_smumgr_func vega20_smu_funcs;

extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
extern int smu8_init_function_pointers(struct pp_hwmgr *hwmgr);
extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int vega12_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int vega20_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);

static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);

static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
{
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;

	hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	hwmgr->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
}
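
/*
 * hwmgr_early_init() selects the SMU manager and hwmgr back ends for the
 * detected ASIC family/chip and applies per-ASIC adjustments to the default
 * feature mask, platform caps and pptable version.  GFXOFF is masked off
 * here for ASICs that do not enable it through this path.
 */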
int hwmgr_early_init(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev;

	if (!hwmgr)
		return -EINVAL;

	hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
	hwmgr->pp_table_version = PP_TABLE_V1;
	hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	hwmgr->request_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	hwmgr_init_default_caps(hwmgr);
	hwmgr_set_user_specify_caps(hwmgr);
	hwmgr->fan_ctrl_is_in_default_mode = true;
	hwmgr_init_workload_prority(hwmgr);
	hwmgr->gfxoff_state_changed_by_workload = false;

	adev = hwmgr->adev;

	switch (hwmgr->chip_family) {
	case AMDGPU_FAMILY_CI:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		hwmgr->smumgr_funcs = &ci_smu_funcs;
		ci_set_asic_special_caps(hwmgr);
		hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
					 PP_ENABLE_GFX_CG_THRU_SMU |
					 PP_GFXOFF_MASK);
		hwmgr->pp_table_version = PP_TABLE_V0;
		hwmgr->od_enabled = false;
		smu7_init_function_pointers(hwmgr);
		break;
	case AMDGPU_FAMILY_CZ:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		hwmgr->od_enabled = false;
		hwmgr->smumgr_funcs = &smu8_smu_funcs;
		hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
		smu8_init_function_pointers(hwmgr);
		break;
	case AMDGPU_FAMILY_VI:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
		switch (hwmgr->chip_id) {
		case CHIP_TOPAZ:
			hwmgr->smumgr_funcs = &iceland_smu_funcs;
			topaz_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
						 PP_ENABLE_GFX_CG_THRU_SMU);
			hwmgr->pp_table_version = PP_TABLE_V0;
			hwmgr->od_enabled = false;
			break;
		case CHIP_TONGA:
			hwmgr->smumgr_funcs = &tonga_smu_funcs;
			tonga_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~PP_VBI_TIME_SUPPORT_MASK;
			break;
		case CHIP_FIJI:
			hwmgr->smumgr_funcs = &fiji_smu_funcs;
			fiji_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
						 PP_ENABLE_GFX_CG_THRU_SMU);
			break;
		case CHIP_POLARIS11:
		case CHIP_POLARIS10:
		case CHIP_POLARIS12:
			hwmgr->smumgr_funcs = &polaris10_smu_funcs;
			polaris_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~PP_UVD_HANDSHAKE_MASK;
			break;
		case CHIP_VEGAM:
			hwmgr->smumgr_funcs = &vegam_smu_funcs;
			polaris_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~PP_UVD_HANDSHAKE_MASK;
			break;
		default:
			return -EINVAL;
		}
		smu7_init_function_pointers(hwmgr);
		break;
	case AMDGPU_FAMILY_AI:
		switch (hwmgr->chip_id) {
		case CHIP_VEGA10:
			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
			hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
			hwmgr->smumgr_funcs = &vega10_smu_funcs;
			vega10_hwmgr_init(hwmgr);
			break;
		case CHIP_VEGA12:
			hwmgr->smumgr_funcs = &vega12_smu_funcs;
			vega12_hwmgr_init(hwmgr);
			break;
		case CHIP_VEGA20:
			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
			hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
			hwmgr->smumgr_funcs = &vega20_smu_funcs;
			vega20_hwmgr_init(hwmgr);
			break;
		default:
			return -EINVAL;
		}
		break;
	case AMDGPU_FAMILY_RV:
		switch (hwmgr->chip_id) {
		case CHIP_RAVEN:
			hwmgr->od_enabled = false;
			hwmgr->smumgr_funcs = &smu10_smu_funcs;
			smu10_init_function_pointers(hwmgr);
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
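
/*
 * The sw_init/sw_fini pair only registers the IRQ handlers and brings the
 * SMU firmware interface up and down; the power-management hardware itself
 * is programmed later in hwmgr_hw_init()/hwmgr_hw_fini().
 */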
int hwmgr_sw_init(struct pp_hwmgr *hwmgr)
{
	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->smu_init)
		return -EINVAL;

	phm_register_irq_handlers(hwmgr);
	pr_info("hwmgr_sw_init smu backend is %s\n", hwmgr->smumgr_funcs->name);

	return hwmgr->smumgr_funcs->smu_init(hwmgr);
}

int hwmgr_sw_fini(struct pp_hwmgr *hwmgr)
{
	if (hwmgr && hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->smu_fini)
		hwmgr->smumgr_funcs->smu_fini(hwmgr);

	return 0;
}

int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	hwmgr->pp_one_vf = amdgpu_sriov_is_pp_one_vf((struct amdgpu_device *)hwmgr->adev);
	hwmgr->pm_en = amdgpu_dpm && (hwmgr->not_vf || hwmgr->pp_one_vf);
	if (!hwmgr->pm_en)
		return 0;

	if (!hwmgr->pptable_func ||
	    !hwmgr->pptable_func->pptable_init ||
	    !hwmgr->hwmgr_func->backend_init) {
		hwmgr->pm_en = false;
		pr_info("dpm not supported\n");
		return 0;
	}

	ret = hwmgr->pptable_func->pptable_init(hwmgr);
	if (ret)
		goto err;

	((struct amdgpu_device *)hwmgr->adev)->pm.no_fan =
				hwmgr->thermal_controller.fanInfo.bNoFan;

	ret = hwmgr->hwmgr_func->backend_init(hwmgr);
	if (ret)
		goto err1;
	/* make sure dc limits are valid */
	if ((hwmgr->dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (hwmgr->dyn_state.max_clock_voltage_on_dc.mclk == 0))
		hwmgr->dyn_state.max_clock_voltage_on_dc =
				hwmgr->dyn_state.max_clock_voltage_on_ac;

	ret = psm_init_power_state_table(hwmgr);
	if (ret)
		goto err2;

	ret = phm_setup_asic(hwmgr);
	if (ret)
		goto err2;

	ret = phm_enable_dynamic_state_management(hwmgr);
	if (ret)
		goto err2;
	ret = phm_start_thermal_controller(hwmgr);
	ret |= psm_set_performance_states(hwmgr);
	if (ret)
		goto err2;

	((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled = true;

	return 0;
err2:
	if (hwmgr->hwmgr_func->backend_fini)
		hwmgr->hwmgr_func->backend_fini(hwmgr);
err1:
	if (hwmgr->pptable_func->pptable_fini)
		hwmgr->pptable_func->pptable_fini(hwmgr);
err:
	return ret;
}

int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
{
	if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf)
		return 0;

	phm_stop_thermal_controller(hwmgr);
	psm_set_boot_states(hwmgr);
	psm_adjust_power_state_dynamic(hwmgr, true, NULL);
	phm_disable_dynamic_state_management(hwmgr);
	phm_disable_clock_power_gatings(hwmgr);

	if (hwmgr->hwmgr_func->backend_fini)
		hwmgr->hwmgr_func->backend_fini(hwmgr);
	if (hwmgr->pptable_func->pptable_fini)
		hwmgr->pptable_func->pptable_fini(hwmgr);
	return psm_fini_power_state_table(hwmgr);
}

int hwmgr_suspend(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf)
		return 0;

	phm_disable_smc_firmware_ctf(hwmgr);
	ret = psm_set_boot_states(hwmgr);
	if (ret)
		return ret;
	ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL);
	if (ret)
		return ret;
	ret = phm_power_down_asic(hwmgr);

	return ret;
}
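
/*
 * hwmgr_resume() re-runs the runtime portion of hwmgr_hw_init(): the ASIC
 * is reprogrammed, dynamic state management and the thermal controller are
 * re-enabled, and the power state is re-adjusted for the current display
 * configuration.
 */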
int hwmgr_resume(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->not_vf || !hwmgr->pm_en)
		return 0;

	ret = phm_setup_asic(hwmgr);
	if (ret)
		return ret;

	ret = phm_enable_dynamic_state_management(hwmgr);
	if (ret)
		return ret;
	ret = phm_start_thermal_controller(hwmgr);
	ret |= psm_set_performance_states(hwmgr);
	if (ret)
		return ret;

	ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);

	return ret;
}

static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
{
	switch (state) {
	case POWER_STATE_TYPE_BATTERY:
		return PP_StateUILabel_Battery;
	case POWER_STATE_TYPE_BALANCED:
		return PP_StateUILabel_Balanced;
	case POWER_STATE_TYPE_PERFORMANCE:
		return PP_StateUILabel_Performance;
	default:
		return PP_StateUILabel_None;
	}
}

int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
		enum amd_pm_state_type *user_state)
{
	int ret = 0;

	if (!hwmgr)
		return -EINVAL;

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		if (!hwmgr->not_vf)
			return ret;
		ret = phm_pre_display_configuration_changed(hwmgr);
		if (ret)
			return ret;
		ret = phm_set_cpu_power_state(hwmgr);
		if (ret)
			return ret;
		ret = psm_set_performance_states(hwmgr);
		if (ret)
			return ret;
		ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
		break;
	case AMD_PP_TASK_ENABLE_USER_STATE:
	{
		enum PP_StateUILabel requested_ui_label;
		struct pp_power_state *requested_ps = NULL;

		if (!hwmgr->not_vf)
			return ret;
		if (user_state == NULL) {
			ret = -EINVAL;
			break;
		}

		requested_ui_label = power_state_convert(*user_state);
		ret = psm_set_user_performance_state(hwmgr, requested_ui_label, &requested_ps);
		if (ret)
			return ret;
		ret = psm_adjust_power_state_dynamic(hwmgr, true, requested_ps);
		break;
	}
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL);
		break;
	default:
		break;
	}
	return ret;
}

void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);

#if defined(CONFIG_ACPI)
	if (amdgpu_acpi_is_pcie_performance_request_supported(hwmgr->adev))
		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
#endif

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		PHM_PlatformCaps_DynamicPatchPowerState);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		PHM_PlatformCaps_EnableSMU7ThermalManagement);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		PHM_PlatformCaps_DynamicPowerManagement);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		PHM_PlatformCaps_SMC);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		PHM_PlatformCaps_DynamicUVDState);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		PHM_PlatformCaps_FanSpeedInTableIsRPM);
}
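
/*
 * Apply the user-controllable feature mask (typically seeded from the
 * amdgpu.ppfeaturemask module parameter) on top of the default caps:
 * sclk deep sleep, power containment/CAC and overdrive can be toggled
 * from there.
 */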
int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
{
	if (hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep);
	else
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep);

	if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_CAC);
	} else {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment);
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_CAC);
	}

	if (hwmgr->feature_mask & PP_OVERDRIVE_MASK)
		hwmgr->od_enabled = true;

	return 0;
}

int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EVV);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_AutomaticDCTransition);

	if (hwmgr->chip_id != CHIP_POLARIS10)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SPLLShutdownSupport);

	if (hwmgr->chip_id != CHIP_POLARIS11) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_DBRamping);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_TDRamping);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_TCPRamping);
	}
	return 0;
}

int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EVV);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	return 0;
}

int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EVV);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UVDPowerGating);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_VCEPowerGating);
	return 0;
}

int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EVV);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	return 0;
}
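
/*
 * CI parts keep SQ/DB/TD/TCP ramping disabled but advertise engine and
 * memory clock spread-spectrum support.
 */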
int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MemorySpreadSpectrumSupport);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport);
	return 0;
}