/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

/**
 * amdgpu_dpm_set_powergating_by_smu - power gate or ungate an IP block via the SMU
 * @adev: amdgpu_device pointer
 * @block_type: AMD_IP_BLOCK_TYPE_* identifier of the IP block
 * @gate: true to power gate the block, false to ungate it
 *
 * Skips the request when the block is already in the target power state.
 * Returns 0 on success (or for an unhandled block type), negative error
 * code on failure.
 */
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
		dev_dbg(adev->dev, "IP block %d already in the target %s state!",
			block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->set_mp1_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

/**
 * amdgpu_dpm_is_baco_supported - query BACO (Bus Active, Chip Off) support
 * @adev: amdgpu_device pointer
 *
 * Returns true if the underlying powerplay implementation reports BACO
 * capability, false if the query is unsupported or fails.
 */
bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	bool baco_cap;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return false;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle,
						 &baco_cap);

	mutex_unlock(&adev->pm.mutex);

	return ret ? false : baco_cap;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_allow_xgmi_power_down(smu, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}
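
/**
 * amdgpu_dpm_enable_mgpu_fan_boost - enable fan boost on multi-GPU systems
 * @adev: amdgpu_device pointer
 *
 * Returns 0 on success or when the callback is not implemented, negative
 * error code on failure.
 */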
int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

/* Helpers to power up (enable == true) or gate the UVD, VCE and JPEG blocks */
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs->load_firmware)
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_soft_freq_range(smu,
				      SMU_SCLK,
				      min,
				      max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

/**
 * amdgpu_dpm_get_thermal_throttling_counter - number of thermal throttling events
 * @adev: amdgpu_device pointer
 *
 * Returns the SMU's 64-bit thermal throttling interrupt counter, or 0 when
 * the software SMU is not in use.
 */
uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/**
 * amdgpu_dpm_gfx_state_change - Handle a gfx power state change request
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	return smu_get_ecc_info(smu, umc_ecc);
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

static enum amd_dpm_forced_level amdgpu_dpm_get_performance_level_locked(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;

	return level;
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	enum amd_dpm_forced_level level;

	mutex_lock(&adev->pm.mutex);
	level = amdgpu_dpm_get_performance_level_locked(adev);
	mutex_unlock(&adev->pm.mutex);

	return level;
}
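
/**
 * amdgpu_dpm_force_performance_level - force a dpm performance level
 * @adev: amdgpu_device pointer
 * @level: the amd_dpm_forced_level to force
 *
 * Rejects the request while thermal throttling is active, disables gfxoff
 * on first-generation Raven when entering manual mode (and re-enables it on
 * leaving), and ungates/gates the GFX block around entry to and exit from
 * the UMD pstate profile levels. Illustrative call (hypothetical caller,
 * e.g. a sysfs handler):
 *
 *	ret = amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_AUTO);
 *
 * Returns 0 on success, -EINVAL otherwise.
 */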
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	int ret = 0;

	if (!pp_funcs->force_performance_level)
		return 0;

	mutex_lock(&adev->pm.mutex);

	if (adev->pm.dpm.thermal_active) {
		ret = -EINVAL;
		goto out;
	}

	current_level = amdgpu_dpm_get_performance_level_locked(adev);
	if (current_level == level) {
		ret = 0;
		goto out;
	}

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
		ret = -EINVAL;
		goto out;
	}

	if (!(current_level & profile_mode_mask) &&
	    (level & profile_mode_mask)) {
		/* enter UMD Pstate */
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_UNGATE);
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_UNGATE);
	} else if ((current_level & profile_mode_mask) &&
		   !(level & profile_mode_mask)) {
		/* exit UMD Pstate */
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_GATE);
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_GATE);
	}

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level))
		ret = -EINVAL;

	if (!ret)
		adev->pm.dpm.forced_level = level;

out:
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
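
/**
 * amdgpu_dpm_odn_edit_dpm_table - edit an overdrive (ODN) dpm table
 * @adev: amdgpu_device pointer
 * @type: which clock/voltage table to edit
 * @input: user-supplied table entries
 * @size: number of entries in @input
 *
 * Returns 0 on success or when the callback is not implemented, negative
 * error code on failure.
 */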
int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
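
/**
 * amdgpu_dpm_set_mclk_od - set the memory clock overdrive value
 * @adev: amdgpu_device pointer
 * @value: requested overdrive value
 *
 * A no-op on software-SMU parts. On the legacy path a power-state
 * readjustment task is dispatched afterwards; when that task is
 * unsupported, the clocks are recomputed from the boot power state.
 */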
int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	*fan_mode = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
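
/**
 * amdgpu_dpm_get_fan_speed_rpm - query the current fan speed in RPM
 * @adev: amdgpu_device pointer
 * @speed: where to store the fan speed in revolutions per minute
 *
 * Returns 0 on success, -EINVAL when the callback is not implemented,
 * or a negative error code on failure.
 */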
int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
				       mode);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
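
/**
 * amdgpu_dpm_is_overdrive_supported - query overdrive (OD) support
 * @adev: amdgpu_device pointer
 *
 * Returns true when the software SMU reports OD enabled or the part is
 * an APU, or when the legacy hwmgr path has OD enabled; false otherwise.
 */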
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if ((is_support_sw_smu(adev) && smu->od_enabled) ||
	    (is_support_sw_smu(adev) && smu->is_apu) ||
	    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
		return true;

	return false;
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
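
/**
 * amdgpu_dpm_set_watermarks_for_clocks_ranges - set display watermarks
 * @adev: amdgpu_device pointer
 * @clock_ranges: watermark clock-range table supplied by the display code
 *
 * Returns 0 on success, -EOPNOTSUPP when the callback is not implemented,
 * or a negative error code on failure.
 */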
int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
						void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}
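
/**
 * amdgpu_dpm_display_disable_memory_clock_switch - toggle memory clock switching
 * @adev: amdgpu_device pointer
 * @disable_memory_clock_switch: true to disable memory clock switching
 *
 * Returns 0 on success or when the callback is not implemented, negative
 * error code on failure.
 */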
int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
				   unsigned int *clock_values_in_khz,
				   unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}