/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
				block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->set_mp1_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	bool baco_cap;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return false;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle,
						 &baco_cap);

	mutex_unlock(&adev->pm.mutex);

	return ret ? false : baco_cap;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_allow_xgmi_power_down(smu, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
			  enable ? "enable" : "disable", ret);
}

int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware)
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_channel_flag(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_soft_freq_range(smu,
				      SMU_SCLK,
				      min,
				      max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/* amdgpu_dpm_gfx_state_change - Handle gfx power state change set
 * @adev: amdgpu_device pointer
 * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry)
 *
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}

int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (!(current_level & profile_mode_mask) &&
	    (level & profile_mode_mask)) {
		/* enter UMD Pstate */
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_UNGATE);
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_UNGATE);
	} else if ((current_level & profile_mode_mask) &&
		   !(level & profile_mode_mask)) {
		/* exit UMD Pstate */
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_GATE);
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_GATE);
	}

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 char *buf,
				 int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					  type,
					  buf,
					  offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if ((is_support_sw_smu(adev) && smu->od_enabled) ||
	    (is_support_sw_smu(adev) && smu->is_apu) ||
	    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
		return true;

	return false;
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
						void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}