1 /* 2 * Copyright 2011 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Alex Deucher 23 */ 24 25 #include "amdgpu.h" 26 #include "amdgpu_atombios.h" 27 #include "amdgpu_i2c.h" 28 #include "amdgpu_dpm.h" 29 #include "atom.h" 30 #include "amd_pcie.h" 31 #include "amdgpu_display.h" 32 #include "hwmgr.h" 33 #include <linux/power_supply.h> 34 #include "amdgpu_smu.h" 35 36 #define amdgpu_dpm_enable_bapm(adev, e) \ 37 ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) 38 39 int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) 40 { 41 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 42 int ret = 0; 43 44 if (!pp_funcs->get_sclk) 45 return 0; 46 47 mutex_lock(&adev->pm.mutex); 48 ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle, 49 low); 50 mutex_unlock(&adev->pm.mutex); 51 52 return ret; 53 } 54 55 int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low) 56 { 57 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 58 int ret = 0; 59 60 if (!pp_funcs->get_mclk) 61 return 0; 62 63 mutex_lock(&adev->pm.mutex); 64 ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle, 65 low); 66 mutex_unlock(&adev->pm.mutex); 67 68 return ret; 69 } 70 71 int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate) 72 { 73 int ret = 0; 74 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 75 enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON; 76 77 if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) { 78 dev_dbg(adev->dev, "IP block%d already in the target %s state!", 79 block_type, gate ? "gate" : "ungate"); 80 return 0; 81 } 82 83 mutex_lock(&adev->pm.mutex); 84 85 switch (block_type) { 86 case AMD_IP_BLOCK_TYPE_UVD: 87 case AMD_IP_BLOCK_TYPE_VCE: 88 case AMD_IP_BLOCK_TYPE_GFX: 89 case AMD_IP_BLOCK_TYPE_VCN: 90 case AMD_IP_BLOCK_TYPE_SDMA: 91 case AMD_IP_BLOCK_TYPE_JPEG: 92 case AMD_IP_BLOCK_TYPE_GMC: 93 case AMD_IP_BLOCK_TYPE_ACP: 94 if (pp_funcs && pp_funcs->set_powergating_by_smu) 95 ret = (pp_funcs->set_powergating_by_smu( 96 (adev)->powerplay.pp_handle, block_type, gate)); 97 break; 98 default: 99 break; 100 } 101 102 if (!ret) 103 atomic_set(&adev->pm.pwr_state[block_type], pwr_state); 104 105 mutex_unlock(&adev->pm.mutex); 106 107 return ret; 108 } 109 110 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev) 111 { 112 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 113 void *pp_handle = adev->powerplay.pp_handle; 114 int ret = 0; 115 116 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 117 return -ENOENT; 118 119 mutex_lock(&adev->pm.mutex); 120 121 /* enter BACO state */ 122 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 123 124 mutex_unlock(&adev->pm.mutex); 125 126 return ret; 127 } 128 129 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev) 130 { 131 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 132 void *pp_handle = adev->powerplay.pp_handle; 133 int ret = 0; 134 135 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 136 return -ENOENT; 137 138 mutex_lock(&adev->pm.mutex); 139 140 /* exit BACO state */ 141 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 142 143 mutex_unlock(&adev->pm.mutex); 144 145 return ret; 146 } 147 148 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, 149 enum pp_mp1_state mp1_state) 150 { 151 int ret = 0; 152 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 153 154 if (pp_funcs && pp_funcs->set_mp1_state) { 155 mutex_lock(&adev->pm.mutex); 156 157 ret = pp_funcs->set_mp1_state( 158 adev->powerplay.pp_handle, 159 mp1_state); 160 161 mutex_unlock(&adev->pm.mutex); 162 } 163 164 return ret; 165 } 166 167 bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) 168 { 169 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 170 void *pp_handle = adev->powerplay.pp_handle; 171 bool baco_cap; 172 int ret = 0; 173 174 if (!pp_funcs || !pp_funcs->get_asic_baco_capability) 175 return false; 176 /* Don't use baco for reset in S3. 177 * This is a workaround for some platforms 178 * where entering BACO during suspend 179 * seems to cause reboots or hangs. 180 * This might be related to the fact that BACO controls 181 * power to the whole GPU including devices like audio and USB. 182 * Powering down/up everything may adversely affect these other 183 * devices. Needs more investigation. 184 */ 185 if (adev->in_s3) 186 return false; 187 188 mutex_lock(&adev->pm.mutex); 189 190 ret = pp_funcs->get_asic_baco_capability(pp_handle, 191 &baco_cap); 192 193 mutex_unlock(&adev->pm.mutex); 194 195 return ret ? false : baco_cap; 196 } 197 198 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev) 199 { 200 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 201 void *pp_handle = adev->powerplay.pp_handle; 202 int ret = 0; 203 204 if (!pp_funcs || !pp_funcs->asic_reset_mode_2) 205 return -ENOENT; 206 207 mutex_lock(&adev->pm.mutex); 208 209 ret = pp_funcs->asic_reset_mode_2(pp_handle); 210 211 mutex_unlock(&adev->pm.mutex); 212 213 return ret; 214 } 215 216 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev) 217 { 218 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 219 void *pp_handle = adev->powerplay.pp_handle; 220 int ret = 0; 221 222 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 223 return -ENOENT; 224 225 mutex_lock(&adev->pm.mutex); 226 227 /* enter BACO state */ 228 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 229 if (ret) 230 goto out; 231 232 /* exit BACO state */ 233 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 234 235 out: 236 mutex_unlock(&adev->pm.mutex); 237 return ret; 238 } 239 240 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev) 241 { 242 struct smu_context *smu = adev->powerplay.pp_handle; 243 bool support_mode1_reset = false; 244 245 if (is_support_sw_smu(adev)) { 246 mutex_lock(&adev->pm.mutex); 247 support_mode1_reset = smu_mode1_reset_is_support(smu); 248 mutex_unlock(&adev->pm.mutex); 249 } 250 251 return support_mode1_reset; 252 } 253 254 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev) 255 { 256 struct smu_context *smu = adev->powerplay.pp_handle; 257 int ret = -EOPNOTSUPP; 258 259 if (is_support_sw_smu(adev)) { 260 mutex_lock(&adev->pm.mutex); 261 ret = smu_mode1_reset(smu); 262 mutex_unlock(&adev->pm.mutex); 263 } 264 265 return ret; 266 } 267 268 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, 269 enum PP_SMC_POWER_PROFILE type, 270 bool en) 271 { 272 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 273 int ret = 0; 274 275 if (amdgpu_sriov_vf(adev)) 276 return 0; 277 278 if (pp_funcs && pp_funcs->switch_power_profile) { 279 mutex_lock(&adev->pm.mutex); 280 ret = pp_funcs->switch_power_profile( 281 adev->powerplay.pp_handle, type, en); 282 mutex_unlock(&adev->pm.mutex); 283 } 284 285 return ret; 286 } 287 288 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, 289 uint32_t pstate) 290 { 291 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 292 int ret = 0; 293 294 if (pp_funcs && pp_funcs->set_xgmi_pstate) { 295 mutex_lock(&adev->pm.mutex); 296 ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle, 297 pstate); 298 mutex_unlock(&adev->pm.mutex); 299 } 300 301 return ret; 302 } 303 304 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev, 305 uint32_t cstate) 306 { 307 int ret = 0; 308 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 309 void *pp_handle = adev->powerplay.pp_handle; 310 311 if (pp_funcs && pp_funcs->set_df_cstate) { 312 mutex_lock(&adev->pm.mutex); 313 ret = pp_funcs->set_df_cstate(pp_handle, cstate); 314 mutex_unlock(&adev->pm.mutex); 315 } 316 317 return ret; 318 } 319 320 int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en) 321 { 322 struct smu_context *smu = adev->powerplay.pp_handle; 323 int ret = 0; 324 325 if (is_support_sw_smu(adev)) { 326 mutex_lock(&adev->pm.mutex); 327 ret = smu_allow_xgmi_power_down(smu, en); 328 mutex_unlock(&adev->pm.mutex); 329 } 330 331 return ret; 332 } 333 334 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev) 335 { 336 void *pp_handle = adev->powerplay.pp_handle; 337 const struct amd_pm_funcs *pp_funcs = 338 adev->powerplay.pp_funcs; 339 int ret = 0; 340 341 if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) { 342 mutex_lock(&adev->pm.mutex); 343 ret = pp_funcs->enable_mgpu_fan_boost(pp_handle); 344 mutex_unlock(&adev->pm.mutex); 345 } 346 347 return ret; 348 } 349 350 int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev, 351 uint32_t msg_id) 352 { 353 void *pp_handle = adev->powerplay.pp_handle; 354 const struct amd_pm_funcs *pp_funcs = 355 adev->powerplay.pp_funcs; 356 int ret = 0; 357 358 if (pp_funcs && pp_funcs->set_clockgating_by_smu) { 359 mutex_lock(&adev->pm.mutex); 360 ret = pp_funcs->set_clockgating_by_smu(pp_handle, 361 msg_id); 362 mutex_unlock(&adev->pm.mutex); 363 } 364 365 return ret; 366 } 367 368 int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev, 369 bool acquire) 370 { 371 void *pp_handle = adev->powerplay.pp_handle; 372 const struct amd_pm_funcs *pp_funcs = 373 adev->powerplay.pp_funcs; 374 int ret = -EOPNOTSUPP; 375 376 if (pp_funcs && pp_funcs->smu_i2c_bus_access) { 377 mutex_lock(&adev->pm.mutex); 378 ret = pp_funcs->smu_i2c_bus_access(pp_handle, 379 acquire); 380 mutex_unlock(&adev->pm.mutex); 381 } 382 383 return ret; 384 } 385 386 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) 387 { 388 if (adev->pm.dpm_enabled) { 389 mutex_lock(&adev->pm.mutex); 390 if (power_supply_is_system_supplied() > 0) 391 adev->pm.ac_power = true; 392 else 393 adev->pm.ac_power = false; 394 395 if (adev->powerplay.pp_funcs && 396 adev->powerplay.pp_funcs->enable_bapm) 397 amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); 398 399 if (is_support_sw_smu(adev)) 400 smu_set_ac_dc(adev->powerplay.pp_handle); 401 402 mutex_unlock(&adev->pm.mutex); 403 } 404 } 405 406 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, 407 void *data, uint32_t *size) 408 { 409 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 410 int ret = -EINVAL; 411 412 if (!data || !size) 413 return -EINVAL; 414 415 if (pp_funcs && pp_funcs->read_sensor) { 416 mutex_lock(&adev->pm.mutex); 417 ret = pp_funcs->read_sensor(adev->powerplay.pp_handle, 418 sensor, 419 data, 420 size); 421 mutex_unlock(&adev->pm.mutex); 422 } 423 424 return ret; 425 } 426 427 void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev) 428 { 429 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 430 431 if (!adev->pm.dpm_enabled) 432 return; 433 434 if (!pp_funcs->pm_compute_clocks) 435 return; 436 437 mutex_lock(&adev->pm.mutex); 438 pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle); 439 mutex_unlock(&adev->pm.mutex); 440 } 441 442 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 443 { 444 int ret = 0; 445 446 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); 447 if (ret) 448 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 449 enable ? "enable" : "disable", ret); 450 } 451 452 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 453 { 454 int ret = 0; 455 456 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); 457 if (ret) 458 DRM_ERROR("Dpm %s vce failed, ret = %d. \n", 459 enable ? "enable" : "disable", ret); 460 } 461 462 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) 463 { 464 int ret = 0; 465 466 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable); 467 if (ret) 468 DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n", 469 enable ? "enable" : "disable", ret); 470 } 471 472 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) 473 { 474 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 475 int r = 0; 476 477 if (!pp_funcs || !pp_funcs->load_firmware) 478 return 0; 479 480 mutex_lock(&adev->pm.mutex); 481 r = pp_funcs->load_firmware(adev->powerplay.pp_handle); 482 if (r) { 483 pr_err("smu firmware loading failed\n"); 484 goto out; 485 } 486 487 if (smu_version) 488 *smu_version = adev->pm.fw_version; 489 490 out: 491 mutex_unlock(&adev->pm.mutex); 492 return r; 493 } 494 495 int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable) 496 { 497 int ret = 0; 498 499 if (is_support_sw_smu(adev)) { 500 mutex_lock(&adev->pm.mutex); 501 ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle, 502 enable); 503 mutex_unlock(&adev->pm.mutex); 504 } 505 506 return ret; 507 } 508 509 int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size) 510 { 511 struct smu_context *smu = adev->powerplay.pp_handle; 512 int ret = 0; 513 514 if (!is_support_sw_smu(adev)) 515 return -EOPNOTSUPP; 516 517 mutex_lock(&adev->pm.mutex); 518 ret = smu_send_hbm_bad_pages_num(smu, size); 519 mutex_unlock(&adev->pm.mutex); 520 521 return ret; 522 } 523 524 int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size) 525 { 526 struct smu_context *smu = adev->powerplay.pp_handle; 527 int ret = 0; 528 529 if (!is_support_sw_smu(adev)) 530 return -EOPNOTSUPP; 531 532 mutex_lock(&adev->pm.mutex); 533 ret = smu_send_hbm_bad_channel_flag(smu, size); 534 mutex_unlock(&adev->pm.mutex); 535 536 return ret; 537 } 538 539 int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, 540 enum pp_clock_type type, 541 uint32_t *min, 542 uint32_t *max) 543 { 544 int ret = 0; 545 546 if (type != PP_SCLK) 547 return -EINVAL; 548 549 if (!is_support_sw_smu(adev)) 550 return -EOPNOTSUPP; 551 552 mutex_lock(&adev->pm.mutex); 553 ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle, 554 SMU_SCLK, 555 min, 556 max); 557 mutex_unlock(&adev->pm.mutex); 558 559 return ret; 560 } 561 562 int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, 563 enum pp_clock_type type, 564 uint32_t min, 565 uint32_t max) 566 { 567 struct smu_context *smu = adev->powerplay.pp_handle; 568 int ret = 0; 569 570 if (type != PP_SCLK) 571 return -EINVAL; 572 573 if (!is_support_sw_smu(adev)) 574 return -EOPNOTSUPP; 575 576 mutex_lock(&adev->pm.mutex); 577 ret = smu_set_soft_freq_range(smu, 578 SMU_SCLK, 579 min, 580 max); 581 mutex_unlock(&adev->pm.mutex); 582 583 return ret; 584 } 585 586 int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev) 587 { 588 struct smu_context *smu = adev->powerplay.pp_handle; 589 int ret = 0; 590 591 if (!is_support_sw_smu(adev)) 592 return 0; 593 594 mutex_lock(&adev->pm.mutex); 595 ret = smu_write_watermarks_table(smu); 596 mutex_unlock(&adev->pm.mutex); 597 598 return ret; 599 } 600 601 int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, 602 enum smu_event_type event, 603 uint64_t event_arg) 604 { 605 struct smu_context *smu = adev->powerplay.pp_handle; 606 int ret = 0; 607 608 if (!is_support_sw_smu(adev)) 609 return -EOPNOTSUPP; 610 611 mutex_lock(&adev->pm.mutex); 612 ret = smu_wait_for_event(smu, event, event_arg); 613 mutex_unlock(&adev->pm.mutex); 614 615 return ret; 616 } 617 618 int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value) 619 { 620 struct smu_context *smu = adev->powerplay.pp_handle; 621 int ret = 0; 622 623 if (!is_support_sw_smu(adev)) 624 return -EOPNOTSUPP; 625 626 mutex_lock(&adev->pm.mutex); 627 ret = smu_get_status_gfxoff(smu, value); 628 mutex_unlock(&adev->pm.mutex); 629 630 return ret; 631 } 632 633 uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev) 634 { 635 struct smu_context *smu = adev->powerplay.pp_handle; 636 637 if (!is_support_sw_smu(adev)) 638 return 0; 639 640 return atomic64_read(&smu->throttle_int_counter); 641 } 642 643 /* amdgpu_dpm_gfx_state_change - Handle gfx power state change set 644 * @adev: amdgpu_device pointer 645 * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry) 646 * 647 */ 648 void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev, 649 enum gfx_change_state state) 650 { 651 mutex_lock(&adev->pm.mutex); 652 if (adev->powerplay.pp_funcs && 653 adev->powerplay.pp_funcs->gfx_state_change_set) 654 ((adev)->powerplay.pp_funcs->gfx_state_change_set( 655 (adev)->powerplay.pp_handle, state)); 656 mutex_unlock(&adev->pm.mutex); 657 } 658 659 int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev, 660 void *umc_ecc) 661 { 662 struct smu_context *smu = adev->powerplay.pp_handle; 663 int ret = 0; 664 665 if (!is_support_sw_smu(adev)) 666 return -EOPNOTSUPP; 667 668 mutex_lock(&adev->pm.mutex); 669 ret = smu_get_ecc_info(smu, umc_ecc); 670 mutex_unlock(&adev->pm.mutex); 671 672 return ret; 673 } 674 675 struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev, 676 uint32_t idx) 677 { 678 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 679 struct amd_vce_state *vstate = NULL; 680 681 if (!pp_funcs->get_vce_clock_state) 682 return NULL; 683 684 mutex_lock(&adev->pm.mutex); 685 vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle, 686 idx); 687 mutex_unlock(&adev->pm.mutex); 688 689 return vstate; 690 } 691 692 void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, 693 enum amd_pm_state_type *state) 694 { 695 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 696 697 mutex_lock(&adev->pm.mutex); 698 699 if (!pp_funcs->get_current_power_state) { 700 *state = adev->pm.dpm.user_state; 701 goto out; 702 } 703 704 *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle); 705 if (*state < POWER_STATE_TYPE_DEFAULT || 706 *state > POWER_STATE_TYPE_INTERNAL_3DPERF) 707 *state = adev->pm.dpm.user_state; 708 709 out: 710 mutex_unlock(&adev->pm.mutex); 711 } 712 713 void amdgpu_dpm_set_power_state(struct amdgpu_device *adev, 714 enum amd_pm_state_type state) 715 { 716 mutex_lock(&adev->pm.mutex); 717 adev->pm.dpm.user_state = state; 718 mutex_unlock(&adev->pm.mutex); 719 720 if (is_support_sw_smu(adev)) 721 return; 722 723 if (amdgpu_dpm_dispatch_task(adev, 724 AMD_PP_TASK_ENABLE_USER_STATE, 725 &state) == -EOPNOTSUPP) 726 amdgpu_dpm_compute_clocks(adev); 727 } 728 729 enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev) 730 { 731 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 732 enum amd_dpm_forced_level level; 733 734 mutex_lock(&adev->pm.mutex); 735 if (pp_funcs->get_performance_level) 736 level = pp_funcs->get_performance_level(adev->powerplay.pp_handle); 737 else 738 level = adev->pm.dpm.forced_level; 739 mutex_unlock(&adev->pm.mutex); 740 741 return level; 742 } 743 744 int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev, 745 enum amd_dpm_forced_level level) 746 { 747 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 748 enum amd_dpm_forced_level current_level; 749 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 750 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 751 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 752 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 753 754 if (!pp_funcs->force_performance_level) 755 return 0; 756 757 if (adev->pm.dpm.thermal_active) 758 return -EINVAL; 759 760 current_level = amdgpu_dpm_get_performance_level(adev); 761 if (current_level == level) 762 return 0; 763 764 if (adev->asic_type == CHIP_RAVEN) { 765 if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) { 766 if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && 767 level == AMD_DPM_FORCED_LEVEL_MANUAL) 768 amdgpu_gfx_off_ctrl(adev, false); 769 else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && 770 level != AMD_DPM_FORCED_LEVEL_MANUAL) 771 amdgpu_gfx_off_ctrl(adev, true); 772 } 773 } 774 775 if (!(current_level & profile_mode_mask) && 776 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) 777 return -EINVAL; 778 779 if (!(current_level & profile_mode_mask) && 780 (level & profile_mode_mask)) { 781 /* enter UMD Pstate */ 782 amdgpu_device_ip_set_powergating_state(adev, 783 AMD_IP_BLOCK_TYPE_GFX, 784 AMD_PG_STATE_UNGATE); 785 amdgpu_device_ip_set_clockgating_state(adev, 786 AMD_IP_BLOCK_TYPE_GFX, 787 AMD_CG_STATE_UNGATE); 788 } else if ((current_level & profile_mode_mask) && 789 !(level & profile_mode_mask)) { 790 /* exit UMD Pstate */ 791 amdgpu_device_ip_set_clockgating_state(adev, 792 AMD_IP_BLOCK_TYPE_GFX, 793 AMD_CG_STATE_GATE); 794 amdgpu_device_ip_set_powergating_state(adev, 795 AMD_IP_BLOCK_TYPE_GFX, 796 AMD_PG_STATE_GATE); 797 } 798 799 mutex_lock(&adev->pm.mutex); 800 801 if (pp_funcs->force_performance_level(adev->powerplay.pp_handle, 802 level)) { 803 mutex_unlock(&adev->pm.mutex); 804 return -EINVAL; 805 } 806 807 adev->pm.dpm.forced_level = level; 808 809 mutex_unlock(&adev->pm.mutex); 810 811 return 0; 812 } 813 814 int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev, 815 struct pp_states_info *states) 816 { 817 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 818 int ret = 0; 819 820 if (!pp_funcs->get_pp_num_states) 821 return -EOPNOTSUPP; 822 823 mutex_lock(&adev->pm.mutex); 824 ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle, 825 states); 826 mutex_unlock(&adev->pm.mutex); 827 828 return ret; 829 } 830 831 int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev, 832 enum amd_pp_task task_id, 833 enum amd_pm_state_type *user_state) 834 { 835 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 836 int ret = 0; 837 838 if (!pp_funcs->dispatch_tasks) 839 return -EOPNOTSUPP; 840 841 mutex_lock(&adev->pm.mutex); 842 ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle, 843 task_id, 844 user_state); 845 mutex_unlock(&adev->pm.mutex); 846 847 return ret; 848 } 849 850 int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table) 851 { 852 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 853 int ret = 0; 854 855 if (!pp_funcs->get_pp_table) 856 return 0; 857 858 mutex_lock(&adev->pm.mutex); 859 ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle, 860 table); 861 mutex_unlock(&adev->pm.mutex); 862 863 return ret; 864 } 865 866 int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev, 867 uint32_t type, 868 long *input, 869 uint32_t size) 870 { 871 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 872 int ret = 0; 873 874 if (!pp_funcs->set_fine_grain_clk_vol) 875 return 0; 876 877 mutex_lock(&adev->pm.mutex); 878 ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle, 879 type, 880 input, 881 size); 882 mutex_unlock(&adev->pm.mutex); 883 884 return ret; 885 } 886 887 int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev, 888 uint32_t type, 889 long *input, 890 uint32_t size) 891 { 892 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 893 int ret = 0; 894 895 if (!pp_funcs->odn_edit_dpm_table) 896 return 0; 897 898 mutex_lock(&adev->pm.mutex); 899 ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle, 900 type, 901 input, 902 size); 903 mutex_unlock(&adev->pm.mutex); 904 905 return ret; 906 } 907 908 int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev, 909 enum pp_clock_type type, 910 char *buf) 911 { 912 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 913 int ret = 0; 914 915 if (!pp_funcs->print_clock_levels) 916 return 0; 917 918 mutex_lock(&adev->pm.mutex); 919 ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle, 920 type, 921 buf); 922 mutex_unlock(&adev->pm.mutex); 923 924 return ret; 925 } 926 927 int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev, 928 enum pp_clock_type type, 929 char *buf, 930 int *offset) 931 { 932 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 933 int ret = 0; 934 935 if (!pp_funcs->emit_clock_levels) 936 return -ENOENT; 937 938 mutex_lock(&adev->pm.mutex); 939 ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle, 940 type, 941 buf, 942 offset); 943 mutex_unlock(&adev->pm.mutex); 944 945 return ret; 946 } 947 948 int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev, 949 uint64_t ppfeature_masks) 950 { 951 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 952 int ret = 0; 953 954 if (!pp_funcs->set_ppfeature_status) 955 return 0; 956 957 mutex_lock(&adev->pm.mutex); 958 ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle, 959 ppfeature_masks); 960 mutex_unlock(&adev->pm.mutex); 961 962 return ret; 963 } 964 965 int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf) 966 { 967 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 968 int ret = 0; 969 970 if (!pp_funcs->get_ppfeature_status) 971 return 0; 972 973 mutex_lock(&adev->pm.mutex); 974 ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle, 975 buf); 976 mutex_unlock(&adev->pm.mutex); 977 978 return ret; 979 } 980 981 int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev, 982 enum pp_clock_type type, 983 uint32_t mask) 984 { 985 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 986 int ret = 0; 987 988 if (!pp_funcs->force_clock_level) 989 return 0; 990 991 mutex_lock(&adev->pm.mutex); 992 ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle, 993 type, 994 mask); 995 mutex_unlock(&adev->pm.mutex); 996 997 return ret; 998 } 999 1000 int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev) 1001 { 1002 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1003 int ret = 0; 1004 1005 if (!pp_funcs->get_sclk_od) 1006 return 0; 1007 1008 mutex_lock(&adev->pm.mutex); 1009 ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle); 1010 mutex_unlock(&adev->pm.mutex); 1011 1012 return ret; 1013 } 1014 1015 int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) 1016 { 1017 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1018 1019 if (is_support_sw_smu(adev)) 1020 return 0; 1021 1022 mutex_lock(&adev->pm.mutex); 1023 if (pp_funcs->set_sclk_od) 1024 pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value); 1025 mutex_unlock(&adev->pm.mutex); 1026 1027 if (amdgpu_dpm_dispatch_task(adev, 1028 AMD_PP_TASK_READJUST_POWER_STATE, 1029 NULL) == -EOPNOTSUPP) { 1030 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1031 amdgpu_dpm_compute_clocks(adev); 1032 } 1033 1034 return 0; 1035 } 1036 1037 int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev) 1038 { 1039 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1040 int ret = 0; 1041 1042 if (!pp_funcs->get_mclk_od) 1043 return 0; 1044 1045 mutex_lock(&adev->pm.mutex); 1046 ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle); 1047 mutex_unlock(&adev->pm.mutex); 1048 1049 return ret; 1050 } 1051 1052 int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) 1053 { 1054 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1055 1056 if (is_support_sw_smu(adev)) 1057 return 0; 1058 1059 mutex_lock(&adev->pm.mutex); 1060 if (pp_funcs->set_mclk_od) 1061 pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value); 1062 mutex_unlock(&adev->pm.mutex); 1063 1064 if (amdgpu_dpm_dispatch_task(adev, 1065 AMD_PP_TASK_READJUST_POWER_STATE, 1066 NULL) == -EOPNOTSUPP) { 1067 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1068 amdgpu_dpm_compute_clocks(adev); 1069 } 1070 1071 return 0; 1072 } 1073 1074 int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev, 1075 char *buf) 1076 { 1077 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1078 int ret = 0; 1079 1080 if (!pp_funcs->get_power_profile_mode) 1081 return -EOPNOTSUPP; 1082 1083 mutex_lock(&adev->pm.mutex); 1084 ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle, 1085 buf); 1086 mutex_unlock(&adev->pm.mutex); 1087 1088 return ret; 1089 } 1090 1091 int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev, 1092 long *input, uint32_t size) 1093 { 1094 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1095 int ret = 0; 1096 1097 if (!pp_funcs->set_power_profile_mode) 1098 return 0; 1099 1100 mutex_lock(&adev->pm.mutex); 1101 ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle, 1102 input, 1103 size); 1104 mutex_unlock(&adev->pm.mutex); 1105 1106 return ret; 1107 } 1108 1109 int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table) 1110 { 1111 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1112 int ret = 0; 1113 1114 if (!pp_funcs->get_gpu_metrics) 1115 return 0; 1116 1117 mutex_lock(&adev->pm.mutex); 1118 ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle, 1119 table); 1120 mutex_unlock(&adev->pm.mutex); 1121 1122 return ret; 1123 } 1124 1125 int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev, 1126 uint32_t *fan_mode) 1127 { 1128 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1129 int ret = 0; 1130 1131 if (!pp_funcs->get_fan_control_mode) 1132 return -EOPNOTSUPP; 1133 1134 mutex_lock(&adev->pm.mutex); 1135 ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle, 1136 fan_mode); 1137 mutex_unlock(&adev->pm.mutex); 1138 1139 return ret; 1140 } 1141 1142 int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev, 1143 uint32_t speed) 1144 { 1145 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1146 int ret = 0; 1147 1148 if (!pp_funcs->set_fan_speed_pwm) 1149 return -EOPNOTSUPP; 1150 1151 mutex_lock(&adev->pm.mutex); 1152 ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle, 1153 speed); 1154 mutex_unlock(&adev->pm.mutex); 1155 1156 return ret; 1157 } 1158 1159 int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev, 1160 uint32_t *speed) 1161 { 1162 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1163 int ret = 0; 1164 1165 if (!pp_funcs->get_fan_speed_pwm) 1166 return -EOPNOTSUPP; 1167 1168 mutex_lock(&adev->pm.mutex); 1169 ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle, 1170 speed); 1171 mutex_unlock(&adev->pm.mutex); 1172 1173 return ret; 1174 } 1175 1176 int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev, 1177 uint32_t *speed) 1178 { 1179 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1180 int ret = 0; 1181 1182 if (!pp_funcs->get_fan_speed_rpm) 1183 return -EOPNOTSUPP; 1184 1185 mutex_lock(&adev->pm.mutex); 1186 ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle, 1187 speed); 1188 mutex_unlock(&adev->pm.mutex); 1189 1190 return ret; 1191 } 1192 1193 int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev, 1194 uint32_t speed) 1195 { 1196 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1197 int ret = 0; 1198 1199 if (!pp_funcs->set_fan_speed_rpm) 1200 return -EOPNOTSUPP; 1201 1202 mutex_lock(&adev->pm.mutex); 1203 ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle, 1204 speed); 1205 mutex_unlock(&adev->pm.mutex); 1206 1207 return ret; 1208 } 1209 1210 int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev, 1211 uint32_t mode) 1212 { 1213 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1214 int ret = 0; 1215 1216 if (!pp_funcs->set_fan_control_mode) 1217 return -EOPNOTSUPP; 1218 1219 mutex_lock(&adev->pm.mutex); 1220 ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle, 1221 mode); 1222 mutex_unlock(&adev->pm.mutex); 1223 1224 return ret; 1225 } 1226 1227 int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev, 1228 uint32_t *limit, 1229 enum pp_power_limit_level pp_limit_level, 1230 enum pp_power_type power_type) 1231 { 1232 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1233 int ret = 0; 1234 1235 if (!pp_funcs->get_power_limit) 1236 return -ENODATA; 1237 1238 mutex_lock(&adev->pm.mutex); 1239 ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle, 1240 limit, 1241 pp_limit_level, 1242 power_type); 1243 mutex_unlock(&adev->pm.mutex); 1244 1245 return ret; 1246 } 1247 1248 int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, 1249 uint32_t limit) 1250 { 1251 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1252 int ret = 0; 1253 1254 if (!pp_funcs->set_power_limit) 1255 return -EINVAL; 1256 1257 mutex_lock(&adev->pm.mutex); 1258 ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle, 1259 limit); 1260 mutex_unlock(&adev->pm.mutex); 1261 1262 return ret; 1263 } 1264 1265 int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev) 1266 { 1267 bool cclk_dpm_supported = false; 1268 1269 if (!is_support_sw_smu(adev)) 1270 return false; 1271 1272 mutex_lock(&adev->pm.mutex); 1273 cclk_dpm_supported = is_support_cclk_dpm(adev); 1274 mutex_unlock(&adev->pm.mutex); 1275 1276 return (int)cclk_dpm_supported; 1277 } 1278 1279 int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, 1280 struct seq_file *m) 1281 { 1282 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1283 1284 if (!pp_funcs->debugfs_print_current_performance_level) 1285 return -EOPNOTSUPP; 1286 1287 mutex_lock(&adev->pm.mutex); 1288 pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle, 1289 m); 1290 mutex_unlock(&adev->pm.mutex); 1291 1292 return 0; 1293 } 1294 1295 int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, 1296 void **addr, 1297 size_t *size) 1298 { 1299 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1300 int ret = 0; 1301 1302 if (!pp_funcs->get_smu_prv_buf_details) 1303 return -ENOSYS; 1304 1305 mutex_lock(&adev->pm.mutex); 1306 ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle, 1307 addr, 1308 size); 1309 mutex_unlock(&adev->pm.mutex); 1310 1311 return ret; 1312 } 1313 1314 int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev) 1315 { 1316 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 1317 struct smu_context *smu = adev->powerplay.pp_handle; 1318 1319 if ((is_support_sw_smu(adev) && smu->od_enabled) || 1320 (is_support_sw_smu(adev) && smu->is_apu) || 1321 (!is_support_sw_smu(adev) && hwmgr->od_enabled)) 1322 return true; 1323 1324 return false; 1325 } 1326 1327 int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, 1328 const char *buf, 1329 size_t size) 1330 { 1331 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1332 int ret = 0; 1333 1334 if (!pp_funcs->set_pp_table) 1335 return -EOPNOTSUPP; 1336 1337 mutex_lock(&adev->pm.mutex); 1338 ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle, 1339 buf, 1340 size); 1341 mutex_unlock(&adev->pm.mutex); 1342 1343 return ret; 1344 } 1345 1346 int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev) 1347 { 1348 struct smu_context *smu = adev->powerplay.pp_handle; 1349 1350 if (!is_support_sw_smu(adev)) 1351 return INT_MAX; 1352 1353 return smu->cpu_core_num; 1354 } 1355 1356 void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev) 1357 { 1358 if (!is_support_sw_smu(adev)) 1359 return; 1360 1361 amdgpu_smu_stb_debug_fs_init(adev); 1362 } 1363 1364 int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev, 1365 const struct amd_pp_display_configuration *input) 1366 { 1367 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1368 int ret = 0; 1369 1370 if (!pp_funcs->display_configuration_change) 1371 return 0; 1372 1373 mutex_lock(&adev->pm.mutex); 1374 ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle, 1375 input); 1376 mutex_unlock(&adev->pm.mutex); 1377 1378 return ret; 1379 } 1380 1381 int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev, 1382 enum amd_pp_clock_type type, 1383 struct amd_pp_clocks *clocks) 1384 { 1385 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1386 int ret = 0; 1387 1388 if (!pp_funcs->get_clock_by_type) 1389 return 0; 1390 1391 mutex_lock(&adev->pm.mutex); 1392 ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle, 1393 type, 1394 clocks); 1395 mutex_unlock(&adev->pm.mutex); 1396 1397 return ret; 1398 } 1399 1400 int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev, 1401 struct amd_pp_simple_clock_info *clocks) 1402 { 1403 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1404 int ret = 0; 1405 1406 if (!pp_funcs->get_display_mode_validation_clocks) 1407 return 0; 1408 1409 mutex_lock(&adev->pm.mutex); 1410 ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle, 1411 clocks); 1412 mutex_unlock(&adev->pm.mutex); 1413 1414 return ret; 1415 } 1416 1417 int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev, 1418 enum amd_pp_clock_type type, 1419 struct pp_clock_levels_with_latency *clocks) 1420 { 1421 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1422 int ret = 0; 1423 1424 if (!pp_funcs->get_clock_by_type_with_latency) 1425 return 0; 1426 1427 mutex_lock(&adev->pm.mutex); 1428 ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle, 1429 type, 1430 clocks); 1431 mutex_unlock(&adev->pm.mutex); 1432 1433 return ret; 1434 } 1435 1436 int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev, 1437 enum amd_pp_clock_type type, 1438 struct pp_clock_levels_with_voltage *clocks) 1439 { 1440 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1441 int ret = 0; 1442 1443 if (!pp_funcs->get_clock_by_type_with_voltage) 1444 return 0; 1445 1446 mutex_lock(&adev->pm.mutex); 1447 ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle, 1448 type, 1449 clocks); 1450 mutex_unlock(&adev->pm.mutex); 1451 1452 return ret; 1453 } 1454 1455 int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev, 1456 void *clock_ranges) 1457 { 1458 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1459 int ret = 0; 1460 1461 if (!pp_funcs->set_watermarks_for_clocks_ranges) 1462 return -EOPNOTSUPP; 1463 1464 mutex_lock(&adev->pm.mutex); 1465 ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle, 1466 clock_ranges); 1467 mutex_unlock(&adev->pm.mutex); 1468 1469 return ret; 1470 } 1471 1472 int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev, 1473 struct pp_display_clock_request *clock) 1474 { 1475 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1476 int ret = 0; 1477 1478 if (!pp_funcs->display_clock_voltage_request) 1479 return -EOPNOTSUPP; 1480 1481 mutex_lock(&adev->pm.mutex); 1482 ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle, 1483 clock); 1484 mutex_unlock(&adev->pm.mutex); 1485 1486 return ret; 1487 } 1488 1489 int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev, 1490 struct amd_pp_clock_info *clocks) 1491 { 1492 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1493 int ret = 0; 1494 1495 if (!pp_funcs->get_current_clocks) 1496 return -EOPNOTSUPP; 1497 1498 mutex_lock(&adev->pm.mutex); 1499 ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle, 1500 clocks); 1501 mutex_unlock(&adev->pm.mutex); 1502 1503 return ret; 1504 } 1505 1506 void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev) 1507 { 1508 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1509 1510 if (!pp_funcs->notify_smu_enable_pwe) 1511 return; 1512 1513 mutex_lock(&adev->pm.mutex); 1514 pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle); 1515 mutex_unlock(&adev->pm.mutex); 1516 } 1517 1518 int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev, 1519 uint32_t count) 1520 { 1521 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1522 int ret = 0; 1523 1524 if (!pp_funcs->set_active_display_count) 1525 return -EOPNOTSUPP; 1526 1527 mutex_lock(&adev->pm.mutex); 1528 ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle, 1529 count); 1530 mutex_unlock(&adev->pm.mutex); 1531 1532 return ret; 1533 } 1534 1535 int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev, 1536 uint32_t clock) 1537 { 1538 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1539 int ret = 0; 1540 1541 if (!pp_funcs->set_min_deep_sleep_dcefclk) 1542 return -EOPNOTSUPP; 1543 1544 mutex_lock(&adev->pm.mutex); 1545 ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle, 1546 clock); 1547 mutex_unlock(&adev->pm.mutex); 1548 1549 return ret; 1550 } 1551 1552 void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev, 1553 uint32_t clock) 1554 { 1555 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1556 1557 if (!pp_funcs->set_hard_min_dcefclk_by_freq) 1558 return; 1559 1560 mutex_lock(&adev->pm.mutex); 1561 pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle, 1562 clock); 1563 mutex_unlock(&adev->pm.mutex); 1564 } 1565 1566 void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev, 1567 uint32_t clock) 1568 { 1569 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1570 1571 if (!pp_funcs->set_hard_min_fclk_by_freq) 1572 return; 1573 1574 mutex_lock(&adev->pm.mutex); 1575 pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle, 1576 clock); 1577 mutex_unlock(&adev->pm.mutex); 1578 } 1579 1580 int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev, 1581 bool disable_memory_clock_switch) 1582 { 1583 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1584 int ret = 0; 1585 1586 if (!pp_funcs->display_disable_memory_clock_switch) 1587 return 0; 1588 1589 mutex_lock(&adev->pm.mutex); 1590 ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle, 1591 disable_memory_clock_switch); 1592 mutex_unlock(&adev->pm.mutex); 1593 1594 return ret; 1595 } 1596 1597 int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev, 1598 struct pp_smu_nv_clock_table *max_clocks) 1599 { 1600 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1601 int ret = 0; 1602 1603 if (!pp_funcs->get_max_sustainable_clocks_by_dc) 1604 return -EOPNOTSUPP; 1605 1606 mutex_lock(&adev->pm.mutex); 1607 ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle, 1608 max_clocks); 1609 mutex_unlock(&adev->pm.mutex); 1610 1611 return ret; 1612 } 1613 1614 enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev, 1615 unsigned int *clock_values_in_khz, 1616 unsigned int *num_states) 1617 { 1618 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1619 int ret = 0; 1620 1621 if (!pp_funcs->get_uclk_dpm_states) 1622 return -EOPNOTSUPP; 1623 1624 mutex_lock(&adev->pm.mutex); 1625 ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle, 1626 clock_values_in_khz, 1627 num_states); 1628 mutex_unlock(&adev->pm.mutex); 1629 1630 return ret; 1631 } 1632 1633 int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev, 1634 struct dpm_clocks *clock_table) 1635 { 1636 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1637 int ret = 0; 1638 1639 if (!pp_funcs->get_dpm_clock_table) 1640 return -EOPNOTSUPP; 1641 1642 mutex_lock(&adev->pm.mutex); 1643 ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle, 1644 clock_table); 1645 mutex_unlock(&adev->pm.mutex); 1646 1647 return ret; 1648 } 1649