1 /* 2 * Copyright 2011 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Alex Deucher 23 */ 24 25 #include "amdgpu.h" 26 #include "amdgpu_atombios.h" 27 #include "amdgpu_i2c.h" 28 #include "amdgpu_dpm.h" 29 #include "atom.h" 30 #include "amd_pcie.h" 31 #include "amdgpu_display.h" 32 #include "hwmgr.h" 33 #include <linux/power_supply.h> 34 #include "amdgpu_smu.h" 35 36 #define amdgpu_dpm_enable_bapm(adev, e) \ 37 ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) 38 39 int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) 40 { 41 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 42 int ret = 0; 43 44 if (!pp_funcs->get_sclk) 45 return 0; 46 47 mutex_lock(&adev->pm.mutex); 48 ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle, 49 low); 50 mutex_unlock(&adev->pm.mutex); 51 52 return ret; 53 } 54 55 int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low) 56 { 57 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 58 int ret = 0; 59 60 if (!pp_funcs->get_mclk) 61 return 0; 62 63 mutex_lock(&adev->pm.mutex); 64 ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle, 65 low); 66 mutex_unlock(&adev->pm.mutex); 67 68 return ret; 69 } 70 71 int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate) 72 { 73 int ret = 0; 74 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 75 enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON; 76 77 if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) { 78 dev_dbg(adev->dev, "IP block%d already in the target %s state!", 79 block_type, gate ? "gate" : "ungate"); 80 return 0; 81 } 82 83 mutex_lock(&adev->pm.mutex); 84 85 switch (block_type) { 86 case AMD_IP_BLOCK_TYPE_UVD: 87 case AMD_IP_BLOCK_TYPE_VCE: 88 case AMD_IP_BLOCK_TYPE_GFX: 89 case AMD_IP_BLOCK_TYPE_VCN: 90 case AMD_IP_BLOCK_TYPE_SDMA: 91 case AMD_IP_BLOCK_TYPE_JPEG: 92 case AMD_IP_BLOCK_TYPE_GMC: 93 case AMD_IP_BLOCK_TYPE_ACP: 94 if (pp_funcs && pp_funcs->set_powergating_by_smu) 95 ret = (pp_funcs->set_powergating_by_smu( 96 (adev)->powerplay.pp_handle, block_type, gate)); 97 break; 98 default: 99 break; 100 } 101 102 if (!ret) 103 atomic_set(&adev->pm.pwr_state[block_type], pwr_state); 104 105 mutex_unlock(&adev->pm.mutex); 106 107 return ret; 108 } 109 110 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev) 111 { 112 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 113 void *pp_handle = adev->powerplay.pp_handle; 114 int ret = 0; 115 116 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 117 return -ENOENT; 118 119 mutex_lock(&adev->pm.mutex); 120 121 /* enter BACO state */ 122 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 123 124 mutex_unlock(&adev->pm.mutex); 125 126 return ret; 127 } 128 129 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev) 130 { 131 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 132 void *pp_handle = adev->powerplay.pp_handle; 133 int ret = 0; 134 135 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 136 return -ENOENT; 137 138 mutex_lock(&adev->pm.mutex); 139 140 /* exit BACO state */ 141 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 142 143 mutex_unlock(&adev->pm.mutex); 144 145 return ret; 146 } 147 148 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, 149 enum pp_mp1_state mp1_state) 150 { 151 int ret = 0; 152 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 153 154 if (pp_funcs && pp_funcs->set_mp1_state) { 155 mutex_lock(&adev->pm.mutex); 156 157 ret = pp_funcs->set_mp1_state( 158 adev->powerplay.pp_handle, 159 mp1_state); 160 161 mutex_unlock(&adev->pm.mutex); 162 } 163 164 return ret; 165 } 166 167 bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) 168 { 169 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 170 void *pp_handle = adev->powerplay.pp_handle; 171 bool baco_cap; 172 int ret = 0; 173 174 if (!pp_funcs || !pp_funcs->get_asic_baco_capability) 175 return false; 176 177 mutex_lock(&adev->pm.mutex); 178 179 ret = pp_funcs->get_asic_baco_capability(pp_handle, 180 &baco_cap); 181 182 mutex_unlock(&adev->pm.mutex); 183 184 return ret ? false : baco_cap; 185 } 186 187 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev) 188 { 189 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 190 void *pp_handle = adev->powerplay.pp_handle; 191 int ret = 0; 192 193 if (!pp_funcs || !pp_funcs->asic_reset_mode_2) 194 return -ENOENT; 195 196 mutex_lock(&adev->pm.mutex); 197 198 ret = pp_funcs->asic_reset_mode_2(pp_handle); 199 200 mutex_unlock(&adev->pm.mutex); 201 202 return ret; 203 } 204 205 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev) 206 { 207 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 208 void *pp_handle = adev->powerplay.pp_handle; 209 int ret = 0; 210 211 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 212 return -ENOENT; 213 214 mutex_lock(&adev->pm.mutex); 215 216 /* enter BACO state */ 217 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 218 if (ret) 219 goto out; 220 221 /* exit BACO state */ 222 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 223 224 out: 225 mutex_unlock(&adev->pm.mutex); 226 return ret; 227 } 228 229 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev) 230 { 231 struct smu_context *smu = adev->powerplay.pp_handle; 232 bool support_mode1_reset = false; 233 234 if (is_support_sw_smu(adev)) { 235 mutex_lock(&adev->pm.mutex); 236 support_mode1_reset = smu_mode1_reset_is_support(smu); 237 mutex_unlock(&adev->pm.mutex); 238 } 239 240 return support_mode1_reset; 241 } 242 243 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev) 244 { 245 struct smu_context *smu = adev->powerplay.pp_handle; 246 int ret = -EOPNOTSUPP; 247 248 if (is_support_sw_smu(adev)) { 249 mutex_lock(&adev->pm.mutex); 250 ret = smu_mode1_reset(smu); 251 mutex_unlock(&adev->pm.mutex); 252 } 253 254 return ret; 255 } 256 257 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, 258 enum PP_SMC_POWER_PROFILE type, 259 bool en) 260 { 261 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 262 int ret = 0; 263 264 if (amdgpu_sriov_vf(adev)) 265 return 0; 266 267 if (pp_funcs && pp_funcs->switch_power_profile) { 268 mutex_lock(&adev->pm.mutex); 269 ret = pp_funcs->switch_power_profile( 270 adev->powerplay.pp_handle, type, en); 271 mutex_unlock(&adev->pm.mutex); 272 } 273 274 return ret; 275 } 276 277 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, 278 uint32_t pstate) 279 { 280 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 281 int ret = 0; 282 283 if (pp_funcs && pp_funcs->set_xgmi_pstate) { 284 mutex_lock(&adev->pm.mutex); 285 ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle, 286 pstate); 287 mutex_unlock(&adev->pm.mutex); 288 } 289 290 return ret; 291 } 292 293 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev, 294 uint32_t cstate) 295 { 296 int ret = 0; 297 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 298 void *pp_handle = adev->powerplay.pp_handle; 299 300 if (pp_funcs && pp_funcs->set_df_cstate) { 301 mutex_lock(&adev->pm.mutex); 302 ret = pp_funcs->set_df_cstate(pp_handle, cstate); 303 mutex_unlock(&adev->pm.mutex); 304 } 305 306 return ret; 307 } 308 309 int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en) 310 { 311 struct smu_context *smu = adev->powerplay.pp_handle; 312 int ret = 0; 313 314 if (is_support_sw_smu(adev)) { 315 mutex_lock(&adev->pm.mutex); 316 ret = smu_allow_xgmi_power_down(smu, en); 317 mutex_unlock(&adev->pm.mutex); 318 } 319 320 return ret; 321 } 322 323 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev) 324 { 325 void *pp_handle = adev->powerplay.pp_handle; 326 const struct amd_pm_funcs *pp_funcs = 327 adev->powerplay.pp_funcs; 328 int ret = 0; 329 330 if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) { 331 mutex_lock(&adev->pm.mutex); 332 ret = pp_funcs->enable_mgpu_fan_boost(pp_handle); 333 mutex_unlock(&adev->pm.mutex); 334 } 335 336 return ret; 337 } 338 339 int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev, 340 uint32_t msg_id) 341 { 342 void *pp_handle = adev->powerplay.pp_handle; 343 const struct amd_pm_funcs *pp_funcs = 344 adev->powerplay.pp_funcs; 345 int ret = 0; 346 347 if (pp_funcs && pp_funcs->set_clockgating_by_smu) { 348 mutex_lock(&adev->pm.mutex); 349 ret = pp_funcs->set_clockgating_by_smu(pp_handle, 350 msg_id); 351 mutex_unlock(&adev->pm.mutex); 352 } 353 354 return ret; 355 } 356 357 int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev, 358 bool acquire) 359 { 360 void *pp_handle = adev->powerplay.pp_handle; 361 const struct amd_pm_funcs *pp_funcs = 362 adev->powerplay.pp_funcs; 363 int ret = -EOPNOTSUPP; 364 365 if (pp_funcs && pp_funcs->smu_i2c_bus_access) { 366 mutex_lock(&adev->pm.mutex); 367 ret = pp_funcs->smu_i2c_bus_access(pp_handle, 368 acquire); 369 mutex_unlock(&adev->pm.mutex); 370 } 371 372 return ret; 373 } 374 375 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) 376 { 377 if (adev->pm.dpm_enabled) { 378 mutex_lock(&adev->pm.mutex); 379 if (power_supply_is_system_supplied() > 0) 380 adev->pm.ac_power = true; 381 else 382 adev->pm.ac_power = false; 383 384 if (adev->powerplay.pp_funcs && 385 adev->powerplay.pp_funcs->enable_bapm) 386 amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); 387 388 if (is_support_sw_smu(adev)) 389 smu_set_ac_dc(adev->powerplay.pp_handle); 390 391 mutex_unlock(&adev->pm.mutex); 392 } 393 } 394 395 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, 396 void *data, uint32_t *size) 397 { 398 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 399 int ret = -EINVAL; 400 401 if (!data || !size) 402 return -EINVAL; 403 404 if (pp_funcs && pp_funcs->read_sensor) { 405 mutex_lock(&adev->pm.mutex); 406 ret = pp_funcs->read_sensor(adev->powerplay.pp_handle, 407 sensor, 408 data, 409 size); 410 mutex_unlock(&adev->pm.mutex); 411 } 412 413 return ret; 414 } 415 416 void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev) 417 { 418 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 419 420 if (!adev->pm.dpm_enabled) 421 return; 422 423 if (!pp_funcs->pm_compute_clocks) 424 return; 425 426 mutex_lock(&adev->pm.mutex); 427 pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle); 428 mutex_unlock(&adev->pm.mutex); 429 } 430 431 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 432 { 433 int ret = 0; 434 435 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); 436 if (ret) 437 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 438 enable ? "enable" : "disable", ret); 439 } 440 441 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 442 { 443 int ret = 0; 444 445 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); 446 if (ret) 447 DRM_ERROR("Dpm %s vce failed, ret = %d. \n", 448 enable ? "enable" : "disable", ret); 449 } 450 451 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) 452 { 453 int ret = 0; 454 455 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable); 456 if (ret) 457 DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n", 458 enable ? "enable" : "disable", ret); 459 } 460 461 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) 462 { 463 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 464 int r = 0; 465 466 if (!pp_funcs || !pp_funcs->load_firmware) 467 return 0; 468 469 mutex_lock(&adev->pm.mutex); 470 r = pp_funcs->load_firmware(adev->powerplay.pp_handle); 471 if (r) { 472 pr_err("smu firmware loading failed\n"); 473 goto out; 474 } 475 476 if (smu_version) 477 *smu_version = adev->pm.fw_version; 478 479 out: 480 mutex_unlock(&adev->pm.mutex); 481 return r; 482 } 483 484 int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable) 485 { 486 int ret = 0; 487 488 if (is_support_sw_smu(adev)) { 489 mutex_lock(&adev->pm.mutex); 490 ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle, 491 enable); 492 mutex_unlock(&adev->pm.mutex); 493 } 494 495 return ret; 496 } 497 498 int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size) 499 { 500 struct smu_context *smu = adev->powerplay.pp_handle; 501 int ret = 0; 502 503 mutex_lock(&adev->pm.mutex); 504 ret = smu_send_hbm_bad_pages_num(smu, size); 505 mutex_unlock(&adev->pm.mutex); 506 507 return ret; 508 } 509 510 int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size) 511 { 512 struct smu_context *smu = adev->powerplay.pp_handle; 513 int ret = 0; 514 515 mutex_lock(&adev->pm.mutex); 516 ret = smu_send_hbm_bad_channel_flag(smu, size); 517 mutex_unlock(&adev->pm.mutex); 518 519 return ret; 520 } 521 522 int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, 523 enum pp_clock_type type, 524 uint32_t *min, 525 uint32_t *max) 526 { 527 int ret = 0; 528 529 if (type != PP_SCLK) 530 return -EINVAL; 531 532 if (!is_support_sw_smu(adev)) 533 return -EOPNOTSUPP; 534 535 mutex_lock(&adev->pm.mutex); 536 ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle, 537 SMU_SCLK, 538 min, 539 max); 540 mutex_unlock(&adev->pm.mutex); 541 542 return ret; 543 } 544 545 int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, 546 enum pp_clock_type type, 547 uint32_t min, 548 uint32_t max) 549 { 550 struct smu_context *smu = adev->powerplay.pp_handle; 551 int ret = 0; 552 553 if (type != PP_SCLK) 554 return -EINVAL; 555 556 if (!is_support_sw_smu(adev)) 557 return -EOPNOTSUPP; 558 559 mutex_lock(&adev->pm.mutex); 560 ret = smu_set_soft_freq_range(smu, 561 SMU_SCLK, 562 min, 563 max); 564 mutex_unlock(&adev->pm.mutex); 565 566 return ret; 567 } 568 569 int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev) 570 { 571 struct smu_context *smu = adev->powerplay.pp_handle; 572 int ret = 0; 573 574 if (!is_support_sw_smu(adev)) 575 return 0; 576 577 mutex_lock(&adev->pm.mutex); 578 ret = smu_write_watermarks_table(smu); 579 mutex_unlock(&adev->pm.mutex); 580 581 return ret; 582 } 583 584 int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, 585 enum smu_event_type event, 586 uint64_t event_arg) 587 { 588 struct smu_context *smu = adev->powerplay.pp_handle; 589 int ret = 0; 590 591 if (!is_support_sw_smu(adev)) 592 return -EOPNOTSUPP; 593 594 mutex_lock(&adev->pm.mutex); 595 ret = smu_wait_for_event(smu, event, event_arg); 596 mutex_unlock(&adev->pm.mutex); 597 598 return ret; 599 } 600 601 int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value) 602 { 603 struct smu_context *smu = adev->powerplay.pp_handle; 604 int ret = 0; 605 606 if (!is_support_sw_smu(adev)) 607 return -EOPNOTSUPP; 608 609 mutex_lock(&adev->pm.mutex); 610 ret = smu_get_status_gfxoff(smu, value); 611 mutex_unlock(&adev->pm.mutex); 612 613 return ret; 614 } 615 616 uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev) 617 { 618 struct smu_context *smu = adev->powerplay.pp_handle; 619 620 if (!is_support_sw_smu(adev)) 621 return 0; 622 623 return atomic64_read(&smu->throttle_int_counter); 624 } 625 626 /* amdgpu_dpm_gfx_state_change - Handle gfx power state change set 627 * @adev: amdgpu_device pointer 628 * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry) 629 * 630 */ 631 void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev, 632 enum gfx_change_state state) 633 { 634 mutex_lock(&adev->pm.mutex); 635 if (adev->powerplay.pp_funcs && 636 adev->powerplay.pp_funcs->gfx_state_change_set) 637 ((adev)->powerplay.pp_funcs->gfx_state_change_set( 638 (adev)->powerplay.pp_handle, state)); 639 mutex_unlock(&adev->pm.mutex); 640 } 641 642 int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev, 643 void *umc_ecc) 644 { 645 struct smu_context *smu = adev->powerplay.pp_handle; 646 int ret = 0; 647 648 if (!is_support_sw_smu(adev)) 649 return -EOPNOTSUPP; 650 651 mutex_lock(&adev->pm.mutex); 652 ret = smu_get_ecc_info(smu, umc_ecc); 653 mutex_unlock(&adev->pm.mutex); 654 655 return ret; 656 } 657 658 struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev, 659 uint32_t idx) 660 { 661 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 662 struct amd_vce_state *vstate = NULL; 663 664 if (!pp_funcs->get_vce_clock_state) 665 return NULL; 666 667 mutex_lock(&adev->pm.mutex); 668 vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle, 669 idx); 670 mutex_unlock(&adev->pm.mutex); 671 672 return vstate; 673 } 674 675 void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, 676 enum amd_pm_state_type *state) 677 { 678 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 679 680 mutex_lock(&adev->pm.mutex); 681 682 if (!pp_funcs->get_current_power_state) { 683 *state = adev->pm.dpm.user_state; 684 goto out; 685 } 686 687 *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle); 688 if (*state < POWER_STATE_TYPE_DEFAULT || 689 *state > POWER_STATE_TYPE_INTERNAL_3DPERF) 690 *state = adev->pm.dpm.user_state; 691 692 out: 693 mutex_unlock(&adev->pm.mutex); 694 } 695 696 void amdgpu_dpm_set_power_state(struct amdgpu_device *adev, 697 enum amd_pm_state_type state) 698 { 699 mutex_lock(&adev->pm.mutex); 700 adev->pm.dpm.user_state = state; 701 mutex_unlock(&adev->pm.mutex); 702 703 if (is_support_sw_smu(adev)) 704 return; 705 706 if (amdgpu_dpm_dispatch_task(adev, 707 AMD_PP_TASK_ENABLE_USER_STATE, 708 &state) == -EOPNOTSUPP) 709 amdgpu_dpm_compute_clocks(adev); 710 } 711 712 enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev) 713 { 714 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 715 enum amd_dpm_forced_level level; 716 717 mutex_lock(&adev->pm.mutex); 718 if (pp_funcs->get_performance_level) 719 level = pp_funcs->get_performance_level(adev->powerplay.pp_handle); 720 else 721 level = adev->pm.dpm.forced_level; 722 mutex_unlock(&adev->pm.mutex); 723 724 return level; 725 } 726 727 int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev, 728 enum amd_dpm_forced_level level) 729 { 730 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 731 enum amd_dpm_forced_level current_level; 732 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 733 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 734 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 735 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 736 737 if (!pp_funcs->force_performance_level) 738 return 0; 739 740 if (adev->pm.dpm.thermal_active) 741 return -EINVAL; 742 743 current_level = amdgpu_dpm_get_performance_level(adev); 744 if (current_level == level) 745 return 0; 746 747 if (adev->asic_type == CHIP_RAVEN) { 748 if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) { 749 if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && 750 level == AMD_DPM_FORCED_LEVEL_MANUAL) 751 amdgpu_gfx_off_ctrl(adev, false); 752 else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && 753 level != AMD_DPM_FORCED_LEVEL_MANUAL) 754 amdgpu_gfx_off_ctrl(adev, true); 755 } 756 } 757 758 if (!(current_level & profile_mode_mask) && 759 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) 760 return -EINVAL; 761 762 if (!(current_level & profile_mode_mask) && 763 (level & profile_mode_mask)) { 764 /* enter UMD Pstate */ 765 amdgpu_device_ip_set_powergating_state(adev, 766 AMD_IP_BLOCK_TYPE_GFX, 767 AMD_PG_STATE_UNGATE); 768 amdgpu_device_ip_set_clockgating_state(adev, 769 AMD_IP_BLOCK_TYPE_GFX, 770 AMD_CG_STATE_UNGATE); 771 } else if ((current_level & profile_mode_mask) && 772 !(level & profile_mode_mask)) { 773 /* exit UMD Pstate */ 774 amdgpu_device_ip_set_clockgating_state(adev, 775 AMD_IP_BLOCK_TYPE_GFX, 776 AMD_CG_STATE_GATE); 777 amdgpu_device_ip_set_powergating_state(adev, 778 AMD_IP_BLOCK_TYPE_GFX, 779 AMD_PG_STATE_GATE); 780 } 781 782 mutex_lock(&adev->pm.mutex); 783 784 if (pp_funcs->force_performance_level(adev->powerplay.pp_handle, 785 level)) { 786 mutex_unlock(&adev->pm.mutex); 787 return -EINVAL; 788 } 789 790 adev->pm.dpm.forced_level = level; 791 792 mutex_unlock(&adev->pm.mutex); 793 794 return 0; 795 } 796 797 int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev, 798 struct pp_states_info *states) 799 { 800 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 801 int ret = 0; 802 803 if (!pp_funcs->get_pp_num_states) 804 return -EOPNOTSUPP; 805 806 mutex_lock(&adev->pm.mutex); 807 ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle, 808 states); 809 mutex_unlock(&adev->pm.mutex); 810 811 return ret; 812 } 813 814 int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev, 815 enum amd_pp_task task_id, 816 enum amd_pm_state_type *user_state) 817 { 818 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 819 int ret = 0; 820 821 if (!pp_funcs->dispatch_tasks) 822 return -EOPNOTSUPP; 823 824 mutex_lock(&adev->pm.mutex); 825 ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle, 826 task_id, 827 user_state); 828 mutex_unlock(&adev->pm.mutex); 829 830 return ret; 831 } 832 833 int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table) 834 { 835 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 836 int ret = 0; 837 838 if (!pp_funcs->get_pp_table) 839 return 0; 840 841 mutex_lock(&adev->pm.mutex); 842 ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle, 843 table); 844 mutex_unlock(&adev->pm.mutex); 845 846 return ret; 847 } 848 849 int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev, 850 uint32_t type, 851 long *input, 852 uint32_t size) 853 { 854 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 855 int ret = 0; 856 857 if (!pp_funcs->set_fine_grain_clk_vol) 858 return 0; 859 860 mutex_lock(&adev->pm.mutex); 861 ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle, 862 type, 863 input, 864 size); 865 mutex_unlock(&adev->pm.mutex); 866 867 return ret; 868 } 869 870 int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev, 871 uint32_t type, 872 long *input, 873 uint32_t size) 874 { 875 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 876 int ret = 0; 877 878 if (!pp_funcs->odn_edit_dpm_table) 879 return 0; 880 881 mutex_lock(&adev->pm.mutex); 882 ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle, 883 type, 884 input, 885 size); 886 mutex_unlock(&adev->pm.mutex); 887 888 return ret; 889 } 890 891 int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev, 892 enum pp_clock_type type, 893 char *buf) 894 { 895 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 896 int ret = 0; 897 898 if (!pp_funcs->print_clock_levels) 899 return 0; 900 901 mutex_lock(&adev->pm.mutex); 902 ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle, 903 type, 904 buf); 905 mutex_unlock(&adev->pm.mutex); 906 907 return ret; 908 } 909 910 int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev, 911 enum pp_clock_type type, 912 char *buf, 913 int *offset) 914 { 915 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 916 int ret = 0; 917 918 if (!pp_funcs->emit_clock_levels) 919 return -ENOENT; 920 921 mutex_lock(&adev->pm.mutex); 922 ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle, 923 type, 924 buf, 925 offset); 926 mutex_unlock(&adev->pm.mutex); 927 928 return ret; 929 } 930 931 int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev, 932 uint64_t ppfeature_masks) 933 { 934 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 935 int ret = 0; 936 937 if (!pp_funcs->set_ppfeature_status) 938 return 0; 939 940 mutex_lock(&adev->pm.mutex); 941 ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle, 942 ppfeature_masks); 943 mutex_unlock(&adev->pm.mutex); 944 945 return ret; 946 } 947 948 int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf) 949 { 950 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 951 int ret = 0; 952 953 if (!pp_funcs->get_ppfeature_status) 954 return 0; 955 956 mutex_lock(&adev->pm.mutex); 957 ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle, 958 buf); 959 mutex_unlock(&adev->pm.mutex); 960 961 return ret; 962 } 963 964 int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev, 965 enum pp_clock_type type, 966 uint32_t mask) 967 { 968 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 969 int ret = 0; 970 971 if (!pp_funcs->force_clock_level) 972 return 0; 973 974 mutex_lock(&adev->pm.mutex); 975 ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle, 976 type, 977 mask); 978 mutex_unlock(&adev->pm.mutex); 979 980 return ret; 981 } 982 983 int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev) 984 { 985 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 986 int ret = 0; 987 988 if (!pp_funcs->get_sclk_od) 989 return 0; 990 991 mutex_lock(&adev->pm.mutex); 992 ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle); 993 mutex_unlock(&adev->pm.mutex); 994 995 return ret; 996 } 997 998 int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) 999 { 1000 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1001 1002 if (is_support_sw_smu(adev)) 1003 return 0; 1004 1005 mutex_lock(&adev->pm.mutex); 1006 if (pp_funcs->set_sclk_od) 1007 pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value); 1008 mutex_unlock(&adev->pm.mutex); 1009 1010 if (amdgpu_dpm_dispatch_task(adev, 1011 AMD_PP_TASK_READJUST_POWER_STATE, 1012 NULL) == -EOPNOTSUPP) { 1013 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1014 amdgpu_dpm_compute_clocks(adev); 1015 } 1016 1017 return 0; 1018 } 1019 1020 int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev) 1021 { 1022 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1023 int ret = 0; 1024 1025 if (!pp_funcs->get_mclk_od) 1026 return 0; 1027 1028 mutex_lock(&adev->pm.mutex); 1029 ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle); 1030 mutex_unlock(&adev->pm.mutex); 1031 1032 return ret; 1033 } 1034 1035 int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) 1036 { 1037 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1038 1039 if (is_support_sw_smu(adev)) 1040 return 0; 1041 1042 mutex_lock(&adev->pm.mutex); 1043 if (pp_funcs->set_mclk_od) 1044 pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value); 1045 mutex_unlock(&adev->pm.mutex); 1046 1047 if (amdgpu_dpm_dispatch_task(adev, 1048 AMD_PP_TASK_READJUST_POWER_STATE, 1049 NULL) == -EOPNOTSUPP) { 1050 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1051 amdgpu_dpm_compute_clocks(adev); 1052 } 1053 1054 return 0; 1055 } 1056 1057 int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev, 1058 char *buf) 1059 { 1060 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1061 int ret = 0; 1062 1063 if (!pp_funcs->get_power_profile_mode) 1064 return -EOPNOTSUPP; 1065 1066 mutex_lock(&adev->pm.mutex); 1067 ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle, 1068 buf); 1069 mutex_unlock(&adev->pm.mutex); 1070 1071 return ret; 1072 } 1073 1074 int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev, 1075 long *input, uint32_t size) 1076 { 1077 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1078 int ret = 0; 1079 1080 if (!pp_funcs->set_power_profile_mode) 1081 return 0; 1082 1083 mutex_lock(&adev->pm.mutex); 1084 ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle, 1085 input, 1086 size); 1087 mutex_unlock(&adev->pm.mutex); 1088 1089 return ret; 1090 } 1091 1092 int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table) 1093 { 1094 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1095 int ret = 0; 1096 1097 if (!pp_funcs->get_gpu_metrics) 1098 return 0; 1099 1100 mutex_lock(&adev->pm.mutex); 1101 ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle, 1102 table); 1103 mutex_unlock(&adev->pm.mutex); 1104 1105 return ret; 1106 } 1107 1108 int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev, 1109 uint32_t *fan_mode) 1110 { 1111 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1112 int ret = 0; 1113 1114 if (!pp_funcs->get_fan_control_mode) 1115 return -EOPNOTSUPP; 1116 1117 mutex_lock(&adev->pm.mutex); 1118 ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle, 1119 fan_mode); 1120 mutex_unlock(&adev->pm.mutex); 1121 1122 return ret; 1123 } 1124 1125 int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev, 1126 uint32_t speed) 1127 { 1128 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1129 int ret = 0; 1130 1131 if (!pp_funcs->set_fan_speed_pwm) 1132 return -EOPNOTSUPP; 1133 1134 mutex_lock(&adev->pm.mutex); 1135 ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle, 1136 speed); 1137 mutex_unlock(&adev->pm.mutex); 1138 1139 return ret; 1140 } 1141 1142 int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev, 1143 uint32_t *speed) 1144 { 1145 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1146 int ret = 0; 1147 1148 if (!pp_funcs->get_fan_speed_pwm) 1149 return -EOPNOTSUPP; 1150 1151 mutex_lock(&adev->pm.mutex); 1152 ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle, 1153 speed); 1154 mutex_unlock(&adev->pm.mutex); 1155 1156 return ret; 1157 } 1158 1159 int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev, 1160 uint32_t *speed) 1161 { 1162 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1163 int ret = 0; 1164 1165 if (!pp_funcs->get_fan_speed_rpm) 1166 return -EOPNOTSUPP; 1167 1168 mutex_lock(&adev->pm.mutex); 1169 ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle, 1170 speed); 1171 mutex_unlock(&adev->pm.mutex); 1172 1173 return ret; 1174 } 1175 1176 int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev, 1177 uint32_t speed) 1178 { 1179 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1180 int ret = 0; 1181 1182 if (!pp_funcs->set_fan_speed_rpm) 1183 return -EOPNOTSUPP; 1184 1185 mutex_lock(&adev->pm.mutex); 1186 ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle, 1187 speed); 1188 mutex_unlock(&adev->pm.mutex); 1189 1190 return ret; 1191 } 1192 1193 int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev, 1194 uint32_t mode) 1195 { 1196 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1197 int ret = 0; 1198 1199 if (!pp_funcs->set_fan_control_mode) 1200 return -EOPNOTSUPP; 1201 1202 mutex_lock(&adev->pm.mutex); 1203 ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle, 1204 mode); 1205 mutex_unlock(&adev->pm.mutex); 1206 1207 return ret; 1208 } 1209 1210 int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev, 1211 uint32_t *limit, 1212 enum pp_power_limit_level pp_limit_level, 1213 enum pp_power_type power_type) 1214 { 1215 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1216 int ret = 0; 1217 1218 if (!pp_funcs->get_power_limit) 1219 return -ENODATA; 1220 1221 mutex_lock(&adev->pm.mutex); 1222 ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle, 1223 limit, 1224 pp_limit_level, 1225 power_type); 1226 mutex_unlock(&adev->pm.mutex); 1227 1228 return ret; 1229 } 1230 1231 int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, 1232 uint32_t limit) 1233 { 1234 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1235 int ret = 0; 1236 1237 if (!pp_funcs->set_power_limit) 1238 return -EINVAL; 1239 1240 mutex_lock(&adev->pm.mutex); 1241 ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle, 1242 limit); 1243 mutex_unlock(&adev->pm.mutex); 1244 1245 return ret; 1246 } 1247 1248 int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev) 1249 { 1250 bool cclk_dpm_supported = false; 1251 1252 if (!is_support_sw_smu(adev)) 1253 return false; 1254 1255 mutex_lock(&adev->pm.mutex); 1256 cclk_dpm_supported = is_support_cclk_dpm(adev); 1257 mutex_unlock(&adev->pm.mutex); 1258 1259 return (int)cclk_dpm_supported; 1260 } 1261 1262 int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, 1263 struct seq_file *m) 1264 { 1265 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1266 1267 if (!pp_funcs->debugfs_print_current_performance_level) 1268 return -EOPNOTSUPP; 1269 1270 mutex_lock(&adev->pm.mutex); 1271 pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle, 1272 m); 1273 mutex_unlock(&adev->pm.mutex); 1274 1275 return 0; 1276 } 1277 1278 int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, 1279 void **addr, 1280 size_t *size) 1281 { 1282 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1283 int ret = 0; 1284 1285 if (!pp_funcs->get_smu_prv_buf_details) 1286 return -ENOSYS; 1287 1288 mutex_lock(&adev->pm.mutex); 1289 ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle, 1290 addr, 1291 size); 1292 mutex_unlock(&adev->pm.mutex); 1293 1294 return ret; 1295 } 1296 1297 int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev) 1298 { 1299 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 1300 struct smu_context *smu = adev->powerplay.pp_handle; 1301 1302 if ((is_support_sw_smu(adev) && smu->od_enabled) || 1303 (is_support_sw_smu(adev) && smu->is_apu) || 1304 (!is_support_sw_smu(adev) && hwmgr->od_enabled)) 1305 return true; 1306 1307 return false; 1308 } 1309 1310 int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, 1311 const char *buf, 1312 size_t size) 1313 { 1314 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1315 int ret = 0; 1316 1317 if (!pp_funcs->set_pp_table) 1318 return -EOPNOTSUPP; 1319 1320 mutex_lock(&adev->pm.mutex); 1321 ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle, 1322 buf, 1323 size); 1324 mutex_unlock(&adev->pm.mutex); 1325 1326 return ret; 1327 } 1328 1329 int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev) 1330 { 1331 struct smu_context *smu = adev->powerplay.pp_handle; 1332 1333 if (!is_support_sw_smu(adev)) 1334 return INT_MAX; 1335 1336 return smu->cpu_core_num; 1337 } 1338 1339 void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev) 1340 { 1341 if (!is_support_sw_smu(adev)) 1342 return; 1343 1344 amdgpu_smu_stb_debug_fs_init(adev); 1345 } 1346 1347 int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev, 1348 const struct amd_pp_display_configuration *input) 1349 { 1350 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1351 int ret = 0; 1352 1353 if (!pp_funcs->display_configuration_change) 1354 return 0; 1355 1356 mutex_lock(&adev->pm.mutex); 1357 ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle, 1358 input); 1359 mutex_unlock(&adev->pm.mutex); 1360 1361 return ret; 1362 } 1363 1364 int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev, 1365 enum amd_pp_clock_type type, 1366 struct amd_pp_clocks *clocks) 1367 { 1368 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1369 int ret = 0; 1370 1371 if (!pp_funcs->get_clock_by_type) 1372 return 0; 1373 1374 mutex_lock(&adev->pm.mutex); 1375 ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle, 1376 type, 1377 clocks); 1378 mutex_unlock(&adev->pm.mutex); 1379 1380 return ret; 1381 } 1382 1383 int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev, 1384 struct amd_pp_simple_clock_info *clocks) 1385 { 1386 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1387 int ret = 0; 1388 1389 if (!pp_funcs->get_display_mode_validation_clocks) 1390 return 0; 1391 1392 mutex_lock(&adev->pm.mutex); 1393 ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle, 1394 clocks); 1395 mutex_unlock(&adev->pm.mutex); 1396 1397 return ret; 1398 } 1399 1400 int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev, 1401 enum amd_pp_clock_type type, 1402 struct pp_clock_levels_with_latency *clocks) 1403 { 1404 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1405 int ret = 0; 1406 1407 if (!pp_funcs->get_clock_by_type_with_latency) 1408 return 0; 1409 1410 mutex_lock(&adev->pm.mutex); 1411 ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle, 1412 type, 1413 clocks); 1414 mutex_unlock(&adev->pm.mutex); 1415 1416 return ret; 1417 } 1418 1419 int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev, 1420 enum amd_pp_clock_type type, 1421 struct pp_clock_levels_with_voltage *clocks) 1422 { 1423 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1424 int ret = 0; 1425 1426 if (!pp_funcs->get_clock_by_type_with_voltage) 1427 return 0; 1428 1429 mutex_lock(&adev->pm.mutex); 1430 ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle, 1431 type, 1432 clocks); 1433 mutex_unlock(&adev->pm.mutex); 1434 1435 return ret; 1436 } 1437 1438 int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev, 1439 void *clock_ranges) 1440 { 1441 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1442 int ret = 0; 1443 1444 if (!pp_funcs->set_watermarks_for_clocks_ranges) 1445 return -EOPNOTSUPP; 1446 1447 mutex_lock(&adev->pm.mutex); 1448 ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle, 1449 clock_ranges); 1450 mutex_unlock(&adev->pm.mutex); 1451 1452 return ret; 1453 } 1454 1455 int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev, 1456 struct pp_display_clock_request *clock) 1457 { 1458 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1459 int ret = 0; 1460 1461 if (!pp_funcs->display_clock_voltage_request) 1462 return -EOPNOTSUPP; 1463 1464 mutex_lock(&adev->pm.mutex); 1465 ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle, 1466 clock); 1467 mutex_unlock(&adev->pm.mutex); 1468 1469 return ret; 1470 } 1471 1472 int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev, 1473 struct amd_pp_clock_info *clocks) 1474 { 1475 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1476 int ret = 0; 1477 1478 if (!pp_funcs->get_current_clocks) 1479 return -EOPNOTSUPP; 1480 1481 mutex_lock(&adev->pm.mutex); 1482 ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle, 1483 clocks); 1484 mutex_unlock(&adev->pm.mutex); 1485 1486 return ret; 1487 } 1488 1489 void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev) 1490 { 1491 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1492 1493 if (!pp_funcs->notify_smu_enable_pwe) 1494 return; 1495 1496 mutex_lock(&adev->pm.mutex); 1497 pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle); 1498 mutex_unlock(&adev->pm.mutex); 1499 } 1500 1501 int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev, 1502 uint32_t count) 1503 { 1504 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1505 int ret = 0; 1506 1507 if (!pp_funcs->set_active_display_count) 1508 return -EOPNOTSUPP; 1509 1510 mutex_lock(&adev->pm.mutex); 1511 ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle, 1512 count); 1513 mutex_unlock(&adev->pm.mutex); 1514 1515 return ret; 1516 } 1517 1518 int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev, 1519 uint32_t clock) 1520 { 1521 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1522 int ret = 0; 1523 1524 if (!pp_funcs->set_min_deep_sleep_dcefclk) 1525 return -EOPNOTSUPP; 1526 1527 mutex_lock(&adev->pm.mutex); 1528 ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle, 1529 clock); 1530 mutex_unlock(&adev->pm.mutex); 1531 1532 return ret; 1533 } 1534 1535 void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev, 1536 uint32_t clock) 1537 { 1538 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1539 1540 if (!pp_funcs->set_hard_min_dcefclk_by_freq) 1541 return; 1542 1543 mutex_lock(&adev->pm.mutex); 1544 pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle, 1545 clock); 1546 mutex_unlock(&adev->pm.mutex); 1547 } 1548 1549 void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev, 1550 uint32_t clock) 1551 { 1552 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1553 1554 if (!pp_funcs->set_hard_min_fclk_by_freq) 1555 return; 1556 1557 mutex_lock(&adev->pm.mutex); 1558 pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle, 1559 clock); 1560 mutex_unlock(&adev->pm.mutex); 1561 } 1562 1563 int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev, 1564 bool disable_memory_clock_switch) 1565 { 1566 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1567 int ret = 0; 1568 1569 if (!pp_funcs->display_disable_memory_clock_switch) 1570 return 0; 1571 1572 mutex_lock(&adev->pm.mutex); 1573 ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle, 1574 disable_memory_clock_switch); 1575 mutex_unlock(&adev->pm.mutex); 1576 1577 return ret; 1578 } 1579 1580 int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev, 1581 struct pp_smu_nv_clock_table *max_clocks) 1582 { 1583 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1584 int ret = 0; 1585 1586 if (!pp_funcs->get_max_sustainable_clocks_by_dc) 1587 return -EOPNOTSUPP; 1588 1589 mutex_lock(&adev->pm.mutex); 1590 ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle, 1591 max_clocks); 1592 mutex_unlock(&adev->pm.mutex); 1593 1594 return ret; 1595 } 1596 1597 enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev, 1598 unsigned int *clock_values_in_khz, 1599 unsigned int *num_states) 1600 { 1601 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1602 int ret = 0; 1603 1604 if (!pp_funcs->get_uclk_dpm_states) 1605 return -EOPNOTSUPP; 1606 1607 mutex_lock(&adev->pm.mutex); 1608 ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle, 1609 clock_values_in_khz, 1610 num_states); 1611 mutex_unlock(&adev->pm.mutex); 1612 1613 return ret; 1614 } 1615 1616 int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev, 1617 struct dpm_clocks *clock_table) 1618 { 1619 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1620 int ret = 0; 1621 1622 if (!pp_funcs->get_dpm_clock_table) 1623 return -EOPNOTSUPP; 1624 1625 mutex_lock(&adev->pm.mutex); 1626 ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle, 1627 clock_table); 1628 mutex_unlock(&adev->pm.mutex); 1629 1630 return ret; 1631 } 1632