1 /* 2 * Copyright 2011 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Alex Deucher 23 */ 24 25 #include "amdgpu.h" 26 #include "amdgpu_atombios.h" 27 #include "amdgpu_i2c.h" 28 #include "amdgpu_dpm.h" 29 #include "atom.h" 30 #include "amd_pcie.h" 31 #include "amdgpu_display.h" 32 #include "hwmgr.h" 33 #include <linux/power_supply.h> 34 #include "amdgpu_smu.h" 35 36 #define amdgpu_dpm_enable_bapm(adev, e) \ 37 ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) 38 39 int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) 40 { 41 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 42 int ret = 0; 43 44 if (!pp_funcs->get_sclk) 45 return 0; 46 47 mutex_lock(&adev->pm.mutex); 48 ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle, 49 low); 50 mutex_unlock(&adev->pm.mutex); 51 52 return ret; 53 } 54 55 int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low) 56 { 57 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 58 int ret = 0; 59 60 if (!pp_funcs->get_mclk) 61 return 0; 62 63 mutex_lock(&adev->pm.mutex); 64 ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle, 65 low); 66 mutex_unlock(&adev->pm.mutex); 67 68 return ret; 69 } 70 71 int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate) 72 { 73 int ret = 0; 74 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 75 enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON; 76 77 if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) { 78 dev_dbg(adev->dev, "IP block%d already in the target %s state!", 79 block_type, gate ? "gate" : "ungate"); 80 return 0; 81 } 82 83 mutex_lock(&adev->pm.mutex); 84 85 switch (block_type) { 86 case AMD_IP_BLOCK_TYPE_UVD: 87 case AMD_IP_BLOCK_TYPE_VCE: 88 case AMD_IP_BLOCK_TYPE_GFX: 89 case AMD_IP_BLOCK_TYPE_VCN: 90 case AMD_IP_BLOCK_TYPE_SDMA: 91 case AMD_IP_BLOCK_TYPE_JPEG: 92 case AMD_IP_BLOCK_TYPE_GMC: 93 case AMD_IP_BLOCK_TYPE_ACP: 94 if (pp_funcs && pp_funcs->set_powergating_by_smu) 95 ret = (pp_funcs->set_powergating_by_smu( 96 (adev)->powerplay.pp_handle, block_type, gate)); 97 break; 98 default: 99 break; 100 } 101 102 if (!ret) 103 atomic_set(&adev->pm.pwr_state[block_type], pwr_state); 104 105 mutex_unlock(&adev->pm.mutex); 106 107 return ret; 108 } 109 110 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev) 111 { 112 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 113 void *pp_handle = adev->powerplay.pp_handle; 114 int ret = 0; 115 116 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 117 return -ENOENT; 118 119 mutex_lock(&adev->pm.mutex); 120 121 /* enter BACO state */ 122 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 123 124 mutex_unlock(&adev->pm.mutex); 125 126 return ret; 127 } 128 129 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev) 130 { 131 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 132 void *pp_handle = adev->powerplay.pp_handle; 133 int ret = 0; 134 135 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 136 return -ENOENT; 137 138 mutex_lock(&adev->pm.mutex); 139 140 /* exit BACO state */ 141 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 142 143 mutex_unlock(&adev->pm.mutex); 144 145 return ret; 146 } 147 148 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, 149 enum pp_mp1_state mp1_state) 150 { 151 int ret = 0; 152 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 153 154 if (pp_funcs && pp_funcs->set_mp1_state) { 155 mutex_lock(&adev->pm.mutex); 156 157 ret = pp_funcs->set_mp1_state( 158 adev->powerplay.pp_handle, 159 mp1_state); 160 161 mutex_unlock(&adev->pm.mutex); 162 } 163 164 return ret; 165 } 166 167 bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) 168 { 169 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 170 void *pp_handle = adev->powerplay.pp_handle; 171 bool baco_cap; 172 int ret = 0; 173 174 if (!pp_funcs || !pp_funcs->get_asic_baco_capability) 175 return false; 176 /* Don't use baco for reset in S3. 177 * This is a workaround for some platforms 178 * where entering BACO during suspend 179 * seems to cause reboots or hangs. 180 * This might be related to the fact that BACO controls 181 * power to the whole GPU including devices like audio and USB. 182 * Powering down/up everything may adversely affect these other 183 * devices. Needs more investigation. 184 */ 185 if (adev->in_s3) 186 return false; 187 188 mutex_lock(&adev->pm.mutex); 189 190 ret = pp_funcs->get_asic_baco_capability(pp_handle, 191 &baco_cap); 192 193 mutex_unlock(&adev->pm.mutex); 194 195 return ret ? false : baco_cap; 196 } 197 198 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev) 199 { 200 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 201 void *pp_handle = adev->powerplay.pp_handle; 202 int ret = 0; 203 204 if (!pp_funcs || !pp_funcs->asic_reset_mode_2) 205 return -ENOENT; 206 207 mutex_lock(&adev->pm.mutex); 208 209 ret = pp_funcs->asic_reset_mode_2(pp_handle); 210 211 mutex_unlock(&adev->pm.mutex); 212 213 return ret; 214 } 215 216 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev) 217 { 218 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 219 void *pp_handle = adev->powerplay.pp_handle; 220 int ret = 0; 221 222 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 223 return -ENOENT; 224 225 mutex_lock(&adev->pm.mutex); 226 227 /* enter BACO state */ 228 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 229 if (ret) 230 goto out; 231 232 /* exit BACO state */ 233 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 234 235 out: 236 mutex_unlock(&adev->pm.mutex); 237 return ret; 238 } 239 240 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev) 241 { 242 struct smu_context *smu = adev->powerplay.pp_handle; 243 bool support_mode1_reset = false; 244 245 if (is_support_sw_smu(adev)) { 246 mutex_lock(&adev->pm.mutex); 247 support_mode1_reset = smu_mode1_reset_is_support(smu); 248 mutex_unlock(&adev->pm.mutex); 249 } 250 251 return support_mode1_reset; 252 } 253 254 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev) 255 { 256 struct smu_context *smu = adev->powerplay.pp_handle; 257 int ret = -EOPNOTSUPP; 258 259 if (is_support_sw_smu(adev)) { 260 mutex_lock(&adev->pm.mutex); 261 ret = smu_mode1_reset(smu); 262 mutex_unlock(&adev->pm.mutex); 263 } 264 265 return ret; 266 } 267 268 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, 269 enum PP_SMC_POWER_PROFILE type, 270 bool en) 271 { 272 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 273 int ret = 0; 274 275 if (amdgpu_sriov_vf(adev)) 276 return 0; 277 278 if (pp_funcs && pp_funcs->switch_power_profile) { 279 mutex_lock(&adev->pm.mutex); 280 ret = pp_funcs->switch_power_profile( 281 adev->powerplay.pp_handle, type, en); 282 mutex_unlock(&adev->pm.mutex); 283 } 284 285 return ret; 286 } 287 288 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, 289 uint32_t pstate) 290 { 291 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 292 int ret = 0; 293 294 if (pp_funcs && pp_funcs->set_xgmi_pstate) { 295 mutex_lock(&adev->pm.mutex); 296 ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle, 297 pstate); 298 mutex_unlock(&adev->pm.mutex); 299 } 300 301 return ret; 302 } 303 304 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev, 305 uint32_t cstate) 306 { 307 int ret = 0; 308 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 309 void *pp_handle = adev->powerplay.pp_handle; 310 311 if (pp_funcs && pp_funcs->set_df_cstate) { 312 mutex_lock(&adev->pm.mutex); 313 ret = pp_funcs->set_df_cstate(pp_handle, cstate); 314 mutex_unlock(&adev->pm.mutex); 315 } 316 317 return ret; 318 } 319 320 int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en) 321 { 322 struct smu_context *smu = adev->powerplay.pp_handle; 323 int ret = 0; 324 325 if (is_support_sw_smu(adev)) { 326 mutex_lock(&adev->pm.mutex); 327 ret = smu_allow_xgmi_power_down(smu, en); 328 mutex_unlock(&adev->pm.mutex); 329 } 330 331 return ret; 332 } 333 334 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev) 335 { 336 void *pp_handle = adev->powerplay.pp_handle; 337 const struct amd_pm_funcs *pp_funcs = 338 adev->powerplay.pp_funcs; 339 int ret = 0; 340 341 if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) { 342 mutex_lock(&adev->pm.mutex); 343 ret = pp_funcs->enable_mgpu_fan_boost(pp_handle); 344 mutex_unlock(&adev->pm.mutex); 345 } 346 347 return ret; 348 } 349 350 int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev, 351 uint32_t msg_id) 352 { 353 void *pp_handle = adev->powerplay.pp_handle; 354 const struct amd_pm_funcs *pp_funcs = 355 adev->powerplay.pp_funcs; 356 int ret = 0; 357 358 if (pp_funcs && pp_funcs->set_clockgating_by_smu) { 359 mutex_lock(&adev->pm.mutex); 360 ret = pp_funcs->set_clockgating_by_smu(pp_handle, 361 msg_id); 362 mutex_unlock(&adev->pm.mutex); 363 } 364 365 return ret; 366 } 367 368 int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev, 369 bool acquire) 370 { 371 void *pp_handle = adev->powerplay.pp_handle; 372 const struct amd_pm_funcs *pp_funcs = 373 adev->powerplay.pp_funcs; 374 int ret = -EOPNOTSUPP; 375 376 if (pp_funcs && pp_funcs->smu_i2c_bus_access) { 377 mutex_lock(&adev->pm.mutex); 378 ret = pp_funcs->smu_i2c_bus_access(pp_handle, 379 acquire); 380 mutex_unlock(&adev->pm.mutex); 381 } 382 383 return ret; 384 } 385 386 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) 387 { 388 if (adev->pm.dpm_enabled) { 389 mutex_lock(&adev->pm.mutex); 390 if (power_supply_is_system_supplied() > 0) 391 adev->pm.ac_power = true; 392 else 393 adev->pm.ac_power = false; 394 395 if (adev->powerplay.pp_funcs && 396 adev->powerplay.pp_funcs->enable_bapm) 397 amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); 398 399 if (is_support_sw_smu(adev)) 400 smu_set_ac_dc(adev->powerplay.pp_handle); 401 402 mutex_unlock(&adev->pm.mutex); 403 } 404 } 405 406 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, 407 void *data, uint32_t *size) 408 { 409 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 410 int ret = -EINVAL; 411 412 if (!data || !size) 413 return -EINVAL; 414 415 if (pp_funcs && pp_funcs->read_sensor) { 416 mutex_lock(&adev->pm.mutex); 417 ret = pp_funcs->read_sensor(adev->powerplay.pp_handle, 418 sensor, 419 data, 420 size); 421 mutex_unlock(&adev->pm.mutex); 422 } 423 424 return ret; 425 } 426 427 void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev) 428 { 429 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 430 int i; 431 432 if (!adev->pm.dpm_enabled) 433 return; 434 435 if (!pp_funcs->pm_compute_clocks) 436 return; 437 438 if (adev->mode_info.num_crtc) 439 amdgpu_display_bandwidth_update(adev); 440 441 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 442 struct amdgpu_ring *ring = adev->rings[i]; 443 if (ring && ring->sched.ready) 444 amdgpu_fence_wait_empty(ring); 445 } 446 447 mutex_lock(&adev->pm.mutex); 448 pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle); 449 mutex_unlock(&adev->pm.mutex); 450 } 451 452 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 453 { 454 int ret = 0; 455 456 if (adev->family == AMDGPU_FAMILY_SI) { 457 mutex_lock(&adev->pm.mutex); 458 if (enable) { 459 adev->pm.dpm.uvd_active = true; 460 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; 461 } else { 462 adev->pm.dpm.uvd_active = false; 463 } 464 mutex_unlock(&adev->pm.mutex); 465 466 amdgpu_dpm_compute_clocks(adev); 467 return; 468 } 469 470 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); 471 if (ret) 472 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 473 enable ? "enable" : "disable", ret); 474 } 475 476 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 477 { 478 int ret = 0; 479 480 if (adev->family == AMDGPU_FAMILY_SI) { 481 mutex_lock(&adev->pm.mutex); 482 if (enable) { 483 adev->pm.dpm.vce_active = true; 484 /* XXX select vce level based on ring/task */ 485 adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; 486 } else { 487 adev->pm.dpm.vce_active = false; 488 } 489 mutex_unlock(&adev->pm.mutex); 490 491 amdgpu_dpm_compute_clocks(adev); 492 return; 493 } 494 495 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); 496 if (ret) 497 DRM_ERROR("Dpm %s vce failed, ret = %d. \n", 498 enable ? "enable" : "disable", ret); 499 } 500 501 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) 502 { 503 int ret = 0; 504 505 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable); 506 if (ret) 507 DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n", 508 enable ? "enable" : "disable", ret); 509 } 510 511 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) 512 { 513 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 514 int r = 0; 515 516 if (!pp_funcs || !pp_funcs->load_firmware) 517 return 0; 518 519 mutex_lock(&adev->pm.mutex); 520 r = pp_funcs->load_firmware(adev->powerplay.pp_handle); 521 if (r) { 522 pr_err("smu firmware loading failed\n"); 523 goto out; 524 } 525 526 if (smu_version) 527 *smu_version = adev->pm.fw_version; 528 529 out: 530 mutex_unlock(&adev->pm.mutex); 531 return r; 532 } 533 534 int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable) 535 { 536 int ret = 0; 537 538 if (is_support_sw_smu(adev)) { 539 mutex_lock(&adev->pm.mutex); 540 ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle, 541 enable); 542 mutex_unlock(&adev->pm.mutex); 543 } 544 545 return ret; 546 } 547 548 int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size) 549 { 550 struct smu_context *smu = adev->powerplay.pp_handle; 551 int ret = 0; 552 553 if (!is_support_sw_smu(adev)) 554 return -EOPNOTSUPP; 555 556 mutex_lock(&adev->pm.mutex); 557 ret = smu_send_hbm_bad_pages_num(smu, size); 558 mutex_unlock(&adev->pm.mutex); 559 560 return ret; 561 } 562 563 int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size) 564 { 565 struct smu_context *smu = adev->powerplay.pp_handle; 566 int ret = 0; 567 568 if (!is_support_sw_smu(adev)) 569 return -EOPNOTSUPP; 570 571 mutex_lock(&adev->pm.mutex); 572 ret = smu_send_hbm_bad_channel_flag(smu, size); 573 mutex_unlock(&adev->pm.mutex); 574 575 return ret; 576 } 577 578 int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, 579 enum pp_clock_type type, 580 uint32_t *min, 581 uint32_t *max) 582 { 583 int ret = 0; 584 585 if (type != PP_SCLK) 586 return -EINVAL; 587 588 if (!is_support_sw_smu(adev)) 589 return -EOPNOTSUPP; 590 591 mutex_lock(&adev->pm.mutex); 592 ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle, 593 SMU_SCLK, 594 min, 595 max); 596 mutex_unlock(&adev->pm.mutex); 597 598 return ret; 599 } 600 601 int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, 602 enum pp_clock_type type, 603 uint32_t min, 604 uint32_t max) 605 { 606 struct smu_context *smu = adev->powerplay.pp_handle; 607 int ret = 0; 608 609 if (type != PP_SCLK) 610 return -EINVAL; 611 612 if (!is_support_sw_smu(adev)) 613 return -EOPNOTSUPP; 614 615 mutex_lock(&adev->pm.mutex); 616 ret = smu_set_soft_freq_range(smu, 617 SMU_SCLK, 618 min, 619 max); 620 mutex_unlock(&adev->pm.mutex); 621 622 return ret; 623 } 624 625 int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev) 626 { 627 struct smu_context *smu = adev->powerplay.pp_handle; 628 int ret = 0; 629 630 if (!is_support_sw_smu(adev)) 631 return 0; 632 633 mutex_lock(&adev->pm.mutex); 634 ret = smu_write_watermarks_table(smu); 635 mutex_unlock(&adev->pm.mutex); 636 637 return ret; 638 } 639 640 int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, 641 enum smu_event_type event, 642 uint64_t event_arg) 643 { 644 struct smu_context *smu = adev->powerplay.pp_handle; 645 int ret = 0; 646 647 if (!is_support_sw_smu(adev)) 648 return -EOPNOTSUPP; 649 650 mutex_lock(&adev->pm.mutex); 651 ret = smu_wait_for_event(smu, event, event_arg); 652 mutex_unlock(&adev->pm.mutex); 653 654 return ret; 655 } 656 657 int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value) 658 { 659 struct smu_context *smu = adev->powerplay.pp_handle; 660 int ret = 0; 661 662 if (!is_support_sw_smu(adev)) 663 return -EOPNOTSUPP; 664 665 mutex_lock(&adev->pm.mutex); 666 ret = smu_get_status_gfxoff(smu, value); 667 mutex_unlock(&adev->pm.mutex); 668 669 return ret; 670 } 671 672 uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev) 673 { 674 struct smu_context *smu = adev->powerplay.pp_handle; 675 676 if (!is_support_sw_smu(adev)) 677 return 0; 678 679 return atomic64_read(&smu->throttle_int_counter); 680 } 681 682 /* amdgpu_dpm_gfx_state_change - Handle gfx power state change set 683 * @adev: amdgpu_device pointer 684 * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry) 685 * 686 */ 687 void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev, 688 enum gfx_change_state state) 689 { 690 mutex_lock(&adev->pm.mutex); 691 if (adev->powerplay.pp_funcs && 692 adev->powerplay.pp_funcs->gfx_state_change_set) 693 ((adev)->powerplay.pp_funcs->gfx_state_change_set( 694 (adev)->powerplay.pp_handle, state)); 695 mutex_unlock(&adev->pm.mutex); 696 } 697 698 int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev, 699 void *umc_ecc) 700 { 701 struct smu_context *smu = adev->powerplay.pp_handle; 702 int ret = 0; 703 704 if (!is_support_sw_smu(adev)) 705 return -EOPNOTSUPP; 706 707 mutex_lock(&adev->pm.mutex); 708 ret = smu_get_ecc_info(smu, umc_ecc); 709 mutex_unlock(&adev->pm.mutex); 710 711 return ret; 712 } 713 714 struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev, 715 uint32_t idx) 716 { 717 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 718 struct amd_vce_state *vstate = NULL; 719 720 if (!pp_funcs->get_vce_clock_state) 721 return NULL; 722 723 mutex_lock(&adev->pm.mutex); 724 vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle, 725 idx); 726 mutex_unlock(&adev->pm.mutex); 727 728 return vstate; 729 } 730 731 void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, 732 enum amd_pm_state_type *state) 733 { 734 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 735 736 mutex_lock(&adev->pm.mutex); 737 738 if (!pp_funcs->get_current_power_state) { 739 *state = adev->pm.dpm.user_state; 740 goto out; 741 } 742 743 *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle); 744 if (*state < POWER_STATE_TYPE_DEFAULT || 745 *state > POWER_STATE_TYPE_INTERNAL_3DPERF) 746 *state = adev->pm.dpm.user_state; 747 748 out: 749 mutex_unlock(&adev->pm.mutex); 750 } 751 752 void amdgpu_dpm_set_power_state(struct amdgpu_device *adev, 753 enum amd_pm_state_type state) 754 { 755 mutex_lock(&adev->pm.mutex); 756 adev->pm.dpm.user_state = state; 757 mutex_unlock(&adev->pm.mutex); 758 759 if (is_support_sw_smu(adev)) 760 return; 761 762 if (amdgpu_dpm_dispatch_task(adev, 763 AMD_PP_TASK_ENABLE_USER_STATE, 764 &state) == -EOPNOTSUPP) 765 amdgpu_dpm_compute_clocks(adev); 766 } 767 768 enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev) 769 { 770 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 771 enum amd_dpm_forced_level level; 772 773 if (!pp_funcs) 774 return AMD_DPM_FORCED_LEVEL_AUTO; 775 776 mutex_lock(&adev->pm.mutex); 777 if (pp_funcs->get_performance_level) 778 level = pp_funcs->get_performance_level(adev->powerplay.pp_handle); 779 else 780 level = adev->pm.dpm.forced_level; 781 mutex_unlock(&adev->pm.mutex); 782 783 return level; 784 } 785 786 int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev, 787 enum amd_dpm_forced_level level) 788 { 789 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 790 enum amd_dpm_forced_level current_level; 791 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 792 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 793 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 794 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 795 796 if (!pp_funcs || !pp_funcs->force_performance_level) 797 return 0; 798 799 if (adev->pm.dpm.thermal_active) 800 return -EINVAL; 801 802 current_level = amdgpu_dpm_get_performance_level(adev); 803 if (current_level == level) 804 return 0; 805 806 if (adev->asic_type == CHIP_RAVEN) { 807 if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) { 808 if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && 809 level == AMD_DPM_FORCED_LEVEL_MANUAL) 810 amdgpu_gfx_off_ctrl(adev, false); 811 else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && 812 level != AMD_DPM_FORCED_LEVEL_MANUAL) 813 amdgpu_gfx_off_ctrl(adev, true); 814 } 815 } 816 817 if (!(current_level & profile_mode_mask) && 818 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) 819 return -EINVAL; 820 821 if (!(current_level & profile_mode_mask) && 822 (level & profile_mode_mask)) { 823 /* enter UMD Pstate */ 824 amdgpu_device_ip_set_powergating_state(adev, 825 AMD_IP_BLOCK_TYPE_GFX, 826 AMD_PG_STATE_UNGATE); 827 amdgpu_device_ip_set_clockgating_state(adev, 828 AMD_IP_BLOCK_TYPE_GFX, 829 AMD_CG_STATE_UNGATE); 830 } else if ((current_level & profile_mode_mask) && 831 !(level & profile_mode_mask)) { 832 /* exit UMD Pstate */ 833 amdgpu_device_ip_set_clockgating_state(adev, 834 AMD_IP_BLOCK_TYPE_GFX, 835 AMD_CG_STATE_GATE); 836 amdgpu_device_ip_set_powergating_state(adev, 837 AMD_IP_BLOCK_TYPE_GFX, 838 AMD_PG_STATE_GATE); 839 } 840 841 mutex_lock(&adev->pm.mutex); 842 843 if (pp_funcs->force_performance_level(adev->powerplay.pp_handle, 844 level)) { 845 mutex_unlock(&adev->pm.mutex); 846 return -EINVAL; 847 } 848 849 adev->pm.dpm.forced_level = level; 850 851 mutex_unlock(&adev->pm.mutex); 852 853 return 0; 854 } 855 856 int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev, 857 struct pp_states_info *states) 858 { 859 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 860 int ret = 0; 861 862 if (!pp_funcs->get_pp_num_states) 863 return -EOPNOTSUPP; 864 865 mutex_lock(&adev->pm.mutex); 866 ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle, 867 states); 868 mutex_unlock(&adev->pm.mutex); 869 870 return ret; 871 } 872 873 int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev, 874 enum amd_pp_task task_id, 875 enum amd_pm_state_type *user_state) 876 { 877 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 878 int ret = 0; 879 880 if (!pp_funcs->dispatch_tasks) 881 return -EOPNOTSUPP; 882 883 mutex_lock(&adev->pm.mutex); 884 ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle, 885 task_id, 886 user_state); 887 mutex_unlock(&adev->pm.mutex); 888 889 return ret; 890 } 891 892 int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table) 893 { 894 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 895 int ret = 0; 896 897 if (!pp_funcs->get_pp_table) 898 return 0; 899 900 mutex_lock(&adev->pm.mutex); 901 ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle, 902 table); 903 mutex_unlock(&adev->pm.mutex); 904 905 return ret; 906 } 907 908 int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev, 909 uint32_t type, 910 long *input, 911 uint32_t size) 912 { 913 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 914 int ret = 0; 915 916 if (!pp_funcs->set_fine_grain_clk_vol) 917 return 0; 918 919 mutex_lock(&adev->pm.mutex); 920 ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle, 921 type, 922 input, 923 size); 924 mutex_unlock(&adev->pm.mutex); 925 926 return ret; 927 } 928 929 int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev, 930 uint32_t type, 931 long *input, 932 uint32_t size) 933 { 934 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 935 int ret = 0; 936 937 if (!pp_funcs->odn_edit_dpm_table) 938 return 0; 939 940 mutex_lock(&adev->pm.mutex); 941 ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle, 942 type, 943 input, 944 size); 945 mutex_unlock(&adev->pm.mutex); 946 947 return ret; 948 } 949 950 int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev, 951 enum pp_clock_type type, 952 char *buf) 953 { 954 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 955 int ret = 0; 956 957 if (!pp_funcs->print_clock_levels) 958 return 0; 959 960 mutex_lock(&adev->pm.mutex); 961 ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle, 962 type, 963 buf); 964 mutex_unlock(&adev->pm.mutex); 965 966 return ret; 967 } 968 969 int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev, 970 enum pp_clock_type type, 971 char *buf, 972 int *offset) 973 { 974 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 975 int ret = 0; 976 977 if (!pp_funcs->emit_clock_levels) 978 return -ENOENT; 979 980 mutex_lock(&adev->pm.mutex); 981 ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle, 982 type, 983 buf, 984 offset); 985 mutex_unlock(&adev->pm.mutex); 986 987 return ret; 988 } 989 990 int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev, 991 uint64_t ppfeature_masks) 992 { 993 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 994 int ret = 0; 995 996 if (!pp_funcs->set_ppfeature_status) 997 return 0; 998 999 mutex_lock(&adev->pm.mutex); 1000 ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle, 1001 ppfeature_masks); 1002 mutex_unlock(&adev->pm.mutex); 1003 1004 return ret; 1005 } 1006 1007 int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf) 1008 { 1009 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1010 int ret = 0; 1011 1012 if (!pp_funcs->get_ppfeature_status) 1013 return 0; 1014 1015 mutex_lock(&adev->pm.mutex); 1016 ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle, 1017 buf); 1018 mutex_unlock(&adev->pm.mutex); 1019 1020 return ret; 1021 } 1022 1023 int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev, 1024 enum pp_clock_type type, 1025 uint32_t mask) 1026 { 1027 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1028 int ret = 0; 1029 1030 if (!pp_funcs->force_clock_level) 1031 return 0; 1032 1033 mutex_lock(&adev->pm.mutex); 1034 ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle, 1035 type, 1036 mask); 1037 mutex_unlock(&adev->pm.mutex); 1038 1039 return ret; 1040 } 1041 1042 int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev) 1043 { 1044 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1045 int ret = 0; 1046 1047 if (!pp_funcs->get_sclk_od) 1048 return 0; 1049 1050 mutex_lock(&adev->pm.mutex); 1051 ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle); 1052 mutex_unlock(&adev->pm.mutex); 1053 1054 return ret; 1055 } 1056 1057 int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) 1058 { 1059 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1060 1061 if (is_support_sw_smu(adev)) 1062 return 0; 1063 1064 mutex_lock(&adev->pm.mutex); 1065 if (pp_funcs->set_sclk_od) 1066 pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value); 1067 mutex_unlock(&adev->pm.mutex); 1068 1069 if (amdgpu_dpm_dispatch_task(adev, 1070 AMD_PP_TASK_READJUST_POWER_STATE, 1071 NULL) == -EOPNOTSUPP) { 1072 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1073 amdgpu_dpm_compute_clocks(adev); 1074 } 1075 1076 return 0; 1077 } 1078 1079 int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev) 1080 { 1081 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1082 int ret = 0; 1083 1084 if (!pp_funcs->get_mclk_od) 1085 return 0; 1086 1087 mutex_lock(&adev->pm.mutex); 1088 ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle); 1089 mutex_unlock(&adev->pm.mutex); 1090 1091 return ret; 1092 } 1093 1094 int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) 1095 { 1096 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1097 1098 if (is_support_sw_smu(adev)) 1099 return 0; 1100 1101 mutex_lock(&adev->pm.mutex); 1102 if (pp_funcs->set_mclk_od) 1103 pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value); 1104 mutex_unlock(&adev->pm.mutex); 1105 1106 if (amdgpu_dpm_dispatch_task(adev, 1107 AMD_PP_TASK_READJUST_POWER_STATE, 1108 NULL) == -EOPNOTSUPP) { 1109 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1110 amdgpu_dpm_compute_clocks(adev); 1111 } 1112 1113 return 0; 1114 } 1115 1116 int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev, 1117 char *buf) 1118 { 1119 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1120 int ret = 0; 1121 1122 if (!pp_funcs->get_power_profile_mode) 1123 return -EOPNOTSUPP; 1124 1125 mutex_lock(&adev->pm.mutex); 1126 ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle, 1127 buf); 1128 mutex_unlock(&adev->pm.mutex); 1129 1130 return ret; 1131 } 1132 1133 int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev, 1134 long *input, uint32_t size) 1135 { 1136 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1137 int ret = 0; 1138 1139 if (!pp_funcs->set_power_profile_mode) 1140 return 0; 1141 1142 mutex_lock(&adev->pm.mutex); 1143 ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle, 1144 input, 1145 size); 1146 mutex_unlock(&adev->pm.mutex); 1147 1148 return ret; 1149 } 1150 1151 int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table) 1152 { 1153 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1154 int ret = 0; 1155 1156 if (!pp_funcs->get_gpu_metrics) 1157 return 0; 1158 1159 mutex_lock(&adev->pm.mutex); 1160 ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle, 1161 table); 1162 mutex_unlock(&adev->pm.mutex); 1163 1164 return ret; 1165 } 1166 1167 int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev, 1168 uint32_t *fan_mode) 1169 { 1170 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1171 int ret = 0; 1172 1173 if (!pp_funcs->get_fan_control_mode) 1174 return -EOPNOTSUPP; 1175 1176 mutex_lock(&adev->pm.mutex); 1177 ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle, 1178 fan_mode); 1179 mutex_unlock(&adev->pm.mutex); 1180 1181 return ret; 1182 } 1183 1184 int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev, 1185 uint32_t speed) 1186 { 1187 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1188 int ret = 0; 1189 1190 if (!pp_funcs->set_fan_speed_pwm) 1191 return -EOPNOTSUPP; 1192 1193 mutex_lock(&adev->pm.mutex); 1194 ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle, 1195 speed); 1196 mutex_unlock(&adev->pm.mutex); 1197 1198 return ret; 1199 } 1200 1201 int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev, 1202 uint32_t *speed) 1203 { 1204 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1205 int ret = 0; 1206 1207 if (!pp_funcs->get_fan_speed_pwm) 1208 return -EOPNOTSUPP; 1209 1210 mutex_lock(&adev->pm.mutex); 1211 ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle, 1212 speed); 1213 mutex_unlock(&adev->pm.mutex); 1214 1215 return ret; 1216 } 1217 1218 int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev, 1219 uint32_t *speed) 1220 { 1221 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1222 int ret = 0; 1223 1224 if (!pp_funcs->get_fan_speed_rpm) 1225 return -EOPNOTSUPP; 1226 1227 mutex_lock(&adev->pm.mutex); 1228 ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle, 1229 speed); 1230 mutex_unlock(&adev->pm.mutex); 1231 1232 return ret; 1233 } 1234 1235 int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev, 1236 uint32_t speed) 1237 { 1238 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1239 int ret = 0; 1240 1241 if (!pp_funcs->set_fan_speed_rpm) 1242 return -EOPNOTSUPP; 1243 1244 mutex_lock(&adev->pm.mutex); 1245 ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle, 1246 speed); 1247 mutex_unlock(&adev->pm.mutex); 1248 1249 return ret; 1250 } 1251 1252 int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev, 1253 uint32_t mode) 1254 { 1255 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1256 int ret = 0; 1257 1258 if (!pp_funcs->set_fan_control_mode) 1259 return -EOPNOTSUPP; 1260 1261 mutex_lock(&adev->pm.mutex); 1262 ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle, 1263 mode); 1264 mutex_unlock(&adev->pm.mutex); 1265 1266 return ret; 1267 } 1268 1269 int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev, 1270 uint32_t *limit, 1271 enum pp_power_limit_level pp_limit_level, 1272 enum pp_power_type power_type) 1273 { 1274 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1275 int ret = 0; 1276 1277 if (!pp_funcs->get_power_limit) 1278 return -ENODATA; 1279 1280 mutex_lock(&adev->pm.mutex); 1281 ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle, 1282 limit, 1283 pp_limit_level, 1284 power_type); 1285 mutex_unlock(&adev->pm.mutex); 1286 1287 return ret; 1288 } 1289 1290 int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, 1291 uint32_t limit) 1292 { 1293 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1294 int ret = 0; 1295 1296 if (!pp_funcs->set_power_limit) 1297 return -EINVAL; 1298 1299 mutex_lock(&adev->pm.mutex); 1300 ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle, 1301 limit); 1302 mutex_unlock(&adev->pm.mutex); 1303 1304 return ret; 1305 } 1306 1307 int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev) 1308 { 1309 bool cclk_dpm_supported = false; 1310 1311 if (!is_support_sw_smu(adev)) 1312 return false; 1313 1314 mutex_lock(&adev->pm.mutex); 1315 cclk_dpm_supported = is_support_cclk_dpm(adev); 1316 mutex_unlock(&adev->pm.mutex); 1317 1318 return (int)cclk_dpm_supported; 1319 } 1320 1321 int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, 1322 struct seq_file *m) 1323 { 1324 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1325 1326 if (!pp_funcs->debugfs_print_current_performance_level) 1327 return -EOPNOTSUPP; 1328 1329 mutex_lock(&adev->pm.mutex); 1330 pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle, 1331 m); 1332 mutex_unlock(&adev->pm.mutex); 1333 1334 return 0; 1335 } 1336 1337 int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, 1338 void **addr, 1339 size_t *size) 1340 { 1341 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1342 int ret = 0; 1343 1344 if (!pp_funcs->get_smu_prv_buf_details) 1345 return -ENOSYS; 1346 1347 mutex_lock(&adev->pm.mutex); 1348 ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle, 1349 addr, 1350 size); 1351 mutex_unlock(&adev->pm.mutex); 1352 1353 return ret; 1354 } 1355 1356 int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev) 1357 { 1358 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 1359 struct smu_context *smu = adev->powerplay.pp_handle; 1360 1361 if ((is_support_sw_smu(adev) && smu->od_enabled) || 1362 (is_support_sw_smu(adev) && smu->is_apu) || 1363 (!is_support_sw_smu(adev) && hwmgr->od_enabled)) 1364 return true; 1365 1366 return false; 1367 } 1368 1369 int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, 1370 const char *buf, 1371 size_t size) 1372 { 1373 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1374 int ret = 0; 1375 1376 if (!pp_funcs->set_pp_table) 1377 return -EOPNOTSUPP; 1378 1379 mutex_lock(&adev->pm.mutex); 1380 ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle, 1381 buf, 1382 size); 1383 mutex_unlock(&adev->pm.mutex); 1384 1385 return ret; 1386 } 1387 1388 int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev) 1389 { 1390 struct smu_context *smu = adev->powerplay.pp_handle; 1391 1392 if (!is_support_sw_smu(adev)) 1393 return INT_MAX; 1394 1395 return smu->cpu_core_num; 1396 } 1397 1398 void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev) 1399 { 1400 if (!is_support_sw_smu(adev)) 1401 return; 1402 1403 amdgpu_smu_stb_debug_fs_init(adev); 1404 } 1405 1406 int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev, 1407 const struct amd_pp_display_configuration *input) 1408 { 1409 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1410 int ret = 0; 1411 1412 if (!pp_funcs->display_configuration_change) 1413 return 0; 1414 1415 mutex_lock(&adev->pm.mutex); 1416 ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle, 1417 input); 1418 mutex_unlock(&adev->pm.mutex); 1419 1420 return ret; 1421 } 1422 1423 int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev, 1424 enum amd_pp_clock_type type, 1425 struct amd_pp_clocks *clocks) 1426 { 1427 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1428 int ret = 0; 1429 1430 if (!pp_funcs->get_clock_by_type) 1431 return 0; 1432 1433 mutex_lock(&adev->pm.mutex); 1434 ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle, 1435 type, 1436 clocks); 1437 mutex_unlock(&adev->pm.mutex); 1438 1439 return ret; 1440 } 1441 1442 int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev, 1443 struct amd_pp_simple_clock_info *clocks) 1444 { 1445 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1446 int ret = 0; 1447 1448 if (!pp_funcs->get_display_mode_validation_clocks) 1449 return 0; 1450 1451 mutex_lock(&adev->pm.mutex); 1452 ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle, 1453 clocks); 1454 mutex_unlock(&adev->pm.mutex); 1455 1456 return ret; 1457 } 1458 1459 int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev, 1460 enum amd_pp_clock_type type, 1461 struct pp_clock_levels_with_latency *clocks) 1462 { 1463 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1464 int ret = 0; 1465 1466 if (!pp_funcs->get_clock_by_type_with_latency) 1467 return 0; 1468 1469 mutex_lock(&adev->pm.mutex); 1470 ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle, 1471 type, 1472 clocks); 1473 mutex_unlock(&adev->pm.mutex); 1474 1475 return ret; 1476 } 1477 1478 int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev, 1479 enum amd_pp_clock_type type, 1480 struct pp_clock_levels_with_voltage *clocks) 1481 { 1482 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1483 int ret = 0; 1484 1485 if (!pp_funcs->get_clock_by_type_with_voltage) 1486 return 0; 1487 1488 mutex_lock(&adev->pm.mutex); 1489 ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle, 1490 type, 1491 clocks); 1492 mutex_unlock(&adev->pm.mutex); 1493 1494 return ret; 1495 } 1496 1497 int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev, 1498 void *clock_ranges) 1499 { 1500 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1501 int ret = 0; 1502 1503 if (!pp_funcs->set_watermarks_for_clocks_ranges) 1504 return -EOPNOTSUPP; 1505 1506 mutex_lock(&adev->pm.mutex); 1507 ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle, 1508 clock_ranges); 1509 mutex_unlock(&adev->pm.mutex); 1510 1511 return ret; 1512 } 1513 1514 int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev, 1515 struct pp_display_clock_request *clock) 1516 { 1517 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1518 int ret = 0; 1519 1520 if (!pp_funcs->display_clock_voltage_request) 1521 return -EOPNOTSUPP; 1522 1523 mutex_lock(&adev->pm.mutex); 1524 ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle, 1525 clock); 1526 mutex_unlock(&adev->pm.mutex); 1527 1528 return ret; 1529 } 1530 1531 int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev, 1532 struct amd_pp_clock_info *clocks) 1533 { 1534 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1535 int ret = 0; 1536 1537 if (!pp_funcs->get_current_clocks) 1538 return -EOPNOTSUPP; 1539 1540 mutex_lock(&adev->pm.mutex); 1541 ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle, 1542 clocks); 1543 mutex_unlock(&adev->pm.mutex); 1544 1545 return ret; 1546 } 1547 1548 void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev) 1549 { 1550 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1551 1552 if (!pp_funcs->notify_smu_enable_pwe) 1553 return; 1554 1555 mutex_lock(&adev->pm.mutex); 1556 pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle); 1557 mutex_unlock(&adev->pm.mutex); 1558 } 1559 1560 int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev, 1561 uint32_t count) 1562 { 1563 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1564 int ret = 0; 1565 1566 if (!pp_funcs->set_active_display_count) 1567 return -EOPNOTSUPP; 1568 1569 mutex_lock(&adev->pm.mutex); 1570 ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle, 1571 count); 1572 mutex_unlock(&adev->pm.mutex); 1573 1574 return ret; 1575 } 1576 1577 int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev, 1578 uint32_t clock) 1579 { 1580 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1581 int ret = 0; 1582 1583 if (!pp_funcs->set_min_deep_sleep_dcefclk) 1584 return -EOPNOTSUPP; 1585 1586 mutex_lock(&adev->pm.mutex); 1587 ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle, 1588 clock); 1589 mutex_unlock(&adev->pm.mutex); 1590 1591 return ret; 1592 } 1593 1594 void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev, 1595 uint32_t clock) 1596 { 1597 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1598 1599 if (!pp_funcs->set_hard_min_dcefclk_by_freq) 1600 return; 1601 1602 mutex_lock(&adev->pm.mutex); 1603 pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle, 1604 clock); 1605 mutex_unlock(&adev->pm.mutex); 1606 } 1607 1608 void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev, 1609 uint32_t clock) 1610 { 1611 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1612 1613 if (!pp_funcs->set_hard_min_fclk_by_freq) 1614 return; 1615 1616 mutex_lock(&adev->pm.mutex); 1617 pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle, 1618 clock); 1619 mutex_unlock(&adev->pm.mutex); 1620 } 1621 1622 int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev, 1623 bool disable_memory_clock_switch) 1624 { 1625 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1626 int ret = 0; 1627 1628 if (!pp_funcs->display_disable_memory_clock_switch) 1629 return 0; 1630 1631 mutex_lock(&adev->pm.mutex); 1632 ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle, 1633 disable_memory_clock_switch); 1634 mutex_unlock(&adev->pm.mutex); 1635 1636 return ret; 1637 } 1638 1639 int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev, 1640 struct pp_smu_nv_clock_table *max_clocks) 1641 { 1642 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1643 int ret = 0; 1644 1645 if (!pp_funcs->get_max_sustainable_clocks_by_dc) 1646 return -EOPNOTSUPP; 1647 1648 mutex_lock(&adev->pm.mutex); 1649 ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle, 1650 max_clocks); 1651 mutex_unlock(&adev->pm.mutex); 1652 1653 return ret; 1654 } 1655 1656 enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev, 1657 unsigned int *clock_values_in_khz, 1658 unsigned int *num_states) 1659 { 1660 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1661 int ret = 0; 1662 1663 if (!pp_funcs->get_uclk_dpm_states) 1664 return -EOPNOTSUPP; 1665 1666 mutex_lock(&adev->pm.mutex); 1667 ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle, 1668 clock_values_in_khz, 1669 num_states); 1670 mutex_unlock(&adev->pm.mutex); 1671 1672 return ret; 1673 } 1674 1675 int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev, 1676 struct dpm_clocks *clock_table) 1677 { 1678 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1679 int ret = 0; 1680 1681 if (!pp_funcs->get_dpm_clock_table) 1682 return -EOPNOTSUPP; 1683 1684 mutex_lock(&adev->pm.mutex); 1685 ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle, 1686 clock_table); 1687 mutex_unlock(&adev->pm.mutex); 1688 1689 return ret; 1690 } 1691