/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
			block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_gfx_power_up_by_imu(smu);
	mutex_unlock(&adev->pm.mutex);

	msleep(10);

	return ret;
}

int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->set_mp1_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	bool baco_cap;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return false;
	/* Don't use baco for reset in S3.
	 * This is a workaround for some platforms
	 * where entering BACO during suspend
	 * seems to cause reboots or hangs.
	 * This might be related to the fact that BACO controls
	 * power to the whole GPU including devices like audio and USB.
	 * Powering down/up everything may adversely affect these other
	 * devices. Needs more investigation.
	 */
	if (adev->in_s3)
		return false;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle,
						 &baco_cap);

	mutex_unlock(&adev->pm.mutex);

	return ret ? false : baco_cap;
}
int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_allow_xgmi_power_down(smu, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}
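
/**
 * amdgpu_dpm_enable_mgpu_fan_boost - enable fan boost in multi-GPU setups
 * @adev: amdgpu_device pointer
 *
 * Returns 0 on success or when the backend provides no
 * enable_mgpu_fan_boost callback, negative error code on failure.
 */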
int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int i;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware)
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_channel_flag(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_soft_freq_range(smu,
				      SMU_SCLK,
				      min,
				      max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_entrycount_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/**
 * amdgpu_dpm_gfx_state_change - Handle gfx power state change set
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
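
/**
 * amdgpu_dpm_get_vce_clock_state - look up a VCE clock state entry
 * @adev: amdgpu_device pointer
 * @idx: index of the VCE clock state to query
 *
 * Returns a pointer to the requested state, or NULL when the backend
 * implements no get_vce_clock_state callback.
 */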
struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	if (!pp_funcs)
		return AMD_DPM_FORCED_LEVEL_AUTO;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}
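
/**
 * amdgpu_dpm_force_performance_level - force a dpm performance level
 * @adev: amdgpu_device pointer
 * @level: the AMD_DPM_FORCED_LEVEL_* value to apply
 *
 * Rejects the request while a thermal event is active and returns early
 * when @level is already in effect. On Raven (but not Raven2), GFXOFF is
 * disabled when entering manual mode and re-enabled when leaving it.
 * Entering one of the profiling (UMD pstate) levels ungates GFX clock-
 * and powergating; leaving them gates both again.
 *
 * Returns 0 on success, -EINVAL for an invalid transition or when the
 * backend rejects the requested level.
 */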
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (!(current_level & profile_mode_mask) &&
	    (level & profile_mode_mask)) {
		/* enter UMD Pstate */
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_UNGATE);
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_UNGATE);
	} else if ((current_level & profile_mode_mask) &&
		   !(level & profile_mode_mask)) {
		/* exit UMD Pstate */
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_GATE);
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_GATE);
	}

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
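
/**
 * amdgpu_dpm_emit_clock_levels - append clock levels to a buffer
 * @adev: amdgpu_device pointer
 * @type: the clock type to report
 * @buf: output buffer
 * @offset: offset within @buf at which to append; updated by the backend
 *
 * Unlike amdgpu_dpm_print_clock_levels(), this variant returns -ENOENT
 * when the backend implements no emit_clock_levels callback, so callers
 * can distinguish an unsupported backend from an empty result.
 */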
int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 char *buf,
				 int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					  type,
					  buf,
					  offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}
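
/**
 * amdgpu_dpm_get_power_profile_mode - list the available power profile modes
 * @adev: amdgpu_device pointer
 * @buf: output buffer for the human-readable mode table
 *
 * Returns the number of bytes written to @buf, or -EOPNOTSUPP when the
 * backend implements no get_power_profile_mode callback.
 */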
int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
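
/**
 * amdgpu_dpm_set_fan_control_mode - select the fan control mode
 * @adev: amdgpu_device pointer
 * @mode: the fan control mode to program
 *
 * Returns 0 on success, -EOPNOTSUPP when the backend implements no
 * set_fan_control_mode callback.
 */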
int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if ((is_support_sw_smu(adev) && smu->od_enabled) ||
	    (is_support_sw_smu(adev) && smu->is_apu) ||
	    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
		return true;

	return false;
}
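
/**
 * amdgpu_dpm_set_pp_table - upload a new powerplay table
 * @adev: amdgpu_device pointer
 * @buf: buffer holding the table contents
 * @size: size of @buf in bytes
 *
 * Returns 0 on success, -EOPNOTSUPP when the backend implements no
 * set_pp_table callback, or a negative error code on failure.
 */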
int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
						void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
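
/**
 * amdgpu_dpm_display_clock_voltage_request - request a display clock level
 * @adev: amdgpu_device pointer
 * @clock: the clock/voltage request from the display code
 *
 * Returns 0 on success, -EOPNOTSUPP when the backend implements no
 * display_clock_voltage_request callback.
 */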
int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}
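
/**
 * amdgpu_dpm_display_disable_memory_clock_switch - toggle memory clock switching
 * @adev: amdgpu_device pointer
 * @disable_memory_clock_switch: true to disallow memory clock switching
 *
 * Returns 0 on success or when the backend provides no callback,
 * negative error code on failure.
 */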
int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
				   unsigned int *clock_values_in_khz,
				   unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}