1 /* 2 * Copyright 2011 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Alex Deucher 23 */ 24 25 #include "amdgpu.h" 26 #include "amdgpu_atombios.h" 27 #include "amdgpu_i2c.h" 28 #include "amdgpu_dpm.h" 29 #include "atom.h" 30 #include "amd_pcie.h" 31 #include "amdgpu_display.h" 32 #include "hwmgr.h" 33 #include <linux/power_supply.h> 34 #include "amdgpu_smu.h" 35 36 #define amdgpu_dpm_enable_bapm(adev, e) \ 37 ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) 38 39 int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) 40 { 41 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 42 int ret = 0; 43 44 if (!pp_funcs->get_sclk) 45 return 0; 46 47 mutex_lock(&adev->pm.mutex); 48 ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle, 49 low); 50 mutex_unlock(&adev->pm.mutex); 51 52 return ret; 53 } 54 55 int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low) 56 { 57 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 58 int ret = 0; 59 60 if (!pp_funcs->get_mclk) 61 return 0; 62 63 mutex_lock(&adev->pm.mutex); 64 ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle, 65 low); 66 mutex_unlock(&adev->pm.mutex); 67 68 return ret; 69 } 70 71 int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate) 72 { 73 int ret = 0; 74 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 75 enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON; 76 77 if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) { 78 dev_dbg(adev->dev, "IP block%d already in the target %s state!", 79 block_type, gate ? "gate" : "ungate"); 80 return 0; 81 } 82 83 mutex_lock(&adev->pm.mutex); 84 85 switch (block_type) { 86 case AMD_IP_BLOCK_TYPE_UVD: 87 case AMD_IP_BLOCK_TYPE_VCE: 88 case AMD_IP_BLOCK_TYPE_GFX: 89 case AMD_IP_BLOCK_TYPE_VCN: 90 case AMD_IP_BLOCK_TYPE_SDMA: 91 case AMD_IP_BLOCK_TYPE_JPEG: 92 case AMD_IP_BLOCK_TYPE_GMC: 93 case AMD_IP_BLOCK_TYPE_ACP: 94 if (pp_funcs && pp_funcs->set_powergating_by_smu) 95 ret = (pp_funcs->set_powergating_by_smu( 96 (adev)->powerplay.pp_handle, block_type, gate)); 97 break; 98 default: 99 break; 100 } 101 102 if (!ret) 103 atomic_set(&adev->pm.pwr_state[block_type], pwr_state); 104 105 mutex_unlock(&adev->pm.mutex); 106 107 return ret; 108 } 109 110 int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev) 111 { 112 struct smu_context *smu = adev->powerplay.pp_handle; 113 int ret = -EOPNOTSUPP; 114 115 mutex_lock(&adev->pm.mutex); 116 ret = smu_set_gfx_power_up_by_imu(smu); 117 mutex_unlock(&adev->pm.mutex); 118 119 msleep(10); 120 121 return ret; 122 } 123 124 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev) 125 { 126 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 127 void *pp_handle = adev->powerplay.pp_handle; 128 int ret = 0; 129 130 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 131 return -ENOENT; 132 133 mutex_lock(&adev->pm.mutex); 134 135 /* enter BACO state */ 136 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 137 138 mutex_unlock(&adev->pm.mutex); 139 140 return ret; 141 } 142 143 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev) 144 { 145 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 146 void *pp_handle = adev->powerplay.pp_handle; 147 int ret = 0; 148 149 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 150 return -ENOENT; 151 152 mutex_lock(&adev->pm.mutex); 153 154 /* exit BACO state */ 155 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 156 157 mutex_unlock(&adev->pm.mutex); 158 159 return ret; 160 } 161 162 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, 163 enum pp_mp1_state mp1_state) 164 { 165 int ret = 0; 166 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 167 168 if (pp_funcs && pp_funcs->set_mp1_state) { 169 mutex_lock(&adev->pm.mutex); 170 171 ret = pp_funcs->set_mp1_state( 172 adev->powerplay.pp_handle, 173 mp1_state); 174 175 mutex_unlock(&adev->pm.mutex); 176 } 177 178 return ret; 179 } 180 181 bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) 182 { 183 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 184 void *pp_handle = adev->powerplay.pp_handle; 185 bool baco_cap; 186 int ret = 0; 187 188 if (!pp_funcs || !pp_funcs->get_asic_baco_capability) 189 return false; 190 /* Don't use baco for reset in S3. 191 * This is a workaround for some platforms 192 * where entering BACO during suspend 193 * seems to cause reboots or hangs. 194 * This might be related to the fact that BACO controls 195 * power to the whole GPU including devices like audio and USB. 196 * Powering down/up everything may adversely affect these other 197 * devices. Needs more investigation. 198 */ 199 if (adev->in_s3) 200 return false; 201 202 mutex_lock(&adev->pm.mutex); 203 204 ret = pp_funcs->get_asic_baco_capability(pp_handle, 205 &baco_cap); 206 207 mutex_unlock(&adev->pm.mutex); 208 209 return ret ? false : baco_cap; 210 } 211 212 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev) 213 { 214 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 215 void *pp_handle = adev->powerplay.pp_handle; 216 int ret = 0; 217 218 if (!pp_funcs || !pp_funcs->asic_reset_mode_2) 219 return -ENOENT; 220 221 mutex_lock(&adev->pm.mutex); 222 223 ret = pp_funcs->asic_reset_mode_2(pp_handle); 224 225 mutex_unlock(&adev->pm.mutex); 226 227 return ret; 228 } 229 230 int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev) 231 { 232 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 233 void *pp_handle = adev->powerplay.pp_handle; 234 int ret = 0; 235 236 if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features) 237 return -ENOENT; 238 239 mutex_lock(&adev->pm.mutex); 240 241 ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle); 242 243 mutex_unlock(&adev->pm.mutex); 244 245 return ret; 246 } 247 248 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev) 249 { 250 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 251 void *pp_handle = adev->powerplay.pp_handle; 252 int ret = 0; 253 254 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 255 return -ENOENT; 256 257 mutex_lock(&adev->pm.mutex); 258 259 /* enter BACO state */ 260 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 261 if (ret) 262 goto out; 263 264 /* exit BACO state */ 265 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 266 267 out: 268 mutex_unlock(&adev->pm.mutex); 269 return ret; 270 } 271 272 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev) 273 { 274 struct smu_context *smu = adev->powerplay.pp_handle; 275 bool support_mode1_reset = false; 276 277 if (is_support_sw_smu(adev)) { 278 mutex_lock(&adev->pm.mutex); 279 support_mode1_reset = smu_mode1_reset_is_support(smu); 280 mutex_unlock(&adev->pm.mutex); 281 } 282 283 return support_mode1_reset; 284 } 285 286 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev) 287 { 288 struct smu_context *smu = adev->powerplay.pp_handle; 289 int ret = -EOPNOTSUPP; 290 291 if (is_support_sw_smu(adev)) { 292 mutex_lock(&adev->pm.mutex); 293 ret = smu_mode1_reset(smu); 294 mutex_unlock(&adev->pm.mutex); 295 } 296 297 return ret; 298 } 299 300 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, 301 enum PP_SMC_POWER_PROFILE type, 302 bool en) 303 { 304 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 305 int ret = 0; 306 307 if (amdgpu_sriov_vf(adev)) 308 return 0; 309 310 if (pp_funcs && pp_funcs->switch_power_profile) { 311 mutex_lock(&adev->pm.mutex); 312 ret = pp_funcs->switch_power_profile( 313 adev->powerplay.pp_handle, type, en); 314 mutex_unlock(&adev->pm.mutex); 315 } 316 317 return ret; 318 } 319 320 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, 321 uint32_t pstate) 322 { 323 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 324 int ret = 0; 325 326 if (pp_funcs && pp_funcs->set_xgmi_pstate) { 327 mutex_lock(&adev->pm.mutex); 328 ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle, 329 pstate); 330 mutex_unlock(&adev->pm.mutex); 331 } 332 333 return ret; 334 } 335 336 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev, 337 uint32_t cstate) 338 { 339 int ret = 0; 340 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 341 void *pp_handle = adev->powerplay.pp_handle; 342 343 if (pp_funcs && pp_funcs->set_df_cstate) { 344 mutex_lock(&adev->pm.mutex); 345 ret = pp_funcs->set_df_cstate(pp_handle, cstate); 346 mutex_unlock(&adev->pm.mutex); 347 } 348 349 return ret; 350 } 351 352 int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en) 353 { 354 struct smu_context *smu = adev->powerplay.pp_handle; 355 int ret = 0; 356 357 if (is_support_sw_smu(adev)) { 358 mutex_lock(&adev->pm.mutex); 359 ret = smu_allow_xgmi_power_down(smu, en); 360 mutex_unlock(&adev->pm.mutex); 361 } 362 363 return ret; 364 } 365 366 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev) 367 { 368 void *pp_handle = adev->powerplay.pp_handle; 369 const struct amd_pm_funcs *pp_funcs = 370 adev->powerplay.pp_funcs; 371 int ret = 0; 372 373 if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) { 374 mutex_lock(&adev->pm.mutex); 375 ret = pp_funcs->enable_mgpu_fan_boost(pp_handle); 376 mutex_unlock(&adev->pm.mutex); 377 } 378 379 return ret; 380 } 381 382 int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev, 383 uint32_t msg_id) 384 { 385 void *pp_handle = adev->powerplay.pp_handle; 386 const struct amd_pm_funcs *pp_funcs = 387 adev->powerplay.pp_funcs; 388 int ret = 0; 389 390 if (pp_funcs && pp_funcs->set_clockgating_by_smu) { 391 mutex_lock(&adev->pm.mutex); 392 ret = pp_funcs->set_clockgating_by_smu(pp_handle, 393 msg_id); 394 mutex_unlock(&adev->pm.mutex); 395 } 396 397 return ret; 398 } 399 400 int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev, 401 bool acquire) 402 { 403 void *pp_handle = adev->powerplay.pp_handle; 404 const struct amd_pm_funcs *pp_funcs = 405 adev->powerplay.pp_funcs; 406 int ret = -EOPNOTSUPP; 407 408 if (pp_funcs && pp_funcs->smu_i2c_bus_access) { 409 mutex_lock(&adev->pm.mutex); 410 ret = pp_funcs->smu_i2c_bus_access(pp_handle, 411 acquire); 412 mutex_unlock(&adev->pm.mutex); 413 } 414 415 return ret; 416 } 417 418 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) 419 { 420 if (adev->pm.dpm_enabled) { 421 mutex_lock(&adev->pm.mutex); 422 if (power_supply_is_system_supplied() > 0) 423 adev->pm.ac_power = true; 424 else 425 adev->pm.ac_power = false; 426 427 if (adev->powerplay.pp_funcs && 428 adev->powerplay.pp_funcs->enable_bapm) 429 amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); 430 431 if (is_support_sw_smu(adev)) 432 smu_set_ac_dc(adev->powerplay.pp_handle); 433 434 mutex_unlock(&adev->pm.mutex); 435 } 436 } 437 438 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, 439 void *data, uint32_t *size) 440 { 441 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 442 int ret = -EINVAL; 443 444 if (!data || !size) 445 return -EINVAL; 446 447 if (pp_funcs && pp_funcs->read_sensor) { 448 mutex_lock(&adev->pm.mutex); 449 ret = pp_funcs->read_sensor(adev->powerplay.pp_handle, 450 sensor, 451 data, 452 size); 453 mutex_unlock(&adev->pm.mutex); 454 } 455 456 return ret; 457 } 458 459 void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev) 460 { 461 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 462 int i; 463 464 if (!adev->pm.dpm_enabled) 465 return; 466 467 if (!pp_funcs->pm_compute_clocks) 468 return; 469 470 if (adev->mode_info.num_crtc) 471 amdgpu_display_bandwidth_update(adev); 472 473 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 474 struct amdgpu_ring *ring = adev->rings[i]; 475 if (ring && ring->sched.ready) 476 amdgpu_fence_wait_empty(ring); 477 } 478 479 mutex_lock(&adev->pm.mutex); 480 pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle); 481 mutex_unlock(&adev->pm.mutex); 482 } 483 484 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 485 { 486 int ret = 0; 487 488 if (adev->family == AMDGPU_FAMILY_SI) { 489 mutex_lock(&adev->pm.mutex); 490 if (enable) { 491 adev->pm.dpm.uvd_active = true; 492 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; 493 } else { 494 adev->pm.dpm.uvd_active = false; 495 } 496 mutex_unlock(&adev->pm.mutex); 497 498 amdgpu_dpm_compute_clocks(adev); 499 return; 500 } 501 502 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); 503 if (ret) 504 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 505 enable ? "enable" : "disable", ret); 506 } 507 508 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 509 { 510 int ret = 0; 511 512 if (adev->family == AMDGPU_FAMILY_SI) { 513 mutex_lock(&adev->pm.mutex); 514 if (enable) { 515 adev->pm.dpm.vce_active = true; 516 /* XXX select vce level based on ring/task */ 517 adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; 518 } else { 519 adev->pm.dpm.vce_active = false; 520 } 521 mutex_unlock(&adev->pm.mutex); 522 523 amdgpu_dpm_compute_clocks(adev); 524 return; 525 } 526 527 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); 528 if (ret) 529 DRM_ERROR("Dpm %s vce failed, ret = %d. \n", 530 enable ? "enable" : "disable", ret); 531 } 532 533 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) 534 { 535 int ret = 0; 536 537 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable); 538 if (ret) 539 DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n", 540 enable ? "enable" : "disable", ret); 541 } 542 543 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) 544 { 545 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 546 int r = 0; 547 548 if (!pp_funcs || !pp_funcs->load_firmware) 549 return 0; 550 551 mutex_lock(&adev->pm.mutex); 552 r = pp_funcs->load_firmware(adev->powerplay.pp_handle); 553 if (r) { 554 pr_err("smu firmware loading failed\n"); 555 goto out; 556 } 557 558 if (smu_version) 559 *smu_version = adev->pm.fw_version; 560 561 out: 562 mutex_unlock(&adev->pm.mutex); 563 return r; 564 } 565 566 int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable) 567 { 568 int ret = 0; 569 570 if (is_support_sw_smu(adev)) { 571 mutex_lock(&adev->pm.mutex); 572 ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle, 573 enable); 574 mutex_unlock(&adev->pm.mutex); 575 } 576 577 return ret; 578 } 579 580 int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size) 581 { 582 struct smu_context *smu = adev->powerplay.pp_handle; 583 int ret = 0; 584 585 if (!is_support_sw_smu(adev)) 586 return -EOPNOTSUPP; 587 588 mutex_lock(&adev->pm.mutex); 589 ret = smu_send_hbm_bad_pages_num(smu, size); 590 mutex_unlock(&adev->pm.mutex); 591 592 return ret; 593 } 594 595 int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size) 596 { 597 struct smu_context *smu = adev->powerplay.pp_handle; 598 int ret = 0; 599 600 if (!is_support_sw_smu(adev)) 601 return -EOPNOTSUPP; 602 603 mutex_lock(&adev->pm.mutex); 604 ret = smu_send_hbm_bad_channel_flag(smu, size); 605 mutex_unlock(&adev->pm.mutex); 606 607 return ret; 608 } 609 610 int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, 611 enum pp_clock_type type, 612 uint32_t *min, 613 uint32_t *max) 614 { 615 int ret = 0; 616 617 if (type != PP_SCLK) 618 return -EINVAL; 619 620 if (!is_support_sw_smu(adev)) 621 return -EOPNOTSUPP; 622 623 mutex_lock(&adev->pm.mutex); 624 ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle, 625 SMU_SCLK, 626 min, 627 max); 628 mutex_unlock(&adev->pm.mutex); 629 630 return ret; 631 } 632 633 int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, 634 enum pp_clock_type type, 635 uint32_t min, 636 uint32_t max) 637 { 638 struct smu_context *smu = adev->powerplay.pp_handle; 639 int ret = 0; 640 641 if (type != PP_SCLK) 642 return -EINVAL; 643 644 if (!is_support_sw_smu(adev)) 645 return -EOPNOTSUPP; 646 647 mutex_lock(&adev->pm.mutex); 648 ret = smu_set_soft_freq_range(smu, 649 SMU_SCLK, 650 min, 651 max); 652 mutex_unlock(&adev->pm.mutex); 653 654 return ret; 655 } 656 657 int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev) 658 { 659 struct smu_context *smu = adev->powerplay.pp_handle; 660 int ret = 0; 661 662 if (!is_support_sw_smu(adev)) 663 return 0; 664 665 mutex_lock(&adev->pm.mutex); 666 ret = smu_write_watermarks_table(smu); 667 mutex_unlock(&adev->pm.mutex); 668 669 return ret; 670 } 671 672 int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, 673 enum smu_event_type event, 674 uint64_t event_arg) 675 { 676 struct smu_context *smu = adev->powerplay.pp_handle; 677 int ret = 0; 678 679 if (!is_support_sw_smu(adev)) 680 return -EOPNOTSUPP; 681 682 mutex_lock(&adev->pm.mutex); 683 ret = smu_wait_for_event(smu, event, event_arg); 684 mutex_unlock(&adev->pm.mutex); 685 686 return ret; 687 } 688 689 int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value) 690 { 691 struct smu_context *smu = adev->powerplay.pp_handle; 692 int ret = 0; 693 694 if (!is_support_sw_smu(adev)) 695 return -EOPNOTSUPP; 696 697 mutex_lock(&adev->pm.mutex); 698 ret = smu_set_residency_gfxoff(smu, value); 699 mutex_unlock(&adev->pm.mutex); 700 701 return ret; 702 } 703 704 int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value) 705 { 706 struct smu_context *smu = adev->powerplay.pp_handle; 707 int ret = 0; 708 709 if (!is_support_sw_smu(adev)) 710 return -EOPNOTSUPP; 711 712 mutex_lock(&adev->pm.mutex); 713 ret = smu_get_residency_gfxoff(smu, value); 714 mutex_unlock(&adev->pm.mutex); 715 716 return ret; 717 } 718 719 int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value) 720 { 721 struct smu_context *smu = adev->powerplay.pp_handle; 722 int ret = 0; 723 724 if (!is_support_sw_smu(adev)) 725 return -EOPNOTSUPP; 726 727 mutex_lock(&adev->pm.mutex); 728 ret = smu_get_entrycount_gfxoff(smu, value); 729 mutex_unlock(&adev->pm.mutex); 730 731 return ret; 732 } 733 734 int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value) 735 { 736 struct smu_context *smu = adev->powerplay.pp_handle; 737 int ret = 0; 738 739 if (!is_support_sw_smu(adev)) 740 return -EOPNOTSUPP; 741 742 mutex_lock(&adev->pm.mutex); 743 ret = smu_get_status_gfxoff(smu, value); 744 mutex_unlock(&adev->pm.mutex); 745 746 return ret; 747 } 748 749 uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev) 750 { 751 struct smu_context *smu = adev->powerplay.pp_handle; 752 753 if (!is_support_sw_smu(adev)) 754 return 0; 755 756 return atomic64_read(&smu->throttle_int_counter); 757 } 758 759 /* amdgpu_dpm_gfx_state_change - Handle gfx power state change set 760 * @adev: amdgpu_device pointer 761 * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry) 762 * 763 */ 764 void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev, 765 enum gfx_change_state state) 766 { 767 mutex_lock(&adev->pm.mutex); 768 if (adev->powerplay.pp_funcs && 769 adev->powerplay.pp_funcs->gfx_state_change_set) 770 ((adev)->powerplay.pp_funcs->gfx_state_change_set( 771 (adev)->powerplay.pp_handle, state)); 772 mutex_unlock(&adev->pm.mutex); 773 } 774 775 int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev, 776 void *umc_ecc) 777 { 778 struct smu_context *smu = adev->powerplay.pp_handle; 779 int ret = 0; 780 781 if (!is_support_sw_smu(adev)) 782 return -EOPNOTSUPP; 783 784 mutex_lock(&adev->pm.mutex); 785 ret = smu_get_ecc_info(smu, umc_ecc); 786 mutex_unlock(&adev->pm.mutex); 787 788 return ret; 789 } 790 791 struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev, 792 uint32_t idx) 793 { 794 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 795 struct amd_vce_state *vstate = NULL; 796 797 if (!pp_funcs->get_vce_clock_state) 798 return NULL; 799 800 mutex_lock(&adev->pm.mutex); 801 vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle, 802 idx); 803 mutex_unlock(&adev->pm.mutex); 804 805 return vstate; 806 } 807 808 void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, 809 enum amd_pm_state_type *state) 810 { 811 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 812 813 mutex_lock(&adev->pm.mutex); 814 815 if (!pp_funcs->get_current_power_state) { 816 *state = adev->pm.dpm.user_state; 817 goto out; 818 } 819 820 *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle); 821 if (*state < POWER_STATE_TYPE_DEFAULT || 822 *state > POWER_STATE_TYPE_INTERNAL_3DPERF) 823 *state = adev->pm.dpm.user_state; 824 825 out: 826 mutex_unlock(&adev->pm.mutex); 827 } 828 829 void amdgpu_dpm_set_power_state(struct amdgpu_device *adev, 830 enum amd_pm_state_type state) 831 { 832 mutex_lock(&adev->pm.mutex); 833 adev->pm.dpm.user_state = state; 834 mutex_unlock(&adev->pm.mutex); 835 836 if (is_support_sw_smu(adev)) 837 return; 838 839 if (amdgpu_dpm_dispatch_task(adev, 840 AMD_PP_TASK_ENABLE_USER_STATE, 841 &state) == -EOPNOTSUPP) 842 amdgpu_dpm_compute_clocks(adev); 843 } 844 845 enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev) 846 { 847 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 848 enum amd_dpm_forced_level level; 849 850 if (!pp_funcs) 851 return AMD_DPM_FORCED_LEVEL_AUTO; 852 853 mutex_lock(&adev->pm.mutex); 854 if (pp_funcs->get_performance_level) 855 level = pp_funcs->get_performance_level(adev->powerplay.pp_handle); 856 else 857 level = adev->pm.dpm.forced_level; 858 mutex_unlock(&adev->pm.mutex); 859 860 return level; 861 } 862 863 int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev, 864 enum amd_dpm_forced_level level) 865 { 866 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 867 enum amd_dpm_forced_level current_level; 868 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 869 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 870 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 871 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 872 873 if (!pp_funcs || !pp_funcs->force_performance_level) 874 return 0; 875 876 if (adev->pm.dpm.thermal_active) 877 return -EINVAL; 878 879 current_level = amdgpu_dpm_get_performance_level(adev); 880 if (current_level == level) 881 return 0; 882 883 if (adev->asic_type == CHIP_RAVEN) { 884 if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) { 885 if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && 886 level == AMD_DPM_FORCED_LEVEL_MANUAL) 887 amdgpu_gfx_off_ctrl(adev, false); 888 else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && 889 level != AMD_DPM_FORCED_LEVEL_MANUAL) 890 amdgpu_gfx_off_ctrl(adev, true); 891 } 892 } 893 894 if (!(current_level & profile_mode_mask) && 895 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) 896 return -EINVAL; 897 898 if (!(current_level & profile_mode_mask) && 899 (level & profile_mode_mask)) { 900 /* enter UMD Pstate */ 901 amdgpu_device_ip_set_powergating_state(adev, 902 AMD_IP_BLOCK_TYPE_GFX, 903 AMD_PG_STATE_UNGATE); 904 amdgpu_device_ip_set_clockgating_state(adev, 905 AMD_IP_BLOCK_TYPE_GFX, 906 AMD_CG_STATE_UNGATE); 907 } else if ((current_level & profile_mode_mask) && 908 !(level & profile_mode_mask)) { 909 /* exit UMD Pstate */ 910 amdgpu_device_ip_set_clockgating_state(adev, 911 AMD_IP_BLOCK_TYPE_GFX, 912 AMD_CG_STATE_GATE); 913 amdgpu_device_ip_set_powergating_state(adev, 914 AMD_IP_BLOCK_TYPE_GFX, 915 AMD_PG_STATE_GATE); 916 } 917 918 mutex_lock(&adev->pm.mutex); 919 920 if (pp_funcs->force_performance_level(adev->powerplay.pp_handle, 921 level)) { 922 mutex_unlock(&adev->pm.mutex); 923 return -EINVAL; 924 } 925 926 adev->pm.dpm.forced_level = level; 927 928 mutex_unlock(&adev->pm.mutex); 929 930 return 0; 931 } 932 933 int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev, 934 struct pp_states_info *states) 935 { 936 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 937 int ret = 0; 938 939 if (!pp_funcs->get_pp_num_states) 940 return -EOPNOTSUPP; 941 942 mutex_lock(&adev->pm.mutex); 943 ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle, 944 states); 945 mutex_unlock(&adev->pm.mutex); 946 947 return ret; 948 } 949 950 int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev, 951 enum amd_pp_task task_id, 952 enum amd_pm_state_type *user_state) 953 { 954 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 955 int ret = 0; 956 957 if (!pp_funcs->dispatch_tasks) 958 return -EOPNOTSUPP; 959 960 mutex_lock(&adev->pm.mutex); 961 ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle, 962 task_id, 963 user_state); 964 mutex_unlock(&adev->pm.mutex); 965 966 return ret; 967 } 968 969 int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table) 970 { 971 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 972 int ret = 0; 973 974 if (!pp_funcs->get_pp_table) 975 return 0; 976 977 mutex_lock(&adev->pm.mutex); 978 ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle, 979 table); 980 mutex_unlock(&adev->pm.mutex); 981 982 return ret; 983 } 984 985 int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev, 986 uint32_t type, 987 long *input, 988 uint32_t size) 989 { 990 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 991 int ret = 0; 992 993 if (!pp_funcs->set_fine_grain_clk_vol) 994 return 0; 995 996 mutex_lock(&adev->pm.mutex); 997 ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle, 998 type, 999 input, 1000 size); 1001 mutex_unlock(&adev->pm.mutex); 1002 1003 return ret; 1004 } 1005 1006 int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev, 1007 uint32_t type, 1008 long *input, 1009 uint32_t size) 1010 { 1011 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1012 int ret = 0; 1013 1014 if (!pp_funcs->odn_edit_dpm_table) 1015 return 0; 1016 1017 mutex_lock(&adev->pm.mutex); 1018 ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle, 1019 type, 1020 input, 1021 size); 1022 mutex_unlock(&adev->pm.mutex); 1023 1024 return ret; 1025 } 1026 1027 int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev, 1028 enum pp_clock_type type, 1029 char *buf) 1030 { 1031 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1032 int ret = 0; 1033 1034 if (!pp_funcs->print_clock_levels) 1035 return 0; 1036 1037 mutex_lock(&adev->pm.mutex); 1038 ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle, 1039 type, 1040 buf); 1041 mutex_unlock(&adev->pm.mutex); 1042 1043 return ret; 1044 } 1045 1046 int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev, 1047 enum pp_clock_type type, 1048 char *buf, 1049 int *offset) 1050 { 1051 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1052 int ret = 0; 1053 1054 if (!pp_funcs->emit_clock_levels) 1055 return -ENOENT; 1056 1057 mutex_lock(&adev->pm.mutex); 1058 ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle, 1059 type, 1060 buf, 1061 offset); 1062 mutex_unlock(&adev->pm.mutex); 1063 1064 return ret; 1065 } 1066 1067 int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev, 1068 uint64_t ppfeature_masks) 1069 { 1070 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1071 int ret = 0; 1072 1073 if (!pp_funcs->set_ppfeature_status) 1074 return 0; 1075 1076 mutex_lock(&adev->pm.mutex); 1077 ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle, 1078 ppfeature_masks); 1079 mutex_unlock(&adev->pm.mutex); 1080 1081 return ret; 1082 } 1083 1084 int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf) 1085 { 1086 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1087 int ret = 0; 1088 1089 if (!pp_funcs->get_ppfeature_status) 1090 return 0; 1091 1092 mutex_lock(&adev->pm.mutex); 1093 ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle, 1094 buf); 1095 mutex_unlock(&adev->pm.mutex); 1096 1097 return ret; 1098 } 1099 1100 int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev, 1101 enum pp_clock_type type, 1102 uint32_t mask) 1103 { 1104 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1105 int ret = 0; 1106 1107 if (!pp_funcs->force_clock_level) 1108 return 0; 1109 1110 mutex_lock(&adev->pm.mutex); 1111 ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle, 1112 type, 1113 mask); 1114 mutex_unlock(&adev->pm.mutex); 1115 1116 return ret; 1117 } 1118 1119 int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev) 1120 { 1121 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1122 int ret = 0; 1123 1124 if (!pp_funcs->get_sclk_od) 1125 return 0; 1126 1127 mutex_lock(&adev->pm.mutex); 1128 ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle); 1129 mutex_unlock(&adev->pm.mutex); 1130 1131 return ret; 1132 } 1133 1134 int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) 1135 { 1136 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1137 1138 if (is_support_sw_smu(adev)) 1139 return 0; 1140 1141 mutex_lock(&adev->pm.mutex); 1142 if (pp_funcs->set_sclk_od) 1143 pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value); 1144 mutex_unlock(&adev->pm.mutex); 1145 1146 if (amdgpu_dpm_dispatch_task(adev, 1147 AMD_PP_TASK_READJUST_POWER_STATE, 1148 NULL) == -EOPNOTSUPP) { 1149 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1150 amdgpu_dpm_compute_clocks(adev); 1151 } 1152 1153 return 0; 1154 } 1155 1156 int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev) 1157 { 1158 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1159 int ret = 0; 1160 1161 if (!pp_funcs->get_mclk_od) 1162 return 0; 1163 1164 mutex_lock(&adev->pm.mutex); 1165 ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle); 1166 mutex_unlock(&adev->pm.mutex); 1167 1168 return ret; 1169 } 1170 1171 int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) 1172 { 1173 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1174 1175 if (is_support_sw_smu(adev)) 1176 return 0; 1177 1178 mutex_lock(&adev->pm.mutex); 1179 if (pp_funcs->set_mclk_od) 1180 pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value); 1181 mutex_unlock(&adev->pm.mutex); 1182 1183 if (amdgpu_dpm_dispatch_task(adev, 1184 AMD_PP_TASK_READJUST_POWER_STATE, 1185 NULL) == -EOPNOTSUPP) { 1186 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1187 amdgpu_dpm_compute_clocks(adev); 1188 } 1189 1190 return 0; 1191 } 1192 1193 int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev, 1194 char *buf) 1195 { 1196 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1197 int ret = 0; 1198 1199 if (!pp_funcs->get_power_profile_mode) 1200 return -EOPNOTSUPP; 1201 1202 mutex_lock(&adev->pm.mutex); 1203 ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle, 1204 buf); 1205 mutex_unlock(&adev->pm.mutex); 1206 1207 return ret; 1208 } 1209 1210 int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev, 1211 long *input, uint32_t size) 1212 { 1213 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1214 int ret = 0; 1215 1216 if (!pp_funcs->set_power_profile_mode) 1217 return 0; 1218 1219 mutex_lock(&adev->pm.mutex); 1220 ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle, 1221 input, 1222 size); 1223 mutex_unlock(&adev->pm.mutex); 1224 1225 return ret; 1226 } 1227 1228 int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table) 1229 { 1230 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1231 int ret = 0; 1232 1233 if (!pp_funcs->get_gpu_metrics) 1234 return 0; 1235 1236 mutex_lock(&adev->pm.mutex); 1237 ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle, 1238 table); 1239 mutex_unlock(&adev->pm.mutex); 1240 1241 return ret; 1242 } 1243 1244 int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev, 1245 uint32_t *fan_mode) 1246 { 1247 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1248 int ret = 0; 1249 1250 if (!pp_funcs->get_fan_control_mode) 1251 return -EOPNOTSUPP; 1252 1253 mutex_lock(&adev->pm.mutex); 1254 ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle, 1255 fan_mode); 1256 mutex_unlock(&adev->pm.mutex); 1257 1258 return ret; 1259 } 1260 1261 int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev, 1262 uint32_t speed) 1263 { 1264 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1265 int ret = 0; 1266 1267 if (!pp_funcs->set_fan_speed_pwm) 1268 return -EOPNOTSUPP; 1269 1270 mutex_lock(&adev->pm.mutex); 1271 ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle, 1272 speed); 1273 mutex_unlock(&adev->pm.mutex); 1274 1275 return ret; 1276 } 1277 1278 int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev, 1279 uint32_t *speed) 1280 { 1281 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1282 int ret = 0; 1283 1284 if (!pp_funcs->get_fan_speed_pwm) 1285 return -EOPNOTSUPP; 1286 1287 mutex_lock(&adev->pm.mutex); 1288 ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle, 1289 speed); 1290 mutex_unlock(&adev->pm.mutex); 1291 1292 return ret; 1293 } 1294 1295 int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev, 1296 uint32_t *speed) 1297 { 1298 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1299 int ret = 0; 1300 1301 if (!pp_funcs->get_fan_speed_rpm) 1302 return -EOPNOTSUPP; 1303 1304 mutex_lock(&adev->pm.mutex); 1305 ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle, 1306 speed); 1307 mutex_unlock(&adev->pm.mutex); 1308 1309 return ret; 1310 } 1311 1312 int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev, 1313 uint32_t speed) 1314 { 1315 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1316 int ret = 0; 1317 1318 if (!pp_funcs->set_fan_speed_rpm) 1319 return -EOPNOTSUPP; 1320 1321 mutex_lock(&adev->pm.mutex); 1322 ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle, 1323 speed); 1324 mutex_unlock(&adev->pm.mutex); 1325 1326 return ret; 1327 } 1328 1329 int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev, 1330 uint32_t mode) 1331 { 1332 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1333 int ret = 0; 1334 1335 if (!pp_funcs->set_fan_control_mode) 1336 return -EOPNOTSUPP; 1337 1338 mutex_lock(&adev->pm.mutex); 1339 ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle, 1340 mode); 1341 mutex_unlock(&adev->pm.mutex); 1342 1343 return ret; 1344 } 1345 1346 int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev, 1347 uint32_t *limit, 1348 enum pp_power_limit_level pp_limit_level, 1349 enum pp_power_type power_type) 1350 { 1351 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1352 int ret = 0; 1353 1354 if (!pp_funcs->get_power_limit) 1355 return -ENODATA; 1356 1357 mutex_lock(&adev->pm.mutex); 1358 ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle, 1359 limit, 1360 pp_limit_level, 1361 power_type); 1362 mutex_unlock(&adev->pm.mutex); 1363 1364 return ret; 1365 } 1366 1367 int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, 1368 uint32_t limit) 1369 { 1370 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1371 int ret = 0; 1372 1373 if (!pp_funcs->set_power_limit) 1374 return -EINVAL; 1375 1376 mutex_lock(&adev->pm.mutex); 1377 ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle, 1378 limit); 1379 mutex_unlock(&adev->pm.mutex); 1380 1381 return ret; 1382 } 1383 1384 int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev) 1385 { 1386 bool cclk_dpm_supported = false; 1387 1388 if (!is_support_sw_smu(adev)) 1389 return false; 1390 1391 mutex_lock(&adev->pm.mutex); 1392 cclk_dpm_supported = is_support_cclk_dpm(adev); 1393 mutex_unlock(&adev->pm.mutex); 1394 1395 return (int)cclk_dpm_supported; 1396 } 1397 1398 int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, 1399 struct seq_file *m) 1400 { 1401 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1402 1403 if (!pp_funcs->debugfs_print_current_performance_level) 1404 return -EOPNOTSUPP; 1405 1406 mutex_lock(&adev->pm.mutex); 1407 pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle, 1408 m); 1409 mutex_unlock(&adev->pm.mutex); 1410 1411 return 0; 1412 } 1413 1414 int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, 1415 void **addr, 1416 size_t *size) 1417 { 1418 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1419 int ret = 0; 1420 1421 if (!pp_funcs->get_smu_prv_buf_details) 1422 return -ENOSYS; 1423 1424 mutex_lock(&adev->pm.mutex); 1425 ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle, 1426 addr, 1427 size); 1428 mutex_unlock(&adev->pm.mutex); 1429 1430 return ret; 1431 } 1432 1433 int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev) 1434 { 1435 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 1436 struct smu_context *smu = adev->powerplay.pp_handle; 1437 1438 if ((is_support_sw_smu(adev) && smu->od_enabled) || 1439 (is_support_sw_smu(adev) && smu->is_apu) || 1440 (!is_support_sw_smu(adev) && hwmgr->od_enabled)) 1441 return true; 1442 1443 return false; 1444 } 1445 1446 int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, 1447 const char *buf, 1448 size_t size) 1449 { 1450 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1451 int ret = 0; 1452 1453 if (!pp_funcs->set_pp_table) 1454 return -EOPNOTSUPP; 1455 1456 mutex_lock(&adev->pm.mutex); 1457 ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle, 1458 buf, 1459 size); 1460 mutex_unlock(&adev->pm.mutex); 1461 1462 return ret; 1463 } 1464 1465 int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev) 1466 { 1467 struct smu_context *smu = adev->powerplay.pp_handle; 1468 1469 if (!is_support_sw_smu(adev)) 1470 return INT_MAX; 1471 1472 return smu->cpu_core_num; 1473 } 1474 1475 void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev) 1476 { 1477 if (!is_support_sw_smu(adev)) 1478 return; 1479 1480 amdgpu_smu_stb_debug_fs_init(adev); 1481 } 1482 1483 int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev, 1484 const struct amd_pp_display_configuration *input) 1485 { 1486 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1487 int ret = 0; 1488 1489 if (!pp_funcs->display_configuration_change) 1490 return 0; 1491 1492 mutex_lock(&adev->pm.mutex); 1493 ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle, 1494 input); 1495 mutex_unlock(&adev->pm.mutex); 1496 1497 return ret; 1498 } 1499 1500 int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev, 1501 enum amd_pp_clock_type type, 1502 struct amd_pp_clocks *clocks) 1503 { 1504 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1505 int ret = 0; 1506 1507 if (!pp_funcs->get_clock_by_type) 1508 return 0; 1509 1510 mutex_lock(&adev->pm.mutex); 1511 ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle, 1512 type, 1513 clocks); 1514 mutex_unlock(&adev->pm.mutex); 1515 1516 return ret; 1517 } 1518 1519 int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev, 1520 struct amd_pp_simple_clock_info *clocks) 1521 { 1522 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1523 int ret = 0; 1524 1525 if (!pp_funcs->get_display_mode_validation_clocks) 1526 return 0; 1527 1528 mutex_lock(&adev->pm.mutex); 1529 ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle, 1530 clocks); 1531 mutex_unlock(&adev->pm.mutex); 1532 1533 return ret; 1534 } 1535 1536 int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev, 1537 enum amd_pp_clock_type type, 1538 struct pp_clock_levels_with_latency *clocks) 1539 { 1540 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1541 int ret = 0; 1542 1543 if (!pp_funcs->get_clock_by_type_with_latency) 1544 return 0; 1545 1546 mutex_lock(&adev->pm.mutex); 1547 ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle, 1548 type, 1549 clocks); 1550 mutex_unlock(&adev->pm.mutex); 1551 1552 return ret; 1553 } 1554 1555 int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev, 1556 enum amd_pp_clock_type type, 1557 struct pp_clock_levels_with_voltage *clocks) 1558 { 1559 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1560 int ret = 0; 1561 1562 if (!pp_funcs->get_clock_by_type_with_voltage) 1563 return 0; 1564 1565 mutex_lock(&adev->pm.mutex); 1566 ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle, 1567 type, 1568 clocks); 1569 mutex_unlock(&adev->pm.mutex); 1570 1571 return ret; 1572 } 1573 1574 int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev, 1575 void *clock_ranges) 1576 { 1577 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1578 int ret = 0; 1579 1580 if (!pp_funcs->set_watermarks_for_clocks_ranges) 1581 return -EOPNOTSUPP; 1582 1583 mutex_lock(&adev->pm.mutex); 1584 ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle, 1585 clock_ranges); 1586 mutex_unlock(&adev->pm.mutex); 1587 1588 return ret; 1589 } 1590 1591 int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev, 1592 struct pp_display_clock_request *clock) 1593 { 1594 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1595 int ret = 0; 1596 1597 if (!pp_funcs->display_clock_voltage_request) 1598 return -EOPNOTSUPP; 1599 1600 mutex_lock(&adev->pm.mutex); 1601 ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle, 1602 clock); 1603 mutex_unlock(&adev->pm.mutex); 1604 1605 return ret; 1606 } 1607 1608 int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev, 1609 struct amd_pp_clock_info *clocks) 1610 { 1611 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1612 int ret = 0; 1613 1614 if (!pp_funcs->get_current_clocks) 1615 return -EOPNOTSUPP; 1616 1617 mutex_lock(&adev->pm.mutex); 1618 ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle, 1619 clocks); 1620 mutex_unlock(&adev->pm.mutex); 1621 1622 return ret; 1623 } 1624 1625 void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev) 1626 { 1627 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1628 1629 if (!pp_funcs->notify_smu_enable_pwe) 1630 return; 1631 1632 mutex_lock(&adev->pm.mutex); 1633 pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle); 1634 mutex_unlock(&adev->pm.mutex); 1635 } 1636 1637 int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev, 1638 uint32_t count) 1639 { 1640 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1641 int ret = 0; 1642 1643 if (!pp_funcs->set_active_display_count) 1644 return -EOPNOTSUPP; 1645 1646 mutex_lock(&adev->pm.mutex); 1647 ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle, 1648 count); 1649 mutex_unlock(&adev->pm.mutex); 1650 1651 return ret; 1652 } 1653 1654 int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev, 1655 uint32_t clock) 1656 { 1657 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1658 int ret = 0; 1659 1660 if (!pp_funcs->set_min_deep_sleep_dcefclk) 1661 return -EOPNOTSUPP; 1662 1663 mutex_lock(&adev->pm.mutex); 1664 ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle, 1665 clock); 1666 mutex_unlock(&adev->pm.mutex); 1667 1668 return ret; 1669 } 1670 1671 void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev, 1672 uint32_t clock) 1673 { 1674 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1675 1676 if (!pp_funcs->set_hard_min_dcefclk_by_freq) 1677 return; 1678 1679 mutex_lock(&adev->pm.mutex); 1680 pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle, 1681 clock); 1682 mutex_unlock(&adev->pm.mutex); 1683 } 1684 1685 void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev, 1686 uint32_t clock) 1687 { 1688 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1689 1690 if (!pp_funcs->set_hard_min_fclk_by_freq) 1691 return; 1692 1693 mutex_lock(&adev->pm.mutex); 1694 pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle, 1695 clock); 1696 mutex_unlock(&adev->pm.mutex); 1697 } 1698 1699 int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev, 1700 bool disable_memory_clock_switch) 1701 { 1702 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1703 int ret = 0; 1704 1705 if (!pp_funcs->display_disable_memory_clock_switch) 1706 return 0; 1707 1708 mutex_lock(&adev->pm.mutex); 1709 ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle, 1710 disable_memory_clock_switch); 1711 mutex_unlock(&adev->pm.mutex); 1712 1713 return ret; 1714 } 1715 1716 int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev, 1717 struct pp_smu_nv_clock_table *max_clocks) 1718 { 1719 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1720 int ret = 0; 1721 1722 if (!pp_funcs->get_max_sustainable_clocks_by_dc) 1723 return -EOPNOTSUPP; 1724 1725 mutex_lock(&adev->pm.mutex); 1726 ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle, 1727 max_clocks); 1728 mutex_unlock(&adev->pm.mutex); 1729 1730 return ret; 1731 } 1732 1733 enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev, 1734 unsigned int *clock_values_in_khz, 1735 unsigned int *num_states) 1736 { 1737 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1738 int ret = 0; 1739 1740 if (!pp_funcs->get_uclk_dpm_states) 1741 return -EOPNOTSUPP; 1742 1743 mutex_lock(&adev->pm.mutex); 1744 ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle, 1745 clock_values_in_khz, 1746 num_states); 1747 mutex_unlock(&adev->pm.mutex); 1748 1749 return ret; 1750 } 1751 1752 int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev, 1753 struct dpm_clocks *clock_table) 1754 { 1755 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1756 int ret = 0; 1757 1758 if (!pp_funcs->get_dpm_clock_table) 1759 return -EOPNOTSUPP; 1760 1761 mutex_lock(&adev->pm.mutex); 1762 ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle, 1763 clock_table); 1764 mutex_unlock(&adev->pm.mutex); 1765 1766 return ret; 1767 } 1768