1 /* 2 * Copyright 2011 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Alex Deucher 23 */ 24 25 #include "amdgpu.h" 26 #include "amdgpu_atombios.h" 27 #include "amdgpu_i2c.h" 28 #include "amdgpu_dpm.h" 29 #include "atom.h" 30 #include "amd_pcie.h" 31 #include "amdgpu_display.h" 32 #include "hwmgr.h" 33 #include <linux/power_supply.h> 34 #include "amdgpu_smu.h" 35 36 #define amdgpu_dpm_enable_bapm(adev, e) \ 37 ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) 38 39 #define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev)) 40 41 int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) 42 { 43 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 44 int ret = 0; 45 46 if (!pp_funcs->get_sclk) 47 return 0; 48 49 mutex_lock(&adev->pm.mutex); 50 ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle, 51 low); 52 mutex_unlock(&adev->pm.mutex); 53 54 return ret; 55 } 56 57 int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low) 58 { 59 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 60 int ret = 0; 61 62 if (!pp_funcs->get_mclk) 63 return 0; 64 65 mutex_lock(&adev->pm.mutex); 66 ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle, 67 low); 68 mutex_unlock(&adev->pm.mutex); 69 70 return ret; 71 } 72 73 int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate) 74 { 75 int ret = 0; 76 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 77 enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON; 78 79 if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) { 80 dev_dbg(adev->dev, "IP block%d already in the target %s state!", 81 block_type, gate ? "gate" : "ungate"); 82 return 0; 83 } 84 85 mutex_lock(&adev->pm.mutex); 86 87 switch (block_type) { 88 case AMD_IP_BLOCK_TYPE_UVD: 89 case AMD_IP_BLOCK_TYPE_VCE: 90 case AMD_IP_BLOCK_TYPE_GFX: 91 case AMD_IP_BLOCK_TYPE_VCN: 92 case AMD_IP_BLOCK_TYPE_SDMA: 93 case AMD_IP_BLOCK_TYPE_JPEG: 94 case AMD_IP_BLOCK_TYPE_GMC: 95 case AMD_IP_BLOCK_TYPE_ACP: 96 if (pp_funcs && pp_funcs->set_powergating_by_smu) 97 ret = (pp_funcs->set_powergating_by_smu( 98 (adev)->powerplay.pp_handle, block_type, gate)); 99 break; 100 default: 101 break; 102 } 103 104 if (!ret) 105 atomic_set(&adev->pm.pwr_state[block_type], pwr_state); 106 107 mutex_unlock(&adev->pm.mutex); 108 109 return ret; 110 } 111 112 int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev) 113 { 114 struct smu_context *smu = adev->powerplay.pp_handle; 115 int ret = -EOPNOTSUPP; 116 117 mutex_lock(&adev->pm.mutex); 118 ret = smu_set_gfx_power_up_by_imu(smu); 119 mutex_unlock(&adev->pm.mutex); 120 121 msleep(10); 122 123 return ret; 124 } 125 126 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev) 127 { 128 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 129 void *pp_handle = adev->powerplay.pp_handle; 130 int ret = 0; 131 132 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 133 return -ENOENT; 134 135 mutex_lock(&adev->pm.mutex); 136 137 /* enter BACO state */ 138 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 139 140 mutex_unlock(&adev->pm.mutex); 141 142 return ret; 143 } 144 145 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev) 146 { 147 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 148 void *pp_handle = adev->powerplay.pp_handle; 149 int ret = 0; 150 151 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 152 return -ENOENT; 153 154 mutex_lock(&adev->pm.mutex); 155 156 /* exit BACO state */ 157 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 158 159 mutex_unlock(&adev->pm.mutex); 160 161 return ret; 162 } 163 164 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, 165 enum pp_mp1_state mp1_state) 166 { 167 int ret = 0; 168 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 169 170 if (pp_funcs && pp_funcs->set_mp1_state) { 171 mutex_lock(&adev->pm.mutex); 172 173 ret = pp_funcs->set_mp1_state( 174 adev->powerplay.pp_handle, 175 mp1_state); 176 177 mutex_unlock(&adev->pm.mutex); 178 } 179 180 return ret; 181 } 182 183 bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) 184 { 185 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 186 void *pp_handle = adev->powerplay.pp_handle; 187 bool baco_cap; 188 int ret = 0; 189 190 if (!pp_funcs || !pp_funcs->get_asic_baco_capability) 191 return false; 192 /* Don't use baco for reset in S3. 193 * This is a workaround for some platforms 194 * where entering BACO during suspend 195 * seems to cause reboots or hangs. 196 * This might be related to the fact that BACO controls 197 * power to the whole GPU including devices like audio and USB. 198 * Powering down/up everything may adversely affect these other 199 * devices. Needs more investigation. 200 */ 201 if (adev->in_s3) 202 return false; 203 204 mutex_lock(&adev->pm.mutex); 205 206 ret = pp_funcs->get_asic_baco_capability(pp_handle, 207 &baco_cap); 208 209 mutex_unlock(&adev->pm.mutex); 210 211 return ret ? false : baco_cap; 212 } 213 214 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev) 215 { 216 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 217 void *pp_handle = adev->powerplay.pp_handle; 218 int ret = 0; 219 220 if (!pp_funcs || !pp_funcs->asic_reset_mode_2) 221 return -ENOENT; 222 223 mutex_lock(&adev->pm.mutex); 224 225 ret = pp_funcs->asic_reset_mode_2(pp_handle); 226 227 mutex_unlock(&adev->pm.mutex); 228 229 return ret; 230 } 231 232 int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev) 233 { 234 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 235 void *pp_handle = adev->powerplay.pp_handle; 236 int ret = 0; 237 238 if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features) 239 return -ENOENT; 240 241 mutex_lock(&adev->pm.mutex); 242 243 ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle); 244 245 mutex_unlock(&adev->pm.mutex); 246 247 return ret; 248 } 249 250 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev) 251 { 252 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 253 void *pp_handle = adev->powerplay.pp_handle; 254 int ret = 0; 255 256 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 257 return -ENOENT; 258 259 mutex_lock(&adev->pm.mutex); 260 261 /* enter BACO state */ 262 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 263 if (ret) 264 goto out; 265 266 /* exit BACO state */ 267 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 268 269 out: 270 mutex_unlock(&adev->pm.mutex); 271 return ret; 272 } 273 274 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev) 275 { 276 struct smu_context *smu = adev->powerplay.pp_handle; 277 bool support_mode1_reset = false; 278 279 if (is_support_sw_smu(adev)) { 280 mutex_lock(&adev->pm.mutex); 281 support_mode1_reset = smu_mode1_reset_is_support(smu); 282 mutex_unlock(&adev->pm.mutex); 283 } 284 285 return support_mode1_reset; 286 } 287 288 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev) 289 { 290 struct smu_context *smu = adev->powerplay.pp_handle; 291 int ret = -EOPNOTSUPP; 292 293 if (is_support_sw_smu(adev)) { 294 mutex_lock(&adev->pm.mutex); 295 ret = smu_mode1_reset(smu); 296 mutex_unlock(&adev->pm.mutex); 297 } 298 299 return ret; 300 } 301 302 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, 303 enum PP_SMC_POWER_PROFILE type, 304 bool en) 305 { 306 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 307 int ret = 0; 308 309 if (amdgpu_sriov_vf(adev)) 310 return 0; 311 312 if (pp_funcs && pp_funcs->switch_power_profile) { 313 mutex_lock(&adev->pm.mutex); 314 ret = pp_funcs->switch_power_profile( 315 adev->powerplay.pp_handle, type, en); 316 mutex_unlock(&adev->pm.mutex); 317 } 318 319 return ret; 320 } 321 322 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, 323 uint32_t pstate) 324 { 325 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 326 int ret = 0; 327 328 if (pp_funcs && pp_funcs->set_xgmi_pstate) { 329 mutex_lock(&adev->pm.mutex); 330 ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle, 331 pstate); 332 mutex_unlock(&adev->pm.mutex); 333 } 334 335 return ret; 336 } 337 338 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev, 339 uint32_t cstate) 340 { 341 int ret = 0; 342 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 343 void *pp_handle = adev->powerplay.pp_handle; 344 345 if (pp_funcs && pp_funcs->set_df_cstate) { 346 mutex_lock(&adev->pm.mutex); 347 ret = pp_funcs->set_df_cstate(pp_handle, cstate); 348 mutex_unlock(&adev->pm.mutex); 349 } 350 351 return ret; 352 } 353 354 int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en) 355 { 356 struct smu_context *smu = adev->powerplay.pp_handle; 357 int ret = 0; 358 359 if (is_support_sw_smu(adev)) { 360 mutex_lock(&adev->pm.mutex); 361 ret = smu_allow_xgmi_power_down(smu, en); 362 mutex_unlock(&adev->pm.mutex); 363 } 364 365 return ret; 366 } 367 368 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev) 369 { 370 void *pp_handle = adev->powerplay.pp_handle; 371 const struct amd_pm_funcs *pp_funcs = 372 adev->powerplay.pp_funcs; 373 int ret = 0; 374 375 if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) { 376 mutex_lock(&adev->pm.mutex); 377 ret = pp_funcs->enable_mgpu_fan_boost(pp_handle); 378 mutex_unlock(&adev->pm.mutex); 379 } 380 381 return ret; 382 } 383 384 int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev, 385 uint32_t msg_id) 386 { 387 void *pp_handle = adev->powerplay.pp_handle; 388 const struct amd_pm_funcs *pp_funcs = 389 adev->powerplay.pp_funcs; 390 int ret = 0; 391 392 if (pp_funcs && pp_funcs->set_clockgating_by_smu) { 393 mutex_lock(&adev->pm.mutex); 394 ret = pp_funcs->set_clockgating_by_smu(pp_handle, 395 msg_id); 396 mutex_unlock(&adev->pm.mutex); 397 } 398 399 return ret; 400 } 401 402 int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev, 403 bool acquire) 404 { 405 void *pp_handle = adev->powerplay.pp_handle; 406 const struct amd_pm_funcs *pp_funcs = 407 adev->powerplay.pp_funcs; 408 int ret = -EOPNOTSUPP; 409 410 if (pp_funcs && pp_funcs->smu_i2c_bus_access) { 411 mutex_lock(&adev->pm.mutex); 412 ret = pp_funcs->smu_i2c_bus_access(pp_handle, 413 acquire); 414 mutex_unlock(&adev->pm.mutex); 415 } 416 417 return ret; 418 } 419 420 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) 421 { 422 if (adev->pm.dpm_enabled) { 423 mutex_lock(&adev->pm.mutex); 424 if (power_supply_is_system_supplied() > 0) 425 adev->pm.ac_power = true; 426 else 427 adev->pm.ac_power = false; 428 429 if (adev->powerplay.pp_funcs && 430 adev->powerplay.pp_funcs->enable_bapm) 431 amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); 432 433 if (is_support_sw_smu(adev)) 434 smu_set_ac_dc(adev->powerplay.pp_handle); 435 436 mutex_unlock(&adev->pm.mutex); 437 } 438 } 439 440 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, 441 void *data, uint32_t *size) 442 { 443 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 444 int ret = -EINVAL; 445 446 if (!data || !size) 447 return -EINVAL; 448 449 if (pp_funcs && pp_funcs->read_sensor) { 450 mutex_lock(&adev->pm.mutex); 451 ret = pp_funcs->read_sensor(adev->powerplay.pp_handle, 452 sensor, 453 data, 454 size); 455 mutex_unlock(&adev->pm.mutex); 456 } 457 458 return ret; 459 } 460 461 int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit) 462 { 463 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 464 int ret = -EINVAL; 465 466 if (pp_funcs && pp_funcs->get_apu_thermal_limit) { 467 mutex_lock(&adev->pm.mutex); 468 ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit); 469 mutex_unlock(&adev->pm.mutex); 470 } 471 472 return ret; 473 } 474 475 int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit) 476 { 477 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 478 int ret = -EINVAL; 479 480 if (pp_funcs && pp_funcs->set_apu_thermal_limit) { 481 mutex_lock(&adev->pm.mutex); 482 ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit); 483 mutex_unlock(&adev->pm.mutex); 484 } 485 486 return ret; 487 } 488 489 void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev) 490 { 491 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 492 int i; 493 494 if (!adev->pm.dpm_enabled) 495 return; 496 497 if (!pp_funcs->pm_compute_clocks) 498 return; 499 500 if (adev->mode_info.num_crtc) 501 amdgpu_display_bandwidth_update(adev); 502 503 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 504 struct amdgpu_ring *ring = adev->rings[i]; 505 if (ring && ring->sched.ready) 506 amdgpu_fence_wait_empty(ring); 507 } 508 509 mutex_lock(&adev->pm.mutex); 510 pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle); 511 mutex_unlock(&adev->pm.mutex); 512 } 513 514 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 515 { 516 int ret = 0; 517 518 if (adev->family == AMDGPU_FAMILY_SI) { 519 mutex_lock(&adev->pm.mutex); 520 if (enable) { 521 adev->pm.dpm.uvd_active = true; 522 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; 523 } else { 524 adev->pm.dpm.uvd_active = false; 525 } 526 mutex_unlock(&adev->pm.mutex); 527 528 amdgpu_dpm_compute_clocks(adev); 529 return; 530 } 531 532 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); 533 if (ret) 534 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 535 enable ? "enable" : "disable", ret); 536 } 537 538 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 539 { 540 int ret = 0; 541 542 if (adev->family == AMDGPU_FAMILY_SI) { 543 mutex_lock(&adev->pm.mutex); 544 if (enable) { 545 adev->pm.dpm.vce_active = true; 546 /* XXX select vce level based on ring/task */ 547 adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; 548 } else { 549 adev->pm.dpm.vce_active = false; 550 } 551 mutex_unlock(&adev->pm.mutex); 552 553 amdgpu_dpm_compute_clocks(adev); 554 return; 555 } 556 557 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); 558 if (ret) 559 DRM_ERROR("Dpm %s vce failed, ret = %d. \n", 560 enable ? "enable" : "disable", ret); 561 } 562 563 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) 564 { 565 int ret = 0; 566 567 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable); 568 if (ret) 569 DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n", 570 enable ? "enable" : "disable", ret); 571 } 572 573 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) 574 { 575 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 576 int r = 0; 577 578 if (!pp_funcs || !pp_funcs->load_firmware) 579 return 0; 580 581 mutex_lock(&adev->pm.mutex); 582 r = pp_funcs->load_firmware(adev->powerplay.pp_handle); 583 if (r) { 584 pr_err("smu firmware loading failed\n"); 585 goto out; 586 } 587 588 if (smu_version) 589 *smu_version = adev->pm.fw_version; 590 591 out: 592 mutex_unlock(&adev->pm.mutex); 593 return r; 594 } 595 596 int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable) 597 { 598 int ret = 0; 599 600 if (is_support_sw_smu(adev)) { 601 mutex_lock(&adev->pm.mutex); 602 ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle, 603 enable); 604 mutex_unlock(&adev->pm.mutex); 605 } 606 607 return ret; 608 } 609 610 int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size) 611 { 612 struct smu_context *smu = adev->powerplay.pp_handle; 613 int ret = 0; 614 615 if (!is_support_sw_smu(adev)) 616 return -EOPNOTSUPP; 617 618 mutex_lock(&adev->pm.mutex); 619 ret = smu_send_hbm_bad_pages_num(smu, size); 620 mutex_unlock(&adev->pm.mutex); 621 622 return ret; 623 } 624 625 int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size) 626 { 627 struct smu_context *smu = adev->powerplay.pp_handle; 628 int ret = 0; 629 630 if (!is_support_sw_smu(adev)) 631 return -EOPNOTSUPP; 632 633 mutex_lock(&adev->pm.mutex); 634 ret = smu_send_hbm_bad_channel_flag(smu, size); 635 mutex_unlock(&adev->pm.mutex); 636 637 return ret; 638 } 639 640 int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, 641 enum pp_clock_type type, 642 uint32_t *min, 643 uint32_t *max) 644 { 645 int ret = 0; 646 647 if (type != PP_SCLK) 648 return -EINVAL; 649 650 if (!is_support_sw_smu(adev)) 651 return -EOPNOTSUPP; 652 653 mutex_lock(&adev->pm.mutex); 654 ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle, 655 SMU_SCLK, 656 min, 657 max); 658 mutex_unlock(&adev->pm.mutex); 659 660 return ret; 661 } 662 663 int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, 664 enum pp_clock_type type, 665 uint32_t min, 666 uint32_t max) 667 { 668 struct smu_context *smu = adev->powerplay.pp_handle; 669 int ret = 0; 670 671 if (type != PP_SCLK) 672 return -EINVAL; 673 674 if (!is_support_sw_smu(adev)) 675 return -EOPNOTSUPP; 676 677 mutex_lock(&adev->pm.mutex); 678 ret = smu_set_soft_freq_range(smu, 679 SMU_SCLK, 680 min, 681 max); 682 mutex_unlock(&adev->pm.mutex); 683 684 return ret; 685 } 686 687 int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev) 688 { 689 struct smu_context *smu = adev->powerplay.pp_handle; 690 int ret = 0; 691 692 if (!is_support_sw_smu(adev)) 693 return 0; 694 695 mutex_lock(&adev->pm.mutex); 696 ret = smu_write_watermarks_table(smu); 697 mutex_unlock(&adev->pm.mutex); 698 699 return ret; 700 } 701 702 int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, 703 enum smu_event_type event, 704 uint64_t event_arg) 705 { 706 struct smu_context *smu = adev->powerplay.pp_handle; 707 int ret = 0; 708 709 if (!is_support_sw_smu(adev)) 710 return -EOPNOTSUPP; 711 712 mutex_lock(&adev->pm.mutex); 713 ret = smu_wait_for_event(smu, event, event_arg); 714 mutex_unlock(&adev->pm.mutex); 715 716 return ret; 717 } 718 719 int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value) 720 { 721 struct smu_context *smu = adev->powerplay.pp_handle; 722 int ret = 0; 723 724 if (!is_support_sw_smu(adev)) 725 return -EOPNOTSUPP; 726 727 mutex_lock(&adev->pm.mutex); 728 ret = smu_set_residency_gfxoff(smu, value); 729 mutex_unlock(&adev->pm.mutex); 730 731 return ret; 732 } 733 734 int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value) 735 { 736 struct smu_context *smu = adev->powerplay.pp_handle; 737 int ret = 0; 738 739 if (!is_support_sw_smu(adev)) 740 return -EOPNOTSUPP; 741 742 mutex_lock(&adev->pm.mutex); 743 ret = smu_get_residency_gfxoff(smu, value); 744 mutex_unlock(&adev->pm.mutex); 745 746 return ret; 747 } 748 749 int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value) 750 { 751 struct smu_context *smu = adev->powerplay.pp_handle; 752 int ret = 0; 753 754 if (!is_support_sw_smu(adev)) 755 return -EOPNOTSUPP; 756 757 mutex_lock(&adev->pm.mutex); 758 ret = smu_get_entrycount_gfxoff(smu, value); 759 mutex_unlock(&adev->pm.mutex); 760 761 return ret; 762 } 763 764 int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value) 765 { 766 struct smu_context *smu = adev->powerplay.pp_handle; 767 int ret = 0; 768 769 if (!is_support_sw_smu(adev)) 770 return -EOPNOTSUPP; 771 772 mutex_lock(&adev->pm.mutex); 773 ret = smu_get_status_gfxoff(smu, value); 774 mutex_unlock(&adev->pm.mutex); 775 776 return ret; 777 } 778 779 uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev) 780 { 781 struct smu_context *smu = adev->powerplay.pp_handle; 782 783 if (!is_support_sw_smu(adev)) 784 return 0; 785 786 return atomic64_read(&smu->throttle_int_counter); 787 } 788 789 /* amdgpu_dpm_gfx_state_change - Handle gfx power state change set 790 * @adev: amdgpu_device pointer 791 * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry) 792 * 793 */ 794 void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev, 795 enum gfx_change_state state) 796 { 797 mutex_lock(&adev->pm.mutex); 798 if (adev->powerplay.pp_funcs && 799 adev->powerplay.pp_funcs->gfx_state_change_set) 800 ((adev)->powerplay.pp_funcs->gfx_state_change_set( 801 (adev)->powerplay.pp_handle, state)); 802 mutex_unlock(&adev->pm.mutex); 803 } 804 805 int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev, 806 void *umc_ecc) 807 { 808 struct smu_context *smu = adev->powerplay.pp_handle; 809 int ret = 0; 810 811 if (!is_support_sw_smu(adev)) 812 return -EOPNOTSUPP; 813 814 mutex_lock(&adev->pm.mutex); 815 ret = smu_get_ecc_info(smu, umc_ecc); 816 mutex_unlock(&adev->pm.mutex); 817 818 return ret; 819 } 820 821 struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev, 822 uint32_t idx) 823 { 824 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 825 struct amd_vce_state *vstate = NULL; 826 827 if (!pp_funcs->get_vce_clock_state) 828 return NULL; 829 830 mutex_lock(&adev->pm.mutex); 831 vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle, 832 idx); 833 mutex_unlock(&adev->pm.mutex); 834 835 return vstate; 836 } 837 838 void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, 839 enum amd_pm_state_type *state) 840 { 841 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 842 843 mutex_lock(&adev->pm.mutex); 844 845 if (!pp_funcs->get_current_power_state) { 846 *state = adev->pm.dpm.user_state; 847 goto out; 848 } 849 850 *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle); 851 if (*state < POWER_STATE_TYPE_DEFAULT || 852 *state > POWER_STATE_TYPE_INTERNAL_3DPERF) 853 *state = adev->pm.dpm.user_state; 854 855 out: 856 mutex_unlock(&adev->pm.mutex); 857 } 858 859 void amdgpu_dpm_set_power_state(struct amdgpu_device *adev, 860 enum amd_pm_state_type state) 861 { 862 mutex_lock(&adev->pm.mutex); 863 adev->pm.dpm.user_state = state; 864 mutex_unlock(&adev->pm.mutex); 865 866 if (is_support_sw_smu(adev)) 867 return; 868 869 if (amdgpu_dpm_dispatch_task(adev, 870 AMD_PP_TASK_ENABLE_USER_STATE, 871 &state) == -EOPNOTSUPP) 872 amdgpu_dpm_compute_clocks(adev); 873 } 874 875 enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev) 876 { 877 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 878 enum amd_dpm_forced_level level; 879 880 if (!pp_funcs) 881 return AMD_DPM_FORCED_LEVEL_AUTO; 882 883 mutex_lock(&adev->pm.mutex); 884 if (pp_funcs->get_performance_level) 885 level = pp_funcs->get_performance_level(adev->powerplay.pp_handle); 886 else 887 level = adev->pm.dpm.forced_level; 888 mutex_unlock(&adev->pm.mutex); 889 890 return level; 891 } 892 893 int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev, 894 enum amd_dpm_forced_level level) 895 { 896 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 897 enum amd_dpm_forced_level current_level; 898 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 899 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 900 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 901 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 902 903 if (!pp_funcs || !pp_funcs->force_performance_level) 904 return 0; 905 906 if (adev->pm.dpm.thermal_active) 907 return -EINVAL; 908 909 current_level = amdgpu_dpm_get_performance_level(adev); 910 if (current_level == level) 911 return 0; 912 913 if (adev->asic_type == CHIP_RAVEN) { 914 if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) { 915 if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && 916 level == AMD_DPM_FORCED_LEVEL_MANUAL) 917 amdgpu_gfx_off_ctrl(adev, false); 918 else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && 919 level != AMD_DPM_FORCED_LEVEL_MANUAL) 920 amdgpu_gfx_off_ctrl(adev, true); 921 } 922 } 923 924 if (!(current_level & profile_mode_mask) && 925 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) 926 return -EINVAL; 927 928 if (!(current_level & profile_mode_mask) && 929 (level & profile_mode_mask)) { 930 /* enter UMD Pstate */ 931 amdgpu_device_ip_set_powergating_state(adev, 932 AMD_IP_BLOCK_TYPE_GFX, 933 AMD_PG_STATE_UNGATE); 934 amdgpu_device_ip_set_clockgating_state(adev, 935 AMD_IP_BLOCK_TYPE_GFX, 936 AMD_CG_STATE_UNGATE); 937 } else if ((current_level & profile_mode_mask) && 938 !(level & profile_mode_mask)) { 939 /* exit UMD Pstate */ 940 amdgpu_device_ip_set_clockgating_state(adev, 941 AMD_IP_BLOCK_TYPE_GFX, 942 AMD_CG_STATE_GATE); 943 amdgpu_device_ip_set_powergating_state(adev, 944 AMD_IP_BLOCK_TYPE_GFX, 945 AMD_PG_STATE_GATE); 946 } 947 948 mutex_lock(&adev->pm.mutex); 949 950 if (pp_funcs->force_performance_level(adev->powerplay.pp_handle, 951 level)) { 952 mutex_unlock(&adev->pm.mutex); 953 return -EINVAL; 954 } 955 956 adev->pm.dpm.forced_level = level; 957 958 mutex_unlock(&adev->pm.mutex); 959 960 return 0; 961 } 962 963 int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev, 964 struct pp_states_info *states) 965 { 966 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 967 int ret = 0; 968 969 if (!pp_funcs->get_pp_num_states) 970 return -EOPNOTSUPP; 971 972 mutex_lock(&adev->pm.mutex); 973 ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle, 974 states); 975 mutex_unlock(&adev->pm.mutex); 976 977 return ret; 978 } 979 980 int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev, 981 enum amd_pp_task task_id, 982 enum amd_pm_state_type *user_state) 983 { 984 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 985 int ret = 0; 986 987 if (!pp_funcs->dispatch_tasks) 988 return -EOPNOTSUPP; 989 990 mutex_lock(&adev->pm.mutex); 991 ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle, 992 task_id, 993 user_state); 994 mutex_unlock(&adev->pm.mutex); 995 996 return ret; 997 } 998 999 int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table) 1000 { 1001 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1002 int ret = 0; 1003 1004 if (!pp_funcs->get_pp_table) 1005 return 0; 1006 1007 mutex_lock(&adev->pm.mutex); 1008 ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle, 1009 table); 1010 mutex_unlock(&adev->pm.mutex); 1011 1012 return ret; 1013 } 1014 1015 int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev, 1016 uint32_t type, 1017 long *input, 1018 uint32_t size) 1019 { 1020 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1021 int ret = 0; 1022 1023 if (!pp_funcs->set_fine_grain_clk_vol) 1024 return 0; 1025 1026 mutex_lock(&adev->pm.mutex); 1027 ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle, 1028 type, 1029 input, 1030 size); 1031 mutex_unlock(&adev->pm.mutex); 1032 1033 return ret; 1034 } 1035 1036 int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev, 1037 uint32_t type, 1038 long *input, 1039 uint32_t size) 1040 { 1041 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1042 int ret = 0; 1043 1044 if (!pp_funcs->odn_edit_dpm_table) 1045 return 0; 1046 1047 mutex_lock(&adev->pm.mutex); 1048 ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle, 1049 type, 1050 input, 1051 size); 1052 mutex_unlock(&adev->pm.mutex); 1053 1054 return ret; 1055 } 1056 1057 int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev, 1058 enum pp_clock_type type, 1059 char *buf) 1060 { 1061 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1062 int ret = 0; 1063 1064 if (!pp_funcs->print_clock_levels) 1065 return 0; 1066 1067 mutex_lock(&adev->pm.mutex); 1068 ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle, 1069 type, 1070 buf); 1071 mutex_unlock(&adev->pm.mutex); 1072 1073 return ret; 1074 } 1075 1076 int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev, 1077 enum pp_clock_type type, 1078 char *buf, 1079 int *offset) 1080 { 1081 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1082 int ret = 0; 1083 1084 if (!pp_funcs->emit_clock_levels) 1085 return -ENOENT; 1086 1087 mutex_lock(&adev->pm.mutex); 1088 ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle, 1089 type, 1090 buf, 1091 offset); 1092 mutex_unlock(&adev->pm.mutex); 1093 1094 return ret; 1095 } 1096 1097 int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev, 1098 uint64_t ppfeature_masks) 1099 { 1100 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1101 int ret = 0; 1102 1103 if (!pp_funcs->set_ppfeature_status) 1104 return 0; 1105 1106 mutex_lock(&adev->pm.mutex); 1107 ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle, 1108 ppfeature_masks); 1109 mutex_unlock(&adev->pm.mutex); 1110 1111 return ret; 1112 } 1113 1114 int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf) 1115 { 1116 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1117 int ret = 0; 1118 1119 if (!pp_funcs->get_ppfeature_status) 1120 return 0; 1121 1122 mutex_lock(&adev->pm.mutex); 1123 ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle, 1124 buf); 1125 mutex_unlock(&adev->pm.mutex); 1126 1127 return ret; 1128 } 1129 1130 int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev, 1131 enum pp_clock_type type, 1132 uint32_t mask) 1133 { 1134 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1135 int ret = 0; 1136 1137 if (!pp_funcs->force_clock_level) 1138 return 0; 1139 1140 mutex_lock(&adev->pm.mutex); 1141 ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle, 1142 type, 1143 mask); 1144 mutex_unlock(&adev->pm.mutex); 1145 1146 return ret; 1147 } 1148 1149 int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev) 1150 { 1151 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1152 int ret = 0; 1153 1154 if (!pp_funcs->get_sclk_od) 1155 return 0; 1156 1157 mutex_lock(&adev->pm.mutex); 1158 ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle); 1159 mutex_unlock(&adev->pm.mutex); 1160 1161 return ret; 1162 } 1163 1164 int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) 1165 { 1166 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1167 1168 if (is_support_sw_smu(adev)) 1169 return 0; 1170 1171 mutex_lock(&adev->pm.mutex); 1172 if (pp_funcs->set_sclk_od) 1173 pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value); 1174 mutex_unlock(&adev->pm.mutex); 1175 1176 if (amdgpu_dpm_dispatch_task(adev, 1177 AMD_PP_TASK_READJUST_POWER_STATE, 1178 NULL) == -EOPNOTSUPP) { 1179 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1180 amdgpu_dpm_compute_clocks(adev); 1181 } 1182 1183 return 0; 1184 } 1185 1186 int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev) 1187 { 1188 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1189 int ret = 0; 1190 1191 if (!pp_funcs->get_mclk_od) 1192 return 0; 1193 1194 mutex_lock(&adev->pm.mutex); 1195 ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle); 1196 mutex_unlock(&adev->pm.mutex); 1197 1198 return ret; 1199 } 1200 1201 int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) 1202 { 1203 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1204 1205 if (is_support_sw_smu(adev)) 1206 return 0; 1207 1208 mutex_lock(&adev->pm.mutex); 1209 if (pp_funcs->set_mclk_od) 1210 pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value); 1211 mutex_unlock(&adev->pm.mutex); 1212 1213 if (amdgpu_dpm_dispatch_task(adev, 1214 AMD_PP_TASK_READJUST_POWER_STATE, 1215 NULL) == -EOPNOTSUPP) { 1216 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1217 amdgpu_dpm_compute_clocks(adev); 1218 } 1219 1220 return 0; 1221 } 1222 1223 int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev, 1224 char *buf) 1225 { 1226 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1227 int ret = 0; 1228 1229 if (!pp_funcs->get_power_profile_mode) 1230 return -EOPNOTSUPP; 1231 1232 mutex_lock(&adev->pm.mutex); 1233 ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle, 1234 buf); 1235 mutex_unlock(&adev->pm.mutex); 1236 1237 return ret; 1238 } 1239 1240 int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev, 1241 long *input, uint32_t size) 1242 { 1243 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1244 int ret = 0; 1245 1246 if (!pp_funcs->set_power_profile_mode) 1247 return 0; 1248 1249 mutex_lock(&adev->pm.mutex); 1250 ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle, 1251 input, 1252 size); 1253 mutex_unlock(&adev->pm.mutex); 1254 1255 return ret; 1256 } 1257 1258 int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table) 1259 { 1260 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1261 int ret = 0; 1262 1263 if (!pp_funcs->get_gpu_metrics) 1264 return 0; 1265 1266 mutex_lock(&adev->pm.mutex); 1267 ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle, 1268 table); 1269 mutex_unlock(&adev->pm.mutex); 1270 1271 return ret; 1272 } 1273 1274 int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev, 1275 uint32_t *fan_mode) 1276 { 1277 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1278 int ret = 0; 1279 1280 if (!pp_funcs->get_fan_control_mode) 1281 return -EOPNOTSUPP; 1282 1283 mutex_lock(&adev->pm.mutex); 1284 ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle, 1285 fan_mode); 1286 mutex_unlock(&adev->pm.mutex); 1287 1288 return ret; 1289 } 1290 1291 int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev, 1292 uint32_t speed) 1293 { 1294 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1295 int ret = 0; 1296 1297 if (!pp_funcs->set_fan_speed_pwm) 1298 return -EOPNOTSUPP; 1299 1300 mutex_lock(&adev->pm.mutex); 1301 ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle, 1302 speed); 1303 mutex_unlock(&adev->pm.mutex); 1304 1305 return ret; 1306 } 1307 1308 int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev, 1309 uint32_t *speed) 1310 { 1311 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1312 int ret = 0; 1313 1314 if (!pp_funcs->get_fan_speed_pwm) 1315 return -EOPNOTSUPP; 1316 1317 mutex_lock(&adev->pm.mutex); 1318 ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle, 1319 speed); 1320 mutex_unlock(&adev->pm.mutex); 1321 1322 return ret; 1323 } 1324 1325 int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev, 1326 uint32_t *speed) 1327 { 1328 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1329 int ret = 0; 1330 1331 if (!pp_funcs->get_fan_speed_rpm) 1332 return -EOPNOTSUPP; 1333 1334 mutex_lock(&adev->pm.mutex); 1335 ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle, 1336 speed); 1337 mutex_unlock(&adev->pm.mutex); 1338 1339 return ret; 1340 } 1341 1342 int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev, 1343 uint32_t speed) 1344 { 1345 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1346 int ret = 0; 1347 1348 if (!pp_funcs->set_fan_speed_rpm) 1349 return -EOPNOTSUPP; 1350 1351 mutex_lock(&adev->pm.mutex); 1352 ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle, 1353 speed); 1354 mutex_unlock(&adev->pm.mutex); 1355 1356 return ret; 1357 } 1358 1359 int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev, 1360 uint32_t mode) 1361 { 1362 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1363 int ret = 0; 1364 1365 if (!pp_funcs->set_fan_control_mode) 1366 return -EOPNOTSUPP; 1367 1368 mutex_lock(&adev->pm.mutex); 1369 ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle, 1370 mode); 1371 mutex_unlock(&adev->pm.mutex); 1372 1373 return ret; 1374 } 1375 1376 int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev, 1377 uint32_t *limit, 1378 enum pp_power_limit_level pp_limit_level, 1379 enum pp_power_type power_type) 1380 { 1381 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1382 int ret = 0; 1383 1384 if (!pp_funcs->get_power_limit) 1385 return -ENODATA; 1386 1387 mutex_lock(&adev->pm.mutex); 1388 ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle, 1389 limit, 1390 pp_limit_level, 1391 power_type); 1392 mutex_unlock(&adev->pm.mutex); 1393 1394 return ret; 1395 } 1396 1397 int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, 1398 uint32_t limit) 1399 { 1400 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1401 int ret = 0; 1402 1403 if (!pp_funcs->set_power_limit) 1404 return -EINVAL; 1405 1406 mutex_lock(&adev->pm.mutex); 1407 ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle, 1408 limit); 1409 mutex_unlock(&adev->pm.mutex); 1410 1411 return ret; 1412 } 1413 1414 int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev) 1415 { 1416 bool cclk_dpm_supported = false; 1417 1418 if (!is_support_sw_smu(adev)) 1419 return false; 1420 1421 mutex_lock(&adev->pm.mutex); 1422 cclk_dpm_supported = is_support_cclk_dpm(adev); 1423 mutex_unlock(&adev->pm.mutex); 1424 1425 return (int)cclk_dpm_supported; 1426 } 1427 1428 int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, 1429 struct seq_file *m) 1430 { 1431 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1432 1433 if (!pp_funcs->debugfs_print_current_performance_level) 1434 return -EOPNOTSUPP; 1435 1436 mutex_lock(&adev->pm.mutex); 1437 pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle, 1438 m); 1439 mutex_unlock(&adev->pm.mutex); 1440 1441 return 0; 1442 } 1443 1444 int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, 1445 void **addr, 1446 size_t *size) 1447 { 1448 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1449 int ret = 0; 1450 1451 if (!pp_funcs->get_smu_prv_buf_details) 1452 return -ENOSYS; 1453 1454 mutex_lock(&adev->pm.mutex); 1455 ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle, 1456 addr, 1457 size); 1458 mutex_unlock(&adev->pm.mutex); 1459 1460 return ret; 1461 } 1462 1463 int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev) 1464 { 1465 if (is_support_sw_smu(adev)) { 1466 struct smu_context *smu = adev->powerplay.pp_handle; 1467 1468 return (smu->od_enabled || smu->is_apu); 1469 } else { 1470 struct pp_hwmgr *hwmgr; 1471 1472 /* 1473 * dpm on some legacy asics don't carry od_enabled member 1474 * as its pp_handle is casted directly from adev. 1475 */ 1476 if (amdgpu_dpm_is_legacy_dpm(adev)) 1477 return false; 1478 1479 hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle; 1480 1481 return hwmgr->od_enabled; 1482 } 1483 } 1484 1485 int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, 1486 const char *buf, 1487 size_t size) 1488 { 1489 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1490 int ret = 0; 1491 1492 if (!pp_funcs->set_pp_table) 1493 return -EOPNOTSUPP; 1494 1495 mutex_lock(&adev->pm.mutex); 1496 ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle, 1497 buf, 1498 size); 1499 mutex_unlock(&adev->pm.mutex); 1500 1501 return ret; 1502 } 1503 1504 int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev) 1505 { 1506 struct smu_context *smu = adev->powerplay.pp_handle; 1507 1508 if (!is_support_sw_smu(adev)) 1509 return INT_MAX; 1510 1511 return smu->cpu_core_num; 1512 } 1513 1514 void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev) 1515 { 1516 if (!is_support_sw_smu(adev)) 1517 return; 1518 1519 amdgpu_smu_stb_debug_fs_init(adev); 1520 } 1521 1522 int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev, 1523 const struct amd_pp_display_configuration *input) 1524 { 1525 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1526 int ret = 0; 1527 1528 if (!pp_funcs->display_configuration_change) 1529 return 0; 1530 1531 mutex_lock(&adev->pm.mutex); 1532 ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle, 1533 input); 1534 mutex_unlock(&adev->pm.mutex); 1535 1536 return ret; 1537 } 1538 1539 int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev, 1540 enum amd_pp_clock_type type, 1541 struct amd_pp_clocks *clocks) 1542 { 1543 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1544 int ret = 0; 1545 1546 if (!pp_funcs->get_clock_by_type) 1547 return 0; 1548 1549 mutex_lock(&adev->pm.mutex); 1550 ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle, 1551 type, 1552 clocks); 1553 mutex_unlock(&adev->pm.mutex); 1554 1555 return ret; 1556 } 1557 1558 int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev, 1559 struct amd_pp_simple_clock_info *clocks) 1560 { 1561 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1562 int ret = 0; 1563 1564 if (!pp_funcs->get_display_mode_validation_clocks) 1565 return 0; 1566 1567 mutex_lock(&adev->pm.mutex); 1568 ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle, 1569 clocks); 1570 mutex_unlock(&adev->pm.mutex); 1571 1572 return ret; 1573 } 1574 1575 int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev, 1576 enum amd_pp_clock_type type, 1577 struct pp_clock_levels_with_latency *clocks) 1578 { 1579 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1580 int ret = 0; 1581 1582 if (!pp_funcs->get_clock_by_type_with_latency) 1583 return 0; 1584 1585 mutex_lock(&adev->pm.mutex); 1586 ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle, 1587 type, 1588 clocks); 1589 mutex_unlock(&adev->pm.mutex); 1590 1591 return ret; 1592 } 1593 1594 int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev, 1595 enum amd_pp_clock_type type, 1596 struct pp_clock_levels_with_voltage *clocks) 1597 { 1598 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1599 int ret = 0; 1600 1601 if (!pp_funcs->get_clock_by_type_with_voltage) 1602 return 0; 1603 1604 mutex_lock(&adev->pm.mutex); 1605 ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle, 1606 type, 1607 clocks); 1608 mutex_unlock(&adev->pm.mutex); 1609 1610 return ret; 1611 } 1612 1613 int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev, 1614 void *clock_ranges) 1615 { 1616 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1617 int ret = 0; 1618 1619 if (!pp_funcs->set_watermarks_for_clocks_ranges) 1620 return -EOPNOTSUPP; 1621 1622 mutex_lock(&adev->pm.mutex); 1623 ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle, 1624 clock_ranges); 1625 mutex_unlock(&adev->pm.mutex); 1626 1627 return ret; 1628 } 1629 1630 int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev, 1631 struct pp_display_clock_request *clock) 1632 { 1633 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1634 int ret = 0; 1635 1636 if (!pp_funcs->display_clock_voltage_request) 1637 return -EOPNOTSUPP; 1638 1639 mutex_lock(&adev->pm.mutex); 1640 ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle, 1641 clock); 1642 mutex_unlock(&adev->pm.mutex); 1643 1644 return ret; 1645 } 1646 1647 int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev, 1648 struct amd_pp_clock_info *clocks) 1649 { 1650 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1651 int ret = 0; 1652 1653 if (!pp_funcs->get_current_clocks) 1654 return -EOPNOTSUPP; 1655 1656 mutex_lock(&adev->pm.mutex); 1657 ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle, 1658 clocks); 1659 mutex_unlock(&adev->pm.mutex); 1660 1661 return ret; 1662 } 1663 1664 void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev) 1665 { 1666 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1667 1668 if (!pp_funcs->notify_smu_enable_pwe) 1669 return; 1670 1671 mutex_lock(&adev->pm.mutex); 1672 pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle); 1673 mutex_unlock(&adev->pm.mutex); 1674 } 1675 1676 int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev, 1677 uint32_t count) 1678 { 1679 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1680 int ret = 0; 1681 1682 if (!pp_funcs->set_active_display_count) 1683 return -EOPNOTSUPP; 1684 1685 mutex_lock(&adev->pm.mutex); 1686 ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle, 1687 count); 1688 mutex_unlock(&adev->pm.mutex); 1689 1690 return ret; 1691 } 1692 1693 int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev, 1694 uint32_t clock) 1695 { 1696 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1697 int ret = 0; 1698 1699 if (!pp_funcs->set_min_deep_sleep_dcefclk) 1700 return -EOPNOTSUPP; 1701 1702 mutex_lock(&adev->pm.mutex); 1703 ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle, 1704 clock); 1705 mutex_unlock(&adev->pm.mutex); 1706 1707 return ret; 1708 } 1709 1710 void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev, 1711 uint32_t clock) 1712 { 1713 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1714 1715 if (!pp_funcs->set_hard_min_dcefclk_by_freq) 1716 return; 1717 1718 mutex_lock(&adev->pm.mutex); 1719 pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle, 1720 clock); 1721 mutex_unlock(&adev->pm.mutex); 1722 } 1723 1724 void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev, 1725 uint32_t clock) 1726 { 1727 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1728 1729 if (!pp_funcs->set_hard_min_fclk_by_freq) 1730 return; 1731 1732 mutex_lock(&adev->pm.mutex); 1733 pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle, 1734 clock); 1735 mutex_unlock(&adev->pm.mutex); 1736 } 1737 1738 int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev, 1739 bool disable_memory_clock_switch) 1740 { 1741 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1742 int ret = 0; 1743 1744 if (!pp_funcs->display_disable_memory_clock_switch) 1745 return 0; 1746 1747 mutex_lock(&adev->pm.mutex); 1748 ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle, 1749 disable_memory_clock_switch); 1750 mutex_unlock(&adev->pm.mutex); 1751 1752 return ret; 1753 } 1754 1755 int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev, 1756 struct pp_smu_nv_clock_table *max_clocks) 1757 { 1758 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1759 int ret = 0; 1760 1761 if (!pp_funcs->get_max_sustainable_clocks_by_dc) 1762 return -EOPNOTSUPP; 1763 1764 mutex_lock(&adev->pm.mutex); 1765 ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle, 1766 max_clocks); 1767 mutex_unlock(&adev->pm.mutex); 1768 1769 return ret; 1770 } 1771 1772 enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev, 1773 unsigned int *clock_values_in_khz, 1774 unsigned int *num_states) 1775 { 1776 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1777 int ret = 0; 1778 1779 if (!pp_funcs->get_uclk_dpm_states) 1780 return -EOPNOTSUPP; 1781 1782 mutex_lock(&adev->pm.mutex); 1783 ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle, 1784 clock_values_in_khz, 1785 num_states); 1786 mutex_unlock(&adev->pm.mutex); 1787 1788 return ret; 1789 } 1790 1791 int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev, 1792 struct dpm_clocks *clock_table) 1793 { 1794 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1795 int ret = 0; 1796 1797 if (!pp_funcs->get_dpm_clock_table) 1798 return -EOPNOTSUPP; 1799 1800 mutex_lock(&adev->pm.mutex); 1801 ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle, 1802 clock_table); 1803 mutex_unlock(&adev->pm.mutex); 1804 1805 return ret; 1806 } 1807