1 /* 2 * Copyright 2011 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Alex Deucher 23 */ 24 25 #include "amdgpu.h" 26 #include "amdgpu_atombios.h" 27 #include "amdgpu_i2c.h" 28 #include "amdgpu_dpm.h" 29 #include "atom.h" 30 #include "amd_pcie.h" 31 #include "amdgpu_display.h" 32 #include "hwmgr.h" 33 #include <linux/power_supply.h> 34 #include "amdgpu_smu.h" 35 36 #define amdgpu_dpm_enable_bapm(adev, e) \ 37 ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) 38 39 int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) 40 { 41 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 42 int ret = 0; 43 44 if (!pp_funcs->get_sclk) 45 return 0; 46 47 mutex_lock(&adev->pm.mutex); 48 ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle, 49 low); 50 mutex_unlock(&adev->pm.mutex); 51 52 return ret; 53 } 54 55 int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low) 56 { 57 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 58 int ret = 0; 59 60 if (!pp_funcs->get_mclk) 61 return 0; 62 63 mutex_lock(&adev->pm.mutex); 64 ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle, 65 low); 66 mutex_unlock(&adev->pm.mutex); 67 68 return ret; 69 } 70 71 int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate) 72 { 73 int ret = 0; 74 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 75 enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON; 76 77 if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) { 78 dev_dbg(adev->dev, "IP block%d already in the target %s state!", 79 block_type, gate ? "gate" : "ungate"); 80 return 0; 81 } 82 83 mutex_lock(&adev->pm.mutex); 84 85 switch (block_type) { 86 case AMD_IP_BLOCK_TYPE_UVD: 87 case AMD_IP_BLOCK_TYPE_VCE: 88 case AMD_IP_BLOCK_TYPE_GFX: 89 case AMD_IP_BLOCK_TYPE_VCN: 90 case AMD_IP_BLOCK_TYPE_SDMA: 91 case AMD_IP_BLOCK_TYPE_JPEG: 92 case AMD_IP_BLOCK_TYPE_GMC: 93 case AMD_IP_BLOCK_TYPE_ACP: 94 if (pp_funcs && pp_funcs->set_powergating_by_smu) 95 ret = (pp_funcs->set_powergating_by_smu( 96 (adev)->powerplay.pp_handle, block_type, gate)); 97 break; 98 default: 99 break; 100 } 101 102 if (!ret) 103 atomic_set(&adev->pm.pwr_state[block_type], pwr_state); 104 105 mutex_unlock(&adev->pm.mutex); 106 107 return ret; 108 } 109 110 int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev) 111 { 112 struct smu_context *smu = adev->powerplay.pp_handle; 113 int ret = -EOPNOTSUPP; 114 115 mutex_lock(&adev->pm.mutex); 116 ret = smu_set_gfx_power_up_by_imu(smu); 117 mutex_unlock(&adev->pm.mutex); 118 119 msleep(10); 120 121 return ret; 122 } 123 124 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev) 125 { 126 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 127 void *pp_handle = adev->powerplay.pp_handle; 128 int ret = 0; 129 130 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 131 return -ENOENT; 132 133 mutex_lock(&adev->pm.mutex); 134 135 /* enter BACO state */ 136 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 137 138 mutex_unlock(&adev->pm.mutex); 139 140 return ret; 141 } 142 143 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev) 144 { 145 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 146 void *pp_handle = adev->powerplay.pp_handle; 147 int ret = 0; 148 149 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 150 return -ENOENT; 151 152 mutex_lock(&adev->pm.mutex); 153 154 /* exit BACO state */ 155 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 156 157 mutex_unlock(&adev->pm.mutex); 158 159 return ret; 160 } 161 162 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, 163 enum pp_mp1_state mp1_state) 164 { 165 int ret = 0; 166 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 167 168 if (pp_funcs && pp_funcs->set_mp1_state) { 169 mutex_lock(&adev->pm.mutex); 170 171 ret = pp_funcs->set_mp1_state( 172 adev->powerplay.pp_handle, 173 mp1_state); 174 175 mutex_unlock(&adev->pm.mutex); 176 } 177 178 return ret; 179 } 180 181 bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) 182 { 183 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 184 void *pp_handle = adev->powerplay.pp_handle; 185 bool baco_cap; 186 int ret = 0; 187 188 if (!pp_funcs || !pp_funcs->get_asic_baco_capability) 189 return false; 190 /* Don't use baco for reset in S3. 191 * This is a workaround for some platforms 192 * where entering BACO during suspend 193 * seems to cause reboots or hangs. 194 * This might be related to the fact that BACO controls 195 * power to the whole GPU including devices like audio and USB. 196 * Powering down/up everything may adversely affect these other 197 * devices. Needs more investigation. 198 */ 199 if (adev->in_s3) 200 return false; 201 202 mutex_lock(&adev->pm.mutex); 203 204 ret = pp_funcs->get_asic_baco_capability(pp_handle, 205 &baco_cap); 206 207 mutex_unlock(&adev->pm.mutex); 208 209 return ret ? false : baco_cap; 210 } 211 212 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev) 213 { 214 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 215 void *pp_handle = adev->powerplay.pp_handle; 216 int ret = 0; 217 218 if (!pp_funcs || !pp_funcs->asic_reset_mode_2) 219 return -ENOENT; 220 221 mutex_lock(&adev->pm.mutex); 222 223 ret = pp_funcs->asic_reset_mode_2(pp_handle); 224 225 mutex_unlock(&adev->pm.mutex); 226 227 return ret; 228 } 229 230 int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev) 231 { 232 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 233 void *pp_handle = adev->powerplay.pp_handle; 234 int ret = 0; 235 236 if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features) 237 return -ENOENT; 238 239 mutex_lock(&adev->pm.mutex); 240 241 ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle); 242 243 mutex_unlock(&adev->pm.mutex); 244 245 return ret; 246 } 247 248 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev) 249 { 250 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 251 void *pp_handle = adev->powerplay.pp_handle; 252 int ret = 0; 253 254 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 255 return -ENOENT; 256 257 mutex_lock(&adev->pm.mutex); 258 259 /* enter BACO state */ 260 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 261 if (ret) 262 goto out; 263 264 /* exit BACO state */ 265 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 266 267 out: 268 mutex_unlock(&adev->pm.mutex); 269 return ret; 270 } 271 272 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev) 273 { 274 struct smu_context *smu = adev->powerplay.pp_handle; 275 bool support_mode1_reset = false; 276 277 if (is_support_sw_smu(adev)) { 278 mutex_lock(&adev->pm.mutex); 279 support_mode1_reset = smu_mode1_reset_is_support(smu); 280 mutex_unlock(&adev->pm.mutex); 281 } 282 283 return support_mode1_reset; 284 } 285 286 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev) 287 { 288 struct smu_context *smu = adev->powerplay.pp_handle; 289 int ret = -EOPNOTSUPP; 290 291 if (is_support_sw_smu(adev)) { 292 mutex_lock(&adev->pm.mutex); 293 ret = smu_mode1_reset(smu); 294 mutex_unlock(&adev->pm.mutex); 295 } 296 297 return ret; 298 } 299 300 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, 301 enum PP_SMC_POWER_PROFILE type, 302 bool en) 303 { 304 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 305 int ret = 0; 306 307 if (amdgpu_sriov_vf(adev)) 308 return 0; 309 310 if (pp_funcs && pp_funcs->switch_power_profile) { 311 mutex_lock(&adev->pm.mutex); 312 ret = pp_funcs->switch_power_profile( 313 adev->powerplay.pp_handle, type, en); 314 mutex_unlock(&adev->pm.mutex); 315 } 316 317 return ret; 318 } 319 320 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, 321 uint32_t pstate) 322 { 323 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 324 int ret = 0; 325 326 if (pp_funcs && pp_funcs->set_xgmi_pstate) { 327 mutex_lock(&adev->pm.mutex); 328 ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle, 329 pstate); 330 mutex_unlock(&adev->pm.mutex); 331 } 332 333 return ret; 334 } 335 336 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev, 337 uint32_t cstate) 338 { 339 int ret = 0; 340 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 341 void *pp_handle = adev->powerplay.pp_handle; 342 343 if (pp_funcs && pp_funcs->set_df_cstate) { 344 mutex_lock(&adev->pm.mutex); 345 ret = pp_funcs->set_df_cstate(pp_handle, cstate); 346 mutex_unlock(&adev->pm.mutex); 347 } 348 349 return ret; 350 } 351 352 int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en) 353 { 354 struct smu_context *smu = adev->powerplay.pp_handle; 355 int ret = 0; 356 357 if (is_support_sw_smu(adev)) { 358 mutex_lock(&adev->pm.mutex); 359 ret = smu_allow_xgmi_power_down(smu, en); 360 mutex_unlock(&adev->pm.mutex); 361 } 362 363 return ret; 364 } 365 366 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev) 367 { 368 void *pp_handle = adev->powerplay.pp_handle; 369 const struct amd_pm_funcs *pp_funcs = 370 adev->powerplay.pp_funcs; 371 int ret = 0; 372 373 if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) { 374 mutex_lock(&adev->pm.mutex); 375 ret = pp_funcs->enable_mgpu_fan_boost(pp_handle); 376 mutex_unlock(&adev->pm.mutex); 377 } 378 379 return ret; 380 } 381 382 int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev, 383 uint32_t msg_id) 384 { 385 void *pp_handle = adev->powerplay.pp_handle; 386 const struct amd_pm_funcs *pp_funcs = 387 adev->powerplay.pp_funcs; 388 int ret = 0; 389 390 if (pp_funcs && pp_funcs->set_clockgating_by_smu) { 391 mutex_lock(&adev->pm.mutex); 392 ret = pp_funcs->set_clockgating_by_smu(pp_handle, 393 msg_id); 394 mutex_unlock(&adev->pm.mutex); 395 } 396 397 return ret; 398 } 399 400 int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev, 401 bool acquire) 402 { 403 void *pp_handle = adev->powerplay.pp_handle; 404 const struct amd_pm_funcs *pp_funcs = 405 adev->powerplay.pp_funcs; 406 int ret = -EOPNOTSUPP; 407 408 if (pp_funcs && pp_funcs->smu_i2c_bus_access) { 409 mutex_lock(&adev->pm.mutex); 410 ret = pp_funcs->smu_i2c_bus_access(pp_handle, 411 acquire); 412 mutex_unlock(&adev->pm.mutex); 413 } 414 415 return ret; 416 } 417 418 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) 419 { 420 if (adev->pm.dpm_enabled) { 421 mutex_lock(&adev->pm.mutex); 422 if (power_supply_is_system_supplied() > 0) 423 adev->pm.ac_power = true; 424 else 425 adev->pm.ac_power = false; 426 427 if (adev->powerplay.pp_funcs && 428 adev->powerplay.pp_funcs->enable_bapm) 429 amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); 430 431 if (is_support_sw_smu(adev)) 432 smu_set_ac_dc(adev->powerplay.pp_handle); 433 434 mutex_unlock(&adev->pm.mutex); 435 } 436 } 437 438 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, 439 void *data, uint32_t *size) 440 { 441 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 442 int ret = -EINVAL; 443 444 if (!data || !size) 445 return -EINVAL; 446 447 if (pp_funcs && pp_funcs->read_sensor) { 448 mutex_lock(&adev->pm.mutex); 449 ret = pp_funcs->read_sensor(adev->powerplay.pp_handle, 450 sensor, 451 data, 452 size); 453 mutex_unlock(&adev->pm.mutex); 454 } 455 456 return ret; 457 } 458 459 int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit) 460 { 461 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 462 int ret = -EINVAL; 463 464 if (pp_funcs && pp_funcs->get_apu_thermal_limit) { 465 mutex_lock(&adev->pm.mutex); 466 ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit); 467 mutex_unlock(&adev->pm.mutex); 468 } 469 470 return ret; 471 } 472 473 int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit) 474 { 475 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 476 int ret = -EINVAL; 477 478 if (pp_funcs && pp_funcs->set_apu_thermal_limit) { 479 mutex_lock(&adev->pm.mutex); 480 ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit); 481 mutex_unlock(&adev->pm.mutex); 482 } 483 484 return ret; 485 } 486 487 void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev) 488 { 489 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 490 int i; 491 492 if (!adev->pm.dpm_enabled) 493 return; 494 495 if (!pp_funcs->pm_compute_clocks) 496 return; 497 498 if (adev->mode_info.num_crtc) 499 amdgpu_display_bandwidth_update(adev); 500 501 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 502 struct amdgpu_ring *ring = adev->rings[i]; 503 if (ring && ring->sched.ready) 504 amdgpu_fence_wait_empty(ring); 505 } 506 507 mutex_lock(&adev->pm.mutex); 508 pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle); 509 mutex_unlock(&adev->pm.mutex); 510 } 511 512 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 513 { 514 int ret = 0; 515 516 if (adev->family == AMDGPU_FAMILY_SI) { 517 mutex_lock(&adev->pm.mutex); 518 if (enable) { 519 adev->pm.dpm.uvd_active = true; 520 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; 521 } else { 522 adev->pm.dpm.uvd_active = false; 523 } 524 mutex_unlock(&adev->pm.mutex); 525 526 amdgpu_dpm_compute_clocks(adev); 527 return; 528 } 529 530 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); 531 if (ret) 532 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 533 enable ? "enable" : "disable", ret); 534 } 535 536 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 537 { 538 int ret = 0; 539 540 if (adev->family == AMDGPU_FAMILY_SI) { 541 mutex_lock(&adev->pm.mutex); 542 if (enable) { 543 adev->pm.dpm.vce_active = true; 544 /* XXX select vce level based on ring/task */ 545 adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; 546 } else { 547 adev->pm.dpm.vce_active = false; 548 } 549 mutex_unlock(&adev->pm.mutex); 550 551 amdgpu_dpm_compute_clocks(adev); 552 return; 553 } 554 555 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); 556 if (ret) 557 DRM_ERROR("Dpm %s vce failed, ret = %d. \n", 558 enable ? "enable" : "disable", ret); 559 } 560 561 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) 562 { 563 int ret = 0; 564 565 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable); 566 if (ret) 567 DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n", 568 enable ? "enable" : "disable", ret); 569 } 570 571 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) 572 { 573 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 574 int r = 0; 575 576 if (!pp_funcs || !pp_funcs->load_firmware) 577 return 0; 578 579 mutex_lock(&adev->pm.mutex); 580 r = pp_funcs->load_firmware(adev->powerplay.pp_handle); 581 if (r) { 582 pr_err("smu firmware loading failed\n"); 583 goto out; 584 } 585 586 if (smu_version) 587 *smu_version = adev->pm.fw_version; 588 589 out: 590 mutex_unlock(&adev->pm.mutex); 591 return r; 592 } 593 594 int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable) 595 { 596 int ret = 0; 597 598 if (is_support_sw_smu(adev)) { 599 mutex_lock(&adev->pm.mutex); 600 ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle, 601 enable); 602 mutex_unlock(&adev->pm.mutex); 603 } 604 605 return ret; 606 } 607 608 int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size) 609 { 610 struct smu_context *smu = adev->powerplay.pp_handle; 611 int ret = 0; 612 613 if (!is_support_sw_smu(adev)) 614 return -EOPNOTSUPP; 615 616 mutex_lock(&adev->pm.mutex); 617 ret = smu_send_hbm_bad_pages_num(smu, size); 618 mutex_unlock(&adev->pm.mutex); 619 620 return ret; 621 } 622 623 int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size) 624 { 625 struct smu_context *smu = adev->powerplay.pp_handle; 626 int ret = 0; 627 628 if (!is_support_sw_smu(adev)) 629 return -EOPNOTSUPP; 630 631 mutex_lock(&adev->pm.mutex); 632 ret = smu_send_hbm_bad_channel_flag(smu, size); 633 mutex_unlock(&adev->pm.mutex); 634 635 return ret; 636 } 637 638 int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, 639 enum pp_clock_type type, 640 uint32_t *min, 641 uint32_t *max) 642 { 643 int ret = 0; 644 645 if (type != PP_SCLK) 646 return -EINVAL; 647 648 if (!is_support_sw_smu(adev)) 649 return -EOPNOTSUPP; 650 651 mutex_lock(&adev->pm.mutex); 652 ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle, 653 SMU_SCLK, 654 min, 655 max); 656 mutex_unlock(&adev->pm.mutex); 657 658 return ret; 659 } 660 661 int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, 662 enum pp_clock_type type, 663 uint32_t min, 664 uint32_t max) 665 { 666 struct smu_context *smu = adev->powerplay.pp_handle; 667 int ret = 0; 668 669 if (type != PP_SCLK) 670 return -EINVAL; 671 672 if (!is_support_sw_smu(adev)) 673 return -EOPNOTSUPP; 674 675 mutex_lock(&adev->pm.mutex); 676 ret = smu_set_soft_freq_range(smu, 677 SMU_SCLK, 678 min, 679 max); 680 mutex_unlock(&adev->pm.mutex); 681 682 return ret; 683 } 684 685 int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev) 686 { 687 struct smu_context *smu = adev->powerplay.pp_handle; 688 int ret = 0; 689 690 if (!is_support_sw_smu(adev)) 691 return 0; 692 693 mutex_lock(&adev->pm.mutex); 694 ret = smu_write_watermarks_table(smu); 695 mutex_unlock(&adev->pm.mutex); 696 697 return ret; 698 } 699 700 int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, 701 enum smu_event_type event, 702 uint64_t event_arg) 703 { 704 struct smu_context *smu = adev->powerplay.pp_handle; 705 int ret = 0; 706 707 if (!is_support_sw_smu(adev)) 708 return -EOPNOTSUPP; 709 710 mutex_lock(&adev->pm.mutex); 711 ret = smu_wait_for_event(smu, event, event_arg); 712 mutex_unlock(&adev->pm.mutex); 713 714 return ret; 715 } 716 717 int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value) 718 { 719 struct smu_context *smu = adev->powerplay.pp_handle; 720 int ret = 0; 721 722 if (!is_support_sw_smu(adev)) 723 return -EOPNOTSUPP; 724 725 mutex_lock(&adev->pm.mutex); 726 ret = smu_set_residency_gfxoff(smu, value); 727 mutex_unlock(&adev->pm.mutex); 728 729 return ret; 730 } 731 732 int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value) 733 { 734 struct smu_context *smu = adev->powerplay.pp_handle; 735 int ret = 0; 736 737 if (!is_support_sw_smu(adev)) 738 return -EOPNOTSUPP; 739 740 mutex_lock(&adev->pm.mutex); 741 ret = smu_get_residency_gfxoff(smu, value); 742 mutex_unlock(&adev->pm.mutex); 743 744 return ret; 745 } 746 747 int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value) 748 { 749 struct smu_context *smu = adev->powerplay.pp_handle; 750 int ret = 0; 751 752 if (!is_support_sw_smu(adev)) 753 return -EOPNOTSUPP; 754 755 mutex_lock(&adev->pm.mutex); 756 ret = smu_get_entrycount_gfxoff(smu, value); 757 mutex_unlock(&adev->pm.mutex); 758 759 return ret; 760 } 761 762 int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value) 763 { 764 struct smu_context *smu = adev->powerplay.pp_handle; 765 int ret = 0; 766 767 if (!is_support_sw_smu(adev)) 768 return -EOPNOTSUPP; 769 770 mutex_lock(&adev->pm.mutex); 771 ret = smu_get_status_gfxoff(smu, value); 772 mutex_unlock(&adev->pm.mutex); 773 774 return ret; 775 } 776 777 uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev) 778 { 779 struct smu_context *smu = adev->powerplay.pp_handle; 780 781 if (!is_support_sw_smu(adev)) 782 return 0; 783 784 return atomic64_read(&smu->throttle_int_counter); 785 } 786 787 /* amdgpu_dpm_gfx_state_change - Handle gfx power state change set 788 * @adev: amdgpu_device pointer 789 * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry) 790 * 791 */ 792 void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev, 793 enum gfx_change_state state) 794 { 795 mutex_lock(&adev->pm.mutex); 796 if (adev->powerplay.pp_funcs && 797 adev->powerplay.pp_funcs->gfx_state_change_set) 798 ((adev)->powerplay.pp_funcs->gfx_state_change_set( 799 (adev)->powerplay.pp_handle, state)); 800 mutex_unlock(&adev->pm.mutex); 801 } 802 803 int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev, 804 void *umc_ecc) 805 { 806 struct smu_context *smu = adev->powerplay.pp_handle; 807 int ret = 0; 808 809 if (!is_support_sw_smu(adev)) 810 return -EOPNOTSUPP; 811 812 mutex_lock(&adev->pm.mutex); 813 ret = smu_get_ecc_info(smu, umc_ecc); 814 mutex_unlock(&adev->pm.mutex); 815 816 return ret; 817 } 818 819 struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev, 820 uint32_t idx) 821 { 822 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 823 struct amd_vce_state *vstate = NULL; 824 825 if (!pp_funcs->get_vce_clock_state) 826 return NULL; 827 828 mutex_lock(&adev->pm.mutex); 829 vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle, 830 idx); 831 mutex_unlock(&adev->pm.mutex); 832 833 return vstate; 834 } 835 836 void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, 837 enum amd_pm_state_type *state) 838 { 839 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 840 841 mutex_lock(&adev->pm.mutex); 842 843 if (!pp_funcs->get_current_power_state) { 844 *state = adev->pm.dpm.user_state; 845 goto out; 846 } 847 848 *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle); 849 if (*state < POWER_STATE_TYPE_DEFAULT || 850 *state > POWER_STATE_TYPE_INTERNAL_3DPERF) 851 *state = adev->pm.dpm.user_state; 852 853 out: 854 mutex_unlock(&adev->pm.mutex); 855 } 856 857 void amdgpu_dpm_set_power_state(struct amdgpu_device *adev, 858 enum amd_pm_state_type state) 859 { 860 mutex_lock(&adev->pm.mutex); 861 adev->pm.dpm.user_state = state; 862 mutex_unlock(&adev->pm.mutex); 863 864 if (is_support_sw_smu(adev)) 865 return; 866 867 if (amdgpu_dpm_dispatch_task(adev, 868 AMD_PP_TASK_ENABLE_USER_STATE, 869 &state) == -EOPNOTSUPP) 870 amdgpu_dpm_compute_clocks(adev); 871 } 872 873 enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev) 874 { 875 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 876 enum amd_dpm_forced_level level; 877 878 if (!pp_funcs) 879 return AMD_DPM_FORCED_LEVEL_AUTO; 880 881 mutex_lock(&adev->pm.mutex); 882 if (pp_funcs->get_performance_level) 883 level = pp_funcs->get_performance_level(adev->powerplay.pp_handle); 884 else 885 level = adev->pm.dpm.forced_level; 886 mutex_unlock(&adev->pm.mutex); 887 888 return level; 889 } 890 891 int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev, 892 enum amd_dpm_forced_level level) 893 { 894 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 895 enum amd_dpm_forced_level current_level; 896 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 897 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 898 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 899 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 900 901 if (!pp_funcs || !pp_funcs->force_performance_level) 902 return 0; 903 904 if (adev->pm.dpm.thermal_active) 905 return -EINVAL; 906 907 current_level = amdgpu_dpm_get_performance_level(adev); 908 if (current_level == level) 909 return 0; 910 911 if (adev->asic_type == CHIP_RAVEN) { 912 if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) { 913 if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && 914 level == AMD_DPM_FORCED_LEVEL_MANUAL) 915 amdgpu_gfx_off_ctrl(adev, false); 916 else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && 917 level != AMD_DPM_FORCED_LEVEL_MANUAL) 918 amdgpu_gfx_off_ctrl(adev, true); 919 } 920 } 921 922 if (!(current_level & profile_mode_mask) && 923 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) 924 return -EINVAL; 925 926 if (!(current_level & profile_mode_mask) && 927 (level & profile_mode_mask)) { 928 /* enter UMD Pstate */ 929 amdgpu_device_ip_set_powergating_state(adev, 930 AMD_IP_BLOCK_TYPE_GFX, 931 AMD_PG_STATE_UNGATE); 932 amdgpu_device_ip_set_clockgating_state(adev, 933 AMD_IP_BLOCK_TYPE_GFX, 934 AMD_CG_STATE_UNGATE); 935 } else if ((current_level & profile_mode_mask) && 936 !(level & profile_mode_mask)) { 937 /* exit UMD Pstate */ 938 amdgpu_device_ip_set_clockgating_state(adev, 939 AMD_IP_BLOCK_TYPE_GFX, 940 AMD_CG_STATE_GATE); 941 amdgpu_device_ip_set_powergating_state(adev, 942 AMD_IP_BLOCK_TYPE_GFX, 943 AMD_PG_STATE_GATE); 944 } 945 946 mutex_lock(&adev->pm.mutex); 947 948 if (pp_funcs->force_performance_level(adev->powerplay.pp_handle, 949 level)) { 950 mutex_unlock(&adev->pm.mutex); 951 return -EINVAL; 952 } 953 954 adev->pm.dpm.forced_level = level; 955 956 mutex_unlock(&adev->pm.mutex); 957 958 return 0; 959 } 960 961 int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev, 962 struct pp_states_info *states) 963 { 964 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 965 int ret = 0; 966 967 if (!pp_funcs->get_pp_num_states) 968 return -EOPNOTSUPP; 969 970 mutex_lock(&adev->pm.mutex); 971 ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle, 972 states); 973 mutex_unlock(&adev->pm.mutex); 974 975 return ret; 976 } 977 978 int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev, 979 enum amd_pp_task task_id, 980 enum amd_pm_state_type *user_state) 981 { 982 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 983 int ret = 0; 984 985 if (!pp_funcs->dispatch_tasks) 986 return -EOPNOTSUPP; 987 988 mutex_lock(&adev->pm.mutex); 989 ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle, 990 task_id, 991 user_state); 992 mutex_unlock(&adev->pm.mutex); 993 994 return ret; 995 } 996 997 int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table) 998 { 999 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1000 int ret = 0; 1001 1002 if (!pp_funcs->get_pp_table) 1003 return 0; 1004 1005 mutex_lock(&adev->pm.mutex); 1006 ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle, 1007 table); 1008 mutex_unlock(&adev->pm.mutex); 1009 1010 return ret; 1011 } 1012 1013 int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev, 1014 uint32_t type, 1015 long *input, 1016 uint32_t size) 1017 { 1018 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1019 int ret = 0; 1020 1021 if (!pp_funcs->set_fine_grain_clk_vol) 1022 return 0; 1023 1024 mutex_lock(&adev->pm.mutex); 1025 ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle, 1026 type, 1027 input, 1028 size); 1029 mutex_unlock(&adev->pm.mutex); 1030 1031 return ret; 1032 } 1033 1034 int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev, 1035 uint32_t type, 1036 long *input, 1037 uint32_t size) 1038 { 1039 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1040 int ret = 0; 1041 1042 if (!pp_funcs->odn_edit_dpm_table) 1043 return 0; 1044 1045 mutex_lock(&adev->pm.mutex); 1046 ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle, 1047 type, 1048 input, 1049 size); 1050 mutex_unlock(&adev->pm.mutex); 1051 1052 return ret; 1053 } 1054 1055 int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev, 1056 enum pp_clock_type type, 1057 char *buf) 1058 { 1059 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1060 int ret = 0; 1061 1062 if (!pp_funcs->print_clock_levels) 1063 return 0; 1064 1065 mutex_lock(&adev->pm.mutex); 1066 ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle, 1067 type, 1068 buf); 1069 mutex_unlock(&adev->pm.mutex); 1070 1071 return ret; 1072 } 1073 1074 int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev, 1075 enum pp_clock_type type, 1076 char *buf, 1077 int *offset) 1078 { 1079 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1080 int ret = 0; 1081 1082 if (!pp_funcs->emit_clock_levels) 1083 return -ENOENT; 1084 1085 mutex_lock(&adev->pm.mutex); 1086 ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle, 1087 type, 1088 buf, 1089 offset); 1090 mutex_unlock(&adev->pm.mutex); 1091 1092 return ret; 1093 } 1094 1095 int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev, 1096 uint64_t ppfeature_masks) 1097 { 1098 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1099 int ret = 0; 1100 1101 if (!pp_funcs->set_ppfeature_status) 1102 return 0; 1103 1104 mutex_lock(&adev->pm.mutex); 1105 ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle, 1106 ppfeature_masks); 1107 mutex_unlock(&adev->pm.mutex); 1108 1109 return ret; 1110 } 1111 1112 int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf) 1113 { 1114 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1115 int ret = 0; 1116 1117 if (!pp_funcs->get_ppfeature_status) 1118 return 0; 1119 1120 mutex_lock(&adev->pm.mutex); 1121 ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle, 1122 buf); 1123 mutex_unlock(&adev->pm.mutex); 1124 1125 return ret; 1126 } 1127 1128 int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev, 1129 enum pp_clock_type type, 1130 uint32_t mask) 1131 { 1132 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1133 int ret = 0; 1134 1135 if (!pp_funcs->force_clock_level) 1136 return 0; 1137 1138 mutex_lock(&adev->pm.mutex); 1139 ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle, 1140 type, 1141 mask); 1142 mutex_unlock(&adev->pm.mutex); 1143 1144 return ret; 1145 } 1146 1147 int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev) 1148 { 1149 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1150 int ret = 0; 1151 1152 if (!pp_funcs->get_sclk_od) 1153 return 0; 1154 1155 mutex_lock(&adev->pm.mutex); 1156 ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle); 1157 mutex_unlock(&adev->pm.mutex); 1158 1159 return ret; 1160 } 1161 1162 int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) 1163 { 1164 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1165 1166 if (is_support_sw_smu(adev)) 1167 return 0; 1168 1169 mutex_lock(&adev->pm.mutex); 1170 if (pp_funcs->set_sclk_od) 1171 pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value); 1172 mutex_unlock(&adev->pm.mutex); 1173 1174 if (amdgpu_dpm_dispatch_task(adev, 1175 AMD_PP_TASK_READJUST_POWER_STATE, 1176 NULL) == -EOPNOTSUPP) { 1177 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1178 amdgpu_dpm_compute_clocks(adev); 1179 } 1180 1181 return 0; 1182 } 1183 1184 int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev) 1185 { 1186 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1187 int ret = 0; 1188 1189 if (!pp_funcs->get_mclk_od) 1190 return 0; 1191 1192 mutex_lock(&adev->pm.mutex); 1193 ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle); 1194 mutex_unlock(&adev->pm.mutex); 1195 1196 return ret; 1197 } 1198 1199 int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) 1200 { 1201 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1202 1203 if (is_support_sw_smu(adev)) 1204 return 0; 1205 1206 mutex_lock(&adev->pm.mutex); 1207 if (pp_funcs->set_mclk_od) 1208 pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value); 1209 mutex_unlock(&adev->pm.mutex); 1210 1211 if (amdgpu_dpm_dispatch_task(adev, 1212 AMD_PP_TASK_READJUST_POWER_STATE, 1213 NULL) == -EOPNOTSUPP) { 1214 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1215 amdgpu_dpm_compute_clocks(adev); 1216 } 1217 1218 return 0; 1219 } 1220 1221 int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev, 1222 char *buf) 1223 { 1224 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1225 int ret = 0; 1226 1227 if (!pp_funcs->get_power_profile_mode) 1228 return -EOPNOTSUPP; 1229 1230 mutex_lock(&adev->pm.mutex); 1231 ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle, 1232 buf); 1233 mutex_unlock(&adev->pm.mutex); 1234 1235 return ret; 1236 } 1237 1238 int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev, 1239 long *input, uint32_t size) 1240 { 1241 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1242 int ret = 0; 1243 1244 if (!pp_funcs->set_power_profile_mode) 1245 return 0; 1246 1247 mutex_lock(&adev->pm.mutex); 1248 ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle, 1249 input, 1250 size); 1251 mutex_unlock(&adev->pm.mutex); 1252 1253 return ret; 1254 } 1255 1256 int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table) 1257 { 1258 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1259 int ret = 0; 1260 1261 if (!pp_funcs->get_gpu_metrics) 1262 return 0; 1263 1264 mutex_lock(&adev->pm.mutex); 1265 ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle, 1266 table); 1267 mutex_unlock(&adev->pm.mutex); 1268 1269 return ret; 1270 } 1271 1272 int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev, 1273 uint32_t *fan_mode) 1274 { 1275 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1276 int ret = 0; 1277 1278 if (!pp_funcs->get_fan_control_mode) 1279 return -EOPNOTSUPP; 1280 1281 mutex_lock(&adev->pm.mutex); 1282 ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle, 1283 fan_mode); 1284 mutex_unlock(&adev->pm.mutex); 1285 1286 return ret; 1287 } 1288 1289 int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev, 1290 uint32_t speed) 1291 { 1292 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1293 int ret = 0; 1294 1295 if (!pp_funcs->set_fan_speed_pwm) 1296 return -EOPNOTSUPP; 1297 1298 mutex_lock(&adev->pm.mutex); 1299 ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle, 1300 speed); 1301 mutex_unlock(&adev->pm.mutex); 1302 1303 return ret; 1304 } 1305 1306 int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev, 1307 uint32_t *speed) 1308 { 1309 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1310 int ret = 0; 1311 1312 if (!pp_funcs->get_fan_speed_pwm) 1313 return -EOPNOTSUPP; 1314 1315 mutex_lock(&adev->pm.mutex); 1316 ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle, 1317 speed); 1318 mutex_unlock(&adev->pm.mutex); 1319 1320 return ret; 1321 } 1322 1323 int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev, 1324 uint32_t *speed) 1325 { 1326 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1327 int ret = 0; 1328 1329 if (!pp_funcs->get_fan_speed_rpm) 1330 return -EOPNOTSUPP; 1331 1332 mutex_lock(&adev->pm.mutex); 1333 ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle, 1334 speed); 1335 mutex_unlock(&adev->pm.mutex); 1336 1337 return ret; 1338 } 1339 1340 int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev, 1341 uint32_t speed) 1342 { 1343 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1344 int ret = 0; 1345 1346 if (!pp_funcs->set_fan_speed_rpm) 1347 return -EOPNOTSUPP; 1348 1349 mutex_lock(&adev->pm.mutex); 1350 ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle, 1351 speed); 1352 mutex_unlock(&adev->pm.mutex); 1353 1354 return ret; 1355 } 1356 1357 int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev, 1358 uint32_t mode) 1359 { 1360 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1361 int ret = 0; 1362 1363 if (!pp_funcs->set_fan_control_mode) 1364 return -EOPNOTSUPP; 1365 1366 mutex_lock(&adev->pm.mutex); 1367 ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle, 1368 mode); 1369 mutex_unlock(&adev->pm.mutex); 1370 1371 return ret; 1372 } 1373 1374 int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev, 1375 uint32_t *limit, 1376 enum pp_power_limit_level pp_limit_level, 1377 enum pp_power_type power_type) 1378 { 1379 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1380 int ret = 0; 1381 1382 if (!pp_funcs->get_power_limit) 1383 return -ENODATA; 1384 1385 mutex_lock(&adev->pm.mutex); 1386 ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle, 1387 limit, 1388 pp_limit_level, 1389 power_type); 1390 mutex_unlock(&adev->pm.mutex); 1391 1392 return ret; 1393 } 1394 1395 int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, 1396 uint32_t limit) 1397 { 1398 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1399 int ret = 0; 1400 1401 if (!pp_funcs->set_power_limit) 1402 return -EINVAL; 1403 1404 mutex_lock(&adev->pm.mutex); 1405 ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle, 1406 limit); 1407 mutex_unlock(&adev->pm.mutex); 1408 1409 return ret; 1410 } 1411 1412 int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev) 1413 { 1414 bool cclk_dpm_supported = false; 1415 1416 if (!is_support_sw_smu(adev)) 1417 return false; 1418 1419 mutex_lock(&adev->pm.mutex); 1420 cclk_dpm_supported = is_support_cclk_dpm(adev); 1421 mutex_unlock(&adev->pm.mutex); 1422 1423 return (int)cclk_dpm_supported; 1424 } 1425 1426 int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, 1427 struct seq_file *m) 1428 { 1429 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1430 1431 if (!pp_funcs->debugfs_print_current_performance_level) 1432 return -EOPNOTSUPP; 1433 1434 mutex_lock(&adev->pm.mutex); 1435 pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle, 1436 m); 1437 mutex_unlock(&adev->pm.mutex); 1438 1439 return 0; 1440 } 1441 1442 int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, 1443 void **addr, 1444 size_t *size) 1445 { 1446 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1447 int ret = 0; 1448 1449 if (!pp_funcs->get_smu_prv_buf_details) 1450 return -ENOSYS; 1451 1452 mutex_lock(&adev->pm.mutex); 1453 ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle, 1454 addr, 1455 size); 1456 mutex_unlock(&adev->pm.mutex); 1457 1458 return ret; 1459 } 1460 1461 int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev) 1462 { 1463 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 1464 struct smu_context *smu = adev->powerplay.pp_handle; 1465 1466 if ((is_support_sw_smu(adev) && smu->od_enabled) || 1467 (is_support_sw_smu(adev) && smu->is_apu) || 1468 (!is_support_sw_smu(adev) && hwmgr->od_enabled)) 1469 return true; 1470 1471 return false; 1472 } 1473 1474 int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, 1475 const char *buf, 1476 size_t size) 1477 { 1478 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1479 int ret = 0; 1480 1481 if (!pp_funcs->set_pp_table) 1482 return -EOPNOTSUPP; 1483 1484 mutex_lock(&adev->pm.mutex); 1485 ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle, 1486 buf, 1487 size); 1488 mutex_unlock(&adev->pm.mutex); 1489 1490 return ret; 1491 } 1492 1493 int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev) 1494 { 1495 struct smu_context *smu = adev->powerplay.pp_handle; 1496 1497 if (!is_support_sw_smu(adev)) 1498 return INT_MAX; 1499 1500 return smu->cpu_core_num; 1501 } 1502 1503 void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev) 1504 { 1505 if (!is_support_sw_smu(adev)) 1506 return; 1507 1508 amdgpu_smu_stb_debug_fs_init(adev); 1509 } 1510 1511 int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev, 1512 const struct amd_pp_display_configuration *input) 1513 { 1514 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1515 int ret = 0; 1516 1517 if (!pp_funcs->display_configuration_change) 1518 return 0; 1519 1520 mutex_lock(&adev->pm.mutex); 1521 ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle, 1522 input); 1523 mutex_unlock(&adev->pm.mutex); 1524 1525 return ret; 1526 } 1527 1528 int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev, 1529 enum amd_pp_clock_type type, 1530 struct amd_pp_clocks *clocks) 1531 { 1532 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1533 int ret = 0; 1534 1535 if (!pp_funcs->get_clock_by_type) 1536 return 0; 1537 1538 mutex_lock(&adev->pm.mutex); 1539 ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle, 1540 type, 1541 clocks); 1542 mutex_unlock(&adev->pm.mutex); 1543 1544 return ret; 1545 } 1546 1547 int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev, 1548 struct amd_pp_simple_clock_info *clocks) 1549 { 1550 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1551 int ret = 0; 1552 1553 if (!pp_funcs->get_display_mode_validation_clocks) 1554 return 0; 1555 1556 mutex_lock(&adev->pm.mutex); 1557 ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle, 1558 clocks); 1559 mutex_unlock(&adev->pm.mutex); 1560 1561 return ret; 1562 } 1563 1564 int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev, 1565 enum amd_pp_clock_type type, 1566 struct pp_clock_levels_with_latency *clocks) 1567 { 1568 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1569 int ret = 0; 1570 1571 if (!pp_funcs->get_clock_by_type_with_latency) 1572 return 0; 1573 1574 mutex_lock(&adev->pm.mutex); 1575 ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle, 1576 type, 1577 clocks); 1578 mutex_unlock(&adev->pm.mutex); 1579 1580 return ret; 1581 } 1582 1583 int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev, 1584 enum amd_pp_clock_type type, 1585 struct pp_clock_levels_with_voltage *clocks) 1586 { 1587 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1588 int ret = 0; 1589 1590 if (!pp_funcs->get_clock_by_type_with_voltage) 1591 return 0; 1592 1593 mutex_lock(&adev->pm.mutex); 1594 ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle, 1595 type, 1596 clocks); 1597 mutex_unlock(&adev->pm.mutex); 1598 1599 return ret; 1600 } 1601 1602 int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev, 1603 void *clock_ranges) 1604 { 1605 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1606 int ret = 0; 1607 1608 if (!pp_funcs->set_watermarks_for_clocks_ranges) 1609 return -EOPNOTSUPP; 1610 1611 mutex_lock(&adev->pm.mutex); 1612 ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle, 1613 clock_ranges); 1614 mutex_unlock(&adev->pm.mutex); 1615 1616 return ret; 1617 } 1618 1619 int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev, 1620 struct pp_display_clock_request *clock) 1621 { 1622 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1623 int ret = 0; 1624 1625 if (!pp_funcs->display_clock_voltage_request) 1626 return -EOPNOTSUPP; 1627 1628 mutex_lock(&adev->pm.mutex); 1629 ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle, 1630 clock); 1631 mutex_unlock(&adev->pm.mutex); 1632 1633 return ret; 1634 } 1635 1636 int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev, 1637 struct amd_pp_clock_info *clocks) 1638 { 1639 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1640 int ret = 0; 1641 1642 if (!pp_funcs->get_current_clocks) 1643 return -EOPNOTSUPP; 1644 1645 mutex_lock(&adev->pm.mutex); 1646 ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle, 1647 clocks); 1648 mutex_unlock(&adev->pm.mutex); 1649 1650 return ret; 1651 } 1652 1653 void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev) 1654 { 1655 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1656 1657 if (!pp_funcs->notify_smu_enable_pwe) 1658 return; 1659 1660 mutex_lock(&adev->pm.mutex); 1661 pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle); 1662 mutex_unlock(&adev->pm.mutex); 1663 } 1664 1665 int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev, 1666 uint32_t count) 1667 { 1668 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1669 int ret = 0; 1670 1671 if (!pp_funcs->set_active_display_count) 1672 return -EOPNOTSUPP; 1673 1674 mutex_lock(&adev->pm.mutex); 1675 ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle, 1676 count); 1677 mutex_unlock(&adev->pm.mutex); 1678 1679 return ret; 1680 } 1681 1682 int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev, 1683 uint32_t clock) 1684 { 1685 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1686 int ret = 0; 1687 1688 if (!pp_funcs->set_min_deep_sleep_dcefclk) 1689 return -EOPNOTSUPP; 1690 1691 mutex_lock(&adev->pm.mutex); 1692 ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle, 1693 clock); 1694 mutex_unlock(&adev->pm.mutex); 1695 1696 return ret; 1697 } 1698 1699 void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev, 1700 uint32_t clock) 1701 { 1702 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1703 1704 if (!pp_funcs->set_hard_min_dcefclk_by_freq) 1705 return; 1706 1707 mutex_lock(&adev->pm.mutex); 1708 pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle, 1709 clock); 1710 mutex_unlock(&adev->pm.mutex); 1711 } 1712 1713 void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev, 1714 uint32_t clock) 1715 { 1716 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1717 1718 if (!pp_funcs->set_hard_min_fclk_by_freq) 1719 return; 1720 1721 mutex_lock(&adev->pm.mutex); 1722 pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle, 1723 clock); 1724 mutex_unlock(&adev->pm.mutex); 1725 } 1726 1727 int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev, 1728 bool disable_memory_clock_switch) 1729 { 1730 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1731 int ret = 0; 1732 1733 if (!pp_funcs->display_disable_memory_clock_switch) 1734 return 0; 1735 1736 mutex_lock(&adev->pm.mutex); 1737 ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle, 1738 disable_memory_clock_switch); 1739 mutex_unlock(&adev->pm.mutex); 1740 1741 return ret; 1742 } 1743 1744 int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev, 1745 struct pp_smu_nv_clock_table *max_clocks) 1746 { 1747 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1748 int ret = 0; 1749 1750 if (!pp_funcs->get_max_sustainable_clocks_by_dc) 1751 return -EOPNOTSUPP; 1752 1753 mutex_lock(&adev->pm.mutex); 1754 ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle, 1755 max_clocks); 1756 mutex_unlock(&adev->pm.mutex); 1757 1758 return ret; 1759 } 1760 1761 enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev, 1762 unsigned int *clock_values_in_khz, 1763 unsigned int *num_states) 1764 { 1765 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1766 int ret = 0; 1767 1768 if (!pp_funcs->get_uclk_dpm_states) 1769 return -EOPNOTSUPP; 1770 1771 mutex_lock(&adev->pm.mutex); 1772 ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle, 1773 clock_values_in_khz, 1774 num_states); 1775 mutex_unlock(&adev->pm.mutex); 1776 1777 return ret; 1778 } 1779 1780 int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev, 1781 struct dpm_clocks *clock_table) 1782 { 1783 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1784 int ret = 0; 1785 1786 if (!pp_funcs->get_dpm_clock_table) 1787 return -EOPNOTSUPP; 1788 1789 mutex_lock(&adev->pm.mutex); 1790 ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle, 1791 clock_table); 1792 mutex_unlock(&adev->pm.mutex); 1793 1794 return ret; 1795 } 1796