/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/reboot.h>
#include "amd_shared.h"
#include "amd_powerplay.h"
#include "power_state.h"
#include "amdgpu.h"
#include "hwmgr.h"
#include "amdgpu_dpm_internal.h"
#include "amdgpu_display.h"

static const struct amd_pm_funcs pp_dpm_funcs;

static int amd_powerplay_create(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr;

	if (adev == NULL)
		return -EINVAL;

	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
	if (hwmgr == NULL)
		return -ENOMEM;

	hwmgr->adev = adev;
	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
	hwmgr->device = amdgpu_cgs_create_device(adev);
	mutex_init(&hwmgr->msg_lock);
	hwmgr->chip_family = adev->family;
	hwmgr->chip_id = adev->asic_type;
	hwmgr->feature_mask = adev->pm.pp_feature;
	hwmgr->display_config = &adev->pm.pm_display_cfg;
	adev->powerplay.pp_handle = hwmgr;
	adev->powerplay.pp_funcs = &pp_dpm_funcs;
	return 0;
}

static void amd_powerplay_destroy(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	mutex_destroy(&hwmgr->msg_lock);

	kfree(hwmgr->hardcode_pp_table);
	hwmgr->hardcode_pp_table = NULL;

	kfree(hwmgr);
	hwmgr = NULL;
}

static int pp_early_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = handle;

	ret = amd_powerplay_create(adev);
	if (ret != 0)
		return ret;

	ret = hwmgr_early_init(adev->powerplay.pp_handle);
	if (ret)
		return -EINVAL;

	return 0;
}

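/*
 * Delayed work used to double check a reported software CTF (critical
 * thermal fault): the shutdown is only carried out if the temperature
 * is still above the SW CTF threshold once the delay has expired.
 */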
static void pp_swctf_delayed_work_handler(struct work_struct *work)
{
	struct pp_hwmgr *hwmgr =
		container_of(work, struct pp_hwmgr, swctf_delayed_work.work);
	struct amdgpu_device *adev = hwmgr->adev;
	struct amdgpu_dpm_thermal *range =
				&adev->pm.dpm.thermal;
	uint32_t gpu_temperature, size = sizeof(gpu_temperature);
	int ret;

	/*
	 * If the hotspot/edge temperature is confirmed to be below the
	 * SW CTF set point after the enforced delay, nothing needs to be
	 * done. Otherwise, a graceful shutdown is performed to prevent
	 * further damage.
	 */
	if (range->sw_ctf_threshold &&
	    hwmgr->hwmgr_func->read_sensor) {
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
						     AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
						     &gpu_temperature,
						     &size);
		/*
		 * Some legacy ASICs may not support retrieving the hotspot
		 * temperature. Fall back to the edge temperature instead.
		 */
		if (ret == -EOPNOTSUPP)
			ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
							     AMDGPU_PP_SENSOR_EDGE_TEMP,
							     &gpu_temperature,
							     &size);
		if (!ret && gpu_temperature / 1000 < range->sw_ctf_threshold)
			return;
	}

	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
	orderly_poweroff(true);
}

static int pp_sw_init(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = hwmgr_sw_init(hwmgr);

	pr_debug("powerplay sw init %s\n", ret ? "failed" : "succeeded");

	if (!ret)
		INIT_DELAYED_WORK(&hwmgr->swctf_delayed_work,
				  pp_swctf_delayed_work_handler);

	return ret;
}

static int pp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	hwmgr_sw_fini(hwmgr);

	amdgpu_ucode_release(&adev->pm.fw);

	return 0;
}

static int pp_hw_init(void *handle)
{
	int ret = 0;
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	ret = hwmgr_hw_init(hwmgr);
	if (ret)
		pr_err("powerplay hw init failed\n");

	return ret;
}

static int pp_hw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);

	hwmgr_hw_fini(hwmgr);

	return 0;
}

static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
{
	int r = -EINVAL;
	void *cpu_ptr = NULL;
	uint64_t gpu_addr;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &adev->pm.smu_prv_buffer,
				    &gpu_addr,
				    &cpu_ptr)) {
		DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
		return;
	}

	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
					lower_32_bits((unsigned long)cpu_ptr),
					upper_32_bits((unsigned long)cpu_ptr),
					lower_32_bits(gpu_addr),
					upper_32_bits(gpu_addr),
					adev->pm.smu_prv_buffer_size);

	if (r) {
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
		adev->pm.smu_prv_buffer = NULL;
		DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
	}
}

static int pp_late_init(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (hwmgr && hwmgr->pm_en)
		hwmgr_handle_task(hwmgr,
				  AMD_PP_TASK_COMPLETE_INIT, NULL);
	if (adev->pm.smu_prv_buffer_size != 0)
		pp_reserve_vram_for_smu(adev);

	return 0;
}

static void pp_late_fini(void *handle)
{
	struct amdgpu_device *adev = handle;

	if (adev->pm.smu_prv_buffer)
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
	amd_powerplay_destroy(adev);
}

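/*
 * Minimal amd_ip_funcs plumbing: powerplay has no idle or soft-reset
 * handling of its own, so the hooks below are stubs.
 */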
static bool pp_is_idle(void *handle)
{
	return false;
}

static int pp_wait_for_idle(void *handle)
{
	return 0;
}

static int pp_sw_reset(void *handle)
{
	return 0;
}

static int pp_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

static int pp_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);

	return hwmgr_suspend(hwmgr);
}

static int pp_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	return hwmgr_resume(hwmgr);
}

static int pp_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.wait_for_idle = pp_wait_for_idle,
	.soft_reset = pp_sw_reset,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
};

const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};

/*
 * This interface is only supported on VI, because only smu7/8 can help
 * load the gfx/sdma firmware: the SMU must be running before the other
 * IPs' firmware can be loaded, so start the SMU here to load the smu7
 * firmware and then the other IPs' firmware.
 */
static int pp_dpm_load_fw(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
		return -EINVAL;

	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
		pr_err("fw load failed\n");
		return -EINVAL;
	}

	return 0;
}

static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}

static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}

static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
		}
	}
}

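/*
 * Apply a newly requested forced performance level. The UMD pstate
 * bookkeeping above runs first, so entering or leaving the profiling
 * levels saves or restores the previous level.
 */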
static int pp_dpm_force_performance_level(void *handle,
					  enum amd_dpm_forced_level level)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (level == hwmgr->dpm_level)
		return 0;

	pp_dpm_en_umd_pstate(hwmgr, &level);
	hwmgr->request_dpm_level = level;
	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);

	return 0;
}

static enum amd_dpm_forced_level pp_dpm_get_performance_level(
								void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	return hwmgr->dpm_level;
}

static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->get_sclk == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
}

static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->get_mclk == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
}

static void pp_dpm_powergate_vce(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}
	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
}

static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}
	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
}

static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
				 enum amd_pm_state_type *user_state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	return hwmgr_handle_task(hwmgr, task_id, user_state);
}

static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	struct pp_power_state *state;
	enum amd_pm_state_type pm_type;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
		return -EINVAL;

	state = hwmgr->current_ps;

	switch (state->classification.ui_label) {
	case PP_StateUILabel_Battery:
		pm_type = POWER_STATE_TYPE_BATTERY;
		break;
	case PP_StateUILabel_Balanced:
		pm_type = POWER_STATE_TYPE_BALANCED;
		break;
	case PP_StateUILabel_Performance:
		pm_type = POWER_STATE_TYPE_PERFORMANCE;
		break;
	default:
		if (state->classification.flags & PP_StateClassificationFlag_Boot)
			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
		else
			pm_type = POWER_STATE_TYPE_DEFAULT;
		break;
	}

	return pm_type;
}

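/*
 * Fan control wrappers. U32_MAX is used by callers as an "invalid
 * value" sentinel for modes and speeds and is rejected with -EINVAL.
 */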
static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL)
		return -EOPNOTSUPP;

	if (mode == U32_MAX)
		return -EINVAL;

	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);

	return 0;
}

static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL)
		return -EOPNOTSUPP;

	if (!fan_mode)
		return -EINVAL;

	*fan_mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
	return 0;
}

static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL)
		return -EOPNOTSUPP;

	if (speed == U32_MAX)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
}

static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL)
		return -EOPNOTSUPP;

	if (!speed)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
}

static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
		return -EOPNOTSUPP;

	if (!rpm)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
}

static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL)
		return -EOPNOTSUPP;

	if (rpm == U32_MAX)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
}

static int pp_dpm_get_pp_num_states(void *handle,
				    struct pp_states_info *data)
{
	struct pp_hwmgr *hwmgr = handle;
	int i;

	memset(data, 0, sizeof(*data));

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->ps)
		return -EINVAL;

	data->nums = hwmgr->num_ps;

	for (i = 0; i < hwmgr->num_ps; i++) {
		struct pp_power_state *state = (struct pp_power_state *)
				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Battery:
			data->states[i] = POWER_STATE_TYPE_BATTERY;
			break;
		case PP_StateUILabel_Balanced:
			data->states[i] = POWER_STATE_TYPE_BALANCED;
			break;
		case PP_StateUILabel_Performance:
			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
			break;
		default:
			if (state->classification.flags & PP_StateClassificationFlag_Boot)
				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
			else
				data->states[i] = POWER_STATE_TYPE_DEFAULT;
		}
	}
	return 0;
}

static int pp_dpm_get_pp_table(void *handle, char **table)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table)
		return -EINVAL;

	*table = (char *)hwmgr->soft_pp_table;
	return hwmgr->soft_pp_table_size;
}

static int amd_powerplay_reset(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret;

	ret = hwmgr_hw_fini(hwmgr);
	if (ret)
		return ret;

	ret = hwmgr_hw_init(hwmgr);
	if (ret)
		return ret;

	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
}

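/*
 * Install a caller-provided pp table: the soft table pointer is
 * redirected to a kernel copy of the new data, and the hardware is
 * re-initialized via amd_powerplay_reset() so the table takes effect.
 */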
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -ENOMEM;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hardcode_pp_table) {
		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
						   hwmgr->soft_pp_table_size,
						   GFP_KERNEL);
		if (!hwmgr->hardcode_pp_table)
			return ret;
	}

	memcpy(hwmgr->hardcode_pp_table, buf, size);

	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;

	ret = amd_powerplay_reset(handle);
	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->avfs_control)
		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);

	return ret;
}

static int pp_dpm_force_clock_level(void *handle,
				    enum pp_clock_type type, uint32_t mask)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
}

static int pp_dpm_emit_clock_levels(void *handle,
				    enum pp_clock_type type,
				    char *buf,
				    int *offset)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (!hwmgr->hwmgr_func->emit_clock_levels)
		return -ENOENT;

	return hwmgr->hwmgr_func->emit_clock_levels(hwmgr, type, buf, offset);
}

static int pp_dpm_print_clock_levels(void *handle,
				     enum pp_clock_type type, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
}

static int pp_dpm_get_sclk_od(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
}

static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
}

static int pp_dpm_get_mclk_od(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
}

static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
}

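/*
 * Sensors that map to cached hwmgr state (stable/peak pstate clocks,
 * fan RPM limits) are answered directly; all other sensor queries are
 * forwarded to the ASIC-specific read_sensor hook.
 */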
static int pp_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !value)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk * 100;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk * 100;
		return 0;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk_peak * 100;
		return 0;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk_peak * 100;
		return 0;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
		return 0;
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
		return 0;
	default:
		return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
	}
}

static struct amd_vce_state *
pp_dpm_get_vce_clock_state(void *handle, unsigned int idx)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return NULL;

	if (idx < hwmgr->num_vce_state_tables)
		return &hwmgr->vce_states[idx];
	return NULL;
}

static int pp_get_power_profile_mode(void *handle, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode)
		return -EOPNOTSUPP;
	if (!buf)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}

static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode)
		return -EOPNOTSUPP;

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("power profile setting is for manual dpm mode only.\n");
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
}

static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input,
				     uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
		return 0;

	return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size);
}

static int pp_odn_edit_dpm_table(void *handle, enum PP_OD_DPM_TABLE_COMMAND type,
				 long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}

static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->set_mp1_state)
		return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);

	return 0;
}

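/*
 * Enable or disable one workload profile and re-apply the highest-
 * priority profile still active (found via fls() on workload_mask),
 * unless the performance level is under manual control.
 */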
static int pp_dpm_switch_power_profile(void *handle,
				       enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload[1];
	uint32_t index;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
		return -EINVAL;

	if (!en) {
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload[0] = hwmgr->workload_setting[index];
	} else {
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload[0] = hwmgr->workload_setting[index];
	}

	if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
	    hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
		if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en))
			return -EINVAL;
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0);

	return 0;
}

static int pp_set_power_limit(void *handle, uint32_t limit)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t max_power_limit;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (limit == 0)
		limit = hwmgr->default_power_limit;

	max_power_limit = hwmgr->default_power_limit;
	if (hwmgr->od_enabled) {
		max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
		max_power_limit /= 100;
	}

	if (limit > max_power_limit)
		return -EINVAL;

	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
	hwmgr->power_limit = limit;
	return 0;
}

static int pp_get_power_limit(void *handle, uint32_t *limit,
			      enum pp_power_limit_level pp_limit_level,
			      enum pp_power_type power_type)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !limit)
		return -EINVAL;

	if (power_type != PP_PWR_TYPE_SUSTAINED)
		return -EOPNOTSUPP;

	switch (pp_limit_level) {
	case PP_PWR_LIMIT_CURRENT:
		*limit = hwmgr->power_limit;
		break;
	case PP_PWR_LIMIT_DEFAULT:
		*limit = hwmgr->default_power_limit;
		break;
	case PP_PWR_LIMIT_MAX:
		*limit = hwmgr->default_power_limit;
		if (hwmgr->od_enabled) {
			*limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
			*limit /= 100;
		}
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static int pp_display_configuration_change(void *handle,
					   const struct amd_pp_display_configuration *display_config)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	phm_store_dal_configuration_data(hwmgr, display_config);
	return 0;
}

static int pp_get_display_power_level(void *handle,
				      struct amd_pp_simple_clock_info *output)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !output)
		return -EINVAL;

	return phm_get_dal_power_level(hwmgr, output);
}

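/*
 * Report the engine/memory clock ranges and bus bandwidth of the
 * current power state, primarily for consumption by DC.
 */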
static int pp_get_current_clocks(void *handle,
				 struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = { 0 };
	struct pp_clock_info hw_clocks;
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	phm_get_dal_power_level(hwmgr, &simple_clocks);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					 &hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					 &hw_clocks, PHM_PerformanceLevelDesignation_Activity);

	if (ret) {
		pr_debug("Error in phm_get_clock_info\n");
		return -EINVAL;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks) == 0) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
	return 0;
}

static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type,
				struct amd_pp_clocks *clocks)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (clocks == NULL)
		return -EINVAL;

	return phm_get_clock_by_type(hwmgr, type, clocks);
}

static int pp_get_clock_by_type_with_latency(void *handle,
					     enum amd_pp_clock_type type,
					     struct pp_clock_levels_with_latency *clocks)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !clocks)
		return -EINVAL;

	return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
}

static int pp_get_clock_by_type_with_voltage(void *handle,
					     enum amd_pp_clock_type type,
					     struct pp_clock_levels_with_voltage *clocks)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !clocks)
		return -EINVAL;

	return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
}

static int pp_set_watermarks_for_clocks_ranges(void *handle,
					       void *clock_ranges)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
		return -EINVAL;

	return phm_set_watermarks_for_clocks_ranges(hwmgr,
						    clock_ranges);
}

static int pp_display_clock_voltage_request(void *handle,
					    struct pp_display_clock_request *clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !clock)
		return -EINVAL;

	return phm_display_clock_voltage_request(hwmgr, clock);
}

static int pp_get_display_mode_validation_clocks(void *handle,
						 struct amd_pp_simple_clock_info *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !clocks)
		return -EINVAL;

	clocks->level = PP_DAL_POWERLEVEL_7;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
		ret = phm_get_max_high_clocks(hwmgr, clocks);

	return ret;
}

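/*
 * Per-IP powergating helpers, dispatched from
 * pp_set_powergating_by_smu() below.
 */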
static int pp_dpm_powergate_mmhub(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
}

static int pp_dpm_powergate_gfx(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
}

static void pp_dpm_powergate_acp(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->powergate_acp == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
}

static void pp_dpm_powergate_sdma(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return;

	if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
}

static int pp_set_powergating_by_smu(void *handle,
				     uint32_t block_type, bool gate)
{
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		pp_dpm_powergate_uvd(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		pp_dpm_powergate_vce(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_GMC:
		/*
		 * For now this is only used on PICASSO, and only the
		 * "gate" operation is supported.
		 */
		if (gate)
			pp_dpm_powergate_mmhub(handle);
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = pp_dpm_powergate_gfx(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_ACP:
		pp_dpm_powergate_acp(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		pp_dpm_powergate_sdma(handle, gate);
		break;
	default:
		break;
	}
	return ret;
}

static int pp_notify_smu_enable_pwe(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);

	return 0;
}

static int pp_enable_mgpu_fan_boost(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en ||
	    hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
		return 0;

	hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);

	return 0;
}

static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);

	return 0;
}

static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);

	return 0;
}

static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);

	return 0;
}

static int pp_set_active_display_count(void *handle, uint32_t count)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	return phm_set_active_display_count(hwmgr, count);
}

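/*
 * BACO (Bus Alive, Chip Off) state control. Only exercised on bare
 * metal (hwmgr->not_vf) with dpm enabled.
 */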
static int pp_get_asic_baco_capability(void *handle, bool *cap)
{
	struct pp_hwmgr *hwmgr = handle;

	*cap = false;
	if (!hwmgr)
		return -EINVAL;

	if (!(hwmgr->not_vf && amdgpu_dpm) ||
	    !hwmgr->hwmgr_func->get_asic_baco_capability)
		return 0;

	hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);

	return 0;
}

static int pp_get_asic_baco_state(void *handle, int *state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
		return 0;

	hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);

	return 0;
}

static int pp_set_asic_baco_state(void *handle, int state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!(hwmgr->not_vf && amdgpu_dpm) ||
	    !hwmgr->hwmgr_func->set_asic_baco_state)
		return 0;

	hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);

	return 0;
}

static int pp_get_ppfeature_status(void *handle, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !buf)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
}

static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
}

static int pp_asic_reset_mode_2(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->asic_reset == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
}

static int pp_smu_i2c_bus_access(void *handle, bool acquire)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
}

static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
		return 0;

	hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);

	return 0;
}

static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
		return 0;

	hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);

	return 0;
}

static ssize_t pp_get_gpu_metrics(void *handle, void **table)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
		return -EOPNOTSUPP;

	return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
}

static int pp_gfx_state_change_set(void *handle, uint32_t state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->gfx_state_change == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->gfx_state_change(hwmgr, state);
	return 0;
}

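/*
 * Expose a CPU mapping of the SMU private buffer reserved in
 * pp_reserve_vram_for_smu().
 */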
static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{
	struct pp_hwmgr *hwmgr = handle;
	struct amdgpu_device *adev = hwmgr->adev;
	int err;

	if (!addr || !size)
		return -EINVAL;

	*addr = NULL;
	*size = 0;
	if (adev->pm.smu_prv_buffer) {
		err = amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
		if (err)
			return err;
		*size = adev->pm.smu_prv_buffer_size;
	}

	return 0;
}

static void pp_pm_compute_clocks(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	struct amdgpu_device *adev = hwmgr->adev;

	if (!adev->dc_enabled) {
		amdgpu_dpm_get_active_displays(adev);
		adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
		adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
		adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
		/*
		 * The non-DC code has issues with mclk switching at
		 * refresh rates over 120 Hz.
		 */
		if (adev->pm.pm_display_cfg.vrefresh > 120)
			adev->pm.pm_display_cfg.min_vblank_time = 0;

		pp_display_configuration_change(handle,
						&adev->pm.pm_display_cfg);
	}

	pp_dpm_dispatch_tasks(handle,
			      AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
			      NULL);
}

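/*
 * Dispatch table handed to the amdgpu core through
 * adev->powerplay.pp_funcs in amd_powerplay_create().
 */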
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_pwm = pp_dpm_set_fan_speed_pwm,
	.get_fan_speed_pwm = pp_dpm_get_fan_speed_pwm,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.emit_clock_levels = pp_dpm_emit_clock_levels,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.set_powergating_by_smu = pp_set_powergating_by_smu,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.set_fine_grain_clk_vol = pp_set_fine_grain_clk_vol,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_mp1_state = pp_dpm_set_mp1_state,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
	/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
	.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
	.set_active_display_count = pp_set_active_display_count,
	.set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
	.set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
	.set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
	.get_asic_baco_capability = pp_get_asic_baco_capability,
	.get_asic_baco_state = pp_get_asic_baco_state,
	.set_asic_baco_state = pp_set_asic_baco_state,
	.get_ppfeature_status = pp_get_ppfeature_status,
	.set_ppfeature_status = pp_set_ppfeature_status,
	.asic_reset_mode_2 = pp_asic_reset_mode_2,
	.smu_i2c_bus_access = pp_smu_i2c_bus_access,
	.set_df_cstate = pp_set_df_cstate,
	.set_xgmi_pstate = pp_set_xgmi_pstate,
	.get_gpu_metrics = pp_get_gpu_metrics,
	.gfx_state_change_set = pp_gfx_state_change_set,
	.get_smu_prv_buf_details = pp_get_prv_buffer_details,
	.pm_compute_clocks = pp_pm_compute_clocks,
};