/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "vangogh_ppt.h"
#include "aldebaran_ppt.h"
#include "yellow_carp_ppt.h"
#include "cyan_skillfish_ppt.h"
#include "amd_pcie.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

static const struct amd_pm_funcs swsmu_pm_funcs;
static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask);
static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id,
			   bool lock_needed);
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_percent(void *handle, u32 speed);
static int smu_set_fan_control_mode(struct smu_context *smu, int value);
static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);

static int smu_sys_get_pp_feature_mask(void *handle,
				       char *buf)
{
	struct smu_context *smu = handle;
	int size = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	size = smu_get_pp_feature_mask(smu, buf);

	mutex_unlock(&smu->mutex);

	return size;
}

static int smu_sys_set_pp_feature_mask(void *handle,
				       uint64_t new_mask)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_set_pp_feature_mask(smu, new_mask);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev) && smu->ppt_funcs->get_gfx_off_status)
		*value = smu_get_gfx_off_status(smu);
	else
		ret = -EINVAL;

	return ret;
}

int smu_set_soft_freq_range(struct smu_context *smu,
			    enum smu_clk_type clk_type,
			    uint32_t min,
			    uint32_t max)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_soft_freq_limited_range)
		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
								  clk_type,
								  min,
								  max);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu,
			   enum smu_clk_type clk_type,
			   uint32_t *min,
			   uint32_t *max)
{
	int ret = 0;

	if (!min && !max)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_dpm_ultimate_freq)
		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
							    clk_type,
							    min,
							    max);

	mutex_unlock(&smu->mutex);

	return ret;
}

static u32 smu_get_mclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static u32 smu_get_sclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

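/*
 * VCN/JPEG power gating state is tracked in the vcn_gated/jpeg_gated
 * atomics and protected by the corresponding *_gate_lock. The *_locked
 * variants below expect the caller to hold that lock and return early
 * when the block is already in the requested state; the plain wrappers
 * take the lock themselves.
 */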
static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
					 bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_vcn_enable)
		return 0;

	if (atomic_read(&power_gate->vcn_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vcn_gated, !enable);

	return ret;
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	mutex_lock(&power_gate->vcn_gate_lock);

	ret = smu_dpm_set_vcn_enable_locked(smu, enable);

	mutex_unlock(&power_gate->vcn_gate_lock);

	return ret;
}

static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
					  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
		return 0;

	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->jpeg_gated, !enable);

	return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	mutex_lock(&power_gate->jpeg_gate_lock);

	ret = smu_dpm_set_jpeg_enable_locked(smu, enable);

	mutex_unlock(&power_gate->jpeg_gate_lock);

	return ret;
}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @handle: smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate: to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce).
 *    This is guarded to be race-condition free by the caller.
 * 2. Or it gets called on a user setting request of power_dpm_force_performance_level.
 *    In this case, the smu->mutex lock protection is already enforced on
 *    the parent API smu_force_performance_level of the call path.
 */
static int smu_dpm_set_power_gate(void *handle,
				  uint32_t block_type,
				  bool gate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	switch (block_type) {
	/*
	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
	 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
	 */
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		ret = smu_dpm_set_vcn_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
				gate ? "enable" : "disable");
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
				gate ? "gate" : "ungate");
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported block type!\n");
		return -EINVAL;
	}

	return ret;
}

/**
 * smu_set_user_clk_dependencies - set user profile clock dependencies
 *
 * @smu: smu_context pointer
 * @clk: enum smu_clk_type type
 *
 * Enable/Disable the clock dependency for the @clk type.
 */
static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
{
	if (smu->adev->in_suspend)
		return;

	if (clk == SMU_MCLK) {
		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_FCLK) {
		/* MCLK takes precedence over FCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_SOCCLK) {
		/* MCLK takes precedence over SOCCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
	} else
		/* Add clk dependencies here, if any */
		return;
}

/**
 * smu_restore_dpm_user_profile - reinstate user dpm profile
 *
 * @smu: smu_context pointer
 *
 * Restore the saved user power configurations, including power limit,
 * clock frequencies, fan control mode and fan speed.
 */
static void smu_restore_dpm_user_profile(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->adev->in_suspend)
		return;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return;

	/* Enable restore flag */
	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;

	/* set the user dpm power limit */
	if (smu->user_dpm_profile.power_limit) {
		ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
		if (ret)
			dev_err(smu->adev->dev, "Failed to set power limit value\n");
	}

	/* set the user dpm clock configurations */
	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		enum smu_clk_type clk_type;

		for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
			/*
			 * Iterate over smu clk type and force the saved user clk
			 * configs, skip if clock dependency is enabled
			 */
			if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
			    smu->user_dpm_profile.clk_mask[clk_type]) {
				ret = smu_force_smuclk_levels(smu, clk_type,
						smu->user_dpm_profile.clk_mask[clk_type]);
				if (ret)
					dev_err(smu->adev->dev,
						"Failed to set clock type = %d\n", clk_type);
			}
		}
	}

	/* set the user dpm fan configurations */
	if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL) {
		ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
			return;
		}

		if (!ret && smu->user_dpm_profile.fan_speed_percent) {
			ret = smu_set_fan_speed_percent(smu, smu->user_dpm_profile.fan_speed_percent);
			if (ret)
				dev_err(smu->adev->dev, "Failed to set manual fan speed\n");
		}
	}

	/* Restore user customized OD settings */
	if (smu->user_dpm_profile.user_od) {
		if (smu->ppt_funcs->restore_user_od_settings) {
			ret = smu->ppt_funcs->restore_user_od_settings(smu);
			if (ret)
				dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
		}
	}

	/* Disable restore flag */
	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
}

static int smu_get_power_num_states(void *handle,
				    struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* not support power state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}

bool is_support_sw_smu(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_ARCTURUS)
		return true;

	return false;
}

bool is_support_cclk_dpm(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (!is_support_sw_smu(adev))
		return false;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
		return false;

	return true;
}


static int smu_sys_get_pp_table(void *handle,
				char **table)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t powerplay_table_size;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	powerplay_table_size = smu_table->power_play_table_size;

	mutex_unlock(&smu->mutex);

	return powerplay_table_size;
}

static int smu_sys_set_pp_table(void *handle,
				const char *buf,
				size_t size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (header->usStructureSize != size) {
		dev_err(smu->adev->dev, "pp table size not matched !\n");
		return -EIO;
	}

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
		ret = -ENOMEM;
		goto failed;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * Special hw_fini action(for Navi1x, the DPMs disablement will be
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);

	return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		navi10_set_ppt_funcs(smu);
		break;
	case CHIP_ARCTURUS:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
		sienna_cichlid_set_ppt_funcs(smu);
		break;
	case CHIP_ALDEBARAN:
		aldebaran_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case CHIP_RENOIR:
		renoir_set_ppt_funcs(smu);
		break;
	case CHIP_VANGOGH:
		vangogh_set_ppt_funcs(smu);
		break;
	case CHIP_YELLOW_CARP:
		yellow_carp_set_ppt_funcs(smu);
		break;
	case CHIP_CYAN_SKILLFISH:
		cyan_skillfish_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	mutex_init(&smu->mutex);
	mutex_init(&smu->smu_baco.mutex);
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;

	adev->powerplay.pp_handle = smu;
	adev->powerplay.pp_funcs = &swsmu_pm_funcs;

	return smu_set_funcs(adev);
}

static int smu_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int vcn_gate, jpeg_gate;
	int ret = 0;

	if (!smu->ppt_funcs->set_default_dpm_table)
		return 0;

	mutex_lock(&power_gate->vcn_gate_lock);
	mutex_lock(&power_gate->jpeg_gate_lock);

	vcn_gate = atomic_read(&power_gate->vcn_gated);
	jpeg_gate = atomic_read(&power_gate->jpeg_gated);

	ret = smu_dpm_set_vcn_enable_locked(smu, true);
	if (ret)
		goto err0_out;

	ret = smu_dpm_set_jpeg_enable_locked(smu, true);
	if (ret)
		goto err1_out;

	ret = smu->ppt_funcs->set_default_dpm_table(smu);
	if (ret)
		dev_err(smu->adev->dev,
			"Failed to setup default dpm clock tables!\n");

	smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate);
err1_out:
	smu_dpm_set_vcn_enable_locked(smu, !vcn_gate);
err0_out:
	mutex_unlock(&power_gate->jpeg_gate_lock);
	mutex_unlock(&power_gate->vcn_gate_lock);

	return ret;
}


static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	smu_set_fine_grain_gfx_freq_parameters(smu);

	if (!smu->pm_enabled)
		return 0;

	ret = smu_post_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to post smu init!\n");
		return ret;
	}

	if (adev->asic_type == CHIP_YELLOW_CARP)
		return 0;

	if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
		ret = smu_set_default_od_settings(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup default OD settings!\n");
			return ret;
		}
	}

	ret = smu_populate_umd_state_clk(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
		return ret;
	}

	ret = smu_get_asic_power_limits(smu,
					&smu->current_power_limit,
					&smu->default_power_limit,
					&smu->max_power_limit);
	if (ret) {
		dev_err(adev->dev, "Failed to get asic power limits!\n");
		return ret;
	}

	if (!amdgpu_sriov_vf(adev))
		smu_get_unique_id(smu);

	smu_get_fan_parameters(smu);

	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT,
			false);

	smu_restore_dpm_user_profile(smu);

	return 0;
}

static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

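	/*
	 * Size the single driver table bo to the largest remaining SMU table;
	 * the tool (PMSTATUSLOG) table allocated above keeps its own buffer.
	 */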
	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;
	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is reserved for SMC use. Its location is passed to the
 * SMC via the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		if (ret)
			dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}

static int smu_alloc_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	dummy_read_1_table->size = 0x40000;
	dummy_read_1_table->align = PAGE_SIZE;
	dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      dummy_read_1_table->size,
				      dummy_read_1_table->align,
				      dummy_read_1_table->domain,
				      &dummy_read_1_table->bo,
				      &dummy_read_1_table->mc_address,
				      &dummy_read_1_table->cpu_addr);
	if (ret)
		dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");

	return ret;
}

static void smu_free_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;


	amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
			      &dummy_read_1_table->mc_address,
			      &dummy_read_1_table->cpu_addr);

	memset(dummy_read_1_table, 0, sizeof(struct smu_table));
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	/**
	 * Create smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
		return ret;
	}

	/**
	 * Create smu_power_context structure, and allocate smu_dpm_context and
	 * context size to fill the smu_power_context data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu_init_power!\n");
		return ret;
	}

	/*
	 * allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_alloc_dummy_read_table(smu);
	if (ret)
		return ret;

	ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c);
	if (ret)
		return ret;

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	smu_i2c_fini(smu, &smu->adev->pm.smu_i2c);

	smu_free_dummy_read_table(smu);

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_fini_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu_fini_power!\n");
		return ret;
	}

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n");
		return ret;
	}

	return 0;
}

static void smu_throttling_logging_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       throttling_logging_work);

	smu_log_thermal_throttling(smu);
}

static void smu_interrupt_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       interrupt_work);

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
		smu->ppt_funcs->interrupt_work(smu);

	mutex_unlock(&smu->mutex);
}

static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	mutex_init(&smu->sensor_lock);
	mutex_init(&smu->metrics_lock);
	mutex_init(&smu->message_lock);

	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
	atomic64_set(&smu->throttle_int_counter, 0);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
	mutex_init(&smu->smu_power.power_gate.vcn_gate_lock);
	mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock);

	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	ret = smu_init_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw init smc table!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to register smc irq handler!\n");
		return ret;
	}

	/* If there is no way to query fan control mode, fan control is not supported */
	if (!smu->ppt_funcs->get_fan_control_mode)
		smu->adev->pm.no_fan = true;

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw fini smc table!\n");
		return ret;
	}

	smu_fini_microcode(smu);

	return 0;
}

static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_temperature_range *range =
				&smu->thermal_range;
	int ret = 0;

	if (!smu->ppt_funcs->get_thermal_temperature_range)
		return 0;

	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
	if (ret)
		return ret;

	adev->pm.dpm.thermal.min_temp = range->min;
	adev->pm.dpm.thermal.max_temp = range->max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

	return ret;
}

static int smu_smc_hw_setup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0;
	int ret = 0;

	if (adev->in_suspend && smu_is_dpm_running(smu)) {
		dev_info(adev->dev, "dpm has been enabled\n");
		/* this is needed specifically */
		if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
		    (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
			ret = smu_system_features_control(smu, true);
		return ret;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret) {
		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
		return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
		return ret;
	}

	/*
	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
		return ret;
	}

	/*
	 * Use the msgs SetSystemVirtualDramAddr and DramLogSetDramAddr to
	 * notify the SMC of the memory pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
		return ret;
	}

	/* smu_dump_pptable(smu); */
	/*
	 * Copy pptable bo in the vram to smc with SMU MSGs such as
	 * SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
		return ret;
	}

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	ret = smu_feature_set_allowed_mask(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
		return ret;
	}

	ret = smu_system_features_control(smu, true);
	if (ret) {
		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
		return ret;
	}

	if (!smu_is_dpm_running(smu))
		dev_info(adev->dev, "dpm has been disabled\n");

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;
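	/*
	 * Example: a Gen4-capable x16 platform resolves to pcie_gen = 3 and
	 * pcie_width = 6 here; both values are handed to the ASIC-specific
	 * backend through smu_update_pcie_parameters() below.
	 */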
	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
	if (ret) {
		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
		return ret;
	}

	ret = smu_get_thermal_temperature_range(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
		return ret;
	}

	ret = smu_enable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to enable thermal alert!\n");
		return ret;
	}

	/*
	 * Set initialized values (get from vbios) to dpm tables context such as
	 * gfxclk, memclk, dcefclk, etc. and enable the DPM feature for each
	 * type of clock.
	 */
	ret = smu_set_default_dpm_table(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
		return ret;
	}

	ret = smu_notify_display_change(smu);
	if (ret)
		return ret;

	/*
	 * Set min deep sleep dce fclk with bootup value from vbios via
	 * SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu,
					  smu->smu_table.boot_values.dcefclk / 100);
	if (ret)
		return ret;

	return ret;
}

static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (adev->asic_type < CHIP_NAVI10) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			dev_err(adev->dev, "SMC is not ready\n");
			return ret;
		}
	}

	/*
	 * Send msg GetDriverIfVersion to check if the return value is equal
	 * with DRIVER_IF_VERSION of smc header.
	 */
	ret = smu_check_fw_version(smu);
	if (ret)
		return ret;

	return ret;
}

static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
		smu->pm_enabled = false;
		return 0;
	}

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, false);
		smu_dpm_set_vcn_enable(smu, true);
		smu_dpm_set_jpeg_enable(smu, true);
		smu_set_gfx_cgpg(&adev->smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	/* get boot_values from vbios to set revision, gfxclk, etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
		return ret;
	}

	ret = smu_setup_pptable(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable!\n");
		return ret;
	}

	ret = smu_get_driver_allowed_feature_mask(smu);
	if (ret)
		return ret;

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	/*
	 * Move maximum sustainable clock retrieving here considering
	 * 1. It is not needed on resume (from S3).
	 * 2. DAL settings come between .hw_init and .late_init of SMU.
	 *    And DAL needs to know the maximum sustainable clocks. Thus
	 *    it cannot be put in .late_init().
	 */
	ret = smu_init_max_sustainable_clocks(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
		return ret;
	}

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is initialized successfully!\n");

	return 0;
}

static int smu_disable_dpms(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
	bool use_baco = !smu->is_apu &&
		((amdgpu_in_reset(adev) &&
		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
		 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));

	/*
	 * For custom pptable uploading, skip the DPM features
	 * disable process on Navi1x ASICs.
	 *   - As the gfx related features are under control of
	 *     RLC on those ASICs. RLC reinitialization will be
	 *     needed to reenable them. That will cost much more
	 *     efforts.
	 *
	 *   - SMU firmware can handle the DPM reenablement
	 *     properly.
	 */
	if (smu->uploading_custom_pp_table &&
	    (adev->asic_type >= CHIP_NAVI10) &&
	    (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
		return smu_disable_all_features_with_exception(smu,
							       true,
							       SMU_FEATURE_COUNT);

	/*
	 * For Sienna_Cichlid, PMFW will handle the features disablement properly
	 * on BACO in. Driver involvement is unnecessary.
	 */
	if (((adev->asic_type == CHIP_SIENNA_CICHLID) ||
	     ((adev->asic_type >= CHIP_NAVI10) && (adev->asic_type <= CHIP_NAVI12))) &&
	     use_baco)
		return smu_disable_all_features_with_exception(smu,
							       true,
							       SMU_FEATURE_BACO_BIT);

	/*
	 * For gpu reset, runpm and hibernation through BACO,
	 * BACO feature has to be kept enabled.
	 */
	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
		ret = smu_disable_all_features_with_exception(smu,
							      false,
							      SMU_FEATURE_BACO_BIT);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
	} else {
		ret = smu_system_features_control(smu, false);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features.\n");
	}

	if (adev->asic_type >= CHIP_NAVI10 &&
	    adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);

	return ret;
}

static int smu_smc_hw_cleanup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	cancel_work_sync(&smu->throttling_logging_work);
	cancel_work_sync(&smu->interrupt_work);

	ret = smu_disable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Fail to disable thermal alert!\n");
		return ret;
	}

	ret = smu_disable_dpms(smu);
	if (ret) {
		dev_err(adev->dev, "Fail to disable dpm features!\n");
		return ret;
	}

	return 0;
}

static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, true);
	}

	smu_dpm_set_vcn_enable(smu, false);
	smu_dpm_set_jpeg_enable(smu, false);

	adev->vcn.cur_state = AMD_PG_STATE_GATE;
	adev->jpeg.cur_state = AMD_PG_STATE_GATE;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	return smu_smc_hw_cleanup(smu);
}

static int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	amdgpu_gfx_off_ctrl(smu->adev, false);

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	ret = smu_late_init(adev);
	if (ret)
		return ret;

	amdgpu_gfx_off_ctrl(smu->adev, true);

	return 0;
}

static int smu_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	/* skip CGPG when in S0ix */
	if (smu->is_apu && !adev->in_s0ix)
		smu_set_gfx_cgpg(&adev->smu, false);

	return 0;
}

static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	dev_info(adev->dev, "SMU is resuming...\n");

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, true);

	smu->disable_uclk_switch = 0;

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is resumed successfully!\n");

	return 0;
}

static int smu_display_configuration_change(void *handle,
					    const struct amd_pp_display_configuration *display_config)
{
	struct smu_context *smu = handle;
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!display_config)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_set_min_dcef_deep_sleep(smu,
				    display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	mutex_unlock(&smu->mutex);

	return 0;
}

static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context*)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			smu_gpo_control(smu, false);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
			smu_gfx_ulv_control(smu, false);
			smu_deep_sleep_control(smu, false);
			amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
			smu_deep_sleep_control(smu, true);
			smu_gfx_ulv_control(smu, true);
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_GATE);
			smu_gpo_control(smu, true);
		}
	}

	return 0;
}

static int smu_bump_power_profile_mode(struct smu_context *smu,
				       long *param,
				       uint32_t param_size)
{
	int ret = 0;

	if (smu->ppt_funcs->set_power_profile_mode)
		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);

	return ret;
}

static int smu_adjust_power_state_dynamic(struct smu_context *smu,
					  enum amd_dpm_forced_level level,
					  bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to change display config!");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_display_config(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to notify smc display config!");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to set performance level!");
			return ret;
		}

		/* update the saved copy */
		smu_dpm_ctx->dpm_level = level;
	}

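	/*
	 * Pick the active power profile: fls() returns the highest set bit in
	 * workload_mask, i.e. the highest-priority profile currently requested
	 * (priorities are assigned in smu_sw_init()).
	 */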
	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_bump_power_profile_mode(smu, &workload, 0);
	}

	return ret;
}

static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id,
			   bool lock_needed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			goto out;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

out:
	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}

static int smu_handle_dpm_task(void *handle,
			       enum amd_pp_task task_id,
			       enum amd_pm_state_type *user_state)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	return smu_handle_task(smu, smu_dpm->dpm_level, task_id, true);

}

static int smu_switch_power_profile(void *handle,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	long workload;
	uint32_t index;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (!en) {
		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	} else {
		smu->workload_mask |= (1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
		smu_bump_power_profile_mode(smu, &workload, 0);

	mutex_unlock(&smu->mutex);

	return 0;
}

static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	enum amd_dpm_forced_level level;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&(smu->mutex));
	level = smu_dpm_ctx->dpm_level;
	mutex_unlock(&(smu->mutex));

	return level;
}

static int smu_force_performance_level(void *handle,
				       enum amd_dpm_forced_level level)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	ret = smu_enable_umd_pstate(smu, &level);
	if (ret) {
		mutex_unlock(&smu->mutex);
		return ret;
	}

	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE,
			      false);

	mutex_unlock(&smu->mutex);

	/* reset user dpm clock state */
	if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
		smu->user_dpm_profile.clk_dependency = 0;
	}

	return ret;
}

static int smu_set_display_count(void *handle, uint32_t count)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);
	ret = smu_init_display_count(smu, count);
	mutex_unlock(&smu->mutex);

	return ret;
}

static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
			smu->user_dpm_profile.clk_mask[clk_type] = mask;
			smu_set_user_clk_dependencies(smu, clk_type);
		}
	}

	mutex_unlock(&smu->mutex);

	return ret;
}

static int smu_force_ppclk_levels(void *handle,
				  enum pp_clock_type type,
				  uint32_t mask)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	switch (type) {
	case PP_SCLK:
		clk_type = SMU_SCLK; break;
	case PP_MCLK:
		clk_type = SMU_MCLK; break;
	case PP_PCIE:
		clk_type = SMU_PCIE; break;
	case PP_SOCCLK:
		clk_type = SMU_SOCCLK; break;
	case PP_FCLK:
		clk_type = SMU_FCLK; break;
	case PP_DCEFCLK:
		clk_type = SMU_DCEFCLK; break;
	case PP_VCLK:
		clk_type = SMU_VCLK; break;
	case PP_DCLK:
		clk_type = SMU_DCLK; break;
	case OD_SCLK:
		clk_type = SMU_OD_SCLK; break;
	case OD_MCLK:
		clk_type = SMU_OD_MCLK; break;
	case OD_VDDC_CURVE:
		clk_type = SMU_OD_VDDC_CURVE; break;
	case OD_RANGE:
		clk_type = SMU_OD_RANGE; break;
	default:
		return -EINVAL;
	}

	return smu_force_smuclk_levels(smu, clk_type, mask);
}

/*
 * On system suspend or reset, the dpm_enabled flag is cleared so that the
 * SMU services which are not usable in those states get gated. However,
 * setting the mp1 state should still be allowed even with dpm_enabled
 * cleared.
 */
static int smu_set_mp1_state(void *handle,
			     enum pp_mp1_state mp1_state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs &&
	    smu->ppt_funcs->set_mp1_state)
		ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);

	mutex_unlock(&smu->mutex);

	return ret;
}

static int smu_set_df_cstate(void *handle,
			     enum pp_df_cstate state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu->ppt_funcs->set_df_cstate(smu, state);
	if (ret)
		dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
	if (ret)
		dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_write_watermarks_table(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_set_watermarks_table(smu, NULL);

	mutex_unlock(&smu->mutex);

	return ret;
}

static int smu_set_watermarks_for_clock_ranges(void *handle,
					       struct pp_smu_wm_range_sets *clock_ranges)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->disable_watermark)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu_set_watermarks_table(smu, clock_ranges);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_ac_dc(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	/* controlled by firmware */
	if (smu->dc_controlled_by_gpio)
		return 0;

	mutex_lock(&smu->mutex);
	ret = smu_set_power_source(smu,
				   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
				   SMU_POWER_SOURCE_DC);
SMU_POWER_SOURCE_AC : 2078 SMU_POWER_SOURCE_DC); 2079 if (ret) 2080 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n", 2081 smu->adev->pm.ac_power ? "AC" : "DC"); 2082 mutex_unlock(&smu->mutex); 2083 2084 return ret; 2085 } 2086 2087 const struct amd_ip_funcs smu_ip_funcs = { 2088 .name = "smu", 2089 .early_init = smu_early_init, 2090 .late_init = smu_late_init, 2091 .sw_init = smu_sw_init, 2092 .sw_fini = smu_sw_fini, 2093 .hw_init = smu_hw_init, 2094 .hw_fini = smu_hw_fini, 2095 .suspend = smu_suspend, 2096 .resume = smu_resume, 2097 .is_idle = NULL, 2098 .check_soft_reset = NULL, 2099 .wait_for_idle = NULL, 2100 .soft_reset = NULL, 2101 .set_clockgating_state = smu_set_clockgating_state, 2102 .set_powergating_state = smu_set_powergating_state, 2103 .enable_umd_pstate = smu_enable_umd_pstate, 2104 }; 2105 2106 const struct amdgpu_ip_block_version smu_v11_0_ip_block = 2107 { 2108 .type = AMD_IP_BLOCK_TYPE_SMC, 2109 .major = 11, 2110 .minor = 0, 2111 .rev = 0, 2112 .funcs = &smu_ip_funcs, 2113 }; 2114 2115 const struct amdgpu_ip_block_version smu_v12_0_ip_block = 2116 { 2117 .type = AMD_IP_BLOCK_TYPE_SMC, 2118 .major = 12, 2119 .minor = 0, 2120 .rev = 0, 2121 .funcs = &smu_ip_funcs, 2122 }; 2123 2124 const struct amdgpu_ip_block_version smu_v13_0_ip_block = 2125 { 2126 .type = AMD_IP_BLOCK_TYPE_SMC, 2127 .major = 13, 2128 .minor = 0, 2129 .rev = 0, 2130 .funcs = &smu_ip_funcs, 2131 }; 2132 2133 static int smu_load_microcode(void *handle) 2134 { 2135 struct smu_context *smu = handle; 2136 struct amdgpu_device *adev = smu->adev; 2137 int ret = 0; 2138 2139 if (!smu->pm_enabled) 2140 return -EOPNOTSUPP; 2141 2142 /* This should be used for non-PSP loading */ 2143 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) 2144 return 0; 2145 2146 if (smu->ppt_funcs->load_microcode) { 2147 ret = smu->ppt_funcs->load_microcode(smu); 2148 if (ret) { 2149 dev_err(adev->dev, "Load microcode failed\n"); 2150 return ret; 2151 } 2152 } 2153 2154 if (smu->ppt_funcs->check_fw_status) { 2155 ret = smu->ppt_funcs->check_fw_status(smu); 2156 if (ret) { 2157 dev_err(adev->dev, "SMC is not ready\n"); 2158 return ret; 2159 } 2160 } 2161 2162 return ret; 2163 } 2164 2165 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) 2166 { 2167 int ret = 0; 2168 2169 mutex_lock(&smu->mutex); 2170 2171 if (smu->ppt_funcs->set_gfx_cgpg) 2172 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled); 2173 2174 mutex_unlock(&smu->mutex); 2175 2176 return ret; 2177 } 2178 2179 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed) 2180 { 2181 struct smu_context *smu = handle; 2182 u32 percent; 2183 int ret = 0; 2184 2185 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2186 return -EOPNOTSUPP; 2187 2188 mutex_lock(&smu->mutex); 2189 2190 if (smu->ppt_funcs->set_fan_speed_percent) { 2191 percent = speed * 100 / smu->fan_max_rpm; 2192 ret = smu->ppt_funcs->set_fan_speed_percent(smu, percent); 2193 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) 2194 smu->user_dpm_profile.fan_speed_percent = percent; 2195 } 2196 2197 mutex_unlock(&smu->mutex); 2198 2199 return ret; 2200 } 2201 2202 /** 2203 * smu_get_power_limit - Request one of the SMU Power Limits 2204 * 2205 * @handle: pointer to smu context 2206 * @limit: requested limit is written back to this variable 2207 * @pp_limit_level: &pp_power_limit_level selecting which limit (current, default or max) to return 2208 * @pp_power_type: &pp_power_type selecting the type of power limit (sustained or fast) 2209 * Return: 0 on success, <0 on error 2210 * 2211 */ 2212 int smu_get_power_limit(void *handle,
2213 uint32_t *limit, 2214 enum pp_power_limit_level pp_limit_level, 2215 enum pp_power_type pp_power_type) 2216 { 2217 struct smu_context *smu = handle; 2218 enum smu_ppt_limit_level limit_level; 2219 uint32_t limit_type; 2220 int ret = 0; 2221 2222 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2223 return -EOPNOTSUPP; 2224 2225 switch(pp_power_type) { 2226 case PP_PWR_TYPE_SUSTAINED: 2227 limit_type = SMU_DEFAULT_PPT_LIMIT; 2228 break; 2229 case PP_PWR_TYPE_FAST: 2230 limit_type = SMU_FAST_PPT_LIMIT; 2231 break; 2232 default: 2233 return -EOPNOTSUPP; 2234 break; 2235 } 2236 2237 switch(pp_limit_level){ 2238 case PP_PWR_LIMIT_CURRENT: 2239 limit_level = SMU_PPT_LIMIT_CURRENT; 2240 break; 2241 case PP_PWR_LIMIT_DEFAULT: 2242 limit_level = SMU_PPT_LIMIT_DEFAULT; 2243 break; 2244 case PP_PWR_LIMIT_MAX: 2245 limit_level = SMU_PPT_LIMIT_MAX; 2246 break; 2247 case PP_PWR_LIMIT_MIN: 2248 default: 2249 return -EOPNOTSUPP; 2250 break; 2251 } 2252 2253 mutex_lock(&smu->mutex); 2254 2255 if (limit_type != SMU_DEFAULT_PPT_LIMIT) { 2256 if (smu->ppt_funcs->get_ppt_limit) 2257 ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level); 2258 } else { 2259 switch (limit_level) { 2260 case SMU_PPT_LIMIT_CURRENT: 2261 if ((smu->adev->asic_type == CHIP_ALDEBARAN) || 2262 (smu->adev->asic_type == CHIP_SIENNA_CICHLID) || 2263 (smu->adev->asic_type == CHIP_NAVY_FLOUNDER) || 2264 (smu->adev->asic_type == CHIP_DIMGREY_CAVEFISH) || 2265 (smu->adev->asic_type == CHIP_BEIGE_GOBY)) 2266 ret = smu_get_asic_power_limits(smu, 2267 &smu->current_power_limit, 2268 NULL, 2269 NULL); 2270 *limit = smu->current_power_limit; 2271 break; 2272 case SMU_PPT_LIMIT_DEFAULT: 2273 *limit = smu->default_power_limit; 2274 break; 2275 case SMU_PPT_LIMIT_MAX: 2276 *limit = smu->max_power_limit; 2277 break; 2278 default: 2279 break; 2280 } 2281 } 2282 2283 mutex_unlock(&smu->mutex); 2284 2285 return ret; 2286 } 2287 2288 static int smu_set_power_limit(void *handle, uint32_t limit) 2289 { 2290 struct smu_context *smu = handle; 2291 uint32_t limit_type = limit >> 24; 2292 int ret = 0; 2293 2294 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2295 return -EOPNOTSUPP; 2296 2297 mutex_lock(&smu->mutex); 2298 2299 if (limit_type != SMU_DEFAULT_PPT_LIMIT) 2300 if (smu->ppt_funcs->set_power_limit) { 2301 ret = smu->ppt_funcs->set_power_limit(smu, limit); 2302 goto out; 2303 } 2304 2305 if (limit > smu->max_power_limit) { 2306 dev_err(smu->adev->dev, 2307 "New power limit (%d) is over the max allowed %d\n", 2308 limit, smu->max_power_limit); 2309 ret = -EINVAL; 2310 goto out; 2311 } 2312 2313 if (!limit) 2314 limit = smu->current_power_limit; 2315 2316 if (smu->ppt_funcs->set_power_limit) { 2317 ret = smu->ppt_funcs->set_power_limit(smu, limit); 2318 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) 2319 smu->user_dpm_profile.power_limit = limit; 2320 } 2321 2322 out: 2323 mutex_unlock(&smu->mutex); 2324 2325 return ret; 2326 } 2327 2328 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) 2329 { 2330 int ret = 0; 2331 2332 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2333 return -EOPNOTSUPP; 2334 2335 mutex_lock(&smu->mutex); 2336 2337 if (smu->ppt_funcs->print_clk_levels) 2338 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf); 2339 2340 mutex_unlock(&smu->mutex); 2341 2342 return ret; 2343 } 2344 2345 static int smu_print_ppclk_levels(void *handle, 2346 enum pp_clock_type type, 2347 char *buf) 2348 { 2349 struct smu_context *smu = 
handle; 2350 enum smu_clk_type clk_type; 2351 2352 switch (type) { 2353 case PP_SCLK: 2354 clk_type = SMU_SCLK; break; 2355 case PP_MCLK: 2356 clk_type = SMU_MCLK; break; 2357 case PP_PCIE: 2358 clk_type = SMU_PCIE; break; 2359 case PP_SOCCLK: 2360 clk_type = SMU_SOCCLK; break; 2361 case PP_FCLK: 2362 clk_type = SMU_FCLK; break; 2363 case PP_DCEFCLK: 2364 clk_type = SMU_DCEFCLK; break; 2365 case PP_VCLK: 2366 clk_type = SMU_VCLK; break; 2367 case PP_DCLK: 2368 clk_type = SMU_DCLK; break; 2369 case OD_SCLK: 2370 clk_type = SMU_OD_SCLK; break; 2371 case OD_MCLK: 2372 clk_type = SMU_OD_MCLK; break; 2373 case OD_VDDC_CURVE: 2374 clk_type = SMU_OD_VDDC_CURVE; break; 2375 case OD_RANGE: 2376 clk_type = SMU_OD_RANGE; break; 2377 case OD_VDDGFX_OFFSET: 2378 clk_type = SMU_OD_VDDGFX_OFFSET; break; 2379 case OD_CCLK: 2380 clk_type = SMU_OD_CCLK; break; 2381 default: 2382 return -EINVAL; 2383 } 2384 2385 return smu_print_smuclk_levels(smu, clk_type, buf); 2386 } 2387 2388 static int smu_od_edit_dpm_table(void *handle, 2389 enum PP_OD_DPM_TABLE_COMMAND type, 2390 long *input, uint32_t size) 2391 { 2392 struct smu_context *smu = handle; 2393 int ret = 0; 2394 2395 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2396 return -EOPNOTSUPP; 2397 2398 mutex_lock(&smu->mutex); 2399 2400 if (smu->ppt_funcs->od_edit_dpm_table) { 2401 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size); 2402 } 2403 2404 mutex_unlock(&smu->mutex); 2405 2406 return ret; 2407 } 2408 2409 static int smu_read_sensor(void *handle, 2410 int sensor, 2411 void *data, 2412 int *size_arg) 2413 { 2414 struct smu_context *smu = handle; 2415 struct smu_umd_pstate_table *pstate_table = 2416 &smu->pstate_table; 2417 int ret = 0; 2418 uint32_t *size, size_val; 2419 2420 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2421 return -EOPNOTSUPP; 2422 2423 if (!data || !size_arg) 2424 return -EINVAL; 2425 2426 size_val = *size_arg; 2427 size = &size_val; 2428 2429 mutex_lock(&smu->mutex); 2430 2431 if (smu->ppt_funcs->read_sensor) 2432 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size)) 2433 goto unlock; 2434 2435 switch (sensor) { 2436 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK: 2437 *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100; 2438 *size = 4; 2439 break; 2440 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK: 2441 *((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100; 2442 *size = 4; 2443 break; 2444 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK: 2445 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2); 2446 *size = 8; 2447 break; 2448 case AMDGPU_PP_SENSOR_UVD_POWER: 2449 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0; 2450 *size = 4; 2451 break; 2452 case AMDGPU_PP_SENSOR_VCE_POWER: 2453 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0; 2454 *size = 4; 2455 break; 2456 case AMDGPU_PP_SENSOR_VCN_POWER_STATE: 2457 *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 
0: 1; 2458 *size = 4; 2459 break; 2460 case AMDGPU_PP_SENSOR_MIN_FAN_RPM: 2461 *(uint32_t *)data = 0; 2462 *size = 4; 2463 break; 2464 default: 2465 *size = 0; 2466 ret = -EOPNOTSUPP; 2467 break; 2468 } 2469 2470 unlock: 2471 mutex_unlock(&smu->mutex); 2472 2473 // assign uint32_t to int 2474 *size_arg = size_val; 2475 2476 return ret; 2477 } 2478 2479 static int smu_get_power_profile_mode(void *handle, char *buf) 2480 { 2481 struct smu_context *smu = handle; 2482 int ret = 0; 2483 2484 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2485 return -EOPNOTSUPP; 2486 2487 mutex_lock(&smu->mutex); 2488 2489 if (smu->ppt_funcs->get_power_profile_mode) 2490 ret = smu->ppt_funcs->get_power_profile_mode(smu, buf); 2491 2492 mutex_unlock(&smu->mutex); 2493 2494 return ret; 2495 } 2496 2497 static int smu_set_power_profile_mode(void *handle, 2498 long *param, 2499 uint32_t param_size) 2500 { 2501 struct smu_context *smu = handle; 2502 int ret = 0; 2503 2504 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2505 return -EOPNOTSUPP; 2506 2507 mutex_lock(&smu->mutex); 2508 2509 smu_bump_power_profile_mode(smu, param, param_size); 2510 2511 mutex_unlock(&smu->mutex); 2512 2513 return ret; 2514 } 2515 2516 2517 static u32 smu_get_fan_control_mode(void *handle) 2518 { 2519 struct smu_context *smu = handle; 2520 u32 ret = 0; 2521 2522 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2523 return AMD_FAN_CTRL_NONE; 2524 2525 mutex_lock(&smu->mutex); 2526 2527 if (smu->ppt_funcs->get_fan_control_mode) 2528 ret = smu->ppt_funcs->get_fan_control_mode(smu); 2529 2530 mutex_unlock(&smu->mutex); 2531 2532 return ret; 2533 } 2534 2535 static int smu_set_fan_control_mode(struct smu_context *smu, int value) 2536 { 2537 int ret = 0; 2538 2539 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2540 return -EOPNOTSUPP; 2541 2542 mutex_lock(&smu->mutex); 2543 2544 if (smu->ppt_funcs->set_fan_control_mode) { 2545 ret = smu->ppt_funcs->set_fan_control_mode(smu, value); 2546 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) 2547 smu->user_dpm_profile.fan_mode = value; 2548 } 2549 2550 mutex_unlock(&smu->mutex); 2551 2552 /* reset user dpm fan speed */ 2553 if (!ret && value != AMD_FAN_CTRL_MANUAL && 2554 !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) 2555 smu->user_dpm_profile.fan_speed_percent = 0; 2556 2557 return ret; 2558 } 2559 2560 static void smu_pp_set_fan_control_mode(void *handle, u32 value) 2561 { 2562 struct smu_context *smu = handle; 2563 2564 smu_set_fan_control_mode(smu, value); 2565 } 2566 2567 2568 static int smu_get_fan_speed_percent(void *handle, u32 *speed) 2569 { 2570 struct smu_context *smu = handle; 2571 int ret = 0; 2572 uint32_t percent; 2573 2574 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2575 return -EOPNOTSUPP; 2576 2577 mutex_lock(&smu->mutex); 2578 2579 if (smu->ppt_funcs->get_fan_speed_percent) { 2580 ret = smu->ppt_funcs->get_fan_speed_percent(smu, &percent); 2581 if (!ret) { 2582 *speed = percent > 100 ? 
100 : percent; 2583 } 2584 } 2585 2586 mutex_unlock(&smu->mutex); 2587 2588 2589 return ret; 2590 } 2591 2592 static int smu_set_fan_speed_percent(void *handle, u32 speed) 2593 { 2594 struct smu_context *smu = handle; 2595 int ret = 0; 2596 2597 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2598 return -EOPNOTSUPP; 2599 2600 mutex_lock(&smu->mutex); 2601 2602 if (smu->ppt_funcs->set_fan_speed_percent) { 2603 if (speed > 100) 2604 speed = 100; 2605 ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed); 2606 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) 2607 smu->user_dpm_profile.fan_speed_percent = speed; 2608 } 2609 2610 mutex_unlock(&smu->mutex); 2611 2612 return ret; 2613 } 2614 2615 static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed) 2616 { 2617 struct smu_context *smu = handle; 2618 int ret = 0; 2619 u32 percent; 2620 2621 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2622 return -EOPNOTSUPP; 2623 2624 mutex_lock(&smu->mutex); 2625 2626 if (smu->ppt_funcs->get_fan_speed_percent) { 2627 ret = smu->ppt_funcs->get_fan_speed_percent(smu, &percent); 2628 *speed = percent * smu->fan_max_rpm / 100; 2629 } 2630 2631 mutex_unlock(&smu->mutex); 2632 2633 return ret; 2634 } 2635 2636 static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk) 2637 { 2638 struct smu_context *smu = handle; 2639 int ret = 0; 2640 2641 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2642 return -EOPNOTSUPP; 2643 2644 mutex_lock(&smu->mutex); 2645 2646 ret = smu_set_min_dcef_deep_sleep(smu, clk); 2647 2648 mutex_unlock(&smu->mutex); 2649 2650 return ret; 2651 } 2652 2653 static int smu_get_clock_by_type_with_latency(void *handle, 2654 enum amd_pp_clock_type type, 2655 struct pp_clock_levels_with_latency *clocks) 2656 { 2657 struct smu_context *smu = handle; 2658 enum smu_clk_type clk_type; 2659 int ret = 0; 2660 2661 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2662 return -EOPNOTSUPP; 2663 2664 mutex_lock(&smu->mutex); 2665 2666 if (smu->ppt_funcs->get_clock_by_type_with_latency) { 2667 switch (type) { 2668 case amd_pp_sys_clock: 2669 clk_type = SMU_GFXCLK; 2670 break; 2671 case amd_pp_mem_clock: 2672 clk_type = SMU_MCLK; 2673 break; 2674 case amd_pp_dcef_clock: 2675 clk_type = SMU_DCEFCLK; 2676 break; 2677 case amd_pp_disp_clock: 2678 clk_type = SMU_DISPCLK; 2679 break; 2680 default: 2681 dev_err(smu->adev->dev, "Invalid clock type!\n"); 2682 mutex_unlock(&smu->mutex); 2683 return -EINVAL; 2684 } 2685 2686 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks); 2687 } 2688 2689 mutex_unlock(&smu->mutex); 2690 2691 return ret; 2692 } 2693 2694 static int smu_display_clock_voltage_request(void *handle, 2695 struct pp_display_clock_request *clock_req) 2696 { 2697 struct smu_context *smu = handle; 2698 int ret = 0; 2699 2700 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2701 return -EOPNOTSUPP; 2702 2703 mutex_lock(&smu->mutex); 2704 2705 if (smu->ppt_funcs->display_clock_voltage_request) 2706 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req); 2707 2708 mutex_unlock(&smu->mutex); 2709 2710 return ret; 2711 } 2712 2713 2714 static int smu_display_disable_memory_clock_switch(void *handle, 2715 bool disable_memory_clock_switch) 2716 { 2717 struct smu_context *smu = handle; 2718 int ret = -EINVAL; 2719 2720 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2721 return -EOPNOTSUPP; 2722 2723 mutex_lock(&smu->mutex); 2724 2725 if (smu->ppt_funcs->display_disable_memory_clock_switch) 2726 ret = 
smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch); 2727 2728 mutex_unlock(&smu->mutex); 2729 2730 return ret; 2731 } 2732 2733 static int smu_set_xgmi_pstate(void *handle, 2734 uint32_t pstate) 2735 { 2736 struct smu_context *smu = handle; 2737 int ret = 0; 2738 2739 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2740 return -EOPNOTSUPP; 2741 2742 mutex_lock(&smu->mutex); 2743 2744 if (smu->ppt_funcs->set_xgmi_pstate) 2745 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate); 2746 2747 mutex_unlock(&smu->mutex); 2748 2749 if(ret) 2750 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n"); 2751 2752 return ret; 2753 } 2754 2755 static int smu_get_baco_capability(void *handle, bool *cap) 2756 { 2757 struct smu_context *smu = handle; 2758 int ret = 0; 2759 2760 *cap = false; 2761 2762 if (!smu->pm_enabled) 2763 return 0; 2764 2765 mutex_lock(&smu->mutex); 2766 2767 if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support) 2768 *cap = smu->ppt_funcs->baco_is_support(smu); 2769 2770 mutex_unlock(&smu->mutex); 2771 2772 return ret; 2773 } 2774 2775 static int smu_baco_set_state(void *handle, int state) 2776 { 2777 struct smu_context *smu = handle; 2778 int ret = 0; 2779 2780 if (!smu->pm_enabled) 2781 return -EOPNOTSUPP; 2782 2783 if (state == 0) { 2784 mutex_lock(&smu->mutex); 2785 2786 if (smu->ppt_funcs->baco_exit) 2787 ret = smu->ppt_funcs->baco_exit(smu); 2788 2789 mutex_unlock(&smu->mutex); 2790 } else if (state == 1) { 2791 mutex_lock(&smu->mutex); 2792 2793 if (smu->ppt_funcs->baco_enter) 2794 ret = smu->ppt_funcs->baco_enter(smu); 2795 2796 mutex_unlock(&smu->mutex); 2797 2798 } else { 2799 return -EINVAL; 2800 } 2801 2802 if (ret) 2803 dev_err(smu->adev->dev, "Failed to %s BACO state!\n", 2804 (state)?"enter":"exit"); 2805 2806 return ret; 2807 } 2808 2809 bool smu_mode1_reset_is_support(struct smu_context *smu) 2810 { 2811 bool ret = false; 2812 2813 if (!smu->pm_enabled) 2814 return false; 2815 2816 mutex_lock(&smu->mutex); 2817 2818 if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support) 2819 ret = smu->ppt_funcs->mode1_reset_is_support(smu); 2820 2821 mutex_unlock(&smu->mutex); 2822 2823 return ret; 2824 } 2825 2826 bool smu_mode2_reset_is_support(struct smu_context *smu) 2827 { 2828 bool ret = false; 2829 2830 if (!smu->pm_enabled) 2831 return false; 2832 2833 mutex_lock(&smu->mutex); 2834 2835 if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support) 2836 ret = smu->ppt_funcs->mode2_reset_is_support(smu); 2837 2838 mutex_unlock(&smu->mutex); 2839 2840 return ret; 2841 } 2842 2843 int smu_mode1_reset(struct smu_context *smu) 2844 { 2845 int ret = 0; 2846 2847 if (!smu->pm_enabled) 2848 return -EOPNOTSUPP; 2849 2850 mutex_lock(&smu->mutex); 2851 2852 if (smu->ppt_funcs->mode1_reset) 2853 ret = smu->ppt_funcs->mode1_reset(smu); 2854 2855 mutex_unlock(&smu->mutex); 2856 2857 return ret; 2858 } 2859 2860 static int smu_mode2_reset(void *handle) 2861 { 2862 struct smu_context *smu = handle; 2863 int ret = 0; 2864 2865 if (!smu->pm_enabled) 2866 return -EOPNOTSUPP; 2867 2868 mutex_lock(&smu->mutex); 2869 2870 if (smu->ppt_funcs->mode2_reset) 2871 ret = smu->ppt_funcs->mode2_reset(smu); 2872 2873 mutex_unlock(&smu->mutex); 2874 2875 if (ret) 2876 dev_err(smu->adev->dev, "Mode2 reset failed!\n"); 2877 2878 return ret; 2879 } 2880 2881 static int smu_get_max_sustainable_clocks_by_dc(void *handle, 2882 struct pp_smu_nv_clock_table *max_clocks) 2883 { 2884 struct smu_context *smu = handle; 2885 int ret = 0; 2886 2887 if (!smu->pm_enabled || 
!smu->adev->pm.dpm_enabled) 2888 return -EOPNOTSUPP; 2889 2890 mutex_lock(&smu->mutex); 2891 2892 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc) 2893 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks); 2894 2895 mutex_unlock(&smu->mutex); 2896 2897 return ret; 2898 } 2899 2900 static int smu_get_uclk_dpm_states(void *handle, 2901 unsigned int *clock_values_in_khz, 2902 unsigned int *num_states) 2903 { 2904 struct smu_context *smu = handle; 2905 int ret = 0; 2906 2907 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2908 return -EOPNOTSUPP; 2909 2910 mutex_lock(&smu->mutex); 2911 2912 if (smu->ppt_funcs->get_uclk_dpm_states) 2913 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states); 2914 2915 mutex_unlock(&smu->mutex); 2916 2917 return ret; 2918 } 2919 2920 static enum amd_pm_state_type smu_get_current_power_state(void *handle) 2921 { 2922 struct smu_context *smu = handle; 2923 enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT; 2924 2925 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2926 return -EOPNOTSUPP; 2927 2928 mutex_lock(&smu->mutex); 2929 2930 if (smu->ppt_funcs->get_current_power_state) 2931 pm_state = smu->ppt_funcs->get_current_power_state(smu); 2932 2933 mutex_unlock(&smu->mutex); 2934 2935 return pm_state; 2936 } 2937 2938 static int smu_get_dpm_clock_table(void *handle, 2939 struct dpm_clocks *clock_table) 2940 { 2941 struct smu_context *smu = handle; 2942 int ret = 0; 2943 2944 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2945 return -EOPNOTSUPP; 2946 2947 mutex_lock(&smu->mutex); 2948 2949 if (smu->ppt_funcs->get_dpm_clock_table) 2950 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table); 2951 2952 mutex_unlock(&smu->mutex); 2953 2954 return ret; 2955 } 2956 2957 static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table) 2958 { 2959 struct smu_context *smu = handle; 2960 ssize_t size; 2961 2962 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2963 return -EOPNOTSUPP; 2964 2965 if (!smu->ppt_funcs->get_gpu_metrics) 2966 return -EOPNOTSUPP; 2967 2968 mutex_lock(&smu->mutex); 2969 2970 size = smu->ppt_funcs->get_gpu_metrics(smu, table); 2971 2972 mutex_unlock(&smu->mutex); 2973 2974 return size; 2975 } 2976 2977 static int smu_enable_mgpu_fan_boost(void *handle) 2978 { 2979 struct smu_context *smu = handle; 2980 int ret = 0; 2981 2982 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2983 return -EOPNOTSUPP; 2984 2985 mutex_lock(&smu->mutex); 2986 2987 if (smu->ppt_funcs->enable_mgpu_fan_boost) 2988 ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu); 2989 2990 mutex_unlock(&smu->mutex); 2991 2992 return ret; 2993 } 2994 2995 static int smu_gfx_state_change_set(void *handle, 2996 uint32_t state) 2997 { 2998 struct smu_context *smu = handle; 2999 int ret = 0; 3000 3001 mutex_lock(&smu->mutex); 3002 if (smu->ppt_funcs->gfx_state_change_set) 3003 ret = smu->ppt_funcs->gfx_state_change_set(smu, state); 3004 mutex_unlock(&smu->mutex); 3005 3006 return ret; 3007 } 3008 3009 int smu_set_light_sbr(struct smu_context *smu, bool enable) 3010 { 3011 int ret = 0; 3012 3013 mutex_lock(&smu->mutex); 3014 if (smu->ppt_funcs->set_light_sbr) 3015 ret = smu->ppt_funcs->set_light_sbr(smu, enable); 3016 mutex_unlock(&smu->mutex); 3017 3018 return ret; 3019 } 3020 3021 static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size) 3022 { 3023 struct smu_context *smu = handle; 3024 struct smu_table_context *smu_table = &smu->smu_table; 3025 struct smu_table *memory_pool = 
&smu_table->memory_pool; 3026 3027 if (!addr || !size) 3028 return -EINVAL; 3029 3030 *addr = NULL; 3031 *size = 0; 3032 mutex_lock(&smu->mutex); 3033 if (memory_pool->bo) { 3034 *addr = memory_pool->cpu_addr; 3035 *size = memory_pool->size; 3036 } 3037 mutex_unlock(&smu->mutex); 3038 3039 return 0; 3040 } 3041 3042 static const struct amd_pm_funcs swsmu_pm_funcs = { 3043 /* export for sysfs */ 3044 .set_fan_control_mode = smu_pp_set_fan_control_mode, 3045 .get_fan_control_mode = smu_get_fan_control_mode, 3046 .set_fan_speed_percent = smu_set_fan_speed_percent, 3047 .get_fan_speed_percent = smu_get_fan_speed_percent, 3048 .force_clock_level = smu_force_ppclk_levels, 3049 .print_clock_levels = smu_print_ppclk_levels, 3050 .force_performance_level = smu_force_performance_level, 3051 .read_sensor = smu_read_sensor, 3052 .get_performance_level = smu_get_performance_level, 3053 .get_current_power_state = smu_get_current_power_state, 3054 .get_fan_speed_rpm = smu_get_fan_speed_rpm, 3055 .set_fan_speed_rpm = smu_set_fan_speed_rpm, 3056 .get_pp_num_states = smu_get_power_num_states, 3057 .get_pp_table = smu_sys_get_pp_table, 3058 .set_pp_table = smu_sys_set_pp_table, 3059 .switch_power_profile = smu_switch_power_profile, 3060 /* export to amdgpu */ 3061 .dispatch_tasks = smu_handle_dpm_task, 3062 .load_firmware = smu_load_microcode, 3063 .set_powergating_by_smu = smu_dpm_set_power_gate, 3064 .set_power_limit = smu_set_power_limit, 3065 .get_power_limit = smu_get_power_limit, 3066 .get_power_profile_mode = smu_get_power_profile_mode, 3067 .set_power_profile_mode = smu_set_power_profile_mode, 3068 .odn_edit_dpm_table = smu_od_edit_dpm_table, 3069 .set_mp1_state = smu_set_mp1_state, 3070 .gfx_state_change_set = smu_gfx_state_change_set, 3071 /* export to DC */ 3072 .get_sclk = smu_get_sclk, 3073 .get_mclk = smu_get_mclk, 3074 .display_configuration_change = smu_display_configuration_change, 3075 .get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency, 3076 .display_clock_voltage_request = smu_display_clock_voltage_request, 3077 .enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost, 3078 .set_active_display_count = smu_set_display_count, 3079 .set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk, 3080 .get_asic_baco_capability = smu_get_baco_capability, 3081 .set_asic_baco_state = smu_baco_set_state, 3082 .get_ppfeature_status = smu_sys_get_pp_feature_mask, 3083 .set_ppfeature_status = smu_sys_set_pp_feature_mask, 3084 .asic_reset_mode_2 = smu_mode2_reset, 3085 .set_df_cstate = smu_set_df_cstate, 3086 .set_xgmi_pstate = smu_set_xgmi_pstate, 3087 .get_gpu_metrics = smu_sys_get_gpu_metrics, 3088 .set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges, 3089 .display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch, 3090 .get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc, 3091 .get_uclk_dpm_states = smu_get_uclk_dpm_states, 3092 .get_dpm_clock_table = smu_get_dpm_clock_table, 3093 .get_smu_prv_buf_details = smu_get_prv_buffer_details, 3094 }; 3095 3096 int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event, 3097 uint64_t event_arg) 3098 { 3099 int ret = -EINVAL; 3100 struct smu_context *smu = &adev->smu; 3101 3102 if (smu->ppt_funcs->wait_for_event) { 3103 mutex_lock(&smu->mutex); 3104 ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg); 3105 mutex_unlock(&smu->mutex); 3106 } 3107 3108 return ret; 3109 } 3110
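
/*
 * Illustrative usage sketch (editorial addition, not driver logic): how the
 * power-limit entry points above fit together. The 'handle' is assumed to be
 * the struct smu_context pointer registered with the amd_pm_funcs table; the
 * exact caller-side wiring is an assumption of this sketch.
 *
 *	uint32_t limit = 0;
 *	int ret;
 *
 *	ret = smu_get_power_limit(handle, &limit,
 *				  PP_PWR_LIMIT_CURRENT,
 *				  PP_PWR_TYPE_SUSTAINED);
 *	if (!ret)
 *		ret = smu_set_power_limit(handle, limit);
 *
 * smu_set_power_limit() reads the PPT limit type from the top 8 bits of
 * 'limit' (0 selects the default PPT limit). For the default type, a value
 * of 0 re-applies the current limit, and values above smu->max_power_limit
 * are rejected with -EINVAL.
 */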
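
/*
 * Illustrative fan-control sequence (editorial sketch, assuming the caller
 * already holds a valid smu handle): a manual fan speed only sticks while the
 * controller is in AMD_FAN_CTRL_MANUAL mode, percentages are clamped to 100,
 * and RPM requests are converted to a percentage of smu->fan_max_rpm before
 * being handed to the ppt backend.
 *
 *	smu_pp_set_fan_control_mode(handle, AMD_FAN_CTRL_MANUAL);
 *	smu_set_fan_speed_percent(handle, 45);
 *	smu_set_fan_speed_rpm(handle, 1500);
 *
 * The RPM call above is translated to (1500 * 100 / smu->fan_max_rpm)
 * percent. Leaving manual mode clears the cached
 * user_dpm_profile.fan_speed_percent (when not restoring a profile), so a
 * later profile restore does not pin the fan.
 */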