1 /* 2 * Copyright 2019 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 */ 22 23 #define SWSMU_CODE_LAYER_L1 24 25 #include <linux/firmware.h> 26 #include <linux/pci.h> 27 28 #include "amdgpu.h" 29 #include "amdgpu_smu.h" 30 #include "smu_internal.h" 31 #include "atom.h" 32 #include "arcturus_ppt.h" 33 #include "navi10_ppt.h" 34 #include "sienna_cichlid_ppt.h" 35 #include "renoir_ppt.h" 36 #include "vangogh_ppt.h" 37 #include "aldebaran_ppt.h" 38 #include "yellow_carp_ppt.h" 39 #include "cyan_skillfish_ppt.h" 40 #include "amd_pcie.h" 41 42 /* 43 * DO NOT use these for err/warn/info/debug messages. 44 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 45 * They are more MGPU friendly. 46 */ 47 #undef pr_err 48 #undef pr_warn 49 #undef pr_info 50 #undef pr_debug 51 52 static const struct amd_pm_funcs swsmu_pm_funcs; 53 static int smu_force_smuclk_levels(struct smu_context *smu, 54 enum smu_clk_type clk_type, 55 uint32_t mask); 56 static int smu_handle_task(struct smu_context *smu, 57 enum amd_dpm_forced_level level, 58 enum amd_pp_task task_id, 59 bool lock_needed); 60 static int smu_reset(struct smu_context *smu); 61 static int smu_set_fan_speed_pwm(void *handle, u32 speed); 62 static int smu_set_fan_control_mode(struct smu_context *smu, int value); 63 static int smu_set_power_limit(void *handle, uint32_t limit); 64 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed); 65 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled); 66 67 static int smu_sys_get_pp_feature_mask(void *handle, 68 char *buf) 69 { 70 struct smu_context *smu = handle; 71 int size = 0; 72 73 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 74 return -EOPNOTSUPP; 75 76 mutex_lock(&smu->mutex); 77 78 size = smu_get_pp_feature_mask(smu, buf); 79 80 mutex_unlock(&smu->mutex); 81 82 return size; 83 } 84 85 static int smu_sys_set_pp_feature_mask(void *handle, 86 uint64_t new_mask) 87 { 88 struct smu_context *smu = handle; 89 int ret = 0; 90 91 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 92 return -EOPNOTSUPP; 93 94 mutex_lock(&smu->mutex); 95 96 ret = smu_set_pp_feature_mask(smu, new_mask); 97 98 mutex_unlock(&smu->mutex); 99 100 return ret; 101 } 102 103 int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value) 104 { 105 int ret = 0; 106 struct smu_context *smu = &adev->smu; 107 108 if (is_support_sw_smu(adev) && smu->ppt_funcs->get_gfx_off_status) 109 *value = smu_get_gfx_off_status(smu); 110 else 111 ret = -EINVAL; 112 113 return ret; 114 } 115 116 int smu_set_soft_freq_range(struct smu_context *smu, 117 enum smu_clk_type clk_type, 118 uint32_t min, 119 uint32_t max) 120 { 121 int ret = 0; 122 123 mutex_lock(&smu->mutex); 124 125 if (smu->ppt_funcs->set_soft_freq_limited_range) 126 ret = smu->ppt_funcs->set_soft_freq_limited_range(smu, 127 clk_type, 128 min, 129 max); 130 131 mutex_unlock(&smu->mutex); 132 133 return ret; 134 } 135 136 int smu_get_dpm_freq_range(struct smu_context *smu, 137 enum smu_clk_type clk_type, 138 uint32_t *min, 139 uint32_t *max) 140 { 141 int ret = 0; 142 143 if (!min && !max) 144 return -EINVAL; 145 146 mutex_lock(&smu->mutex); 147 148 if (smu->ppt_funcs->get_dpm_ultimate_freq) 149 ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu, 150 clk_type, 151 min, 152 max); 153 154 mutex_unlock(&smu->mutex); 155 156 return ret; 157 } 158 159 static u32 smu_get_mclk(void *handle, bool low) 160 { 161 struct smu_context *smu = handle; 162 uint32_t clk_freq; 163 int ret = 0; 164 165 ret = smu_get_dpm_freq_range(smu, SMU_UCLK, 166 low ? &clk_freq : NULL, 167 !low ? &clk_freq : NULL); 168 if (ret) 169 return 0; 170 return clk_freq * 100; 171 } 172 173 static u32 smu_get_sclk(void *handle, bool low) 174 { 175 struct smu_context *smu = handle; 176 uint32_t clk_freq; 177 int ret = 0; 178 179 ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, 180 low ? &clk_freq : NULL, 181 !low ? &clk_freq : NULL); 182 if (ret) 183 return 0; 184 return clk_freq * 100; 185 } 186 187 static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu, 188 bool enable) 189 { 190 struct smu_power_context *smu_power = &smu->smu_power; 191 struct smu_power_gate *power_gate = &smu_power->power_gate; 192 int ret = 0; 193 194 if (!smu->ppt_funcs->dpm_set_vcn_enable) 195 return 0; 196 197 if (atomic_read(&power_gate->vcn_gated) ^ enable) 198 return 0; 199 200 ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable); 201 if (!ret) 202 atomic_set(&power_gate->vcn_gated, !enable); 203 204 return ret; 205 } 206 207 static int smu_dpm_set_vcn_enable(struct smu_context *smu, 208 bool enable) 209 { 210 struct smu_power_context *smu_power = &smu->smu_power; 211 struct smu_power_gate *power_gate = &smu_power->power_gate; 212 int ret = 0; 213 214 mutex_lock(&power_gate->vcn_gate_lock); 215 216 ret = smu_dpm_set_vcn_enable_locked(smu, enable); 217 218 mutex_unlock(&power_gate->vcn_gate_lock); 219 220 return ret; 221 } 222 223 static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu, 224 bool enable) 225 { 226 struct smu_power_context *smu_power = &smu->smu_power; 227 struct smu_power_gate *power_gate = &smu_power->power_gate; 228 int ret = 0; 229 230 if (!smu->ppt_funcs->dpm_set_jpeg_enable) 231 return 0; 232 233 if (atomic_read(&power_gate->jpeg_gated) ^ enable) 234 return 0; 235 236 ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable); 237 if (!ret) 238 atomic_set(&power_gate->jpeg_gated, !enable); 239 240 return ret; 241 } 242 243 static int smu_dpm_set_jpeg_enable(struct smu_context *smu, 244 bool enable) 245 { 246 struct smu_power_context *smu_power = &smu->smu_power; 247 struct smu_power_gate *power_gate = &smu_power->power_gate; 248 int ret = 0; 249 250 mutex_lock(&power_gate->jpeg_gate_lock); 251 252 ret = smu_dpm_set_jpeg_enable_locked(smu, enable); 253 254 mutex_unlock(&power_gate->jpeg_gate_lock); 255 256 return ret; 257 } 258 259 /** 260 * smu_dpm_set_power_gate - power gate/ungate the specific IP block 261 * 262 * @handle: smu_context pointer 263 * @block_type: the IP block to power gate/ungate 264 * @gate: to power gate if true, ungate otherwise 265 * 266 * This API uses no smu->mutex lock protection due to: 267 * 1. It is either called by other IP block(gfx/sdma/vcn/uvd/vce). 268 * This is guarded to be race condition free by the caller. 269 * 2. Or get called on user setting request of power_dpm_force_performance_level. 270 * Under this case, the smu->mutex lock protection is already enforced on 271 * the parent API smu_force_performance_level of the call path. 272 */ 273 static int smu_dpm_set_power_gate(void *handle, 274 uint32_t block_type, 275 bool gate) 276 { 277 struct smu_context *smu = handle; 278 int ret = 0; 279 280 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 281 return -EOPNOTSUPP; 282 283 switch (block_type) { 284 /* 285 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses 286 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept. 287 */ 288 case AMD_IP_BLOCK_TYPE_UVD: 289 case AMD_IP_BLOCK_TYPE_VCN: 290 ret = smu_dpm_set_vcn_enable(smu, !gate); 291 if (ret) 292 dev_err(smu->adev->dev, "Failed to power %s VCN!\n", 293 gate ? "gate" : "ungate"); 294 break; 295 case AMD_IP_BLOCK_TYPE_GFX: 296 ret = smu_gfx_off_control(smu, gate); 297 if (ret) 298 dev_err(smu->adev->dev, "Failed to %s gfxoff!\n", 299 gate ? "enable" : "disable"); 300 break; 301 case AMD_IP_BLOCK_TYPE_SDMA: 302 ret = smu_powergate_sdma(smu, gate); 303 if (ret) 304 dev_err(smu->adev->dev, "Failed to power %s SDMA!\n", 305 gate ? "gate" : "ungate"); 306 break; 307 case AMD_IP_BLOCK_TYPE_JPEG: 308 ret = smu_dpm_set_jpeg_enable(smu, !gate); 309 if (ret) 310 dev_err(smu->adev->dev, "Failed to power %s JPEG!\n", 311 gate ? "gate" : "ungate"); 312 break; 313 default: 314 dev_err(smu->adev->dev, "Unsupported block type!\n"); 315 return -EINVAL; 316 } 317 318 return ret; 319 } 320 321 /** 322 * smu_set_user_clk_dependencies - set user profile clock dependencies 323 * 324 * @smu: smu_context pointer 325 * @clk: enum smu_clk_type type 326 * 327 * Enable/Disable the clock dependency for the @clk type. 328 */ 329 static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk) 330 { 331 if (smu->adev->in_suspend) 332 return; 333 334 if (clk == SMU_MCLK) { 335 smu->user_dpm_profile.clk_dependency = 0; 336 smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK); 337 } else if (clk == SMU_FCLK) { 338 /* MCLK takes precedence over FCLK */ 339 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK))) 340 return; 341 342 smu->user_dpm_profile.clk_dependency = 0; 343 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK); 344 } else if (clk == SMU_SOCCLK) { 345 /* MCLK takes precedence over SOCCLK */ 346 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK))) 347 return; 348 349 smu->user_dpm_profile.clk_dependency = 0; 350 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK); 351 } else 352 /* Add clk dependencies here, if any */ 353 return; 354 } 355 356 /** 357 * smu_restore_dpm_user_profile - reinstate user dpm profile 358 * 359 * @smu: smu_context pointer 360 * 361 * Restore the saved user power configurations include power limit, 362 * clock frequencies, fan control mode and fan speed. 363 */ 364 static void smu_restore_dpm_user_profile(struct smu_context *smu) 365 { 366 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 367 int ret = 0; 368 369 if (!smu->adev->in_suspend) 370 return; 371 372 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 373 return; 374 375 /* Enable restore flag */ 376 smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE; 377 378 /* set the user dpm power limit */ 379 if (smu->user_dpm_profile.power_limit) { 380 ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit); 381 if (ret) 382 dev_err(smu->adev->dev, "Failed to set power limit value\n"); 383 } 384 385 /* set the user dpm clock configurations */ 386 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 387 enum smu_clk_type clk_type; 388 389 for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) { 390 /* 391 * Iterate over smu clk type and force the saved user clk 392 * configs, skip if clock dependency is enabled 393 */ 394 if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) && 395 smu->user_dpm_profile.clk_mask[clk_type]) { 396 ret = smu_force_smuclk_levels(smu, clk_type, 397 smu->user_dpm_profile.clk_mask[clk_type]); 398 if (ret) 399 dev_err(smu->adev->dev, 400 "Failed to set clock type = %d\n", clk_type); 401 } 402 } 403 } 404 405 /* set the user dpm fan configurations */ 406 if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL || 407 smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) { 408 ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode); 409 if (ret) { 410 smu->user_dpm_profile.fan_speed_pwm = 0; 411 smu->user_dpm_profile.fan_speed_rpm = 0; 412 smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO; 413 dev_err(smu->adev->dev, "Failed to set manual fan control mode\n"); 414 } 415 416 if (smu->user_dpm_profile.fan_speed_pwm) { 417 ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm); 418 if (ret) 419 dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n"); 420 } 421 422 if (smu->user_dpm_profile.fan_speed_rpm) { 423 ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm); 424 if (ret) 425 dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n"); 426 } 427 } 428 429 /* Restore user customized OD settings */ 430 if (smu->user_dpm_profile.user_od) { 431 if (smu->ppt_funcs->restore_user_od_settings) { 432 ret = smu->ppt_funcs->restore_user_od_settings(smu); 433 if (ret) 434 dev_err(smu->adev->dev, "Failed to upload customized OD settings\n"); 435 } 436 } 437 438 /* Disable restore flag */ 439 smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE; 440 } 441 442 static int smu_get_power_num_states(void *handle, 443 struct pp_states_info *state_info) 444 { 445 if (!state_info) 446 return -EINVAL; 447 448 /* not support power state */ 449 memset(state_info, 0, sizeof(struct pp_states_info)); 450 state_info->nums = 1; 451 state_info->states[0] = POWER_STATE_TYPE_DEFAULT; 452 453 return 0; 454 } 455 456 bool is_support_sw_smu(struct amdgpu_device *adev) 457 { 458 if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0)) 459 return true; 460 461 return false; 462 } 463 464 bool is_support_cclk_dpm(struct amdgpu_device *adev) 465 { 466 struct smu_context *smu = &adev->smu; 467 468 if (!is_support_sw_smu(adev)) 469 return false; 470 471 if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT)) 472 return false; 473 474 return true; 475 } 476 477 478 static int smu_sys_get_pp_table(void *handle, 479 char **table) 480 { 481 struct smu_context *smu = handle; 482 struct smu_table_context *smu_table = &smu->smu_table; 483 uint32_t powerplay_table_size; 484 485 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 486 return -EOPNOTSUPP; 487 488 if (!smu_table->power_play_table && !smu_table->hardcode_pptable) 489 return -EINVAL; 490 491 mutex_lock(&smu->mutex); 492 493 if (smu_table->hardcode_pptable) 494 *table = smu_table->hardcode_pptable; 495 else 496 *table = smu_table->power_play_table; 497 498 powerplay_table_size = smu_table->power_play_table_size; 499 500 mutex_unlock(&smu->mutex); 501 502 return powerplay_table_size; 503 } 504 505 static int smu_sys_set_pp_table(void *handle, 506 const char *buf, 507 size_t size) 508 { 509 struct smu_context *smu = handle; 510 struct smu_table_context *smu_table = &smu->smu_table; 511 ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf; 512 int ret = 0; 513 514 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 515 return -EOPNOTSUPP; 516 517 if (header->usStructureSize != size) { 518 dev_err(smu->adev->dev, "pp table size not matched !\n"); 519 return -EIO; 520 } 521 522 mutex_lock(&smu->mutex); 523 if (!smu_table->hardcode_pptable) 524 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL); 525 if (!smu_table->hardcode_pptable) { 526 ret = -ENOMEM; 527 goto failed; 528 } 529 530 memcpy(smu_table->hardcode_pptable, buf, size); 531 smu_table->power_play_table = smu_table->hardcode_pptable; 532 smu_table->power_play_table_size = size; 533 534 /* 535 * Special hw_fini action(for Navi1x, the DPMs disablement will be 536 * skipped) may be needed for custom pptable uploading. 537 */ 538 smu->uploading_custom_pp_table = true; 539 540 ret = smu_reset(smu); 541 if (ret) 542 dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret); 543 544 smu->uploading_custom_pp_table = false; 545 546 failed: 547 mutex_unlock(&smu->mutex); 548 return ret; 549 } 550 551 static int smu_get_driver_allowed_feature_mask(struct smu_context *smu) 552 { 553 struct smu_feature *feature = &smu->smu_feature; 554 int ret = 0; 555 uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32]; 556 557 bitmap_zero(feature->allowed, SMU_FEATURE_MAX); 558 559 ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask, 560 SMU_FEATURE_MAX/32); 561 if (ret) 562 return ret; 563 564 bitmap_or(feature->allowed, feature->allowed, 565 (unsigned long *)allowed_feature_mask, 566 feature->feature_num); 567 568 return ret; 569 } 570 571 static int smu_set_funcs(struct amdgpu_device *adev) 572 { 573 struct smu_context *smu = &adev->smu; 574 575 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) 576 smu->od_enabled = true; 577 578 switch (adev->ip_versions[MP1_HWIP][0]) { 579 case IP_VERSION(11, 0, 0): 580 case IP_VERSION(11, 0, 5): 581 case IP_VERSION(11, 0, 9): 582 navi10_set_ppt_funcs(smu); 583 break; 584 case IP_VERSION(11, 0, 7): 585 case IP_VERSION(11, 0, 11): 586 case IP_VERSION(11, 0, 12): 587 case IP_VERSION(11, 0, 13): 588 sienna_cichlid_set_ppt_funcs(smu); 589 break; 590 case IP_VERSION(12, 0, 0): 591 case IP_VERSION(12, 0, 1): 592 renoir_set_ppt_funcs(smu); 593 break; 594 case IP_VERSION(11, 5, 0): 595 vangogh_set_ppt_funcs(smu); 596 break; 597 case IP_VERSION(13, 0, 1): 598 case IP_VERSION(13, 0, 3): 599 yellow_carp_set_ppt_funcs(smu); 600 break; 601 case IP_VERSION(11, 0, 8): 602 cyan_skillfish_set_ppt_funcs(smu); 603 break; 604 case IP_VERSION(11, 0, 2): 605 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 606 arcturus_set_ppt_funcs(smu); 607 /* OD is not supported on Arcturus */ 608 smu->od_enabled =false; 609 break; 610 case IP_VERSION(13, 0, 2): 611 aldebaran_set_ppt_funcs(smu); 612 /* Enable pp_od_clk_voltage node */ 613 smu->od_enabled = true; 614 break; 615 default: 616 return -EINVAL; 617 } 618 619 return 0; 620 } 621 622 static int smu_early_init(void *handle) 623 { 624 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 625 struct smu_context *smu = &adev->smu; 626 627 smu->adev = adev; 628 smu->pm_enabled = !!amdgpu_dpm; 629 smu->is_apu = false; 630 mutex_init(&smu->mutex); 631 mutex_init(&smu->smu_baco.mutex); 632 smu->smu_baco.state = SMU_BACO_STATE_EXIT; 633 smu->smu_baco.platform_support = false; 634 smu->user_dpm_profile.fan_mode = -1; 635 636 adev->powerplay.pp_handle = smu; 637 adev->powerplay.pp_funcs = &swsmu_pm_funcs; 638 639 return smu_set_funcs(adev); 640 } 641 642 static int smu_set_default_dpm_table(struct smu_context *smu) 643 { 644 struct smu_power_context *smu_power = &smu->smu_power; 645 struct smu_power_gate *power_gate = &smu_power->power_gate; 646 int vcn_gate, jpeg_gate; 647 int ret = 0; 648 649 if (!smu->ppt_funcs->set_default_dpm_table) 650 return 0; 651 652 mutex_lock(&power_gate->vcn_gate_lock); 653 mutex_lock(&power_gate->jpeg_gate_lock); 654 655 vcn_gate = atomic_read(&power_gate->vcn_gated); 656 jpeg_gate = atomic_read(&power_gate->jpeg_gated); 657 658 ret = smu_dpm_set_vcn_enable_locked(smu, true); 659 if (ret) 660 goto err0_out; 661 662 ret = smu_dpm_set_jpeg_enable_locked(smu, true); 663 if (ret) 664 goto err1_out; 665 666 ret = smu->ppt_funcs->set_default_dpm_table(smu); 667 if (ret) 668 dev_err(smu->adev->dev, 669 "Failed to setup default dpm clock tables!\n"); 670 671 smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate); 672 err1_out: 673 smu_dpm_set_vcn_enable_locked(smu, !vcn_gate); 674 err0_out: 675 mutex_unlock(&power_gate->jpeg_gate_lock); 676 mutex_unlock(&power_gate->vcn_gate_lock); 677 678 return ret; 679 } 680 681 682 static int smu_late_init(void *handle) 683 { 684 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 685 struct smu_context *smu = &adev->smu; 686 int ret = 0; 687 688 smu_set_fine_grain_gfx_freq_parameters(smu); 689 690 if (!smu->pm_enabled) 691 return 0; 692 693 ret = smu_post_init(smu); 694 if (ret) { 695 dev_err(adev->dev, "Failed to post smu init!\n"); 696 return ret; 697 } 698 699 if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) || 700 (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3))) 701 return 0; 702 703 if (!amdgpu_sriov_vf(adev) || smu->od_enabled) { 704 ret = smu_set_default_od_settings(smu); 705 if (ret) { 706 dev_err(adev->dev, "Failed to setup default OD settings!\n"); 707 return ret; 708 } 709 } 710 711 ret = smu_populate_umd_state_clk(smu); 712 if (ret) { 713 dev_err(adev->dev, "Failed to populate UMD state clocks!\n"); 714 return ret; 715 } 716 717 ret = smu_get_asic_power_limits(smu, 718 &smu->current_power_limit, 719 &smu->default_power_limit, 720 &smu->max_power_limit); 721 if (ret) { 722 dev_err(adev->dev, "Failed to get asic power limits!\n"); 723 return ret; 724 } 725 726 if (!amdgpu_sriov_vf(adev)) 727 smu_get_unique_id(smu); 728 729 smu_get_fan_parameters(smu); 730 731 smu_handle_task(&adev->smu, 732 smu->smu_dpm.dpm_level, 733 AMD_PP_TASK_COMPLETE_INIT, 734 false); 735 736 smu_restore_dpm_user_profile(smu); 737 738 return 0; 739 } 740 741 static int smu_init_fb_allocations(struct smu_context *smu) 742 { 743 struct amdgpu_device *adev = smu->adev; 744 struct smu_table_context *smu_table = &smu->smu_table; 745 struct smu_table *tables = smu_table->tables; 746 struct smu_table *driver_table = &(smu_table->driver_table); 747 uint32_t max_table_size = 0; 748 int ret, i; 749 750 /* VRAM allocation for tool table */ 751 if (tables[SMU_TABLE_PMSTATUSLOG].size) { 752 ret = amdgpu_bo_create_kernel(adev, 753 tables[SMU_TABLE_PMSTATUSLOG].size, 754 tables[SMU_TABLE_PMSTATUSLOG].align, 755 tables[SMU_TABLE_PMSTATUSLOG].domain, 756 &tables[SMU_TABLE_PMSTATUSLOG].bo, 757 &tables[SMU_TABLE_PMSTATUSLOG].mc_address, 758 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr); 759 if (ret) { 760 dev_err(adev->dev, "VRAM allocation for tool table failed!\n"); 761 return ret; 762 } 763 } 764 765 /* VRAM allocation for driver table */ 766 for (i = 0; i < SMU_TABLE_COUNT; i++) { 767 if (tables[i].size == 0) 768 continue; 769 770 if (i == SMU_TABLE_PMSTATUSLOG) 771 continue; 772 773 if (max_table_size < tables[i].size) 774 max_table_size = tables[i].size; 775 } 776 777 driver_table->size = max_table_size; 778 driver_table->align = PAGE_SIZE; 779 driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM; 780 781 ret = amdgpu_bo_create_kernel(adev, 782 driver_table->size, 783 driver_table->align, 784 driver_table->domain, 785 &driver_table->bo, 786 &driver_table->mc_address, 787 &driver_table->cpu_addr); 788 if (ret) { 789 dev_err(adev->dev, "VRAM allocation for driver table failed!\n"); 790 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address) 791 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo, 792 &tables[SMU_TABLE_PMSTATUSLOG].mc_address, 793 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr); 794 } 795 796 return ret; 797 } 798 799 static int smu_fini_fb_allocations(struct smu_context *smu) 800 { 801 struct smu_table_context *smu_table = &smu->smu_table; 802 struct smu_table *tables = smu_table->tables; 803 struct smu_table *driver_table = &(smu_table->driver_table); 804 805 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address) 806 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo, 807 &tables[SMU_TABLE_PMSTATUSLOG].mc_address, 808 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr); 809 810 amdgpu_bo_free_kernel(&driver_table->bo, 811 &driver_table->mc_address, 812 &driver_table->cpu_addr); 813 814 return 0; 815 } 816 817 /** 818 * smu_alloc_memory_pool - allocate memory pool in the system memory 819 * 820 * @smu: amdgpu_device pointer 821 * 822 * This memory pool will be used for SMC use and msg SetSystemVirtualDramAddr 823 * and DramLogSetDramAddr can notify it changed. 824 * 825 * Returns 0 on success, error on failure. 826 */ 827 static int smu_alloc_memory_pool(struct smu_context *smu) 828 { 829 struct amdgpu_device *adev = smu->adev; 830 struct smu_table_context *smu_table = &smu->smu_table; 831 struct smu_table *memory_pool = &smu_table->memory_pool; 832 uint64_t pool_size = smu->pool_size; 833 int ret = 0; 834 835 if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO) 836 return ret; 837 838 memory_pool->size = pool_size; 839 memory_pool->align = PAGE_SIZE; 840 memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT; 841 842 switch (pool_size) { 843 case SMU_MEMORY_POOL_SIZE_256_MB: 844 case SMU_MEMORY_POOL_SIZE_512_MB: 845 case SMU_MEMORY_POOL_SIZE_1_GB: 846 case SMU_MEMORY_POOL_SIZE_2_GB: 847 ret = amdgpu_bo_create_kernel(adev, 848 memory_pool->size, 849 memory_pool->align, 850 memory_pool->domain, 851 &memory_pool->bo, 852 &memory_pool->mc_address, 853 &memory_pool->cpu_addr); 854 if (ret) 855 dev_err(adev->dev, "VRAM allocation for dramlog failed!\n"); 856 break; 857 default: 858 break; 859 } 860 861 return ret; 862 } 863 864 static int smu_free_memory_pool(struct smu_context *smu) 865 { 866 struct smu_table_context *smu_table = &smu->smu_table; 867 struct smu_table *memory_pool = &smu_table->memory_pool; 868 869 if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO) 870 return 0; 871 872 amdgpu_bo_free_kernel(&memory_pool->bo, 873 &memory_pool->mc_address, 874 &memory_pool->cpu_addr); 875 876 memset(memory_pool, 0, sizeof(struct smu_table)); 877 878 return 0; 879 } 880 881 static int smu_alloc_dummy_read_table(struct smu_context *smu) 882 { 883 struct smu_table_context *smu_table = &smu->smu_table; 884 struct smu_table *dummy_read_1_table = 885 &smu_table->dummy_read_1_table; 886 struct amdgpu_device *adev = smu->adev; 887 int ret = 0; 888 889 dummy_read_1_table->size = 0x40000; 890 dummy_read_1_table->align = PAGE_SIZE; 891 dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM; 892 893 ret = amdgpu_bo_create_kernel(adev, 894 dummy_read_1_table->size, 895 dummy_read_1_table->align, 896 dummy_read_1_table->domain, 897 &dummy_read_1_table->bo, 898 &dummy_read_1_table->mc_address, 899 &dummy_read_1_table->cpu_addr); 900 if (ret) 901 dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n"); 902 903 return ret; 904 } 905 906 static void smu_free_dummy_read_table(struct smu_context *smu) 907 { 908 struct smu_table_context *smu_table = &smu->smu_table; 909 struct smu_table *dummy_read_1_table = 910 &smu_table->dummy_read_1_table; 911 912 913 amdgpu_bo_free_kernel(&dummy_read_1_table->bo, 914 &dummy_read_1_table->mc_address, 915 &dummy_read_1_table->cpu_addr); 916 917 memset(dummy_read_1_table, 0, sizeof(struct smu_table)); 918 } 919 920 static int smu_smc_table_sw_init(struct smu_context *smu) 921 { 922 int ret; 923 924 /** 925 * Create smu_table structure, and init smc tables such as 926 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, and etc. 927 */ 928 ret = smu_init_smc_tables(smu); 929 if (ret) { 930 dev_err(smu->adev->dev, "Failed to init smc tables!\n"); 931 return ret; 932 } 933 934 /** 935 * Create smu_power_context structure, and allocate smu_dpm_context and 936 * context size to fill the smu_power_context data. 937 */ 938 ret = smu_init_power(smu); 939 if (ret) { 940 dev_err(smu->adev->dev, "Failed to init smu_init_power!\n"); 941 return ret; 942 } 943 944 /* 945 * allocate vram bos to store smc table contents. 946 */ 947 ret = smu_init_fb_allocations(smu); 948 if (ret) 949 return ret; 950 951 ret = smu_alloc_memory_pool(smu); 952 if (ret) 953 return ret; 954 955 ret = smu_alloc_dummy_read_table(smu); 956 if (ret) 957 return ret; 958 959 ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c); 960 if (ret) 961 return ret; 962 963 return 0; 964 } 965 966 static int smu_smc_table_sw_fini(struct smu_context *smu) 967 { 968 int ret; 969 970 smu_i2c_fini(smu, &smu->adev->pm.smu_i2c); 971 972 smu_free_dummy_read_table(smu); 973 974 ret = smu_free_memory_pool(smu); 975 if (ret) 976 return ret; 977 978 ret = smu_fini_fb_allocations(smu); 979 if (ret) 980 return ret; 981 982 ret = smu_fini_power(smu); 983 if (ret) { 984 dev_err(smu->adev->dev, "Failed to init smu_fini_power!\n"); 985 return ret; 986 } 987 988 ret = smu_fini_smc_tables(smu); 989 if (ret) { 990 dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n"); 991 return ret; 992 } 993 994 return 0; 995 } 996 997 static void smu_throttling_logging_work_fn(struct work_struct *work) 998 { 999 struct smu_context *smu = container_of(work, struct smu_context, 1000 throttling_logging_work); 1001 1002 smu_log_thermal_throttling(smu); 1003 } 1004 1005 static void smu_interrupt_work_fn(struct work_struct *work) 1006 { 1007 struct smu_context *smu = container_of(work, struct smu_context, 1008 interrupt_work); 1009 1010 mutex_lock(&smu->mutex); 1011 1012 if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work) 1013 smu->ppt_funcs->interrupt_work(smu); 1014 1015 mutex_unlock(&smu->mutex); 1016 } 1017 1018 static int smu_sw_init(void *handle) 1019 { 1020 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1021 struct smu_context *smu = &adev->smu; 1022 int ret; 1023 1024 smu->pool_size = adev->pm.smu_prv_buffer_size; 1025 smu->smu_feature.feature_num = SMU_FEATURE_MAX; 1026 mutex_init(&smu->smu_feature.mutex); 1027 bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX); 1028 bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX); 1029 bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX); 1030 1031 mutex_init(&smu->sensor_lock); 1032 mutex_init(&smu->metrics_lock); 1033 mutex_init(&smu->message_lock); 1034 1035 INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn); 1036 INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn); 1037 atomic64_set(&smu->throttle_int_counter, 0); 1038 smu->watermarks_bitmap = 0; 1039 smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 1040 smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 1041 1042 atomic_set(&smu->smu_power.power_gate.vcn_gated, 1); 1043 atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); 1044 mutex_init(&smu->smu_power.power_gate.vcn_gate_lock); 1045 mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock); 1046 1047 smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; 1048 smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; 1049 smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; 1050 smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; 1051 smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3; 1052 smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; 1053 smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; 1054 smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; 1055 1056 smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 1057 smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; 1058 smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING; 1059 smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO; 1060 smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR; 1061 smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE; 1062 smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM; 1063 smu->display_config = &adev->pm.pm_display_cfg; 1064 1065 smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; 1066 smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; 1067 1068 ret = smu_init_microcode(smu); 1069 if (ret) { 1070 dev_err(adev->dev, "Failed to load smu firmware!\n"); 1071 return ret; 1072 } 1073 1074 ret = smu_smc_table_sw_init(smu); 1075 if (ret) { 1076 dev_err(adev->dev, "Failed to sw init smc table!\n"); 1077 return ret; 1078 } 1079 1080 ret = smu_register_irq_handler(smu); 1081 if (ret) { 1082 dev_err(adev->dev, "Failed to register smc irq handler!\n"); 1083 return ret; 1084 } 1085 1086 /* If there is no way to query fan control mode, fan control is not supported */ 1087 if (!smu->ppt_funcs->get_fan_control_mode) 1088 smu->adev->pm.no_fan = true; 1089 1090 return 0; 1091 } 1092 1093 static int smu_sw_fini(void *handle) 1094 { 1095 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1096 struct smu_context *smu = &adev->smu; 1097 int ret; 1098 1099 ret = smu_smc_table_sw_fini(smu); 1100 if (ret) { 1101 dev_err(adev->dev, "Failed to sw fini smc table!\n"); 1102 return ret; 1103 } 1104 1105 smu_fini_microcode(smu); 1106 1107 return 0; 1108 } 1109 1110 static int smu_get_thermal_temperature_range(struct smu_context *smu) 1111 { 1112 struct amdgpu_device *adev = smu->adev; 1113 struct smu_temperature_range *range = 1114 &smu->thermal_range; 1115 int ret = 0; 1116 1117 if (!smu->ppt_funcs->get_thermal_temperature_range) 1118 return 0; 1119 1120 ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range); 1121 if (ret) 1122 return ret; 1123 1124 adev->pm.dpm.thermal.min_temp = range->min; 1125 adev->pm.dpm.thermal.max_temp = range->max; 1126 adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max; 1127 adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min; 1128 adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max; 1129 adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max; 1130 adev->pm.dpm.thermal.min_mem_temp = range->mem_min; 1131 adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max; 1132 adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max; 1133 1134 return ret; 1135 } 1136 1137 static int smu_smc_hw_setup(struct smu_context *smu) 1138 { 1139 struct amdgpu_device *adev = smu->adev; 1140 uint32_t pcie_gen = 0, pcie_width = 0; 1141 int ret = 0; 1142 1143 if (adev->in_suspend && smu_is_dpm_running(smu)) { 1144 dev_info(adev->dev, "dpm has been enabled\n"); 1145 /* this is needed specifically */ 1146 switch (adev->ip_versions[MP1_HWIP][0]) { 1147 case IP_VERSION(11, 0, 7): 1148 case IP_VERSION(11, 0, 11): 1149 case IP_VERSION(11, 5, 0): 1150 case IP_VERSION(11, 0, 12): 1151 ret = smu_system_features_control(smu, true); 1152 break; 1153 default: 1154 break; 1155 } 1156 return ret; 1157 } 1158 1159 ret = smu_init_display_count(smu, 0); 1160 if (ret) { 1161 dev_info(adev->dev, "Failed to pre-set display count as 0!\n"); 1162 return ret; 1163 } 1164 1165 ret = smu_set_driver_table_location(smu); 1166 if (ret) { 1167 dev_err(adev->dev, "Failed to SetDriverDramAddr!\n"); 1168 return ret; 1169 } 1170 1171 /* 1172 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools. 1173 */ 1174 ret = smu_set_tool_table_location(smu); 1175 if (ret) { 1176 dev_err(adev->dev, "Failed to SetToolsDramAddr!\n"); 1177 return ret; 1178 } 1179 1180 /* 1181 * Use msg SetSystemVirtualDramAddr and DramLogSetDramAddr can notify 1182 * pool location. 1183 */ 1184 ret = smu_notify_memory_pool_location(smu); 1185 if (ret) { 1186 dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n"); 1187 return ret; 1188 } 1189 1190 /* smu_dump_pptable(smu); */ 1191 /* 1192 * Copy pptable bo in the vram to smc with SMU MSGs such as 1193 * SetDriverDramAddr and TransferTableDram2Smu. 1194 */ 1195 ret = smu_write_pptable(smu); 1196 if (ret) { 1197 dev_err(adev->dev, "Failed to transfer pptable to SMC!\n"); 1198 return ret; 1199 } 1200 1201 /* issue Run*Btc msg */ 1202 ret = smu_run_btc(smu); 1203 if (ret) 1204 return ret; 1205 1206 ret = smu_feature_set_allowed_mask(smu); 1207 if (ret) { 1208 dev_err(adev->dev, "Failed to set driver allowed features mask!\n"); 1209 return ret; 1210 } 1211 1212 ret = smu_system_features_control(smu, true); 1213 if (ret) { 1214 dev_err(adev->dev, "Failed to enable requested dpm features!\n"); 1215 return ret; 1216 } 1217 1218 if (!smu_is_dpm_running(smu)) 1219 dev_info(adev->dev, "dpm has been disabled\n"); 1220 1221 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) 1222 pcie_gen = 3; 1223 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) 1224 pcie_gen = 2; 1225 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) 1226 pcie_gen = 1; 1227 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1) 1228 pcie_gen = 0; 1229 1230 /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1 1231 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4 1232 * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 1233 */ 1234 if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) 1235 pcie_width = 6; 1236 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) 1237 pcie_width = 5; 1238 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) 1239 pcie_width = 4; 1240 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) 1241 pcie_width = 3; 1242 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) 1243 pcie_width = 2; 1244 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) 1245 pcie_width = 1; 1246 ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width); 1247 if (ret) { 1248 dev_err(adev->dev, "Attempt to override pcie params failed!\n"); 1249 return ret; 1250 } 1251 1252 ret = smu_get_thermal_temperature_range(smu); 1253 if (ret) { 1254 dev_err(adev->dev, "Failed to get thermal temperature ranges!\n"); 1255 return ret; 1256 } 1257 1258 ret = smu_enable_thermal_alert(smu); 1259 if (ret) { 1260 dev_err(adev->dev, "Failed to enable thermal alert!\n"); 1261 return ret; 1262 } 1263 1264 /* 1265 * Set initialized values (get from vbios) to dpm tables context such as 1266 * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each 1267 * type of clks. 1268 */ 1269 ret = smu_set_default_dpm_table(smu); 1270 if (ret) { 1271 dev_err(adev->dev, "Failed to setup default dpm clock tables!\n"); 1272 return ret; 1273 } 1274 1275 ret = smu_notify_display_change(smu); 1276 if (ret) 1277 return ret; 1278 1279 /* 1280 * Set min deep sleep dce fclk with bootup value from vbios via 1281 * SetMinDeepSleepDcefclk MSG. 1282 */ 1283 ret = smu_set_min_dcef_deep_sleep(smu, 1284 smu->smu_table.boot_values.dcefclk / 100); 1285 if (ret) 1286 return ret; 1287 1288 return ret; 1289 } 1290 1291 static int smu_start_smc_engine(struct smu_context *smu) 1292 { 1293 struct amdgpu_device *adev = smu->adev; 1294 int ret = 0; 1295 1296 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 1297 if (adev->ip_versions[MP1_HWIP][0] < IP_VERSION(11, 0, 0)) { 1298 if (smu->ppt_funcs->load_microcode) { 1299 ret = smu->ppt_funcs->load_microcode(smu); 1300 if (ret) 1301 return ret; 1302 } 1303 } 1304 } 1305 1306 if (smu->ppt_funcs->check_fw_status) { 1307 ret = smu->ppt_funcs->check_fw_status(smu); 1308 if (ret) { 1309 dev_err(adev->dev, "SMC is not ready\n"); 1310 return ret; 1311 } 1312 } 1313 1314 /* 1315 * Send msg GetDriverIfVersion to check if the return value is equal 1316 * with DRIVER_IF_VERSION of smc header. 1317 */ 1318 ret = smu_check_fw_version(smu); 1319 if (ret) 1320 return ret; 1321 1322 return ret; 1323 } 1324 1325 static int smu_hw_init(void *handle) 1326 { 1327 int ret; 1328 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1329 struct smu_context *smu = &adev->smu; 1330 1331 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) { 1332 smu->pm_enabled = false; 1333 return 0; 1334 } 1335 1336 ret = smu_start_smc_engine(smu); 1337 if (ret) { 1338 dev_err(adev->dev, "SMC engine is not correctly up!\n"); 1339 return ret; 1340 } 1341 1342 if (smu->is_apu) { 1343 smu_powergate_sdma(&adev->smu, false); 1344 smu_dpm_set_vcn_enable(smu, true); 1345 smu_dpm_set_jpeg_enable(smu, true); 1346 smu_set_gfx_cgpg(&adev->smu, true); 1347 } 1348 1349 if (!smu->pm_enabled) 1350 return 0; 1351 1352 /* get boot_values from vbios to set revision, gfxclk, and etc. */ 1353 ret = smu_get_vbios_bootup_values(smu); 1354 if (ret) { 1355 dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n"); 1356 return ret; 1357 } 1358 1359 ret = smu_setup_pptable(smu); 1360 if (ret) { 1361 dev_err(adev->dev, "Failed to setup pptable!\n"); 1362 return ret; 1363 } 1364 1365 ret = smu_get_driver_allowed_feature_mask(smu); 1366 if (ret) 1367 return ret; 1368 1369 ret = smu_smc_hw_setup(smu); 1370 if (ret) { 1371 dev_err(adev->dev, "Failed to setup smc hw!\n"); 1372 return ret; 1373 } 1374 1375 /* 1376 * Move maximum sustainable clock retrieving here considering 1377 * 1. It is not needed on resume(from S3). 1378 * 2. DAL settings come between .hw_init and .late_init of SMU. 1379 * And DAL needs to know the maximum sustainable clocks. Thus 1380 * it cannot be put in .late_init(). 1381 */ 1382 ret = smu_init_max_sustainable_clocks(smu); 1383 if (ret) { 1384 dev_err(adev->dev, "Failed to init max sustainable clocks!\n"); 1385 return ret; 1386 } 1387 1388 adev->pm.dpm_enabled = true; 1389 1390 dev_info(adev->dev, "SMU is initialized successfully!\n"); 1391 1392 return 0; 1393 } 1394 1395 static int smu_disable_dpms(struct smu_context *smu) 1396 { 1397 struct amdgpu_device *adev = smu->adev; 1398 int ret = 0; 1399 bool use_baco = !smu->is_apu && 1400 ((amdgpu_in_reset(adev) && 1401 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) || 1402 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev))); 1403 1404 /* 1405 * For custom pptable uploading, skip the DPM features 1406 * disable process on Navi1x ASICs. 1407 * - As the gfx related features are under control of 1408 * RLC on those ASICs. RLC reinitialization will be 1409 * needed to reenable them. That will cost much more 1410 * efforts. 1411 * 1412 * - SMU firmware can handle the DPM reenablement 1413 * properly. 1414 */ 1415 if (smu->uploading_custom_pp_table) { 1416 switch (adev->ip_versions[MP1_HWIP][0]) { 1417 case IP_VERSION(11, 0, 0): 1418 case IP_VERSION(11, 0, 5): 1419 case IP_VERSION(11, 0, 9): 1420 case IP_VERSION(11, 0, 7): 1421 case IP_VERSION(11, 0, 11): 1422 case IP_VERSION(11, 5, 0): 1423 case IP_VERSION(11, 0, 12): 1424 case IP_VERSION(11, 0, 13): 1425 return smu_disable_all_features_with_exception(smu, 1426 true, 1427 SMU_FEATURE_COUNT); 1428 default: 1429 break; 1430 } 1431 } 1432 1433 /* 1434 * For Sienna_Cichlid, PMFW will handle the features disablement properly 1435 * on BACO in. Driver involvement is unnecessary. 1436 */ 1437 if (use_baco) { 1438 switch (adev->ip_versions[MP1_HWIP][0]) { 1439 case IP_VERSION(11, 0, 7): 1440 case IP_VERSION(11, 0, 0): 1441 case IP_VERSION(11, 0, 5): 1442 case IP_VERSION(11, 0, 9): 1443 return smu_disable_all_features_with_exception(smu, 1444 true, 1445 SMU_FEATURE_BACO_BIT); 1446 default: 1447 break; 1448 } 1449 } 1450 1451 /* 1452 * For gpu reset, runpm and hibernation through BACO, 1453 * BACO feature has to be kept enabled. 1454 */ 1455 if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) { 1456 ret = smu_disable_all_features_with_exception(smu, 1457 false, 1458 SMU_FEATURE_BACO_BIT); 1459 if (ret) 1460 dev_err(adev->dev, "Failed to disable smu features except BACO.\n"); 1461 } else { 1462 ret = smu_system_features_control(smu, false); 1463 if (ret) 1464 dev_err(adev->dev, "Failed to disable smu features.\n"); 1465 } 1466 1467 if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0) && 1468 adev->gfx.rlc.funcs->stop) 1469 adev->gfx.rlc.funcs->stop(adev); 1470 1471 return ret; 1472 } 1473 1474 static int smu_smc_hw_cleanup(struct smu_context *smu) 1475 { 1476 struct amdgpu_device *adev = smu->adev; 1477 int ret = 0; 1478 1479 cancel_work_sync(&smu->throttling_logging_work); 1480 cancel_work_sync(&smu->interrupt_work); 1481 1482 ret = smu_disable_thermal_alert(smu); 1483 if (ret) { 1484 dev_err(adev->dev, "Fail to disable thermal alert!\n"); 1485 return ret; 1486 } 1487 1488 ret = smu_disable_dpms(smu); 1489 if (ret) { 1490 dev_err(adev->dev, "Fail to disable dpm features!\n"); 1491 return ret; 1492 } 1493 1494 return 0; 1495 } 1496 1497 static int smu_hw_fini(void *handle) 1498 { 1499 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1500 struct smu_context *smu = &adev->smu; 1501 1502 if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) 1503 return 0; 1504 1505 if (smu->is_apu) { 1506 smu_powergate_sdma(&adev->smu, true); 1507 } 1508 1509 smu_dpm_set_vcn_enable(smu, false); 1510 smu_dpm_set_jpeg_enable(smu, false); 1511 1512 adev->vcn.cur_state = AMD_PG_STATE_GATE; 1513 adev->jpeg.cur_state = AMD_PG_STATE_GATE; 1514 1515 if (!smu->pm_enabled) 1516 return 0; 1517 1518 adev->pm.dpm_enabled = false; 1519 1520 return smu_smc_hw_cleanup(smu); 1521 } 1522 1523 static int smu_reset(struct smu_context *smu) 1524 { 1525 struct amdgpu_device *adev = smu->adev; 1526 int ret; 1527 1528 amdgpu_gfx_off_ctrl(smu->adev, false); 1529 1530 ret = smu_hw_fini(adev); 1531 if (ret) 1532 return ret; 1533 1534 ret = smu_hw_init(adev); 1535 if (ret) 1536 return ret; 1537 1538 ret = smu_late_init(adev); 1539 if (ret) 1540 return ret; 1541 1542 amdgpu_gfx_off_ctrl(smu->adev, true); 1543 1544 return 0; 1545 } 1546 1547 static int smu_suspend(void *handle) 1548 { 1549 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1550 struct smu_context *smu = &adev->smu; 1551 int ret; 1552 1553 if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) 1554 return 0; 1555 1556 if (!smu->pm_enabled) 1557 return 0; 1558 1559 adev->pm.dpm_enabled = false; 1560 1561 ret = smu_smc_hw_cleanup(smu); 1562 if (ret) 1563 return ret; 1564 1565 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED); 1566 1567 /* skip CGPG when in S0ix */ 1568 if (smu->is_apu && !adev->in_s0ix) 1569 smu_set_gfx_cgpg(&adev->smu, false); 1570 1571 return 0; 1572 } 1573 1574 static int smu_resume(void *handle) 1575 { 1576 int ret; 1577 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1578 struct smu_context *smu = &adev->smu; 1579 1580 if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) 1581 return 0; 1582 1583 if (!smu->pm_enabled) 1584 return 0; 1585 1586 dev_info(adev->dev, "SMU is resuming...\n"); 1587 1588 ret = smu_start_smc_engine(smu); 1589 if (ret) { 1590 dev_err(adev->dev, "SMC engine is not correctly up!\n"); 1591 return ret; 1592 } 1593 1594 ret = smu_smc_hw_setup(smu); 1595 if (ret) { 1596 dev_err(adev->dev, "Failed to setup smc hw!\n"); 1597 return ret; 1598 } 1599 1600 if (smu->is_apu) 1601 smu_set_gfx_cgpg(&adev->smu, true); 1602 1603 smu->disable_uclk_switch = 0; 1604 1605 adev->pm.dpm_enabled = true; 1606 1607 dev_info(adev->dev, "SMU is resumed successfully!\n"); 1608 1609 return 0; 1610 } 1611 1612 static int smu_display_configuration_change(void *handle, 1613 const struct amd_pp_display_configuration *display_config) 1614 { 1615 struct smu_context *smu = handle; 1616 int index = 0; 1617 int num_of_active_display = 0; 1618 1619 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1620 return -EOPNOTSUPP; 1621 1622 if (!display_config) 1623 return -EINVAL; 1624 1625 mutex_lock(&smu->mutex); 1626 1627 smu_set_min_dcef_deep_sleep(smu, 1628 display_config->min_dcef_deep_sleep_set_clk / 100); 1629 1630 for (index = 0; index < display_config->num_path_including_non_display; index++) { 1631 if (display_config->displays[index].controller_id != 0) 1632 num_of_active_display++; 1633 } 1634 1635 mutex_unlock(&smu->mutex); 1636 1637 return 0; 1638 } 1639 1640 static int smu_set_clockgating_state(void *handle, 1641 enum amd_clockgating_state state) 1642 { 1643 return 0; 1644 } 1645 1646 static int smu_set_powergating_state(void *handle, 1647 enum amd_powergating_state state) 1648 { 1649 return 0; 1650 } 1651 1652 static int smu_enable_umd_pstate(void *handle, 1653 enum amd_dpm_forced_level *level) 1654 { 1655 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 1656 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 1657 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 1658 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 1659 1660 struct smu_context *smu = (struct smu_context*)(handle); 1661 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1662 1663 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 1664 return -EINVAL; 1665 1666 if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) { 1667 /* enter umd pstate, save current level, disable gfx cg*/ 1668 if (*level & profile_mode_mask) { 1669 smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level; 1670 smu_dpm_ctx->enable_umd_pstate = true; 1671 smu_gpo_control(smu, false); 1672 amdgpu_device_ip_set_powergating_state(smu->adev, 1673 AMD_IP_BLOCK_TYPE_GFX, 1674 AMD_PG_STATE_UNGATE); 1675 amdgpu_device_ip_set_clockgating_state(smu->adev, 1676 AMD_IP_BLOCK_TYPE_GFX, 1677 AMD_CG_STATE_UNGATE); 1678 smu_gfx_ulv_control(smu, false); 1679 smu_deep_sleep_control(smu, false); 1680 amdgpu_asic_update_umd_stable_pstate(smu->adev, true); 1681 } 1682 } else { 1683 /* exit umd pstate, restore level, enable gfx cg*/ 1684 if (!(*level & profile_mode_mask)) { 1685 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) 1686 *level = smu_dpm_ctx->saved_dpm_level; 1687 smu_dpm_ctx->enable_umd_pstate = false; 1688 amdgpu_asic_update_umd_stable_pstate(smu->adev, false); 1689 smu_deep_sleep_control(smu, true); 1690 smu_gfx_ulv_control(smu, true); 1691 amdgpu_device_ip_set_clockgating_state(smu->adev, 1692 AMD_IP_BLOCK_TYPE_GFX, 1693 AMD_CG_STATE_GATE); 1694 amdgpu_device_ip_set_powergating_state(smu->adev, 1695 AMD_IP_BLOCK_TYPE_GFX, 1696 AMD_PG_STATE_GATE); 1697 smu_gpo_control(smu, true); 1698 } 1699 } 1700 1701 return 0; 1702 } 1703 1704 static int smu_bump_power_profile_mode(struct smu_context *smu, 1705 long *param, 1706 uint32_t param_size) 1707 { 1708 int ret = 0; 1709 1710 if (smu->ppt_funcs->set_power_profile_mode) 1711 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size); 1712 1713 return ret; 1714 } 1715 1716 static int smu_adjust_power_state_dynamic(struct smu_context *smu, 1717 enum amd_dpm_forced_level level, 1718 bool skip_display_settings) 1719 { 1720 int ret = 0; 1721 int index = 0; 1722 long workload; 1723 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1724 1725 if (!skip_display_settings) { 1726 ret = smu_display_config_changed(smu); 1727 if (ret) { 1728 dev_err(smu->adev->dev, "Failed to change display config!"); 1729 return ret; 1730 } 1731 } 1732 1733 ret = smu_apply_clocks_adjust_rules(smu); 1734 if (ret) { 1735 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!"); 1736 return ret; 1737 } 1738 1739 if (!skip_display_settings) { 1740 ret = smu_notify_smc_display_config(smu); 1741 if (ret) { 1742 dev_err(smu->adev->dev, "Failed to notify smc display config!"); 1743 return ret; 1744 } 1745 } 1746 1747 if (smu_dpm_ctx->dpm_level != level) { 1748 ret = smu_asic_set_performance_level(smu, level); 1749 if (ret) { 1750 dev_err(smu->adev->dev, "Failed to set performance level!"); 1751 return ret; 1752 } 1753 1754 /* update the saved copy */ 1755 smu_dpm_ctx->dpm_level = level; 1756 } 1757 1758 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && 1759 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { 1760 index = fls(smu->workload_mask); 1761 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; 1762 workload = smu->workload_setting[index]; 1763 1764 if (smu->power_profile_mode != workload) 1765 smu_bump_power_profile_mode(smu, &workload, 0); 1766 } 1767 1768 return ret; 1769 } 1770 1771 static int smu_handle_task(struct smu_context *smu, 1772 enum amd_dpm_forced_level level, 1773 enum amd_pp_task task_id, 1774 bool lock_needed) 1775 { 1776 int ret = 0; 1777 1778 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1779 return -EOPNOTSUPP; 1780 1781 if (lock_needed) 1782 mutex_lock(&smu->mutex); 1783 1784 switch (task_id) { 1785 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: 1786 ret = smu_pre_display_config_changed(smu); 1787 if (ret) 1788 goto out; 1789 ret = smu_adjust_power_state_dynamic(smu, level, false); 1790 break; 1791 case AMD_PP_TASK_COMPLETE_INIT: 1792 case AMD_PP_TASK_READJUST_POWER_STATE: 1793 ret = smu_adjust_power_state_dynamic(smu, level, true); 1794 break; 1795 default: 1796 break; 1797 } 1798 1799 out: 1800 if (lock_needed) 1801 mutex_unlock(&smu->mutex); 1802 1803 return ret; 1804 } 1805 1806 static int smu_handle_dpm_task(void *handle, 1807 enum amd_pp_task task_id, 1808 enum amd_pm_state_type *user_state) 1809 { 1810 struct smu_context *smu = handle; 1811 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 1812 1813 return smu_handle_task(smu, smu_dpm->dpm_level, task_id, true); 1814 1815 } 1816 1817 static int smu_switch_power_profile(void *handle, 1818 enum PP_SMC_POWER_PROFILE type, 1819 bool en) 1820 { 1821 struct smu_context *smu = handle; 1822 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1823 long workload; 1824 uint32_t index; 1825 1826 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1827 return -EOPNOTSUPP; 1828 1829 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM)) 1830 return -EINVAL; 1831 1832 mutex_lock(&smu->mutex); 1833 1834 if (!en) { 1835 smu->workload_mask &= ~(1 << smu->workload_prority[type]); 1836 index = fls(smu->workload_mask); 1837 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; 1838 workload = smu->workload_setting[index]; 1839 } else { 1840 smu->workload_mask |= (1 << smu->workload_prority[type]); 1841 index = fls(smu->workload_mask); 1842 index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; 1843 workload = smu->workload_setting[index]; 1844 } 1845 1846 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && 1847 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) 1848 smu_bump_power_profile_mode(smu, &workload, 0); 1849 1850 mutex_unlock(&smu->mutex); 1851 1852 return 0; 1853 } 1854 1855 static enum amd_dpm_forced_level smu_get_performance_level(void *handle) 1856 { 1857 struct smu_context *smu = handle; 1858 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1859 enum amd_dpm_forced_level level; 1860 1861 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1862 return -EOPNOTSUPP; 1863 1864 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 1865 return -EINVAL; 1866 1867 mutex_lock(&(smu->mutex)); 1868 level = smu_dpm_ctx->dpm_level; 1869 mutex_unlock(&(smu->mutex)); 1870 1871 return level; 1872 } 1873 1874 static int smu_force_performance_level(void *handle, 1875 enum amd_dpm_forced_level level) 1876 { 1877 struct smu_context *smu = handle; 1878 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1879 int ret = 0; 1880 1881 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1882 return -EOPNOTSUPP; 1883 1884 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 1885 return -EINVAL; 1886 1887 mutex_lock(&smu->mutex); 1888 1889 ret = smu_enable_umd_pstate(smu, &level); 1890 if (ret) { 1891 mutex_unlock(&smu->mutex); 1892 return ret; 1893 } 1894 1895 ret = smu_handle_task(smu, level, 1896 AMD_PP_TASK_READJUST_POWER_STATE, 1897 false); 1898 1899 mutex_unlock(&smu->mutex); 1900 1901 /* reset user dpm clock state */ 1902 if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { 1903 memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask)); 1904 smu->user_dpm_profile.clk_dependency = 0; 1905 } 1906 1907 return ret; 1908 } 1909 1910 static int smu_set_display_count(void *handle, uint32_t count) 1911 { 1912 struct smu_context *smu = handle; 1913 int ret = 0; 1914 1915 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1916 return -EOPNOTSUPP; 1917 1918 mutex_lock(&smu->mutex); 1919 ret = smu_init_display_count(smu, count); 1920 mutex_unlock(&smu->mutex); 1921 1922 return ret; 1923 } 1924 1925 static int smu_force_smuclk_levels(struct smu_context *smu, 1926 enum smu_clk_type clk_type, 1927 uint32_t mask) 1928 { 1929 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1930 int ret = 0; 1931 1932 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1933 return -EOPNOTSUPP; 1934 1935 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { 1936 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n"); 1937 return -EINVAL; 1938 } 1939 1940 mutex_lock(&smu->mutex); 1941 1942 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) { 1943 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask); 1944 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 1945 smu->user_dpm_profile.clk_mask[clk_type] = mask; 1946 smu_set_user_clk_dependencies(smu, clk_type); 1947 } 1948 } 1949 1950 mutex_unlock(&smu->mutex); 1951 1952 return ret; 1953 } 1954 1955 static int smu_force_ppclk_levels(void *handle, 1956 enum pp_clock_type type, 1957 uint32_t mask) 1958 { 1959 struct smu_context *smu = handle; 1960 enum smu_clk_type clk_type; 1961 1962 switch (type) { 1963 case PP_SCLK: 1964 clk_type = SMU_SCLK; break; 1965 case PP_MCLK: 1966 clk_type = SMU_MCLK; break; 1967 case PP_PCIE: 1968 clk_type = SMU_PCIE; break; 1969 case PP_SOCCLK: 1970 clk_type = SMU_SOCCLK; break; 1971 case PP_FCLK: 1972 clk_type = SMU_FCLK; break; 1973 case PP_DCEFCLK: 1974 clk_type = SMU_DCEFCLK; break; 1975 case PP_VCLK: 1976 clk_type = SMU_VCLK; break; 1977 case PP_DCLK: 1978 clk_type = SMU_DCLK; break; 1979 case OD_SCLK: 1980 clk_type = SMU_OD_SCLK; break; 1981 case OD_MCLK: 1982 clk_type = SMU_OD_MCLK; break; 1983 case OD_VDDC_CURVE: 1984 clk_type = SMU_OD_VDDC_CURVE; break; 1985 case OD_RANGE: 1986 clk_type = SMU_OD_RANGE; break; 1987 default: 1988 return -EINVAL; 1989 } 1990 1991 return smu_force_smuclk_levels(smu, clk_type, mask); 1992 } 1993 1994 /* 1995 * On system suspending or resetting, the dpm_enabled 1996 * flag will be cleared. So that those SMU services which 1997 * are not supported will be gated. 1998 * However, the mp1 state setting should still be granted 1999 * even if the dpm_enabled cleared. 2000 */ 2001 static int smu_set_mp1_state(void *handle, 2002 enum pp_mp1_state mp1_state) 2003 { 2004 struct smu_context *smu = handle; 2005 int ret = 0; 2006 2007 if (!smu->pm_enabled) 2008 return -EOPNOTSUPP; 2009 2010 mutex_lock(&smu->mutex); 2011 2012 if (smu->ppt_funcs && 2013 smu->ppt_funcs->set_mp1_state) 2014 ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state); 2015 2016 mutex_unlock(&smu->mutex); 2017 2018 return ret; 2019 } 2020 2021 static int smu_set_df_cstate(void *handle, 2022 enum pp_df_cstate state) 2023 { 2024 struct smu_context *smu = handle; 2025 int ret = 0; 2026 2027 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2028 return -EOPNOTSUPP; 2029 2030 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate) 2031 return 0; 2032 2033 mutex_lock(&smu->mutex); 2034 2035 ret = smu->ppt_funcs->set_df_cstate(smu, state); 2036 if (ret) 2037 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n"); 2038 2039 mutex_unlock(&smu->mutex); 2040 2041 return ret; 2042 } 2043 2044 int smu_allow_xgmi_power_down(struct smu_context *smu, bool en) 2045 { 2046 int ret = 0; 2047 2048 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2049 return -EOPNOTSUPP; 2050 2051 if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down) 2052 return 0; 2053 2054 mutex_lock(&smu->mutex); 2055 2056 ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en); 2057 if (ret) 2058 dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n"); 2059 2060 mutex_unlock(&smu->mutex); 2061 2062 return ret; 2063 } 2064 2065 int smu_write_watermarks_table(struct smu_context *smu) 2066 { 2067 int ret = 0; 2068 2069 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2070 return -EOPNOTSUPP; 2071 2072 mutex_lock(&smu->mutex); 2073 2074 ret = smu_set_watermarks_table(smu, NULL); 2075 2076 mutex_unlock(&smu->mutex); 2077 2078 return ret; 2079 } 2080 2081 static int smu_set_watermarks_for_clock_ranges(void *handle, 2082 struct pp_smu_wm_range_sets *clock_ranges) 2083 { 2084 struct smu_context *smu = handle; 2085 int ret = 0; 2086 2087 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2088 return -EOPNOTSUPP; 2089 2090 if (smu->disable_watermark) 2091 return 0; 2092 2093 mutex_lock(&smu->mutex); 2094 2095 ret = smu_set_watermarks_table(smu, clock_ranges); 2096 2097 mutex_unlock(&smu->mutex); 2098 2099 return ret; 2100 } 2101 2102 int smu_set_ac_dc(struct smu_context *smu) 2103 { 2104 int ret = 0; 2105 2106 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2107 return -EOPNOTSUPP; 2108 2109 /* controlled by firmware */ 2110 if (smu->dc_controlled_by_gpio) 2111 return 0; 2112 2113 mutex_lock(&smu->mutex); 2114 ret = smu_set_power_source(smu, 2115 smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC : 2116 SMU_POWER_SOURCE_DC); 2117 if (ret) 2118 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n", 2119 smu->adev->pm.ac_power ? "AC" : "DC"); 2120 mutex_unlock(&smu->mutex); 2121 2122 return ret; 2123 } 2124 2125 const struct amd_ip_funcs smu_ip_funcs = { 2126 .name = "smu", 2127 .early_init = smu_early_init, 2128 .late_init = smu_late_init, 2129 .sw_init = smu_sw_init, 2130 .sw_fini = smu_sw_fini, 2131 .hw_init = smu_hw_init, 2132 .hw_fini = smu_hw_fini, 2133 .suspend = smu_suspend, 2134 .resume = smu_resume, 2135 .is_idle = NULL, 2136 .check_soft_reset = NULL, 2137 .wait_for_idle = NULL, 2138 .soft_reset = NULL, 2139 .set_clockgating_state = smu_set_clockgating_state, 2140 .set_powergating_state = smu_set_powergating_state, 2141 .enable_umd_pstate = smu_enable_umd_pstate, 2142 }; 2143 2144 const struct amdgpu_ip_block_version smu_v11_0_ip_block = 2145 { 2146 .type = AMD_IP_BLOCK_TYPE_SMC, 2147 .major = 11, 2148 .minor = 0, 2149 .rev = 0, 2150 .funcs = &smu_ip_funcs, 2151 }; 2152 2153 const struct amdgpu_ip_block_version smu_v12_0_ip_block = 2154 { 2155 .type = AMD_IP_BLOCK_TYPE_SMC, 2156 .major = 12, 2157 .minor = 0, 2158 .rev = 0, 2159 .funcs = &smu_ip_funcs, 2160 }; 2161 2162 const struct amdgpu_ip_block_version smu_v13_0_ip_block = 2163 { 2164 .type = AMD_IP_BLOCK_TYPE_SMC, 2165 .major = 13, 2166 .minor = 0, 2167 .rev = 0, 2168 .funcs = &smu_ip_funcs, 2169 }; 2170 2171 static int smu_load_microcode(void *handle) 2172 { 2173 struct smu_context *smu = handle; 2174 struct amdgpu_device *adev = smu->adev; 2175 int ret = 0; 2176 2177 if (!smu->pm_enabled) 2178 return -EOPNOTSUPP; 2179 2180 /* This should be used for non PSP loading */ 2181 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) 2182 return 0; 2183 2184 if (smu->ppt_funcs->load_microcode) { 2185 ret = smu->ppt_funcs->load_microcode(smu); 2186 if (ret) { 2187 dev_err(adev->dev, "Load microcode failed\n"); 2188 return ret; 2189 } 2190 } 2191 2192 if (smu->ppt_funcs->check_fw_status) { 2193 ret = smu->ppt_funcs->check_fw_status(smu); 2194 if (ret) { 2195 dev_err(adev->dev, "SMC is not ready\n"); 2196 return ret; 2197 } 2198 } 2199 2200 return ret; 2201 } 2202 2203 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) 2204 { 2205 int ret = 0; 2206 2207 mutex_lock(&smu->mutex); 2208 2209 if (smu->ppt_funcs->set_gfx_cgpg) 2210 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled); 2211 2212 mutex_unlock(&smu->mutex); 2213 2214 return ret; 2215 } 2216 2217 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed) 2218 { 2219 struct smu_context *smu = handle; 2220 int ret = 0; 2221 2222 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2223 return -EOPNOTSUPP; 2224 2225 mutex_lock(&smu->mutex); 2226 2227 if (smu->ppt_funcs->set_fan_speed_rpm) { 2228 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed); 2229 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 2230 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM; 2231 smu->user_dpm_profile.fan_speed_rpm = speed; 2232 2233 /* Override custom PWM setting as they cannot co-exist */ 2234 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM; 2235 smu->user_dpm_profile.fan_speed_pwm = 0; 2236 } 2237 } 2238 2239 mutex_unlock(&smu->mutex); 2240 2241 return ret; 2242 } 2243 2244 /** 2245 * smu_get_power_limit - Request one of the SMU Power Limits 2246 * 2247 * @handle: pointer to smu context 2248 * @limit: requested limit is written back to this variable 2249 * @pp_limit_level: &pp_power_limit_level which limit of the power to return 2250 * @pp_power_type: &pp_power_type type of power 2251 * Return: 0 on success, <0 on error 2252 * 2253 */ 2254 int smu_get_power_limit(void *handle, 2255 uint32_t *limit, 2256 enum pp_power_limit_level pp_limit_level, 2257 enum pp_power_type pp_power_type) 2258 { 2259 struct smu_context *smu = handle; 2260 struct amdgpu_device *adev = smu->adev; 2261 enum smu_ppt_limit_level limit_level; 2262 uint32_t limit_type; 2263 int ret = 0; 2264 2265 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2266 return -EOPNOTSUPP; 2267 2268 switch(pp_power_type) { 2269 case PP_PWR_TYPE_SUSTAINED: 2270 limit_type = SMU_DEFAULT_PPT_LIMIT; 2271 break; 2272 case PP_PWR_TYPE_FAST: 2273 limit_type = SMU_FAST_PPT_LIMIT; 2274 break; 2275 default: 2276 return -EOPNOTSUPP; 2277 break; 2278 } 2279 2280 switch(pp_limit_level){ 2281 case PP_PWR_LIMIT_CURRENT: 2282 limit_level = SMU_PPT_LIMIT_CURRENT; 2283 break; 2284 case PP_PWR_LIMIT_DEFAULT: 2285 limit_level = SMU_PPT_LIMIT_DEFAULT; 2286 break; 2287 case PP_PWR_LIMIT_MAX: 2288 limit_level = SMU_PPT_LIMIT_MAX; 2289 break; 2290 case PP_PWR_LIMIT_MIN: 2291 default: 2292 return -EOPNOTSUPP; 2293 break; 2294 } 2295 2296 mutex_lock(&smu->mutex); 2297 2298 if (limit_type != SMU_DEFAULT_PPT_LIMIT) { 2299 if (smu->ppt_funcs->get_ppt_limit) 2300 ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level); 2301 } else { 2302 switch (limit_level) { 2303 case SMU_PPT_LIMIT_CURRENT: 2304 switch (adev->ip_versions[MP1_HWIP][0]) { 2305 case IP_VERSION(13, 0, 2): 2306 case IP_VERSION(11, 0, 7): 2307 case IP_VERSION(11, 0, 11): 2308 case IP_VERSION(11, 0, 12): 2309 case IP_VERSION(11, 0, 13): 2310 ret = smu_get_asic_power_limits(smu, 2311 &smu->current_power_limit, 2312 NULL, 2313 NULL); 2314 break; 2315 default: 2316 break; 2317 } 2318 *limit = smu->current_power_limit; 2319 break; 2320 case SMU_PPT_LIMIT_DEFAULT: 2321 *limit = smu->default_power_limit; 2322 break; 2323 case SMU_PPT_LIMIT_MAX: 2324 *limit = smu->max_power_limit; 2325 break; 2326 default: 2327 break; 2328 } 2329 } 2330 2331 mutex_unlock(&smu->mutex); 2332 2333 return ret; 2334 } 2335 2336 static int smu_set_power_limit(void *handle, uint32_t limit) 2337 { 2338 struct smu_context *smu = handle; 2339 uint32_t limit_type = limit >> 24; 2340 int ret = 0; 2341 2342 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2343 return -EOPNOTSUPP; 2344 2345 mutex_lock(&smu->mutex); 2346 2347 if (limit_type != SMU_DEFAULT_PPT_LIMIT) 2348 if (smu->ppt_funcs->set_power_limit) { 2349 ret = smu->ppt_funcs->set_power_limit(smu, limit); 2350 goto out; 2351 } 2352 2353 if (limit > smu->max_power_limit) { 2354 dev_err(smu->adev->dev, 2355 "New power limit (%d) is over the max allowed %d\n", 2356 limit, smu->max_power_limit); 2357 ret = -EINVAL; 2358 goto out; 2359 } 2360 2361 if (!limit) 2362 limit = smu->current_power_limit; 2363 2364 if (smu->ppt_funcs->set_power_limit) { 2365 ret = smu->ppt_funcs->set_power_limit(smu, limit); 2366 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) 2367 smu->user_dpm_profile.power_limit = limit; 2368 } 2369 2370 out: 2371 mutex_unlock(&smu->mutex); 2372 2373 return ret; 2374 } 2375 2376 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) 2377 { 2378 int ret = 0; 2379 2380 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2381 return -EOPNOTSUPP; 2382 2383 mutex_lock(&smu->mutex); 2384 2385 if (smu->ppt_funcs->print_clk_levels) 2386 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf); 2387 2388 mutex_unlock(&smu->mutex); 2389 2390 return ret; 2391 } 2392 2393 static int smu_print_ppclk_levels(void *handle, 2394 enum pp_clock_type type, 2395 char *buf) 2396 { 2397 struct smu_context *smu = handle; 2398 enum smu_clk_type clk_type; 2399 2400 switch (type) { 2401 case PP_SCLK: 2402 clk_type = SMU_SCLK; break; 2403 case PP_MCLK: 2404 clk_type = SMU_MCLK; break; 2405 case PP_PCIE: 2406 clk_type = SMU_PCIE; break; 2407 case PP_SOCCLK: 2408 clk_type = SMU_SOCCLK; break; 2409 case PP_FCLK: 2410 clk_type = SMU_FCLK; break; 2411 case PP_DCEFCLK: 2412 clk_type = SMU_DCEFCLK; break; 2413 case PP_VCLK: 2414 clk_type = SMU_VCLK; break; 2415 case PP_DCLK: 2416 clk_type = SMU_DCLK; break; 2417 case OD_SCLK: 2418 clk_type = SMU_OD_SCLK; break; 2419 case OD_MCLK: 2420 clk_type = SMU_OD_MCLK; break; 2421 case OD_VDDC_CURVE: 2422 clk_type = SMU_OD_VDDC_CURVE; break; 2423 case OD_RANGE: 2424 clk_type = SMU_OD_RANGE; break; 2425 case OD_VDDGFX_OFFSET: 2426 clk_type = SMU_OD_VDDGFX_OFFSET; break; 2427 case OD_CCLK: 2428 clk_type = SMU_OD_CCLK; break; 2429 default: 2430 return -EINVAL; 2431 } 2432 2433 return smu_print_smuclk_levels(smu, clk_type, buf); 2434 } 2435 2436 static int smu_od_edit_dpm_table(void *handle, 2437 enum PP_OD_DPM_TABLE_COMMAND type, 2438 long *input, uint32_t size) 2439 { 2440 struct smu_context *smu = handle; 2441 int ret = 0; 2442 2443 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2444 return -EOPNOTSUPP; 2445 2446 mutex_lock(&smu->mutex); 2447 2448 if (smu->ppt_funcs->od_edit_dpm_table) { 2449 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size); 2450 } 2451 2452 mutex_unlock(&smu->mutex); 2453 2454 return ret; 2455 } 2456 2457 static int smu_read_sensor(void *handle, 2458 int sensor, 2459 void *data, 2460 int *size_arg) 2461 { 2462 struct smu_context *smu = handle; 2463 struct smu_umd_pstate_table *pstate_table = 2464 &smu->pstate_table; 2465 int ret = 0; 2466 uint32_t *size, size_val; 2467 2468 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2469 return -EOPNOTSUPP; 2470 2471 if (!data || !size_arg) 2472 return -EINVAL; 2473 2474 size_val = *size_arg; 2475 size = &size_val; 2476 2477 mutex_lock(&smu->mutex); 2478 2479 if (smu->ppt_funcs->read_sensor) 2480 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size)) 2481 goto unlock; 2482 2483 switch (sensor) { 2484 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK: 2485 *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100; 2486 *size = 4; 2487 break; 2488 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK: 2489 *((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100; 2490 *size = 4; 2491 break; 2492 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK: 2493 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2); 2494 *size = 8; 2495 break; 2496 case AMDGPU_PP_SENSOR_UVD_POWER: 2497 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0; 2498 *size = 4; 2499 break; 2500 case AMDGPU_PP_SENSOR_VCE_POWER: 2501 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0; 2502 *size = 4; 2503 break; 2504 case AMDGPU_PP_SENSOR_VCN_POWER_STATE: 2505 *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0: 1; 2506 *size = 4; 2507 break; 2508 case AMDGPU_PP_SENSOR_MIN_FAN_RPM: 2509 *(uint32_t *)data = 0; 2510 *size = 4; 2511 break; 2512 default: 2513 *size = 0; 2514 ret = -EOPNOTSUPP; 2515 break; 2516 } 2517 2518 unlock: 2519 mutex_unlock(&smu->mutex); 2520 2521 // assign uint32_t to int 2522 *size_arg = size_val; 2523 2524 return ret; 2525 } 2526 2527 static int smu_get_power_profile_mode(void *handle, char *buf) 2528 { 2529 struct smu_context *smu = handle; 2530 int ret = 0; 2531 2532 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2533 return -EOPNOTSUPP; 2534 2535 mutex_lock(&smu->mutex); 2536 2537 if (smu->ppt_funcs->get_power_profile_mode) 2538 ret = smu->ppt_funcs->get_power_profile_mode(smu, buf); 2539 2540 mutex_unlock(&smu->mutex); 2541 2542 return ret; 2543 } 2544 2545 static int smu_set_power_profile_mode(void *handle, 2546 long *param, 2547 uint32_t param_size) 2548 { 2549 struct smu_context *smu = handle; 2550 int ret = 0; 2551 2552 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2553 return -EOPNOTSUPP; 2554 2555 mutex_lock(&smu->mutex); 2556 2557 smu_bump_power_profile_mode(smu, param, param_size); 2558 2559 mutex_unlock(&smu->mutex); 2560 2561 return ret; 2562 } 2563 2564 2565 static u32 smu_get_fan_control_mode(void *handle) 2566 { 2567 struct smu_context *smu = handle; 2568 u32 ret = 0; 2569 2570 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2571 return AMD_FAN_CTRL_NONE; 2572 2573 mutex_lock(&smu->mutex); 2574 2575 if (smu->ppt_funcs->get_fan_control_mode) 2576 ret = smu->ppt_funcs->get_fan_control_mode(smu); 2577 2578 mutex_unlock(&smu->mutex); 2579 2580 return ret; 2581 } 2582 2583 static int smu_set_fan_control_mode(struct smu_context *smu, int value) 2584 { 2585 int ret = 0; 2586 2587 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2588 return -EOPNOTSUPP; 2589 2590 mutex_lock(&smu->mutex); 2591 2592 if (smu->ppt_funcs->set_fan_control_mode) { 2593 ret = smu->ppt_funcs->set_fan_control_mode(smu, value); 2594 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) 2595 smu->user_dpm_profile.fan_mode = value; 2596 } 2597 2598 mutex_unlock(&smu->mutex); 2599 2600 /* reset user dpm fan speed */ 2601 if (!ret && value != AMD_FAN_CTRL_MANUAL && 2602 !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 2603 smu->user_dpm_profile.fan_speed_pwm = 0; 2604 smu->user_dpm_profile.fan_speed_rpm = 0; 2605 smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM); 2606 } 2607 2608 return ret; 2609 } 2610 2611 static void smu_pp_set_fan_control_mode(void *handle, u32 value) 2612 { 2613 struct smu_context *smu = handle; 2614 2615 smu_set_fan_control_mode(smu, value); 2616 } 2617 2618 2619 static int smu_get_fan_speed_pwm(void *handle, u32 *speed) 2620 { 2621 struct smu_context *smu = handle; 2622 int ret = 0; 2623 2624 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2625 return -EOPNOTSUPP; 2626 2627 mutex_lock(&smu->mutex); 2628 2629 if (smu->ppt_funcs->get_fan_speed_pwm) 2630 ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed); 2631 2632 mutex_unlock(&smu->mutex); 2633 2634 return ret; 2635 } 2636 2637 static int smu_set_fan_speed_pwm(void *handle, u32 speed) 2638 { 2639 struct smu_context *smu = handle; 2640 int ret = 0; 2641 2642 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2643 return -EOPNOTSUPP; 2644 2645 mutex_lock(&smu->mutex); 2646 2647 if (smu->ppt_funcs->set_fan_speed_pwm) { 2648 ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed); 2649 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 2650 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM; 2651 smu->user_dpm_profile.fan_speed_pwm = speed; 2652 2653 /* Override custom RPM setting as they cannot co-exist */ 2654 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM; 2655 smu->user_dpm_profile.fan_speed_rpm = 0; 2656 } 2657 } 2658 2659 mutex_unlock(&smu->mutex); 2660 2661 return ret; 2662 } 2663 2664 static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed) 2665 { 2666 struct smu_context *smu = handle; 2667 int ret = 0; 2668 2669 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2670 return -EOPNOTSUPP; 2671 2672 mutex_lock(&smu->mutex); 2673 2674 if (smu->ppt_funcs->get_fan_speed_rpm) 2675 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed); 2676 2677 mutex_unlock(&smu->mutex); 2678 2679 return ret; 2680 } 2681 2682 static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk) 2683 { 2684 struct smu_context *smu = handle; 2685 int ret = 0; 2686 2687 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2688 return -EOPNOTSUPP; 2689 2690 mutex_lock(&smu->mutex); 2691 2692 ret = smu_set_min_dcef_deep_sleep(smu, clk); 2693 2694 mutex_unlock(&smu->mutex); 2695 2696 return ret; 2697 } 2698 2699 static int smu_get_clock_by_type_with_latency(void *handle, 2700 enum amd_pp_clock_type type, 2701 struct pp_clock_levels_with_latency *clocks) 2702 { 2703 struct smu_context *smu = handle; 2704 enum smu_clk_type clk_type; 2705 int ret = 0; 2706 2707 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2708 return -EOPNOTSUPP; 2709 2710 mutex_lock(&smu->mutex); 2711 2712 if (smu->ppt_funcs->get_clock_by_type_with_latency) { 2713 switch (type) { 2714 case amd_pp_sys_clock: 2715 clk_type = SMU_GFXCLK; 2716 break; 2717 case amd_pp_mem_clock: 2718 clk_type = SMU_MCLK; 2719 break; 2720 case amd_pp_dcef_clock: 2721 clk_type = SMU_DCEFCLK; 2722 break; 2723 case amd_pp_disp_clock: 2724 clk_type = SMU_DISPCLK; 2725 break; 2726 default: 2727 dev_err(smu->adev->dev, "Invalid clock type!\n"); 2728 mutex_unlock(&smu->mutex); 2729 return -EINVAL; 2730 } 2731 2732 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks); 2733 } 2734 2735 mutex_unlock(&smu->mutex); 2736 2737 return ret; 2738 } 2739 2740 static int smu_display_clock_voltage_request(void *handle, 2741 struct pp_display_clock_request *clock_req) 2742 { 2743 struct smu_context *smu = handle; 2744 int ret = 0; 2745 2746 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2747 return -EOPNOTSUPP; 2748 2749 mutex_lock(&smu->mutex); 2750 2751 if (smu->ppt_funcs->display_clock_voltage_request) 2752 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req); 2753 2754 mutex_unlock(&smu->mutex); 2755 2756 return ret; 2757 } 2758 2759 2760 static int smu_display_disable_memory_clock_switch(void *handle, 2761 bool disable_memory_clock_switch) 2762 { 2763 struct smu_context *smu = handle; 2764 int ret = -EINVAL; 2765 2766 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2767 return -EOPNOTSUPP; 2768 2769 mutex_lock(&smu->mutex); 2770 2771 if (smu->ppt_funcs->display_disable_memory_clock_switch) 2772 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch); 2773 2774 mutex_unlock(&smu->mutex); 2775 2776 return ret; 2777 } 2778 2779 static int smu_set_xgmi_pstate(void *handle, 2780 uint32_t pstate) 2781 { 2782 struct smu_context *smu = handle; 2783 int ret = 0; 2784 2785 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2786 return -EOPNOTSUPP; 2787 2788 mutex_lock(&smu->mutex); 2789 2790 if (smu->ppt_funcs->set_xgmi_pstate) 2791 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate); 2792 2793 mutex_unlock(&smu->mutex); 2794 2795 if(ret) 2796 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n"); 2797 2798 return ret; 2799 } 2800 2801 static int smu_get_baco_capability(void *handle, bool *cap) 2802 { 2803 struct smu_context *smu = handle; 2804 int ret = 0; 2805 2806 *cap = false; 2807 2808 if (!smu->pm_enabled) 2809 return 0; 2810 2811 mutex_lock(&smu->mutex); 2812 2813 if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support) 2814 *cap = smu->ppt_funcs->baco_is_support(smu); 2815 2816 mutex_unlock(&smu->mutex); 2817 2818 return ret; 2819 } 2820 2821 static int smu_baco_set_state(void *handle, int state) 2822 { 2823 struct smu_context *smu = handle; 2824 int ret = 0; 2825 2826 if (!smu->pm_enabled) 2827 return -EOPNOTSUPP; 2828 2829 if (state == 0) { 2830 mutex_lock(&smu->mutex); 2831 2832 if (smu->ppt_funcs->baco_exit) 2833 ret = smu->ppt_funcs->baco_exit(smu); 2834 2835 mutex_unlock(&smu->mutex); 2836 } else if (state == 1) { 2837 mutex_lock(&smu->mutex); 2838 2839 if (smu->ppt_funcs->baco_enter) 2840 ret = smu->ppt_funcs->baco_enter(smu); 2841 2842 mutex_unlock(&smu->mutex); 2843 2844 } else { 2845 return -EINVAL; 2846 } 2847 2848 if (ret) 2849 dev_err(smu->adev->dev, "Failed to %s BACO state!\n", 2850 (state)?"enter":"exit"); 2851 2852 return ret; 2853 } 2854 2855 bool smu_mode1_reset_is_support(struct smu_context *smu) 2856 { 2857 bool ret = false; 2858 2859 if (!smu->pm_enabled) 2860 return false; 2861 2862 mutex_lock(&smu->mutex); 2863 2864 if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support) 2865 ret = smu->ppt_funcs->mode1_reset_is_support(smu); 2866 2867 mutex_unlock(&smu->mutex); 2868 2869 return ret; 2870 } 2871 2872 bool smu_mode2_reset_is_support(struct smu_context *smu) 2873 { 2874 bool ret = false; 2875 2876 if (!smu->pm_enabled) 2877 return false; 2878 2879 mutex_lock(&smu->mutex); 2880 2881 if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support) 2882 ret = smu->ppt_funcs->mode2_reset_is_support(smu); 2883 2884 mutex_unlock(&smu->mutex); 2885 2886 return ret; 2887 } 2888 2889 int smu_mode1_reset(struct smu_context *smu) 2890 { 2891 int ret = 0; 2892 2893 if (!smu->pm_enabled) 2894 return -EOPNOTSUPP; 2895 2896 mutex_lock(&smu->mutex); 2897 2898 if (smu->ppt_funcs->mode1_reset) 2899 ret = smu->ppt_funcs->mode1_reset(smu); 2900 2901 mutex_unlock(&smu->mutex); 2902 2903 return ret; 2904 } 2905 2906 static int smu_mode2_reset(void *handle) 2907 { 2908 struct smu_context *smu = handle; 2909 int ret = 0; 2910 2911 if (!smu->pm_enabled) 2912 return -EOPNOTSUPP; 2913 2914 mutex_lock(&smu->mutex); 2915 2916 if (smu->ppt_funcs->mode2_reset) 2917 ret = smu->ppt_funcs->mode2_reset(smu); 2918 2919 mutex_unlock(&smu->mutex); 2920 2921 if (ret) 2922 dev_err(smu->adev->dev, "Mode2 reset failed!\n"); 2923 2924 return ret; 2925 } 2926 2927 static int smu_get_max_sustainable_clocks_by_dc(void *handle, 2928 struct pp_smu_nv_clock_table *max_clocks) 2929 { 2930 struct smu_context *smu = handle; 2931 int ret = 0; 2932 2933 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2934 return -EOPNOTSUPP; 2935 2936 mutex_lock(&smu->mutex); 2937 2938 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc) 2939 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks); 2940 2941 mutex_unlock(&smu->mutex); 2942 2943 return ret; 2944 } 2945 2946 static int smu_get_uclk_dpm_states(void *handle, 2947 unsigned int *clock_values_in_khz, 2948 unsigned int *num_states) 2949 { 2950 struct smu_context *smu = handle; 2951 int ret = 0; 2952 2953 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2954 return -EOPNOTSUPP; 2955 2956 mutex_lock(&smu->mutex); 2957 2958 if (smu->ppt_funcs->get_uclk_dpm_states) 2959 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states); 2960 2961 mutex_unlock(&smu->mutex); 2962 2963 return ret; 2964 } 2965 2966 static enum amd_pm_state_type smu_get_current_power_state(void *handle) 2967 { 2968 struct smu_context *smu = handle; 2969 enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT; 2970 2971 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2972 return -EOPNOTSUPP; 2973 2974 mutex_lock(&smu->mutex); 2975 2976 if (smu->ppt_funcs->get_current_power_state) 2977 pm_state = smu->ppt_funcs->get_current_power_state(smu); 2978 2979 mutex_unlock(&smu->mutex); 2980 2981 return pm_state; 2982 } 2983 2984 static int smu_get_dpm_clock_table(void *handle, 2985 struct dpm_clocks *clock_table) 2986 { 2987 struct smu_context *smu = handle; 2988 int ret = 0; 2989 2990 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2991 return -EOPNOTSUPP; 2992 2993 mutex_lock(&smu->mutex); 2994 2995 if (smu->ppt_funcs->get_dpm_clock_table) 2996 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table); 2997 2998 mutex_unlock(&smu->mutex); 2999 3000 return ret; 3001 } 3002 3003 static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table) 3004 { 3005 struct smu_context *smu = handle; 3006 ssize_t size; 3007 3008 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3009 return -EOPNOTSUPP; 3010 3011 if (!smu->ppt_funcs->get_gpu_metrics) 3012 return -EOPNOTSUPP; 3013 3014 mutex_lock(&smu->mutex); 3015 3016 size = smu->ppt_funcs->get_gpu_metrics(smu, table); 3017 3018 mutex_unlock(&smu->mutex); 3019 3020 return size; 3021 } 3022 3023 static int smu_enable_mgpu_fan_boost(void *handle) 3024 { 3025 struct smu_context *smu = handle; 3026 int ret = 0; 3027 3028 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3029 return -EOPNOTSUPP; 3030 3031 mutex_lock(&smu->mutex); 3032 3033 if (smu->ppt_funcs->enable_mgpu_fan_boost) 3034 ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu); 3035 3036 mutex_unlock(&smu->mutex); 3037 3038 return ret; 3039 } 3040 3041 static int smu_gfx_state_change_set(void *handle, 3042 uint32_t state) 3043 { 3044 struct smu_context *smu = handle; 3045 int ret = 0; 3046 3047 mutex_lock(&smu->mutex); 3048 if (smu->ppt_funcs->gfx_state_change_set) 3049 ret = smu->ppt_funcs->gfx_state_change_set(smu, state); 3050 mutex_unlock(&smu->mutex); 3051 3052 return ret; 3053 } 3054 3055 int smu_set_light_sbr(struct smu_context *smu, bool enable) 3056 { 3057 int ret = 0; 3058 3059 mutex_lock(&smu->mutex); 3060 if (smu->ppt_funcs->set_light_sbr) 3061 ret = smu->ppt_funcs->set_light_sbr(smu, enable); 3062 mutex_unlock(&smu->mutex); 3063 3064 return ret; 3065 } 3066 3067 static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size) 3068 { 3069 struct smu_context *smu = handle; 3070 struct smu_table_context *smu_table = &smu->smu_table; 3071 struct smu_table *memory_pool = &smu_table->memory_pool; 3072 3073 if (!addr || !size) 3074 return -EINVAL; 3075 3076 *addr = NULL; 3077 *size = 0; 3078 mutex_lock(&smu->mutex); 3079 if (memory_pool->bo) { 3080 *addr = memory_pool->cpu_addr; 3081 *size = memory_pool->size; 3082 } 3083 mutex_unlock(&smu->mutex); 3084 3085 return 0; 3086 } 3087 3088 static const struct amd_pm_funcs swsmu_pm_funcs = { 3089 /* export for sysfs */ 3090 .set_fan_control_mode = smu_pp_set_fan_control_mode, 3091 .get_fan_control_mode = smu_get_fan_control_mode, 3092 .set_fan_speed_pwm = smu_set_fan_speed_pwm, 3093 .get_fan_speed_pwm = smu_get_fan_speed_pwm, 3094 .force_clock_level = smu_force_ppclk_levels, 3095 .print_clock_levels = smu_print_ppclk_levels, 3096 .force_performance_level = smu_force_performance_level, 3097 .read_sensor = smu_read_sensor, 3098 .get_performance_level = smu_get_performance_level, 3099 .get_current_power_state = smu_get_current_power_state, 3100 .get_fan_speed_rpm = smu_get_fan_speed_rpm, 3101 .set_fan_speed_rpm = smu_set_fan_speed_rpm, 3102 .get_pp_num_states = smu_get_power_num_states, 3103 .get_pp_table = smu_sys_get_pp_table, 3104 .set_pp_table = smu_sys_set_pp_table, 3105 .switch_power_profile = smu_switch_power_profile, 3106 /* export to amdgpu */ 3107 .dispatch_tasks = smu_handle_dpm_task, 3108 .load_firmware = smu_load_microcode, 3109 .set_powergating_by_smu = smu_dpm_set_power_gate, 3110 .set_power_limit = smu_set_power_limit, 3111 .get_power_limit = smu_get_power_limit, 3112 .get_power_profile_mode = smu_get_power_profile_mode, 3113 .set_power_profile_mode = smu_set_power_profile_mode, 3114 .odn_edit_dpm_table = smu_od_edit_dpm_table, 3115 .set_mp1_state = smu_set_mp1_state, 3116 .gfx_state_change_set = smu_gfx_state_change_set, 3117 /* export to DC */ 3118 .get_sclk = smu_get_sclk, 3119 .get_mclk = smu_get_mclk, 3120 .display_configuration_change = smu_display_configuration_change, 3121 .get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency, 3122 .display_clock_voltage_request = smu_display_clock_voltage_request, 3123 .enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost, 3124 .set_active_display_count = smu_set_display_count, 3125 .set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk, 3126 .get_asic_baco_capability = smu_get_baco_capability, 3127 .set_asic_baco_state = smu_baco_set_state, 3128 .get_ppfeature_status = smu_sys_get_pp_feature_mask, 3129 .set_ppfeature_status = smu_sys_set_pp_feature_mask, 3130 .asic_reset_mode_2 = smu_mode2_reset, 3131 .set_df_cstate = smu_set_df_cstate, 3132 .set_xgmi_pstate = smu_set_xgmi_pstate, 3133 .get_gpu_metrics = smu_sys_get_gpu_metrics, 3134 .set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges, 3135 .display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch, 3136 .get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc, 3137 .get_uclk_dpm_states = smu_get_uclk_dpm_states, 3138 .get_dpm_clock_table = smu_get_dpm_clock_table, 3139 .get_smu_prv_buf_details = smu_get_prv_buffer_details, 3140 }; 3141 3142 int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event, 3143 uint64_t event_arg) 3144 { 3145 int ret = -EINVAL; 3146 struct smu_context *smu = &adev->smu; 3147 3148 if (smu->ppt_funcs->wait_for_event) { 3149 mutex_lock(&smu->mutex); 3150 ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg); 3151 mutex_unlock(&smu->mutex); 3152 } 3153 3154 return ret; 3155 } 3156