1 /* 2 * Copyright 2019 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 */ 22 23 #define SWSMU_CODE_LAYER_L1 24 25 #include <linux/firmware.h> 26 #include <linux/pci.h> 27 28 #include "amdgpu.h" 29 #include "amdgpu_smu.h" 30 #include "smu_internal.h" 31 #include "atom.h" 32 #include "arcturus_ppt.h" 33 #include "navi10_ppt.h" 34 #include "sienna_cichlid_ppt.h" 35 #include "renoir_ppt.h" 36 #include "vangogh_ppt.h" 37 #include "aldebaran_ppt.h" 38 #include "yellow_carp_ppt.h" 39 #include "cyan_skillfish_ppt.h" 40 #include "smu_v13_0_0_ppt.h" 41 #include "smu_v13_0_4_ppt.h" 42 #include "smu_v13_0_5_ppt.h" 43 #include "smu_v13_0_7_ppt.h" 44 #include "amd_pcie.h" 45 46 /* 47 * DO NOT use these for err/warn/info/debug messages. 48 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 49 * They are more MGPU friendly. 50 */ 51 #undef pr_err 52 #undef pr_warn 53 #undef pr_info 54 #undef pr_debug 55 56 static const struct amd_pm_funcs swsmu_pm_funcs; 57 static int smu_force_smuclk_levels(struct smu_context *smu, 58 enum smu_clk_type clk_type, 59 uint32_t mask); 60 static int smu_handle_task(struct smu_context *smu, 61 enum amd_dpm_forced_level level, 62 enum amd_pp_task task_id); 63 static int smu_reset(struct smu_context *smu); 64 static int smu_set_fan_speed_pwm(void *handle, u32 speed); 65 static int smu_set_fan_control_mode(void *handle, u32 value); 66 static int smu_set_power_limit(void *handle, uint32_t limit); 67 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed); 68 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled); 69 static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state); 70 71 static int smu_sys_get_pp_feature_mask(void *handle, 72 char *buf) 73 { 74 struct smu_context *smu = handle; 75 76 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 77 return -EOPNOTSUPP; 78 79 return smu_get_pp_feature_mask(smu, buf); 80 } 81 82 static int smu_sys_set_pp_feature_mask(void *handle, 83 uint64_t new_mask) 84 { 85 struct smu_context *smu = handle; 86 87 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 88 return -EOPNOTSUPP; 89 90 return smu_set_pp_feature_mask(smu, new_mask); 91 } 92 93 int smu_set_residency_gfxoff(struct smu_context *smu, bool value) 94 { 95 if (!smu->ppt_funcs->set_gfx_off_residency) 96 return -EINVAL; 97 98 return smu_set_gfx_off_residency(smu, value); 99 } 100 101 int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value) 102 { 103 if (!smu->ppt_funcs->get_gfx_off_residency) 104 return -EINVAL; 105 106 return smu_get_gfx_off_residency(smu, value); 107 } 108 109 int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value) 110 { 111 if (!smu->ppt_funcs->get_gfx_off_entrycount) 112 return -EINVAL; 113 114 return smu_get_gfx_off_entrycount(smu, value); 115 } 116 117 int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value) 118 { 119 if (!smu->ppt_funcs->get_gfx_off_status) 120 return -EINVAL; 121 122 *value = smu_get_gfx_off_status(smu); 123 124 return 0; 125 } 126 127 int smu_set_soft_freq_range(struct smu_context *smu, 128 enum smu_clk_type clk_type, 129 uint32_t min, 130 uint32_t max) 131 { 132 int ret = 0; 133 134 if (smu->ppt_funcs->set_soft_freq_limited_range) 135 ret = smu->ppt_funcs->set_soft_freq_limited_range(smu, 136 clk_type, 137 min, 138 max); 139 140 return ret; 141 } 142 143 int smu_get_dpm_freq_range(struct smu_context *smu, 144 enum smu_clk_type clk_type, 145 uint32_t *min, 146 uint32_t *max) 147 { 148 int ret = -ENOTSUPP; 149 150 if (!min && !max) 151 return -EINVAL; 152 153 if (smu->ppt_funcs->get_dpm_ultimate_freq) 154 ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu, 155 clk_type, 156 min, 157 max); 158 159 return ret; 160 } 161 162 int smu_set_gfx_power_up_by_imu(struct smu_context *smu) 163 { 164 if (!smu->ppt_funcs || !smu->ppt_funcs->set_gfx_power_up_by_imu) 165 return -EOPNOTSUPP; 166 167 return smu->ppt_funcs->set_gfx_power_up_by_imu(smu); 168 } 169 170 static u32 smu_get_mclk(void *handle, bool low) 171 { 172 struct smu_context *smu = handle; 173 uint32_t clk_freq; 174 int ret = 0; 175 176 ret = smu_get_dpm_freq_range(smu, SMU_UCLK, 177 low ? &clk_freq : NULL, 178 !low ? &clk_freq : NULL); 179 if (ret) 180 return 0; 181 return clk_freq * 100; 182 } 183 184 static u32 smu_get_sclk(void *handle, bool low) 185 { 186 struct smu_context *smu = handle; 187 uint32_t clk_freq; 188 int ret = 0; 189 190 ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, 191 low ? &clk_freq : NULL, 192 !low ? &clk_freq : NULL); 193 if (ret) 194 return 0; 195 return clk_freq * 100; 196 } 197 198 static int smu_dpm_set_vcn_enable(struct smu_context *smu, 199 bool enable) 200 { 201 struct smu_power_context *smu_power = &smu->smu_power; 202 struct smu_power_gate *power_gate = &smu_power->power_gate; 203 int ret = 0; 204 205 if (!smu->ppt_funcs->dpm_set_vcn_enable) 206 return 0; 207 208 if (atomic_read(&power_gate->vcn_gated) ^ enable) 209 return 0; 210 211 ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable); 212 if (!ret) 213 atomic_set(&power_gate->vcn_gated, !enable); 214 215 return ret; 216 } 217 218 static int smu_dpm_set_jpeg_enable(struct smu_context *smu, 219 bool enable) 220 { 221 struct smu_power_context *smu_power = &smu->smu_power; 222 struct smu_power_gate *power_gate = &smu_power->power_gate; 223 int ret = 0; 224 225 if (!smu->ppt_funcs->dpm_set_jpeg_enable) 226 return 0; 227 228 if (atomic_read(&power_gate->jpeg_gated) ^ enable) 229 return 0; 230 231 ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable); 232 if (!ret) 233 atomic_set(&power_gate->jpeg_gated, !enable); 234 235 return ret; 236 } 237 238 /** 239 * smu_dpm_set_power_gate - power gate/ungate the specific IP block 240 * 241 * @handle: smu_context pointer 242 * @block_type: the IP block to power gate/ungate 243 * @gate: to power gate if true, ungate otherwise 244 * 245 * This API uses no smu->mutex lock protection due to: 246 * 1. It is either called by other IP block(gfx/sdma/vcn/uvd/vce). 247 * This is guarded to be race condition free by the caller. 248 * 2. Or get called on user setting request of power_dpm_force_performance_level. 249 * Under this case, the smu->mutex lock protection is already enforced on 250 * the parent API smu_force_performance_level of the call path. 251 */ 252 static int smu_dpm_set_power_gate(void *handle, 253 uint32_t block_type, 254 bool gate) 255 { 256 struct smu_context *smu = handle; 257 int ret = 0; 258 259 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) { 260 dev_WARN(smu->adev->dev, 261 "SMU uninitialized but power %s requested for %u!\n", 262 gate ? "gate" : "ungate", block_type); 263 return -EOPNOTSUPP; 264 } 265 266 switch (block_type) { 267 /* 268 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses 269 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept. 270 */ 271 case AMD_IP_BLOCK_TYPE_UVD: 272 case AMD_IP_BLOCK_TYPE_VCN: 273 ret = smu_dpm_set_vcn_enable(smu, !gate); 274 if (ret) 275 dev_err(smu->adev->dev, "Failed to power %s VCN!\n", 276 gate ? "gate" : "ungate"); 277 break; 278 case AMD_IP_BLOCK_TYPE_GFX: 279 ret = smu_gfx_off_control(smu, gate); 280 if (ret) 281 dev_err(smu->adev->dev, "Failed to %s gfxoff!\n", 282 gate ? "enable" : "disable"); 283 break; 284 case AMD_IP_BLOCK_TYPE_SDMA: 285 ret = smu_powergate_sdma(smu, gate); 286 if (ret) 287 dev_err(smu->adev->dev, "Failed to power %s SDMA!\n", 288 gate ? "gate" : "ungate"); 289 break; 290 case AMD_IP_BLOCK_TYPE_JPEG: 291 ret = smu_dpm_set_jpeg_enable(smu, !gate); 292 if (ret) 293 dev_err(smu->adev->dev, "Failed to power %s JPEG!\n", 294 gate ? "gate" : "ungate"); 295 break; 296 default: 297 dev_err(smu->adev->dev, "Unsupported block type!\n"); 298 return -EINVAL; 299 } 300 301 return ret; 302 } 303 304 /** 305 * smu_set_user_clk_dependencies - set user profile clock dependencies 306 * 307 * @smu: smu_context pointer 308 * @clk: enum smu_clk_type type 309 * 310 * Enable/Disable the clock dependency for the @clk type. 311 */ 312 static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk) 313 { 314 if (smu->adev->in_suspend) 315 return; 316 317 if (clk == SMU_MCLK) { 318 smu->user_dpm_profile.clk_dependency = 0; 319 smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK); 320 } else if (clk == SMU_FCLK) { 321 /* MCLK takes precedence over FCLK */ 322 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK))) 323 return; 324 325 smu->user_dpm_profile.clk_dependency = 0; 326 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK); 327 } else if (clk == SMU_SOCCLK) { 328 /* MCLK takes precedence over SOCCLK */ 329 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK))) 330 return; 331 332 smu->user_dpm_profile.clk_dependency = 0; 333 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK); 334 } else 335 /* Add clk dependencies here, if any */ 336 return; 337 } 338 339 /** 340 * smu_restore_dpm_user_profile - reinstate user dpm profile 341 * 342 * @smu: smu_context pointer 343 * 344 * Restore the saved user power configurations include power limit, 345 * clock frequencies, fan control mode and fan speed. 346 */ 347 static void smu_restore_dpm_user_profile(struct smu_context *smu) 348 { 349 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 350 int ret = 0; 351 352 if (!smu->adev->in_suspend) 353 return; 354 355 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 356 return; 357 358 /* Enable restore flag */ 359 smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE; 360 361 /* set the user dpm power limit */ 362 if (smu->user_dpm_profile.power_limit) { 363 ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit); 364 if (ret) 365 dev_err(smu->adev->dev, "Failed to set power limit value\n"); 366 } 367 368 /* set the user dpm clock configurations */ 369 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 370 enum smu_clk_type clk_type; 371 372 for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) { 373 /* 374 * Iterate over smu clk type and force the saved user clk 375 * configs, skip if clock dependency is enabled 376 */ 377 if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) && 378 smu->user_dpm_profile.clk_mask[clk_type]) { 379 ret = smu_force_smuclk_levels(smu, clk_type, 380 smu->user_dpm_profile.clk_mask[clk_type]); 381 if (ret) 382 dev_err(smu->adev->dev, 383 "Failed to set clock type = %d\n", clk_type); 384 } 385 } 386 } 387 388 /* set the user dpm fan configurations */ 389 if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL || 390 smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) { 391 ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode); 392 if (ret != -EOPNOTSUPP) { 393 smu->user_dpm_profile.fan_speed_pwm = 0; 394 smu->user_dpm_profile.fan_speed_rpm = 0; 395 smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO; 396 dev_err(smu->adev->dev, "Failed to set manual fan control mode\n"); 397 } 398 399 if (smu->user_dpm_profile.fan_speed_pwm) { 400 ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm); 401 if (ret != -EOPNOTSUPP) 402 dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n"); 403 } 404 405 if (smu->user_dpm_profile.fan_speed_rpm) { 406 ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm); 407 if (ret != -EOPNOTSUPP) 408 dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n"); 409 } 410 } 411 412 /* Restore user customized OD settings */ 413 if (smu->user_dpm_profile.user_od) { 414 if (smu->ppt_funcs->restore_user_od_settings) { 415 ret = smu->ppt_funcs->restore_user_od_settings(smu); 416 if (ret) 417 dev_err(smu->adev->dev, "Failed to upload customized OD settings\n"); 418 } 419 } 420 421 /* Disable restore flag */ 422 smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE; 423 } 424 425 static int smu_get_power_num_states(void *handle, 426 struct pp_states_info *state_info) 427 { 428 if (!state_info) 429 return -EINVAL; 430 431 /* not support power state */ 432 memset(state_info, 0, sizeof(struct pp_states_info)); 433 state_info->nums = 1; 434 state_info->states[0] = POWER_STATE_TYPE_DEFAULT; 435 436 return 0; 437 } 438 439 bool is_support_sw_smu(struct amdgpu_device *adev) 440 { 441 /* vega20 is 11.0.2, but it's supported via the powerplay code */ 442 if (adev->asic_type == CHIP_VEGA20) 443 return false; 444 445 if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0)) 446 return true; 447 448 return false; 449 } 450 451 bool is_support_cclk_dpm(struct amdgpu_device *adev) 452 { 453 struct smu_context *smu = adev->powerplay.pp_handle; 454 455 if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT)) 456 return false; 457 458 return true; 459 } 460 461 462 static int smu_sys_get_pp_table(void *handle, 463 char **table) 464 { 465 struct smu_context *smu = handle; 466 struct smu_table_context *smu_table = &smu->smu_table; 467 468 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 469 return -EOPNOTSUPP; 470 471 if (!smu_table->power_play_table && !smu_table->hardcode_pptable) 472 return -EINVAL; 473 474 if (smu_table->hardcode_pptable) 475 *table = smu_table->hardcode_pptable; 476 else 477 *table = smu_table->power_play_table; 478 479 return smu_table->power_play_table_size; 480 } 481 482 static int smu_sys_set_pp_table(void *handle, 483 const char *buf, 484 size_t size) 485 { 486 struct smu_context *smu = handle; 487 struct smu_table_context *smu_table = &smu->smu_table; 488 ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf; 489 int ret = 0; 490 491 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 492 return -EOPNOTSUPP; 493 494 if (header->usStructureSize != size) { 495 dev_err(smu->adev->dev, "pp table size not matched !\n"); 496 return -EIO; 497 } 498 499 if (!smu_table->hardcode_pptable) { 500 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL); 501 if (!smu_table->hardcode_pptable) 502 return -ENOMEM; 503 } 504 505 memcpy(smu_table->hardcode_pptable, buf, size); 506 smu_table->power_play_table = smu_table->hardcode_pptable; 507 smu_table->power_play_table_size = size; 508 509 /* 510 * Special hw_fini action(for Navi1x, the DPMs disablement will be 511 * skipped) may be needed for custom pptable uploading. 512 */ 513 smu->uploading_custom_pp_table = true; 514 515 ret = smu_reset(smu); 516 if (ret) 517 dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret); 518 519 smu->uploading_custom_pp_table = false; 520 521 return ret; 522 } 523 524 static int smu_get_driver_allowed_feature_mask(struct smu_context *smu) 525 { 526 struct smu_feature *feature = &smu->smu_feature; 527 uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32]; 528 int ret = 0; 529 530 /* 531 * With SCPM enabled, the allowed featuremasks setting(via 532 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted. 533 * That means there is no way to let PMFW knows the settings below. 534 * Thus, we just assume all the features are allowed under 535 * such scenario. 536 */ 537 if (smu->adev->scpm_enabled) { 538 bitmap_fill(feature->allowed, SMU_FEATURE_MAX); 539 return 0; 540 } 541 542 bitmap_zero(feature->allowed, SMU_FEATURE_MAX); 543 544 ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask, 545 SMU_FEATURE_MAX/32); 546 if (ret) 547 return ret; 548 549 bitmap_or(feature->allowed, feature->allowed, 550 (unsigned long *)allowed_feature_mask, 551 feature->feature_num); 552 553 return ret; 554 } 555 556 static int smu_set_funcs(struct amdgpu_device *adev) 557 { 558 struct smu_context *smu = adev->powerplay.pp_handle; 559 560 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) 561 smu->od_enabled = true; 562 563 switch (adev->ip_versions[MP1_HWIP][0]) { 564 case IP_VERSION(11, 0, 0): 565 case IP_VERSION(11, 0, 5): 566 case IP_VERSION(11, 0, 9): 567 navi10_set_ppt_funcs(smu); 568 break; 569 case IP_VERSION(11, 0, 7): 570 case IP_VERSION(11, 0, 11): 571 case IP_VERSION(11, 0, 12): 572 case IP_VERSION(11, 0, 13): 573 sienna_cichlid_set_ppt_funcs(smu); 574 break; 575 case IP_VERSION(12, 0, 0): 576 case IP_VERSION(12, 0, 1): 577 renoir_set_ppt_funcs(smu); 578 break; 579 case IP_VERSION(11, 5, 0): 580 vangogh_set_ppt_funcs(smu); 581 break; 582 case IP_VERSION(13, 0, 1): 583 case IP_VERSION(13, 0, 3): 584 case IP_VERSION(13, 0, 8): 585 yellow_carp_set_ppt_funcs(smu); 586 break; 587 case IP_VERSION(13, 0, 4): 588 case IP_VERSION(13, 0, 11): 589 smu_v13_0_4_set_ppt_funcs(smu); 590 break; 591 case IP_VERSION(13, 0, 5): 592 smu_v13_0_5_set_ppt_funcs(smu); 593 break; 594 case IP_VERSION(11, 0, 8): 595 cyan_skillfish_set_ppt_funcs(smu); 596 break; 597 case IP_VERSION(11, 0, 2): 598 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 599 arcturus_set_ppt_funcs(smu); 600 /* OD is not supported on Arcturus */ 601 smu->od_enabled =false; 602 break; 603 case IP_VERSION(13, 0, 2): 604 aldebaran_set_ppt_funcs(smu); 605 /* Enable pp_od_clk_voltage node */ 606 smu->od_enabled = true; 607 break; 608 case IP_VERSION(13, 0, 0): 609 case IP_VERSION(13, 0, 10): 610 smu_v13_0_0_set_ppt_funcs(smu); 611 break; 612 case IP_VERSION(13, 0, 7): 613 smu_v13_0_7_set_ppt_funcs(smu); 614 break; 615 default: 616 return -EINVAL; 617 } 618 619 return 0; 620 } 621 622 static int smu_early_init(void *handle) 623 { 624 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 625 struct smu_context *smu; 626 int r; 627 628 smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL); 629 if (!smu) 630 return -ENOMEM; 631 632 smu->adev = adev; 633 smu->pm_enabled = !!amdgpu_dpm; 634 smu->is_apu = false; 635 smu->smu_baco.state = SMU_BACO_STATE_EXIT; 636 smu->smu_baco.platform_support = false; 637 smu->user_dpm_profile.fan_mode = -1; 638 639 mutex_init(&smu->message_lock); 640 641 adev->powerplay.pp_handle = smu; 642 adev->powerplay.pp_funcs = &swsmu_pm_funcs; 643 644 r = smu_set_funcs(adev); 645 if (r) 646 return r; 647 return smu_init_microcode(smu); 648 } 649 650 static int smu_set_default_dpm_table(struct smu_context *smu) 651 { 652 struct smu_power_context *smu_power = &smu->smu_power; 653 struct smu_power_gate *power_gate = &smu_power->power_gate; 654 int vcn_gate, jpeg_gate; 655 int ret = 0; 656 657 if (!smu->ppt_funcs->set_default_dpm_table) 658 return 0; 659 660 vcn_gate = atomic_read(&power_gate->vcn_gated); 661 jpeg_gate = atomic_read(&power_gate->jpeg_gated); 662 663 ret = smu_dpm_set_vcn_enable(smu, true); 664 if (ret) 665 return ret; 666 667 ret = smu_dpm_set_jpeg_enable(smu, true); 668 if (ret) 669 goto err_out; 670 671 ret = smu->ppt_funcs->set_default_dpm_table(smu); 672 if (ret) 673 dev_err(smu->adev->dev, 674 "Failed to setup default dpm clock tables!\n"); 675 676 smu_dpm_set_jpeg_enable(smu, !jpeg_gate); 677 err_out: 678 smu_dpm_set_vcn_enable(smu, !vcn_gate); 679 return ret; 680 } 681 682 static int smu_apply_default_config_table_settings(struct smu_context *smu) 683 { 684 struct amdgpu_device *adev = smu->adev; 685 int ret = 0; 686 687 ret = smu_get_default_config_table_settings(smu, 688 &adev->pm.config_table); 689 if (ret) 690 return ret; 691 692 return smu_set_config_table(smu, &adev->pm.config_table); 693 } 694 695 static int smu_late_init(void *handle) 696 { 697 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 698 struct smu_context *smu = adev->powerplay.pp_handle; 699 int ret = 0; 700 701 smu_set_fine_grain_gfx_freq_parameters(smu); 702 703 if (!smu->pm_enabled) 704 return 0; 705 706 ret = smu_post_init(smu); 707 if (ret) { 708 dev_err(adev->dev, "Failed to post smu init!\n"); 709 return ret; 710 } 711 712 if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) || 713 (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3))) 714 return 0; 715 716 if (!amdgpu_sriov_vf(adev) || smu->od_enabled) { 717 ret = smu_set_default_od_settings(smu); 718 if (ret) { 719 dev_err(adev->dev, "Failed to setup default OD settings!\n"); 720 return ret; 721 } 722 } 723 724 ret = smu_populate_umd_state_clk(smu); 725 if (ret) { 726 dev_err(adev->dev, "Failed to populate UMD state clocks!\n"); 727 return ret; 728 } 729 730 ret = smu_get_asic_power_limits(smu, 731 &smu->current_power_limit, 732 &smu->default_power_limit, 733 &smu->max_power_limit); 734 if (ret) { 735 dev_err(adev->dev, "Failed to get asic power limits!\n"); 736 return ret; 737 } 738 739 if (!amdgpu_sriov_vf(adev)) 740 smu_get_unique_id(smu); 741 742 smu_get_fan_parameters(smu); 743 744 smu_handle_task(smu, 745 smu->smu_dpm.dpm_level, 746 AMD_PP_TASK_COMPLETE_INIT); 747 748 ret = smu_apply_default_config_table_settings(smu); 749 if (ret && (ret != -EOPNOTSUPP)) { 750 dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n"); 751 return ret; 752 } 753 754 smu_restore_dpm_user_profile(smu); 755 756 return 0; 757 } 758 759 static int smu_init_fb_allocations(struct smu_context *smu) 760 { 761 struct amdgpu_device *adev = smu->adev; 762 struct smu_table_context *smu_table = &smu->smu_table; 763 struct smu_table *tables = smu_table->tables; 764 struct smu_table *driver_table = &(smu_table->driver_table); 765 uint32_t max_table_size = 0; 766 int ret, i; 767 768 /* VRAM allocation for tool table */ 769 if (tables[SMU_TABLE_PMSTATUSLOG].size) { 770 ret = amdgpu_bo_create_kernel(adev, 771 tables[SMU_TABLE_PMSTATUSLOG].size, 772 tables[SMU_TABLE_PMSTATUSLOG].align, 773 tables[SMU_TABLE_PMSTATUSLOG].domain, 774 &tables[SMU_TABLE_PMSTATUSLOG].bo, 775 &tables[SMU_TABLE_PMSTATUSLOG].mc_address, 776 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr); 777 if (ret) { 778 dev_err(adev->dev, "VRAM allocation for tool table failed!\n"); 779 return ret; 780 } 781 } 782 783 /* VRAM allocation for driver table */ 784 for (i = 0; i < SMU_TABLE_COUNT; i++) { 785 if (tables[i].size == 0) 786 continue; 787 788 if (i == SMU_TABLE_PMSTATUSLOG) 789 continue; 790 791 if (max_table_size < tables[i].size) 792 max_table_size = tables[i].size; 793 } 794 795 driver_table->size = max_table_size; 796 driver_table->align = PAGE_SIZE; 797 driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM; 798 799 ret = amdgpu_bo_create_kernel(adev, 800 driver_table->size, 801 driver_table->align, 802 driver_table->domain, 803 &driver_table->bo, 804 &driver_table->mc_address, 805 &driver_table->cpu_addr); 806 if (ret) { 807 dev_err(adev->dev, "VRAM allocation for driver table failed!\n"); 808 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address) 809 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo, 810 &tables[SMU_TABLE_PMSTATUSLOG].mc_address, 811 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr); 812 } 813 814 return ret; 815 } 816 817 static int smu_fini_fb_allocations(struct smu_context *smu) 818 { 819 struct smu_table_context *smu_table = &smu->smu_table; 820 struct smu_table *tables = smu_table->tables; 821 struct smu_table *driver_table = &(smu_table->driver_table); 822 823 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address) 824 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo, 825 &tables[SMU_TABLE_PMSTATUSLOG].mc_address, 826 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr); 827 828 amdgpu_bo_free_kernel(&driver_table->bo, 829 &driver_table->mc_address, 830 &driver_table->cpu_addr); 831 832 return 0; 833 } 834 835 /** 836 * smu_alloc_memory_pool - allocate memory pool in the system memory 837 * 838 * @smu: amdgpu_device pointer 839 * 840 * This memory pool will be used for SMC use and msg SetSystemVirtualDramAddr 841 * and DramLogSetDramAddr can notify it changed. 842 * 843 * Returns 0 on success, error on failure. 844 */ 845 static int smu_alloc_memory_pool(struct smu_context *smu) 846 { 847 struct amdgpu_device *adev = smu->adev; 848 struct smu_table_context *smu_table = &smu->smu_table; 849 struct smu_table *memory_pool = &smu_table->memory_pool; 850 uint64_t pool_size = smu->pool_size; 851 int ret = 0; 852 853 if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO) 854 return ret; 855 856 memory_pool->size = pool_size; 857 memory_pool->align = PAGE_SIZE; 858 memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT; 859 860 switch (pool_size) { 861 case SMU_MEMORY_POOL_SIZE_256_MB: 862 case SMU_MEMORY_POOL_SIZE_512_MB: 863 case SMU_MEMORY_POOL_SIZE_1_GB: 864 case SMU_MEMORY_POOL_SIZE_2_GB: 865 ret = amdgpu_bo_create_kernel(adev, 866 memory_pool->size, 867 memory_pool->align, 868 memory_pool->domain, 869 &memory_pool->bo, 870 &memory_pool->mc_address, 871 &memory_pool->cpu_addr); 872 if (ret) 873 dev_err(adev->dev, "VRAM allocation for dramlog failed!\n"); 874 break; 875 default: 876 break; 877 } 878 879 return ret; 880 } 881 882 static int smu_free_memory_pool(struct smu_context *smu) 883 { 884 struct smu_table_context *smu_table = &smu->smu_table; 885 struct smu_table *memory_pool = &smu_table->memory_pool; 886 887 if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO) 888 return 0; 889 890 amdgpu_bo_free_kernel(&memory_pool->bo, 891 &memory_pool->mc_address, 892 &memory_pool->cpu_addr); 893 894 memset(memory_pool, 0, sizeof(struct smu_table)); 895 896 return 0; 897 } 898 899 static int smu_alloc_dummy_read_table(struct smu_context *smu) 900 { 901 struct smu_table_context *smu_table = &smu->smu_table; 902 struct smu_table *dummy_read_1_table = 903 &smu_table->dummy_read_1_table; 904 struct amdgpu_device *adev = smu->adev; 905 int ret = 0; 906 907 dummy_read_1_table->size = 0x40000; 908 dummy_read_1_table->align = PAGE_SIZE; 909 dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM; 910 911 ret = amdgpu_bo_create_kernel(adev, 912 dummy_read_1_table->size, 913 dummy_read_1_table->align, 914 dummy_read_1_table->domain, 915 &dummy_read_1_table->bo, 916 &dummy_read_1_table->mc_address, 917 &dummy_read_1_table->cpu_addr); 918 if (ret) 919 dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n"); 920 921 return ret; 922 } 923 924 static void smu_free_dummy_read_table(struct smu_context *smu) 925 { 926 struct smu_table_context *smu_table = &smu->smu_table; 927 struct smu_table *dummy_read_1_table = 928 &smu_table->dummy_read_1_table; 929 930 931 amdgpu_bo_free_kernel(&dummy_read_1_table->bo, 932 &dummy_read_1_table->mc_address, 933 &dummy_read_1_table->cpu_addr); 934 935 memset(dummy_read_1_table, 0, sizeof(struct smu_table)); 936 } 937 938 static int smu_smc_table_sw_init(struct smu_context *smu) 939 { 940 int ret; 941 942 /** 943 * Create smu_table structure, and init smc tables such as 944 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, and etc. 945 */ 946 ret = smu_init_smc_tables(smu); 947 if (ret) { 948 dev_err(smu->adev->dev, "Failed to init smc tables!\n"); 949 return ret; 950 } 951 952 /** 953 * Create smu_power_context structure, and allocate smu_dpm_context and 954 * context size to fill the smu_power_context data. 955 */ 956 ret = smu_init_power(smu); 957 if (ret) { 958 dev_err(smu->adev->dev, "Failed to init smu_init_power!\n"); 959 return ret; 960 } 961 962 /* 963 * allocate vram bos to store smc table contents. 964 */ 965 ret = smu_init_fb_allocations(smu); 966 if (ret) 967 return ret; 968 969 ret = smu_alloc_memory_pool(smu); 970 if (ret) 971 return ret; 972 973 ret = smu_alloc_dummy_read_table(smu); 974 if (ret) 975 return ret; 976 977 ret = smu_i2c_init(smu); 978 if (ret) 979 return ret; 980 981 return 0; 982 } 983 984 static int smu_smc_table_sw_fini(struct smu_context *smu) 985 { 986 int ret; 987 988 smu_i2c_fini(smu); 989 990 smu_free_dummy_read_table(smu); 991 992 ret = smu_free_memory_pool(smu); 993 if (ret) 994 return ret; 995 996 ret = smu_fini_fb_allocations(smu); 997 if (ret) 998 return ret; 999 1000 ret = smu_fini_power(smu); 1001 if (ret) { 1002 dev_err(smu->adev->dev, "Failed to init smu_fini_power!\n"); 1003 return ret; 1004 } 1005 1006 ret = smu_fini_smc_tables(smu); 1007 if (ret) { 1008 dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n"); 1009 return ret; 1010 } 1011 1012 return 0; 1013 } 1014 1015 static void smu_throttling_logging_work_fn(struct work_struct *work) 1016 { 1017 struct smu_context *smu = container_of(work, struct smu_context, 1018 throttling_logging_work); 1019 1020 smu_log_thermal_throttling(smu); 1021 } 1022 1023 static void smu_interrupt_work_fn(struct work_struct *work) 1024 { 1025 struct smu_context *smu = container_of(work, struct smu_context, 1026 interrupt_work); 1027 1028 if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work) 1029 smu->ppt_funcs->interrupt_work(smu); 1030 } 1031 1032 static int smu_sw_init(void *handle) 1033 { 1034 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1035 struct smu_context *smu = adev->powerplay.pp_handle; 1036 int ret; 1037 1038 smu->pool_size = adev->pm.smu_prv_buffer_size; 1039 smu->smu_feature.feature_num = SMU_FEATURE_MAX; 1040 bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX); 1041 bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX); 1042 1043 INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn); 1044 INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn); 1045 atomic64_set(&smu->throttle_int_counter, 0); 1046 smu->watermarks_bitmap = 0; 1047 smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 1048 smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 1049 1050 atomic_set(&smu->smu_power.power_gate.vcn_gated, 1); 1051 atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); 1052 1053 smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; 1054 smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; 1055 smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; 1056 smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; 1057 smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3; 1058 smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; 1059 smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; 1060 smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; 1061 1062 smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 1063 smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; 1064 smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING; 1065 smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO; 1066 smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR; 1067 smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE; 1068 smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM; 1069 smu->display_config = &adev->pm.pm_display_cfg; 1070 1071 smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; 1072 smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; 1073 1074 ret = smu_smc_table_sw_init(smu); 1075 if (ret) { 1076 dev_err(adev->dev, "Failed to sw init smc table!\n"); 1077 return ret; 1078 } 1079 1080 /* get boot_values from vbios to set revision, gfxclk, and etc. */ 1081 ret = smu_get_vbios_bootup_values(smu); 1082 if (ret) { 1083 dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n"); 1084 return ret; 1085 } 1086 1087 ret = smu_init_pptable_microcode(smu); 1088 if (ret) { 1089 dev_err(adev->dev, "Failed to setup pptable firmware!\n"); 1090 return ret; 1091 } 1092 1093 ret = smu_register_irq_handler(smu); 1094 if (ret) { 1095 dev_err(adev->dev, "Failed to register smc irq handler!\n"); 1096 return ret; 1097 } 1098 1099 /* If there is no way to query fan control mode, fan control is not supported */ 1100 if (!smu->ppt_funcs->get_fan_control_mode) 1101 smu->adev->pm.no_fan = true; 1102 1103 return 0; 1104 } 1105 1106 static int smu_sw_fini(void *handle) 1107 { 1108 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1109 struct smu_context *smu = adev->powerplay.pp_handle; 1110 int ret; 1111 1112 ret = smu_smc_table_sw_fini(smu); 1113 if (ret) { 1114 dev_err(adev->dev, "Failed to sw fini smc table!\n"); 1115 return ret; 1116 } 1117 1118 smu_fini_microcode(smu); 1119 1120 return 0; 1121 } 1122 1123 static int smu_get_thermal_temperature_range(struct smu_context *smu) 1124 { 1125 struct amdgpu_device *adev = smu->adev; 1126 struct smu_temperature_range *range = 1127 &smu->thermal_range; 1128 int ret = 0; 1129 1130 if (!smu->ppt_funcs->get_thermal_temperature_range) 1131 return 0; 1132 1133 ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range); 1134 if (ret) 1135 return ret; 1136 1137 adev->pm.dpm.thermal.min_temp = range->min; 1138 adev->pm.dpm.thermal.max_temp = range->max; 1139 adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max; 1140 adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min; 1141 adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max; 1142 adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max; 1143 adev->pm.dpm.thermal.min_mem_temp = range->mem_min; 1144 adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max; 1145 adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max; 1146 1147 return ret; 1148 } 1149 1150 static int smu_smc_hw_setup(struct smu_context *smu) 1151 { 1152 struct smu_feature *feature = &smu->smu_feature; 1153 struct amdgpu_device *adev = smu->adev; 1154 uint32_t pcie_gen = 0, pcie_width = 0; 1155 uint64_t features_supported; 1156 int ret = 0; 1157 1158 switch (adev->ip_versions[MP1_HWIP][0]) { 1159 case IP_VERSION(11, 0, 7): 1160 case IP_VERSION(11, 0, 11): 1161 case IP_VERSION(11, 5, 0): 1162 case IP_VERSION(11, 0, 12): 1163 if (adev->in_suspend && smu_is_dpm_running(smu)) { 1164 dev_info(adev->dev, "dpm has been enabled\n"); 1165 ret = smu_system_features_control(smu, true); 1166 if (ret) 1167 dev_err(adev->dev, "Failed system features control!\n"); 1168 return ret; 1169 } 1170 break; 1171 default: 1172 break; 1173 } 1174 1175 ret = smu_init_display_count(smu, 0); 1176 if (ret) { 1177 dev_info(adev->dev, "Failed to pre-set display count as 0!\n"); 1178 return ret; 1179 } 1180 1181 ret = smu_set_driver_table_location(smu); 1182 if (ret) { 1183 dev_err(adev->dev, "Failed to SetDriverDramAddr!\n"); 1184 return ret; 1185 } 1186 1187 /* 1188 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools. 1189 */ 1190 ret = smu_set_tool_table_location(smu); 1191 if (ret) { 1192 dev_err(adev->dev, "Failed to SetToolsDramAddr!\n"); 1193 return ret; 1194 } 1195 1196 /* 1197 * Use msg SetSystemVirtualDramAddr and DramLogSetDramAddr can notify 1198 * pool location. 1199 */ 1200 ret = smu_notify_memory_pool_location(smu); 1201 if (ret) { 1202 dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n"); 1203 return ret; 1204 } 1205 1206 ret = smu_setup_pptable(smu); 1207 if (ret) { 1208 dev_err(adev->dev, "Failed to setup pptable!\n"); 1209 return ret; 1210 } 1211 1212 /* smu_dump_pptable(smu); */ 1213 1214 /* 1215 * With SCPM enabled, PSP is responsible for the PPTable transferring 1216 * (to SMU). Driver involvement is not needed and permitted. 1217 */ 1218 if (!adev->scpm_enabled) { 1219 /* 1220 * Copy pptable bo in the vram to smc with SMU MSGs such as 1221 * SetDriverDramAddr and TransferTableDram2Smu. 1222 */ 1223 ret = smu_write_pptable(smu); 1224 if (ret) { 1225 dev_err(adev->dev, "Failed to transfer pptable to SMC!\n"); 1226 return ret; 1227 } 1228 } 1229 1230 /* issue Run*Btc msg */ 1231 ret = smu_run_btc(smu); 1232 if (ret) 1233 return ret; 1234 1235 /* 1236 * With SCPM enabled, these actions(and relevant messages) are 1237 * not needed and permitted. 1238 */ 1239 if (!adev->scpm_enabled) { 1240 ret = smu_feature_set_allowed_mask(smu); 1241 if (ret) { 1242 dev_err(adev->dev, "Failed to set driver allowed features mask!\n"); 1243 return ret; 1244 } 1245 } 1246 1247 ret = smu_system_features_control(smu, true); 1248 if (ret) { 1249 dev_err(adev->dev, "Failed to enable requested dpm features!\n"); 1250 return ret; 1251 } 1252 1253 ret = smu_feature_get_enabled_mask(smu, &features_supported); 1254 if (ret) { 1255 dev_err(adev->dev, "Failed to retrieve supported dpm features!\n"); 1256 return ret; 1257 } 1258 bitmap_copy(feature->supported, 1259 (unsigned long *)&features_supported, 1260 feature->feature_num); 1261 1262 if (!smu_is_dpm_running(smu)) 1263 dev_info(adev->dev, "dpm has been disabled\n"); 1264 1265 /* 1266 * Set initialized values (get from vbios) to dpm tables context such as 1267 * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each 1268 * type of clks. 1269 */ 1270 ret = smu_set_default_dpm_table(smu); 1271 if (ret) { 1272 dev_err(adev->dev, "Failed to setup default dpm clock tables!\n"); 1273 return ret; 1274 } 1275 1276 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) 1277 pcie_gen = 3; 1278 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) 1279 pcie_gen = 2; 1280 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) 1281 pcie_gen = 1; 1282 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1) 1283 pcie_gen = 0; 1284 1285 /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1 1286 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4 1287 * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 1288 */ 1289 if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) 1290 pcie_width = 6; 1291 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) 1292 pcie_width = 5; 1293 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) 1294 pcie_width = 4; 1295 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) 1296 pcie_width = 3; 1297 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) 1298 pcie_width = 2; 1299 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) 1300 pcie_width = 1; 1301 ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width); 1302 if (ret) { 1303 dev_err(adev->dev, "Attempt to override pcie params failed!\n"); 1304 return ret; 1305 } 1306 1307 ret = smu_get_thermal_temperature_range(smu); 1308 if (ret) { 1309 dev_err(adev->dev, "Failed to get thermal temperature ranges!\n"); 1310 return ret; 1311 } 1312 1313 ret = smu_enable_thermal_alert(smu); 1314 if (ret) { 1315 dev_err(adev->dev, "Failed to enable thermal alert!\n"); 1316 return ret; 1317 } 1318 1319 ret = smu_notify_display_change(smu); 1320 if (ret) { 1321 dev_err(adev->dev, "Failed to notify display change!\n"); 1322 return ret; 1323 } 1324 1325 /* 1326 * Set min deep sleep dce fclk with bootup value from vbios via 1327 * SetMinDeepSleepDcefclk MSG. 1328 */ 1329 ret = smu_set_min_dcef_deep_sleep(smu, 1330 smu->smu_table.boot_values.dcefclk / 100); 1331 1332 return ret; 1333 } 1334 1335 static int smu_start_smc_engine(struct smu_context *smu) 1336 { 1337 struct amdgpu_device *adev = smu->adev; 1338 int ret = 0; 1339 1340 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 1341 if (adev->ip_versions[MP1_HWIP][0] < IP_VERSION(11, 0, 0)) { 1342 if (smu->ppt_funcs->load_microcode) { 1343 ret = smu->ppt_funcs->load_microcode(smu); 1344 if (ret) 1345 return ret; 1346 } 1347 } 1348 } 1349 1350 if (smu->ppt_funcs->check_fw_status) { 1351 ret = smu->ppt_funcs->check_fw_status(smu); 1352 if (ret) { 1353 dev_err(adev->dev, "SMC is not ready\n"); 1354 return ret; 1355 } 1356 } 1357 1358 /* 1359 * Send msg GetDriverIfVersion to check if the return value is equal 1360 * with DRIVER_IF_VERSION of smc header. 1361 */ 1362 ret = smu_check_fw_version(smu); 1363 if (ret) 1364 return ret; 1365 1366 return ret; 1367 } 1368 1369 static int smu_hw_init(void *handle) 1370 { 1371 int ret; 1372 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1373 struct smu_context *smu = adev->powerplay.pp_handle; 1374 1375 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) { 1376 smu->pm_enabled = false; 1377 return 0; 1378 } 1379 1380 ret = smu_start_smc_engine(smu); 1381 if (ret) { 1382 dev_err(adev->dev, "SMC engine is not correctly up!\n"); 1383 return ret; 1384 } 1385 1386 if (smu->is_apu) { 1387 if ((smu->ppt_funcs->set_gfx_power_up_by_imu) && 1388 likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { 1389 ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu); 1390 if (ret) { 1391 dev_err(adev->dev, "Failed to Enable gfx imu!\n"); 1392 return ret; 1393 } 1394 } 1395 1396 smu_dpm_set_vcn_enable(smu, true); 1397 smu_dpm_set_jpeg_enable(smu, true); 1398 smu_set_gfx_cgpg(smu, true); 1399 } 1400 1401 if (!smu->pm_enabled) 1402 return 0; 1403 1404 ret = smu_get_driver_allowed_feature_mask(smu); 1405 if (ret) 1406 return ret; 1407 1408 ret = smu_smc_hw_setup(smu); 1409 if (ret) { 1410 dev_err(adev->dev, "Failed to setup smc hw!\n"); 1411 return ret; 1412 } 1413 1414 /* 1415 * Move maximum sustainable clock retrieving here considering 1416 * 1. It is not needed on resume(from S3). 1417 * 2. DAL settings come between .hw_init and .late_init of SMU. 1418 * And DAL needs to know the maximum sustainable clocks. Thus 1419 * it cannot be put in .late_init(). 1420 */ 1421 ret = smu_init_max_sustainable_clocks(smu); 1422 if (ret) { 1423 dev_err(adev->dev, "Failed to init max sustainable clocks!\n"); 1424 return ret; 1425 } 1426 1427 adev->pm.dpm_enabled = true; 1428 1429 dev_info(adev->dev, "SMU is initialized successfully!\n"); 1430 1431 return 0; 1432 } 1433 1434 static int smu_disable_dpms(struct smu_context *smu) 1435 { 1436 struct amdgpu_device *adev = smu->adev; 1437 int ret = 0; 1438 bool use_baco = !smu->is_apu && 1439 ((amdgpu_in_reset(adev) && 1440 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) || 1441 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev))); 1442 1443 /* 1444 * For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features(disablement or others) 1445 * properly on suspend/reset/unload. Driver involvement may cause some unexpected issues. 1446 */ 1447 switch (adev->ip_versions[MP1_HWIP][0]) { 1448 case IP_VERSION(13, 0, 0): 1449 case IP_VERSION(13, 0, 7): 1450 case IP_VERSION(13, 0, 10): 1451 return 0; 1452 default: 1453 break; 1454 } 1455 1456 /* 1457 * For custom pptable uploading, skip the DPM features 1458 * disable process on Navi1x ASICs. 1459 * - As the gfx related features are under control of 1460 * RLC on those ASICs. RLC reinitialization will be 1461 * needed to reenable them. That will cost much more 1462 * efforts. 1463 * 1464 * - SMU firmware can handle the DPM reenablement 1465 * properly. 1466 */ 1467 if (smu->uploading_custom_pp_table) { 1468 switch (adev->ip_versions[MP1_HWIP][0]) { 1469 case IP_VERSION(11, 0, 0): 1470 case IP_VERSION(11, 0, 5): 1471 case IP_VERSION(11, 0, 9): 1472 case IP_VERSION(11, 0, 7): 1473 case IP_VERSION(11, 0, 11): 1474 case IP_VERSION(11, 5, 0): 1475 case IP_VERSION(11, 0, 12): 1476 case IP_VERSION(11, 0, 13): 1477 return 0; 1478 default: 1479 break; 1480 } 1481 } 1482 1483 /* 1484 * For Sienna_Cichlid, PMFW will handle the features disablement properly 1485 * on BACO in. Driver involvement is unnecessary. 1486 */ 1487 if (use_baco) { 1488 switch (adev->ip_versions[MP1_HWIP][0]) { 1489 case IP_VERSION(11, 0, 7): 1490 case IP_VERSION(11, 0, 0): 1491 case IP_VERSION(11, 0, 5): 1492 case IP_VERSION(11, 0, 9): 1493 case IP_VERSION(13, 0, 7): 1494 return 0; 1495 default: 1496 break; 1497 } 1498 } 1499 1500 /* 1501 * For gpu reset, runpm and hibernation through BACO, 1502 * BACO feature has to be kept enabled. 1503 */ 1504 if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) { 1505 ret = smu_disable_all_features_with_exception(smu, 1506 SMU_FEATURE_BACO_BIT); 1507 if (ret) 1508 dev_err(adev->dev, "Failed to disable smu features except BACO.\n"); 1509 } else { 1510 /* DisableAllSmuFeatures message is not permitted with SCPM enabled */ 1511 if (!adev->scpm_enabled) { 1512 ret = smu_system_features_control(smu, false); 1513 if (ret) 1514 dev_err(adev->dev, "Failed to disable smu features.\n"); 1515 } 1516 } 1517 1518 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) && 1519 !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop) 1520 adev->gfx.rlc.funcs->stop(adev); 1521 1522 return ret; 1523 } 1524 1525 static int smu_smc_hw_cleanup(struct smu_context *smu) 1526 { 1527 struct amdgpu_device *adev = smu->adev; 1528 int ret = 0; 1529 1530 cancel_work_sync(&smu->throttling_logging_work); 1531 cancel_work_sync(&smu->interrupt_work); 1532 1533 ret = smu_disable_thermal_alert(smu); 1534 if (ret) { 1535 dev_err(adev->dev, "Fail to disable thermal alert!\n"); 1536 return ret; 1537 } 1538 1539 ret = smu_disable_dpms(smu); 1540 if (ret) { 1541 dev_err(adev->dev, "Fail to disable dpm features!\n"); 1542 return ret; 1543 } 1544 1545 return 0; 1546 } 1547 1548 static int smu_hw_fini(void *handle) 1549 { 1550 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1551 struct smu_context *smu = adev->powerplay.pp_handle; 1552 1553 if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) 1554 return 0; 1555 1556 smu_dpm_set_vcn_enable(smu, false); 1557 smu_dpm_set_jpeg_enable(smu, false); 1558 1559 adev->vcn.cur_state = AMD_PG_STATE_GATE; 1560 adev->jpeg.cur_state = AMD_PG_STATE_GATE; 1561 1562 if (!smu->pm_enabled) 1563 return 0; 1564 1565 adev->pm.dpm_enabled = false; 1566 1567 return smu_smc_hw_cleanup(smu); 1568 } 1569 1570 static void smu_late_fini(void *handle) 1571 { 1572 struct amdgpu_device *adev = handle; 1573 struct smu_context *smu = adev->powerplay.pp_handle; 1574 1575 kfree(smu); 1576 } 1577 1578 static int smu_reset(struct smu_context *smu) 1579 { 1580 struct amdgpu_device *adev = smu->adev; 1581 int ret; 1582 1583 ret = smu_hw_fini(adev); 1584 if (ret) 1585 return ret; 1586 1587 ret = smu_hw_init(adev); 1588 if (ret) 1589 return ret; 1590 1591 ret = smu_late_init(adev); 1592 if (ret) 1593 return ret; 1594 1595 return 0; 1596 } 1597 1598 static int smu_suspend(void *handle) 1599 { 1600 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1601 struct smu_context *smu = adev->powerplay.pp_handle; 1602 int ret; 1603 uint64_t count; 1604 1605 if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) 1606 return 0; 1607 1608 if (!smu->pm_enabled) 1609 return 0; 1610 1611 adev->pm.dpm_enabled = false; 1612 1613 ret = smu_smc_hw_cleanup(smu); 1614 if (ret) 1615 return ret; 1616 1617 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED); 1618 1619 smu_set_gfx_cgpg(smu, false); 1620 1621 /* 1622 * pwfw resets entrycount when device is suspended, so we save the 1623 * last value to be used when we resume to keep it consistent 1624 */ 1625 ret = smu_get_entrycount_gfxoff(smu, &count); 1626 if (!ret) 1627 adev->gfx.gfx_off_entrycount = count; 1628 1629 return 0; 1630 } 1631 1632 static int smu_resume(void *handle) 1633 { 1634 int ret; 1635 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1636 struct smu_context *smu = adev->powerplay.pp_handle; 1637 1638 if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) 1639 return 0; 1640 1641 if (!smu->pm_enabled) 1642 return 0; 1643 1644 dev_info(adev->dev, "SMU is resuming...\n"); 1645 1646 ret = smu_start_smc_engine(smu); 1647 if (ret) { 1648 dev_err(adev->dev, "SMC engine is not correctly up!\n"); 1649 return ret; 1650 } 1651 1652 ret = smu_smc_hw_setup(smu); 1653 if (ret) { 1654 dev_err(adev->dev, "Failed to setup smc hw!\n"); 1655 return ret; 1656 } 1657 1658 smu_set_gfx_cgpg(smu, true); 1659 1660 smu->disable_uclk_switch = 0; 1661 1662 adev->pm.dpm_enabled = true; 1663 1664 dev_info(adev->dev, "SMU is resumed successfully!\n"); 1665 1666 return 0; 1667 } 1668 1669 static int smu_display_configuration_change(void *handle, 1670 const struct amd_pp_display_configuration *display_config) 1671 { 1672 struct smu_context *smu = handle; 1673 int index = 0; 1674 int num_of_active_display = 0; 1675 1676 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1677 return -EOPNOTSUPP; 1678 1679 if (!display_config) 1680 return -EINVAL; 1681 1682 smu_set_min_dcef_deep_sleep(smu, 1683 display_config->min_dcef_deep_sleep_set_clk / 100); 1684 1685 for (index = 0; index < display_config->num_path_including_non_display; index++) { 1686 if (display_config->displays[index].controller_id != 0) 1687 num_of_active_display++; 1688 } 1689 1690 return 0; 1691 } 1692 1693 static int smu_set_clockgating_state(void *handle, 1694 enum amd_clockgating_state state) 1695 { 1696 return 0; 1697 } 1698 1699 static int smu_set_powergating_state(void *handle, 1700 enum amd_powergating_state state) 1701 { 1702 return 0; 1703 } 1704 1705 static int smu_enable_umd_pstate(void *handle, 1706 enum amd_dpm_forced_level *level) 1707 { 1708 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 1709 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 1710 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 1711 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 1712 1713 struct smu_context *smu = (struct smu_context*)(handle); 1714 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1715 1716 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 1717 return -EINVAL; 1718 1719 if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) { 1720 /* enter umd pstate, save current level, disable gfx cg*/ 1721 if (*level & profile_mode_mask) { 1722 smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level; 1723 smu_gpo_control(smu, false); 1724 smu_gfx_ulv_control(smu, false); 1725 smu_deep_sleep_control(smu, false); 1726 amdgpu_asic_update_umd_stable_pstate(smu->adev, true); 1727 } 1728 } else { 1729 /* exit umd pstate, restore level, enable gfx cg*/ 1730 if (!(*level & profile_mode_mask)) { 1731 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) 1732 *level = smu_dpm_ctx->saved_dpm_level; 1733 amdgpu_asic_update_umd_stable_pstate(smu->adev, false); 1734 smu_deep_sleep_control(smu, true); 1735 smu_gfx_ulv_control(smu, true); 1736 smu_gpo_control(smu, true); 1737 } 1738 } 1739 1740 return 0; 1741 } 1742 1743 static int smu_bump_power_profile_mode(struct smu_context *smu, 1744 long *param, 1745 uint32_t param_size) 1746 { 1747 int ret = 0; 1748 1749 if (smu->ppt_funcs->set_power_profile_mode) 1750 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size); 1751 1752 return ret; 1753 } 1754 1755 static int smu_adjust_power_state_dynamic(struct smu_context *smu, 1756 enum amd_dpm_forced_level level, 1757 bool skip_display_settings) 1758 { 1759 int ret = 0; 1760 int index = 0; 1761 long workload; 1762 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1763 1764 if (!skip_display_settings) { 1765 ret = smu_display_config_changed(smu); 1766 if (ret) { 1767 dev_err(smu->adev->dev, "Failed to change display config!"); 1768 return ret; 1769 } 1770 } 1771 1772 ret = smu_apply_clocks_adjust_rules(smu); 1773 if (ret) { 1774 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!"); 1775 return ret; 1776 } 1777 1778 if (!skip_display_settings) { 1779 ret = smu_notify_smc_display_config(smu); 1780 if (ret) { 1781 dev_err(smu->adev->dev, "Failed to notify smc display config!"); 1782 return ret; 1783 } 1784 } 1785 1786 if (smu_dpm_ctx->dpm_level != level) { 1787 ret = smu_asic_set_performance_level(smu, level); 1788 if (ret) { 1789 dev_err(smu->adev->dev, "Failed to set performance level!"); 1790 return ret; 1791 } 1792 1793 /* update the saved copy */ 1794 smu_dpm_ctx->dpm_level = level; 1795 } 1796 1797 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && 1798 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { 1799 index = fls(smu->workload_mask); 1800 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; 1801 workload = smu->workload_setting[index]; 1802 1803 if (smu->power_profile_mode != workload) 1804 smu_bump_power_profile_mode(smu, &workload, 0); 1805 } 1806 1807 return ret; 1808 } 1809 1810 static int smu_handle_task(struct smu_context *smu, 1811 enum amd_dpm_forced_level level, 1812 enum amd_pp_task task_id) 1813 { 1814 int ret = 0; 1815 1816 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1817 return -EOPNOTSUPP; 1818 1819 switch (task_id) { 1820 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: 1821 ret = smu_pre_display_config_changed(smu); 1822 if (ret) 1823 return ret; 1824 ret = smu_adjust_power_state_dynamic(smu, level, false); 1825 break; 1826 case AMD_PP_TASK_COMPLETE_INIT: 1827 case AMD_PP_TASK_READJUST_POWER_STATE: 1828 ret = smu_adjust_power_state_dynamic(smu, level, true); 1829 break; 1830 default: 1831 break; 1832 } 1833 1834 return ret; 1835 } 1836 1837 static int smu_handle_dpm_task(void *handle, 1838 enum amd_pp_task task_id, 1839 enum amd_pm_state_type *user_state) 1840 { 1841 struct smu_context *smu = handle; 1842 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 1843 1844 return smu_handle_task(smu, smu_dpm->dpm_level, task_id); 1845 1846 } 1847 1848 static int smu_switch_power_profile(void *handle, 1849 enum PP_SMC_POWER_PROFILE type, 1850 bool en) 1851 { 1852 struct smu_context *smu = handle; 1853 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1854 long workload; 1855 uint32_t index; 1856 1857 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1858 return -EOPNOTSUPP; 1859 1860 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM)) 1861 return -EINVAL; 1862 1863 if (!en) { 1864 smu->workload_mask &= ~(1 << smu->workload_prority[type]); 1865 index = fls(smu->workload_mask); 1866 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; 1867 workload = smu->workload_setting[index]; 1868 } else { 1869 smu->workload_mask |= (1 << smu->workload_prority[type]); 1870 index = fls(smu->workload_mask); 1871 index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; 1872 workload = smu->workload_setting[index]; 1873 } 1874 1875 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && 1876 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) 1877 smu_bump_power_profile_mode(smu, &workload, 0); 1878 1879 return 0; 1880 } 1881 1882 static enum amd_dpm_forced_level smu_get_performance_level(void *handle) 1883 { 1884 struct smu_context *smu = handle; 1885 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1886 1887 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1888 return -EOPNOTSUPP; 1889 1890 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 1891 return -EINVAL; 1892 1893 return smu_dpm_ctx->dpm_level; 1894 } 1895 1896 static int smu_force_performance_level(void *handle, 1897 enum amd_dpm_forced_level level) 1898 { 1899 struct smu_context *smu = handle; 1900 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1901 int ret = 0; 1902 1903 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1904 return -EOPNOTSUPP; 1905 1906 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 1907 return -EINVAL; 1908 1909 ret = smu_enable_umd_pstate(smu, &level); 1910 if (ret) 1911 return ret; 1912 1913 ret = smu_handle_task(smu, level, 1914 AMD_PP_TASK_READJUST_POWER_STATE); 1915 1916 /* reset user dpm clock state */ 1917 if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { 1918 memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask)); 1919 smu->user_dpm_profile.clk_dependency = 0; 1920 } 1921 1922 return ret; 1923 } 1924 1925 static int smu_set_display_count(void *handle, uint32_t count) 1926 { 1927 struct smu_context *smu = handle; 1928 1929 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1930 return -EOPNOTSUPP; 1931 1932 return smu_init_display_count(smu, count); 1933 } 1934 1935 static int smu_force_smuclk_levels(struct smu_context *smu, 1936 enum smu_clk_type clk_type, 1937 uint32_t mask) 1938 { 1939 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1940 int ret = 0; 1941 1942 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1943 return -EOPNOTSUPP; 1944 1945 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { 1946 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n"); 1947 return -EINVAL; 1948 } 1949 1950 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) { 1951 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask); 1952 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 1953 smu->user_dpm_profile.clk_mask[clk_type] = mask; 1954 smu_set_user_clk_dependencies(smu, clk_type); 1955 } 1956 } 1957 1958 return ret; 1959 } 1960 1961 static int smu_force_ppclk_levels(void *handle, 1962 enum pp_clock_type type, 1963 uint32_t mask) 1964 { 1965 struct smu_context *smu = handle; 1966 enum smu_clk_type clk_type; 1967 1968 switch (type) { 1969 case PP_SCLK: 1970 clk_type = SMU_SCLK; break; 1971 case PP_MCLK: 1972 clk_type = SMU_MCLK; break; 1973 case PP_PCIE: 1974 clk_type = SMU_PCIE; break; 1975 case PP_SOCCLK: 1976 clk_type = SMU_SOCCLK; break; 1977 case PP_FCLK: 1978 clk_type = SMU_FCLK; break; 1979 case PP_DCEFCLK: 1980 clk_type = SMU_DCEFCLK; break; 1981 case PP_VCLK: 1982 clk_type = SMU_VCLK; break; 1983 case PP_DCLK: 1984 clk_type = SMU_DCLK; break; 1985 case OD_SCLK: 1986 clk_type = SMU_OD_SCLK; break; 1987 case OD_MCLK: 1988 clk_type = SMU_OD_MCLK; break; 1989 case OD_VDDC_CURVE: 1990 clk_type = SMU_OD_VDDC_CURVE; break; 1991 case OD_RANGE: 1992 clk_type = SMU_OD_RANGE; break; 1993 default: 1994 return -EINVAL; 1995 } 1996 1997 return smu_force_smuclk_levels(smu, clk_type, mask); 1998 } 1999 2000 /* 2001 * On system suspending or resetting, the dpm_enabled 2002 * flag will be cleared. So that those SMU services which 2003 * are not supported will be gated. 2004 * However, the mp1 state setting should still be granted 2005 * even if the dpm_enabled cleared. 2006 */ 2007 static int smu_set_mp1_state(void *handle, 2008 enum pp_mp1_state mp1_state) 2009 { 2010 struct smu_context *smu = handle; 2011 int ret = 0; 2012 2013 if (!smu->pm_enabled) 2014 return -EOPNOTSUPP; 2015 2016 if (smu->ppt_funcs && 2017 smu->ppt_funcs->set_mp1_state) 2018 ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state); 2019 2020 return ret; 2021 } 2022 2023 static int smu_set_df_cstate(void *handle, 2024 enum pp_df_cstate state) 2025 { 2026 struct smu_context *smu = handle; 2027 int ret = 0; 2028 2029 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2030 return -EOPNOTSUPP; 2031 2032 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate) 2033 return 0; 2034 2035 ret = smu->ppt_funcs->set_df_cstate(smu, state); 2036 if (ret) 2037 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n"); 2038 2039 return ret; 2040 } 2041 2042 int smu_allow_xgmi_power_down(struct smu_context *smu, bool en) 2043 { 2044 int ret = 0; 2045 2046 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2047 return -EOPNOTSUPP; 2048 2049 if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down) 2050 return 0; 2051 2052 ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en); 2053 if (ret) 2054 dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n"); 2055 2056 return ret; 2057 } 2058 2059 int smu_write_watermarks_table(struct smu_context *smu) 2060 { 2061 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2062 return -EOPNOTSUPP; 2063 2064 return smu_set_watermarks_table(smu, NULL); 2065 } 2066 2067 static int smu_set_watermarks_for_clock_ranges(void *handle, 2068 struct pp_smu_wm_range_sets *clock_ranges) 2069 { 2070 struct smu_context *smu = handle; 2071 2072 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2073 return -EOPNOTSUPP; 2074 2075 if (smu->disable_watermark) 2076 return 0; 2077 2078 return smu_set_watermarks_table(smu, clock_ranges); 2079 } 2080 2081 int smu_set_ac_dc(struct smu_context *smu) 2082 { 2083 int ret = 0; 2084 2085 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2086 return -EOPNOTSUPP; 2087 2088 /* controlled by firmware */ 2089 if (smu->dc_controlled_by_gpio) 2090 return 0; 2091 2092 ret = smu_set_power_source(smu, 2093 smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC : 2094 SMU_POWER_SOURCE_DC); 2095 if (ret) 2096 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n", 2097 smu->adev->pm.ac_power ? "AC" : "DC"); 2098 2099 return ret; 2100 } 2101 2102 const struct amd_ip_funcs smu_ip_funcs = { 2103 .name = "smu", 2104 .early_init = smu_early_init, 2105 .late_init = smu_late_init, 2106 .sw_init = smu_sw_init, 2107 .sw_fini = smu_sw_fini, 2108 .hw_init = smu_hw_init, 2109 .hw_fini = smu_hw_fini, 2110 .late_fini = smu_late_fini, 2111 .suspend = smu_suspend, 2112 .resume = smu_resume, 2113 .is_idle = NULL, 2114 .check_soft_reset = NULL, 2115 .wait_for_idle = NULL, 2116 .soft_reset = NULL, 2117 .set_clockgating_state = smu_set_clockgating_state, 2118 .set_powergating_state = smu_set_powergating_state, 2119 }; 2120 2121 const struct amdgpu_ip_block_version smu_v11_0_ip_block = 2122 { 2123 .type = AMD_IP_BLOCK_TYPE_SMC, 2124 .major = 11, 2125 .minor = 0, 2126 .rev = 0, 2127 .funcs = &smu_ip_funcs, 2128 }; 2129 2130 const struct amdgpu_ip_block_version smu_v12_0_ip_block = 2131 { 2132 .type = AMD_IP_BLOCK_TYPE_SMC, 2133 .major = 12, 2134 .minor = 0, 2135 .rev = 0, 2136 .funcs = &smu_ip_funcs, 2137 }; 2138 2139 const struct amdgpu_ip_block_version smu_v13_0_ip_block = 2140 { 2141 .type = AMD_IP_BLOCK_TYPE_SMC, 2142 .major = 13, 2143 .minor = 0, 2144 .rev = 0, 2145 .funcs = &smu_ip_funcs, 2146 }; 2147 2148 static int smu_load_microcode(void *handle) 2149 { 2150 struct smu_context *smu = handle; 2151 struct amdgpu_device *adev = smu->adev; 2152 int ret = 0; 2153 2154 if (!smu->pm_enabled) 2155 return -EOPNOTSUPP; 2156 2157 /* This should be used for non PSP loading */ 2158 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) 2159 return 0; 2160 2161 if (smu->ppt_funcs->load_microcode) { 2162 ret = smu->ppt_funcs->load_microcode(smu); 2163 if (ret) { 2164 dev_err(adev->dev, "Load microcode failed\n"); 2165 return ret; 2166 } 2167 } 2168 2169 if (smu->ppt_funcs->check_fw_status) { 2170 ret = smu->ppt_funcs->check_fw_status(smu); 2171 if (ret) { 2172 dev_err(adev->dev, "SMC is not ready\n"); 2173 return ret; 2174 } 2175 } 2176 2177 return ret; 2178 } 2179 2180 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) 2181 { 2182 int ret = 0; 2183 2184 if (smu->ppt_funcs->set_gfx_cgpg) 2185 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled); 2186 2187 return ret; 2188 } 2189 2190 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed) 2191 { 2192 struct smu_context *smu = handle; 2193 int ret = 0; 2194 2195 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2196 return -EOPNOTSUPP; 2197 2198 if (!smu->ppt_funcs->set_fan_speed_rpm) 2199 return -EOPNOTSUPP; 2200 2201 if (speed == U32_MAX) 2202 return -EINVAL; 2203 2204 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed); 2205 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 2206 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM; 2207 smu->user_dpm_profile.fan_speed_rpm = speed; 2208 2209 /* Override custom PWM setting as they cannot co-exist */ 2210 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM; 2211 smu->user_dpm_profile.fan_speed_pwm = 0; 2212 } 2213 2214 return ret; 2215 } 2216 2217 /** 2218 * smu_get_power_limit - Request one of the SMU Power Limits 2219 * 2220 * @handle: pointer to smu context 2221 * @limit: requested limit is written back to this variable 2222 * @pp_limit_level: &pp_power_limit_level which limit of the power to return 2223 * @pp_power_type: &pp_power_type type of power 2224 * Return: 0 on success, <0 on error 2225 * 2226 */ 2227 int smu_get_power_limit(void *handle, 2228 uint32_t *limit, 2229 enum pp_power_limit_level pp_limit_level, 2230 enum pp_power_type pp_power_type) 2231 { 2232 struct smu_context *smu = handle; 2233 struct amdgpu_device *adev = smu->adev; 2234 enum smu_ppt_limit_level limit_level; 2235 uint32_t limit_type; 2236 int ret = 0; 2237 2238 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2239 return -EOPNOTSUPP; 2240 2241 switch(pp_power_type) { 2242 case PP_PWR_TYPE_SUSTAINED: 2243 limit_type = SMU_DEFAULT_PPT_LIMIT; 2244 break; 2245 case PP_PWR_TYPE_FAST: 2246 limit_type = SMU_FAST_PPT_LIMIT; 2247 break; 2248 default: 2249 return -EOPNOTSUPP; 2250 break; 2251 } 2252 2253 switch(pp_limit_level){ 2254 case PP_PWR_LIMIT_CURRENT: 2255 limit_level = SMU_PPT_LIMIT_CURRENT; 2256 break; 2257 case PP_PWR_LIMIT_DEFAULT: 2258 limit_level = SMU_PPT_LIMIT_DEFAULT; 2259 break; 2260 case PP_PWR_LIMIT_MAX: 2261 limit_level = SMU_PPT_LIMIT_MAX; 2262 break; 2263 case PP_PWR_LIMIT_MIN: 2264 default: 2265 return -EOPNOTSUPP; 2266 break; 2267 } 2268 2269 if (limit_type != SMU_DEFAULT_PPT_LIMIT) { 2270 if (smu->ppt_funcs->get_ppt_limit) 2271 ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level); 2272 } else { 2273 switch (limit_level) { 2274 case SMU_PPT_LIMIT_CURRENT: 2275 switch (adev->ip_versions[MP1_HWIP][0]) { 2276 case IP_VERSION(13, 0, 2): 2277 case IP_VERSION(11, 0, 7): 2278 case IP_VERSION(11, 0, 11): 2279 case IP_VERSION(11, 0, 12): 2280 case IP_VERSION(11, 0, 13): 2281 ret = smu_get_asic_power_limits(smu, 2282 &smu->current_power_limit, 2283 NULL, 2284 NULL); 2285 break; 2286 default: 2287 break; 2288 } 2289 *limit = smu->current_power_limit; 2290 break; 2291 case SMU_PPT_LIMIT_DEFAULT: 2292 *limit = smu->default_power_limit; 2293 break; 2294 case SMU_PPT_LIMIT_MAX: 2295 *limit = smu->max_power_limit; 2296 break; 2297 default: 2298 break; 2299 } 2300 } 2301 2302 return ret; 2303 } 2304 2305 static int smu_set_power_limit(void *handle, uint32_t limit) 2306 { 2307 struct smu_context *smu = handle; 2308 uint32_t limit_type = limit >> 24; 2309 int ret = 0; 2310 2311 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2312 return -EOPNOTSUPP; 2313 2314 limit &= (1<<24)-1; 2315 if (limit_type != SMU_DEFAULT_PPT_LIMIT) 2316 if (smu->ppt_funcs->set_power_limit) 2317 return smu->ppt_funcs->set_power_limit(smu, limit_type, limit); 2318 2319 if (limit > smu->max_power_limit) { 2320 dev_err(smu->adev->dev, 2321 "New power limit (%d) is over the max allowed %d\n", 2322 limit, smu->max_power_limit); 2323 return -EINVAL; 2324 } 2325 2326 if (!limit) 2327 limit = smu->current_power_limit; 2328 2329 if (smu->ppt_funcs->set_power_limit) { 2330 ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit); 2331 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) 2332 smu->user_dpm_profile.power_limit = limit; 2333 } 2334 2335 return ret; 2336 } 2337 2338 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) 2339 { 2340 int ret = 0; 2341 2342 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2343 return -EOPNOTSUPP; 2344 2345 if (smu->ppt_funcs->print_clk_levels) 2346 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf); 2347 2348 return ret; 2349 } 2350 2351 static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type) 2352 { 2353 enum smu_clk_type clk_type; 2354 2355 switch (type) { 2356 case PP_SCLK: 2357 clk_type = SMU_SCLK; break; 2358 case PP_MCLK: 2359 clk_type = SMU_MCLK; break; 2360 case PP_PCIE: 2361 clk_type = SMU_PCIE; break; 2362 case PP_SOCCLK: 2363 clk_type = SMU_SOCCLK; break; 2364 case PP_FCLK: 2365 clk_type = SMU_FCLK; break; 2366 case PP_DCEFCLK: 2367 clk_type = SMU_DCEFCLK; break; 2368 case PP_VCLK: 2369 clk_type = SMU_VCLK; break; 2370 case PP_DCLK: 2371 clk_type = SMU_DCLK; break; 2372 case OD_SCLK: 2373 clk_type = SMU_OD_SCLK; break; 2374 case OD_MCLK: 2375 clk_type = SMU_OD_MCLK; break; 2376 case OD_VDDC_CURVE: 2377 clk_type = SMU_OD_VDDC_CURVE; break; 2378 case OD_RANGE: 2379 clk_type = SMU_OD_RANGE; break; 2380 case OD_VDDGFX_OFFSET: 2381 clk_type = SMU_OD_VDDGFX_OFFSET; break; 2382 case OD_CCLK: 2383 clk_type = SMU_OD_CCLK; break; 2384 default: 2385 clk_type = SMU_CLK_COUNT; break; 2386 } 2387 2388 return clk_type; 2389 } 2390 2391 static int smu_print_ppclk_levels(void *handle, 2392 enum pp_clock_type type, 2393 char *buf) 2394 { 2395 struct smu_context *smu = handle; 2396 enum smu_clk_type clk_type; 2397 2398 clk_type = smu_convert_to_smuclk(type); 2399 if (clk_type == SMU_CLK_COUNT) 2400 return -EINVAL; 2401 2402 return smu_print_smuclk_levels(smu, clk_type, buf); 2403 } 2404 2405 static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset) 2406 { 2407 struct smu_context *smu = handle; 2408 enum smu_clk_type clk_type; 2409 2410 clk_type = smu_convert_to_smuclk(type); 2411 if (clk_type == SMU_CLK_COUNT) 2412 return -EINVAL; 2413 2414 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2415 return -EOPNOTSUPP; 2416 2417 if (!smu->ppt_funcs->emit_clk_levels) 2418 return -ENOENT; 2419 2420 return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset); 2421 2422 } 2423 2424 static int smu_od_edit_dpm_table(void *handle, 2425 enum PP_OD_DPM_TABLE_COMMAND type, 2426 long *input, uint32_t size) 2427 { 2428 struct smu_context *smu = handle; 2429 int ret = 0; 2430 2431 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2432 return -EOPNOTSUPP; 2433 2434 if (smu->ppt_funcs->od_edit_dpm_table) { 2435 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size); 2436 } 2437 2438 return ret; 2439 } 2440 2441 static int smu_read_sensor(void *handle, 2442 int sensor, 2443 void *data, 2444 int *size_arg) 2445 { 2446 struct smu_context *smu = handle; 2447 struct smu_umd_pstate_table *pstate_table = 2448 &smu->pstate_table; 2449 int ret = 0; 2450 uint32_t *size, size_val; 2451 2452 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2453 return -EOPNOTSUPP; 2454 2455 if (!data || !size_arg) 2456 return -EINVAL; 2457 2458 size_val = *size_arg; 2459 size = &size_val; 2460 2461 if (smu->ppt_funcs->read_sensor) 2462 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size)) 2463 goto unlock; 2464 2465 switch (sensor) { 2466 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK: 2467 *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100; 2468 *size = 4; 2469 break; 2470 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK: 2471 *((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100; 2472 *size = 4; 2473 break; 2474 case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK: 2475 *((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100; 2476 *size = 4; 2477 break; 2478 case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK: 2479 *((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100; 2480 *size = 4; 2481 break; 2482 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK: 2483 ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data); 2484 *size = 8; 2485 break; 2486 case AMDGPU_PP_SENSOR_UVD_POWER: 2487 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0; 2488 *size = 4; 2489 break; 2490 case AMDGPU_PP_SENSOR_VCE_POWER: 2491 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0; 2492 *size = 4; 2493 break; 2494 case AMDGPU_PP_SENSOR_VCN_POWER_STATE: 2495 *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0: 1; 2496 *size = 4; 2497 break; 2498 case AMDGPU_PP_SENSOR_MIN_FAN_RPM: 2499 *(uint32_t *)data = 0; 2500 *size = 4; 2501 break; 2502 default: 2503 *size = 0; 2504 ret = -EOPNOTSUPP; 2505 break; 2506 } 2507 2508 unlock: 2509 // assign uint32_t to int 2510 *size_arg = size_val; 2511 2512 return ret; 2513 } 2514 2515 static int smu_get_power_profile_mode(void *handle, char *buf) 2516 { 2517 struct smu_context *smu = handle; 2518 2519 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || 2520 !smu->ppt_funcs->get_power_profile_mode) 2521 return -EOPNOTSUPP; 2522 if (!buf) 2523 return -EINVAL; 2524 2525 return smu->ppt_funcs->get_power_profile_mode(smu, buf); 2526 } 2527 2528 static int smu_set_power_profile_mode(void *handle, 2529 long *param, 2530 uint32_t param_size) 2531 { 2532 struct smu_context *smu = handle; 2533 2534 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || 2535 !smu->ppt_funcs->set_power_profile_mode) 2536 return -EOPNOTSUPP; 2537 2538 return smu_bump_power_profile_mode(smu, param, param_size); 2539 } 2540 2541 static int smu_get_fan_control_mode(void *handle, u32 *fan_mode) 2542 { 2543 struct smu_context *smu = handle; 2544 2545 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2546 return -EOPNOTSUPP; 2547 2548 if (!smu->ppt_funcs->get_fan_control_mode) 2549 return -EOPNOTSUPP; 2550 2551 if (!fan_mode) 2552 return -EINVAL; 2553 2554 *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu); 2555 2556 return 0; 2557 } 2558 2559 static int smu_set_fan_control_mode(void *handle, u32 value) 2560 { 2561 struct smu_context *smu = handle; 2562 int ret = 0; 2563 2564 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2565 return -EOPNOTSUPP; 2566 2567 if (!smu->ppt_funcs->set_fan_control_mode) 2568 return -EOPNOTSUPP; 2569 2570 if (value == U32_MAX) 2571 return -EINVAL; 2572 2573 ret = smu->ppt_funcs->set_fan_control_mode(smu, value); 2574 if (ret) 2575 goto out; 2576 2577 if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 2578 smu->user_dpm_profile.fan_mode = value; 2579 2580 /* reset user dpm fan speed */ 2581 if (value != AMD_FAN_CTRL_MANUAL) { 2582 smu->user_dpm_profile.fan_speed_pwm = 0; 2583 smu->user_dpm_profile.fan_speed_rpm = 0; 2584 smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM); 2585 } 2586 } 2587 2588 out: 2589 return ret; 2590 } 2591 2592 static int smu_get_fan_speed_pwm(void *handle, u32 *speed) 2593 { 2594 struct smu_context *smu = handle; 2595 int ret = 0; 2596 2597 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2598 return -EOPNOTSUPP; 2599 2600 if (!smu->ppt_funcs->get_fan_speed_pwm) 2601 return -EOPNOTSUPP; 2602 2603 if (!speed) 2604 return -EINVAL; 2605 2606 ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed); 2607 2608 return ret; 2609 } 2610 2611 static int smu_set_fan_speed_pwm(void *handle, u32 speed) 2612 { 2613 struct smu_context *smu = handle; 2614 int ret = 0; 2615 2616 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2617 return -EOPNOTSUPP; 2618 2619 if (!smu->ppt_funcs->set_fan_speed_pwm) 2620 return -EOPNOTSUPP; 2621 2622 if (speed == U32_MAX) 2623 return -EINVAL; 2624 2625 ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed); 2626 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 2627 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM; 2628 smu->user_dpm_profile.fan_speed_pwm = speed; 2629 2630 /* Override custom RPM setting as they cannot co-exist */ 2631 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM; 2632 smu->user_dpm_profile.fan_speed_rpm = 0; 2633 } 2634 2635 return ret; 2636 } 2637 2638 static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed) 2639 { 2640 struct smu_context *smu = handle; 2641 int ret = 0; 2642 2643 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2644 return -EOPNOTSUPP; 2645 2646 if (!smu->ppt_funcs->get_fan_speed_rpm) 2647 return -EOPNOTSUPP; 2648 2649 if (!speed) 2650 return -EINVAL; 2651 2652 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed); 2653 2654 return ret; 2655 } 2656 2657 static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk) 2658 { 2659 struct smu_context *smu = handle; 2660 2661 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2662 return -EOPNOTSUPP; 2663 2664 return smu_set_min_dcef_deep_sleep(smu, clk); 2665 } 2666 2667 static int smu_get_clock_by_type_with_latency(void *handle, 2668 enum amd_pp_clock_type type, 2669 struct pp_clock_levels_with_latency *clocks) 2670 { 2671 struct smu_context *smu = handle; 2672 enum smu_clk_type clk_type; 2673 int ret = 0; 2674 2675 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2676 return -EOPNOTSUPP; 2677 2678 if (smu->ppt_funcs->get_clock_by_type_with_latency) { 2679 switch (type) { 2680 case amd_pp_sys_clock: 2681 clk_type = SMU_GFXCLK; 2682 break; 2683 case amd_pp_mem_clock: 2684 clk_type = SMU_MCLK; 2685 break; 2686 case amd_pp_dcef_clock: 2687 clk_type = SMU_DCEFCLK; 2688 break; 2689 case amd_pp_disp_clock: 2690 clk_type = SMU_DISPCLK; 2691 break; 2692 default: 2693 dev_err(smu->adev->dev, "Invalid clock type!\n"); 2694 return -EINVAL; 2695 } 2696 2697 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks); 2698 } 2699 2700 return ret; 2701 } 2702 2703 static int smu_display_clock_voltage_request(void *handle, 2704 struct pp_display_clock_request *clock_req) 2705 { 2706 struct smu_context *smu = handle; 2707 int ret = 0; 2708 2709 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2710 return -EOPNOTSUPP; 2711 2712 if (smu->ppt_funcs->display_clock_voltage_request) 2713 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req); 2714 2715 return ret; 2716 } 2717 2718 2719 static int smu_display_disable_memory_clock_switch(void *handle, 2720 bool disable_memory_clock_switch) 2721 { 2722 struct smu_context *smu = handle; 2723 int ret = -EINVAL; 2724 2725 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2726 return -EOPNOTSUPP; 2727 2728 if (smu->ppt_funcs->display_disable_memory_clock_switch) 2729 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch); 2730 2731 return ret; 2732 } 2733 2734 static int smu_set_xgmi_pstate(void *handle, 2735 uint32_t pstate) 2736 { 2737 struct smu_context *smu = handle; 2738 int ret = 0; 2739 2740 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2741 return -EOPNOTSUPP; 2742 2743 if (smu->ppt_funcs->set_xgmi_pstate) 2744 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate); 2745 2746 if(ret) 2747 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n"); 2748 2749 return ret; 2750 } 2751 2752 static int smu_get_baco_capability(void *handle, bool *cap) 2753 { 2754 struct smu_context *smu = handle; 2755 2756 *cap = false; 2757 2758 if (!smu->pm_enabled) 2759 return 0; 2760 2761 if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support) 2762 *cap = smu->ppt_funcs->baco_is_support(smu); 2763 2764 return 0; 2765 } 2766 2767 static int smu_baco_set_state(void *handle, int state) 2768 { 2769 struct smu_context *smu = handle; 2770 int ret = 0; 2771 2772 if (!smu->pm_enabled) 2773 return -EOPNOTSUPP; 2774 2775 if (state == 0) { 2776 if (smu->ppt_funcs->baco_exit) 2777 ret = smu->ppt_funcs->baco_exit(smu); 2778 } else if (state == 1) { 2779 if (smu->ppt_funcs->baco_enter) 2780 ret = smu->ppt_funcs->baco_enter(smu); 2781 } else { 2782 return -EINVAL; 2783 } 2784 2785 if (ret) 2786 dev_err(smu->adev->dev, "Failed to %s BACO state!\n", 2787 (state)?"enter":"exit"); 2788 2789 return ret; 2790 } 2791 2792 bool smu_mode1_reset_is_support(struct smu_context *smu) 2793 { 2794 bool ret = false; 2795 2796 if (!smu->pm_enabled) 2797 return false; 2798 2799 if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support) 2800 ret = smu->ppt_funcs->mode1_reset_is_support(smu); 2801 2802 return ret; 2803 } 2804 2805 bool smu_mode2_reset_is_support(struct smu_context *smu) 2806 { 2807 bool ret = false; 2808 2809 if (!smu->pm_enabled) 2810 return false; 2811 2812 if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support) 2813 ret = smu->ppt_funcs->mode2_reset_is_support(smu); 2814 2815 return ret; 2816 } 2817 2818 int smu_mode1_reset(struct smu_context *smu) 2819 { 2820 int ret = 0; 2821 2822 if (!smu->pm_enabled) 2823 return -EOPNOTSUPP; 2824 2825 if (smu->ppt_funcs->mode1_reset) 2826 ret = smu->ppt_funcs->mode1_reset(smu); 2827 2828 return ret; 2829 } 2830 2831 static int smu_mode2_reset(void *handle) 2832 { 2833 struct smu_context *smu = handle; 2834 int ret = 0; 2835 2836 if (!smu->pm_enabled) 2837 return -EOPNOTSUPP; 2838 2839 if (smu->ppt_funcs->mode2_reset) 2840 ret = smu->ppt_funcs->mode2_reset(smu); 2841 2842 if (ret) 2843 dev_err(smu->adev->dev, "Mode2 reset failed!\n"); 2844 2845 return ret; 2846 } 2847 2848 static int smu_get_max_sustainable_clocks_by_dc(void *handle, 2849 struct pp_smu_nv_clock_table *max_clocks) 2850 { 2851 struct smu_context *smu = handle; 2852 int ret = 0; 2853 2854 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2855 return -EOPNOTSUPP; 2856 2857 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc) 2858 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks); 2859 2860 return ret; 2861 } 2862 2863 static int smu_get_uclk_dpm_states(void *handle, 2864 unsigned int *clock_values_in_khz, 2865 unsigned int *num_states) 2866 { 2867 struct smu_context *smu = handle; 2868 int ret = 0; 2869 2870 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2871 return -EOPNOTSUPP; 2872 2873 if (smu->ppt_funcs->get_uclk_dpm_states) 2874 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states); 2875 2876 return ret; 2877 } 2878 2879 static enum amd_pm_state_type smu_get_current_power_state(void *handle) 2880 { 2881 struct smu_context *smu = handle; 2882 enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT; 2883 2884 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2885 return -EOPNOTSUPP; 2886 2887 if (smu->ppt_funcs->get_current_power_state) 2888 pm_state = smu->ppt_funcs->get_current_power_state(smu); 2889 2890 return pm_state; 2891 } 2892 2893 static int smu_get_dpm_clock_table(void *handle, 2894 struct dpm_clocks *clock_table) 2895 { 2896 struct smu_context *smu = handle; 2897 int ret = 0; 2898 2899 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2900 return -EOPNOTSUPP; 2901 2902 if (smu->ppt_funcs->get_dpm_clock_table) 2903 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table); 2904 2905 return ret; 2906 } 2907 2908 static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table) 2909 { 2910 struct smu_context *smu = handle; 2911 2912 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2913 return -EOPNOTSUPP; 2914 2915 if (!smu->ppt_funcs->get_gpu_metrics) 2916 return -EOPNOTSUPP; 2917 2918 return smu->ppt_funcs->get_gpu_metrics(smu, table); 2919 } 2920 2921 static int smu_enable_mgpu_fan_boost(void *handle) 2922 { 2923 struct smu_context *smu = handle; 2924 int ret = 0; 2925 2926 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2927 return -EOPNOTSUPP; 2928 2929 if (smu->ppt_funcs->enable_mgpu_fan_boost) 2930 ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu); 2931 2932 return ret; 2933 } 2934 2935 static int smu_gfx_state_change_set(void *handle, 2936 uint32_t state) 2937 { 2938 struct smu_context *smu = handle; 2939 int ret = 0; 2940 2941 if (smu->ppt_funcs->gfx_state_change_set) 2942 ret = smu->ppt_funcs->gfx_state_change_set(smu, state); 2943 2944 return ret; 2945 } 2946 2947 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable) 2948 { 2949 int ret = 0; 2950 2951 if (smu->ppt_funcs->smu_handle_passthrough_sbr) 2952 ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable); 2953 2954 return ret; 2955 } 2956 2957 int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc) 2958 { 2959 int ret = -EOPNOTSUPP; 2960 2961 if (smu->ppt_funcs && 2962 smu->ppt_funcs->get_ecc_info) 2963 ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc); 2964 2965 return ret; 2966 2967 } 2968 2969 static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size) 2970 { 2971 struct smu_context *smu = handle; 2972 struct smu_table_context *smu_table = &smu->smu_table; 2973 struct smu_table *memory_pool = &smu_table->memory_pool; 2974 2975 if (!addr || !size) 2976 return -EINVAL; 2977 2978 *addr = NULL; 2979 *size = 0; 2980 if (memory_pool->bo) { 2981 *addr = memory_pool->cpu_addr; 2982 *size = memory_pool->size; 2983 } 2984 2985 return 0; 2986 } 2987 2988 static const struct amd_pm_funcs swsmu_pm_funcs = { 2989 /* export for sysfs */ 2990 .set_fan_control_mode = smu_set_fan_control_mode, 2991 .get_fan_control_mode = smu_get_fan_control_mode, 2992 .set_fan_speed_pwm = smu_set_fan_speed_pwm, 2993 .get_fan_speed_pwm = smu_get_fan_speed_pwm, 2994 .force_clock_level = smu_force_ppclk_levels, 2995 .print_clock_levels = smu_print_ppclk_levels, 2996 .emit_clock_levels = smu_emit_ppclk_levels, 2997 .force_performance_level = smu_force_performance_level, 2998 .read_sensor = smu_read_sensor, 2999 .get_performance_level = smu_get_performance_level, 3000 .get_current_power_state = smu_get_current_power_state, 3001 .get_fan_speed_rpm = smu_get_fan_speed_rpm, 3002 .set_fan_speed_rpm = smu_set_fan_speed_rpm, 3003 .get_pp_num_states = smu_get_power_num_states, 3004 .get_pp_table = smu_sys_get_pp_table, 3005 .set_pp_table = smu_sys_set_pp_table, 3006 .switch_power_profile = smu_switch_power_profile, 3007 /* export to amdgpu */ 3008 .dispatch_tasks = smu_handle_dpm_task, 3009 .load_firmware = smu_load_microcode, 3010 .set_powergating_by_smu = smu_dpm_set_power_gate, 3011 .set_power_limit = smu_set_power_limit, 3012 .get_power_limit = smu_get_power_limit, 3013 .get_power_profile_mode = smu_get_power_profile_mode, 3014 .set_power_profile_mode = smu_set_power_profile_mode, 3015 .odn_edit_dpm_table = smu_od_edit_dpm_table, 3016 .set_mp1_state = smu_set_mp1_state, 3017 .gfx_state_change_set = smu_gfx_state_change_set, 3018 /* export to DC */ 3019 .get_sclk = smu_get_sclk, 3020 .get_mclk = smu_get_mclk, 3021 .display_configuration_change = smu_display_configuration_change, 3022 .get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency, 3023 .display_clock_voltage_request = smu_display_clock_voltage_request, 3024 .enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost, 3025 .set_active_display_count = smu_set_display_count, 3026 .set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk, 3027 .get_asic_baco_capability = smu_get_baco_capability, 3028 .set_asic_baco_state = smu_baco_set_state, 3029 .get_ppfeature_status = smu_sys_get_pp_feature_mask, 3030 .set_ppfeature_status = smu_sys_set_pp_feature_mask, 3031 .asic_reset_mode_2 = smu_mode2_reset, 3032 .set_df_cstate = smu_set_df_cstate, 3033 .set_xgmi_pstate = smu_set_xgmi_pstate, 3034 .get_gpu_metrics = smu_sys_get_gpu_metrics, 3035 .set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges, 3036 .display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch, 3037 .get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc, 3038 .get_uclk_dpm_states = smu_get_uclk_dpm_states, 3039 .get_dpm_clock_table = smu_get_dpm_clock_table, 3040 .get_smu_prv_buf_details = smu_get_prv_buffer_details, 3041 }; 3042 3043 int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event, 3044 uint64_t event_arg) 3045 { 3046 int ret = -EINVAL; 3047 3048 if (smu->ppt_funcs->wait_for_event) 3049 ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg); 3050 3051 return ret; 3052 } 3053 3054 int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size) 3055 { 3056 3057 if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled) 3058 return -EOPNOTSUPP; 3059 3060 /* Confirm the buffer allocated is of correct size */ 3061 if (size != smu->stb_context.stb_buf_size) 3062 return -EINVAL; 3063 3064 /* 3065 * No need to lock smu mutex as we access STB directly through MMIO 3066 * and not going through SMU messaging route (for now at least). 3067 * For registers access rely on implementation internal locking. 3068 */ 3069 return smu->ppt_funcs->stb_collect_info(smu, buf, size); 3070 } 3071 3072 #if defined(CONFIG_DEBUG_FS) 3073 3074 static int smu_stb_debugfs_open(struct inode *inode, struct file *filp) 3075 { 3076 struct amdgpu_device *adev = filp->f_inode->i_private; 3077 struct smu_context *smu = adev->powerplay.pp_handle; 3078 unsigned char *buf; 3079 int r; 3080 3081 buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL); 3082 if (!buf) 3083 return -ENOMEM; 3084 3085 r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size); 3086 if (r) 3087 goto out; 3088 3089 filp->private_data = buf; 3090 3091 return 0; 3092 3093 out: 3094 kvfree(buf); 3095 return r; 3096 } 3097 3098 static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size, 3099 loff_t *pos) 3100 { 3101 struct amdgpu_device *adev = filp->f_inode->i_private; 3102 struct smu_context *smu = adev->powerplay.pp_handle; 3103 3104 3105 if (!filp->private_data) 3106 return -EINVAL; 3107 3108 return simple_read_from_buffer(buf, 3109 size, 3110 pos, filp->private_data, 3111 smu->stb_context.stb_buf_size); 3112 } 3113 3114 static int smu_stb_debugfs_release(struct inode *inode, struct file *filp) 3115 { 3116 kvfree(filp->private_data); 3117 filp->private_data = NULL; 3118 3119 return 0; 3120 } 3121 3122 /* 3123 * We have to define not only read method but also 3124 * open and release because .read takes up to PAGE_SIZE 3125 * data each time so and so is invoked multiple times. 3126 * We allocate the STB buffer in .open and release it 3127 * in .release 3128 */ 3129 static const struct file_operations smu_stb_debugfs_fops = { 3130 .owner = THIS_MODULE, 3131 .open = smu_stb_debugfs_open, 3132 .read = smu_stb_debugfs_read, 3133 .release = smu_stb_debugfs_release, 3134 .llseek = default_llseek, 3135 }; 3136 3137 #endif 3138 3139 void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev) 3140 { 3141 #if defined(CONFIG_DEBUG_FS) 3142 3143 struct smu_context *smu = adev->powerplay.pp_handle; 3144 3145 if (!smu || (!smu->stb_context.stb_buf_size)) 3146 return; 3147 3148 debugfs_create_file_size("amdgpu_smu_stb_dump", 3149 S_IRUSR, 3150 adev_to_drm(adev)->primary->debugfs_root, 3151 adev, 3152 &smu_stb_debugfs_fops, 3153 smu->stb_context.stb_buf_size); 3154 #endif 3155 } 3156 3157 int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size) 3158 { 3159 int ret = 0; 3160 3161 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num) 3162 ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size); 3163 3164 return ret; 3165 } 3166 3167 int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size) 3168 { 3169 int ret = 0; 3170 3171 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag) 3172 ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size); 3173 3174 return ret; 3175 } 3176