1 /* 2 * Copyright 2019 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 */ 22 23 #define SWSMU_CODE_LAYER_L1 24 25 #include <linux/firmware.h> 26 #include <linux/pci.h> 27 28 #include "amdgpu.h" 29 #include "amdgpu_smu.h" 30 #include "smu_internal.h" 31 #include "atom.h" 32 #include "arcturus_ppt.h" 33 #include "navi10_ppt.h" 34 #include "sienna_cichlid_ppt.h" 35 #include "renoir_ppt.h" 36 #include "vangogh_ppt.h" 37 #include "amd_pcie.h" 38 39 /* 40 * DO NOT use these for err/warn/info/debug messages. 41 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 42 * They are more MGPU friendly. 43 */ 44 #undef pr_err 45 #undef pr_warn 46 #undef pr_info 47 #undef pr_debug 48 49 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf) 50 { 51 size_t size = 0; 52 53 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 54 return -EOPNOTSUPP; 55 56 mutex_lock(&smu->mutex); 57 58 size = smu_get_pp_feature_mask(smu, buf); 59 60 mutex_unlock(&smu->mutex); 61 62 return size; 63 } 64 65 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask) 66 { 67 int ret = 0; 68 69 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 70 return -EOPNOTSUPP; 71 72 mutex_lock(&smu->mutex); 73 74 ret = smu_set_pp_feature_mask(smu, new_mask); 75 76 mutex_unlock(&smu->mutex); 77 78 return ret; 79 } 80 81 int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value) 82 { 83 int ret = 0; 84 struct smu_context *smu = &adev->smu; 85 86 if (is_support_sw_smu(adev) && smu->ppt_funcs->get_gfx_off_status) 87 *value = smu_get_gfx_off_status(smu); 88 else 89 ret = -EINVAL; 90 91 return ret; 92 } 93 94 int smu_set_soft_freq_range(struct smu_context *smu, 95 enum smu_clk_type clk_type, 96 uint32_t min, 97 uint32_t max) 98 { 99 int ret = 0; 100 101 mutex_lock(&smu->mutex); 102 103 if (smu->ppt_funcs->set_soft_freq_limited_range) 104 ret = smu->ppt_funcs->set_soft_freq_limited_range(smu, 105 clk_type, 106 min, 107 max); 108 109 mutex_unlock(&smu->mutex); 110 111 return ret; 112 } 113 114 int smu_get_dpm_freq_range(struct smu_context *smu, 115 enum smu_clk_type clk_type, 116 uint32_t *min, 117 uint32_t *max) 118 { 119 int ret = 0; 120 121 if (!min && !max) 122 return -EINVAL; 123 124 mutex_lock(&smu->mutex); 125 126 if (smu->ppt_funcs->get_dpm_ultimate_freq) 127 ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu, 128 clk_type, 129 min, 130 max); 131 132 mutex_unlock(&smu->mutex); 133 134 return ret; 135 } 136 137 static int 
smu_dpm_set_vcn_enable_locked(struct smu_context *smu, 138 bool enable) 139 { 140 struct smu_power_context *smu_power = &smu->smu_power; 141 struct smu_power_gate *power_gate = &smu_power->power_gate; 142 int ret = 0; 143 144 if (!smu->ppt_funcs->dpm_set_vcn_enable) 145 return 0; 146 147 if (atomic_read(&power_gate->vcn_gated) ^ enable) 148 return 0; 149 150 ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable); 151 if (!ret) 152 atomic_set(&power_gate->vcn_gated, !enable); 153 154 return ret; 155 } 156 157 static int smu_dpm_set_vcn_enable(struct smu_context *smu, 158 bool enable) 159 { 160 struct smu_power_context *smu_power = &smu->smu_power; 161 struct smu_power_gate *power_gate = &smu_power->power_gate; 162 int ret = 0; 163 164 mutex_lock(&power_gate->vcn_gate_lock); 165 166 ret = smu_dpm_set_vcn_enable_locked(smu, enable); 167 168 mutex_unlock(&power_gate->vcn_gate_lock); 169 170 return ret; 171 } 172 173 static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu, 174 bool enable) 175 { 176 struct smu_power_context *smu_power = &smu->smu_power; 177 struct smu_power_gate *power_gate = &smu_power->power_gate; 178 int ret = 0; 179 180 if (!smu->ppt_funcs->dpm_set_jpeg_enable) 181 return 0; 182 183 if (atomic_read(&power_gate->jpeg_gated) ^ enable) 184 return 0; 185 186 ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable); 187 if (!ret) 188 atomic_set(&power_gate->jpeg_gated, !enable); 189 190 return ret; 191 } 192 193 static int smu_dpm_set_jpeg_enable(struct smu_context *smu, 194 bool enable) 195 { 196 struct smu_power_context *smu_power = &smu->smu_power; 197 struct smu_power_gate *power_gate = &smu_power->power_gate; 198 int ret = 0; 199 200 mutex_lock(&power_gate->jpeg_gate_lock); 201 202 ret = smu_dpm_set_jpeg_enable_locked(smu, enable); 203 204 mutex_unlock(&power_gate->jpeg_gate_lock); 205 206 return ret; 207 } 208 209 /** 210 * smu_dpm_set_power_gate - power gate/ungate the specific IP block 211 * 212 * @smu: smu_context pointer 213 * @block_type: the IP block to power gate/ungate 214 * @gate: to power gate if true, ungate otherwise 215 * 216 * This API uses no smu->mutex lock protection due to: 217 * 1. It is either called by other IP block(gfx/sdma/vcn/uvd/vce). 218 * This is guarded to be race condition free by the caller. 219 * 2. Or get called on user setting request of power_dpm_force_performance_level. 220 * Under this case, the smu->mutex lock protection is already enforced on 221 * the parent API smu_force_performance_level of the call path. 222 */ 223 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type, 224 bool gate) 225 { 226 int ret = 0; 227 228 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 229 return -EOPNOTSUPP; 230 231 switch (block_type) { 232 /* 233 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses 234 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept. 235 */ 236 case AMD_IP_BLOCK_TYPE_UVD: 237 case AMD_IP_BLOCK_TYPE_VCN: 238 ret = smu_dpm_set_vcn_enable(smu, !gate); 239 if (ret) 240 dev_err(smu->adev->dev, "Failed to power %s VCN!\n", 241 gate ? "gate" : "ungate"); 242 break; 243 case AMD_IP_BLOCK_TYPE_GFX: 244 ret = smu_gfx_off_control(smu, gate); 245 if (ret) 246 dev_err(smu->adev->dev, "Failed to %s gfxoff!\n", 247 gate ? "enable" : "disable"); 248 break; 249 case AMD_IP_BLOCK_TYPE_SDMA: 250 ret = smu_powergate_sdma(smu, gate); 251 if (ret) 252 dev_err(smu->adev->dev, "Failed to power %s SDMA!\n", 253 gate ? 
"gate" : "ungate"); 254 break; 255 case AMD_IP_BLOCK_TYPE_JPEG: 256 ret = smu_dpm_set_jpeg_enable(smu, !gate); 257 if (ret) 258 dev_err(smu->adev->dev, "Failed to power %s JPEG!\n", 259 gate ? "gate" : "ungate"); 260 break; 261 default: 262 dev_err(smu->adev->dev, "Unsupported block type!\n"); 263 return -EINVAL; 264 } 265 266 return ret; 267 } 268 269 int smu_get_power_num_states(struct smu_context *smu, 270 struct pp_states_info *state_info) 271 { 272 if (!state_info) 273 return -EINVAL; 274 275 /* not support power state */ 276 memset(state_info, 0, sizeof(struct pp_states_info)); 277 state_info->nums = 1; 278 state_info->states[0] = POWER_STATE_TYPE_DEFAULT; 279 280 return 0; 281 } 282 283 bool is_support_sw_smu(struct amdgpu_device *adev) 284 { 285 if (adev->asic_type >= CHIP_ARCTURUS) 286 return true; 287 288 return false; 289 } 290 291 int smu_sys_get_pp_table(struct smu_context *smu, void **table) 292 { 293 struct smu_table_context *smu_table = &smu->smu_table; 294 uint32_t powerplay_table_size; 295 296 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 297 return -EOPNOTSUPP; 298 299 if (!smu_table->power_play_table && !smu_table->hardcode_pptable) 300 return -EINVAL; 301 302 mutex_lock(&smu->mutex); 303 304 if (smu_table->hardcode_pptable) 305 *table = smu_table->hardcode_pptable; 306 else 307 *table = smu_table->power_play_table; 308 309 powerplay_table_size = smu_table->power_play_table_size; 310 311 mutex_unlock(&smu->mutex); 312 313 return powerplay_table_size; 314 } 315 316 int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size) 317 { 318 struct smu_table_context *smu_table = &smu->smu_table; 319 ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf; 320 int ret = 0; 321 322 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 323 return -EOPNOTSUPP; 324 325 if (header->usStructureSize != size) { 326 dev_err(smu->adev->dev, "pp table size not matched !\n"); 327 return -EIO; 328 } 329 330 mutex_lock(&smu->mutex); 331 if (!smu_table->hardcode_pptable) 332 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL); 333 if (!smu_table->hardcode_pptable) { 334 ret = -ENOMEM; 335 goto failed; 336 } 337 338 memcpy(smu_table->hardcode_pptable, buf, size); 339 smu_table->power_play_table = smu_table->hardcode_pptable; 340 smu_table->power_play_table_size = size; 341 342 /* 343 * Special hw_fini action(for Navi1x, the DPMs disablement will be 344 * skipped) may be needed for custom pptable uploading. 
345 */ 346 smu->uploading_custom_pp_table = true; 347 348 ret = smu_reset(smu); 349 if (ret) 350 dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret); 351 352 smu->uploading_custom_pp_table = false; 353 354 failed: 355 mutex_unlock(&smu->mutex); 356 return ret; 357 } 358 359 static int smu_get_driver_allowed_feature_mask(struct smu_context *smu) 360 { 361 struct smu_feature *feature = &smu->smu_feature; 362 int ret = 0; 363 uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32]; 364 365 bitmap_zero(feature->allowed, SMU_FEATURE_MAX); 366 367 ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask, 368 SMU_FEATURE_MAX/32); 369 if (ret) 370 return ret; 371 372 bitmap_or(feature->allowed, feature->allowed, 373 (unsigned long *)allowed_feature_mask, 374 feature->feature_num); 375 376 return ret; 377 } 378 379 static int smu_set_funcs(struct amdgpu_device *adev) 380 { 381 struct smu_context *smu = &adev->smu; 382 383 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) 384 smu->od_enabled = true; 385 386 switch (adev->asic_type) { 387 case CHIP_NAVI10: 388 case CHIP_NAVI14: 389 case CHIP_NAVI12: 390 navi10_set_ppt_funcs(smu); 391 break; 392 case CHIP_ARCTURUS: 393 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 394 arcturus_set_ppt_funcs(smu); 395 /* OD is not supported on Arcturus */ 396 smu->od_enabled =false; 397 break; 398 case CHIP_SIENNA_CICHLID: 399 case CHIP_NAVY_FLOUNDER: 400 case CHIP_DIMGREY_CAVEFISH: 401 sienna_cichlid_set_ppt_funcs(smu); 402 break; 403 case CHIP_RENOIR: 404 renoir_set_ppt_funcs(smu); 405 /* enable the fine grain tuning function by default */ 406 smu->fine_grain_enabled = true; 407 /* close the fine grain tuning function by default */ 408 smu->fine_grain_started = false; 409 break; 410 case CHIP_VANGOGH: 411 vangogh_set_ppt_funcs(smu); 412 /* enable the OD by default to allow the fine grain tuning function */ 413 smu->od_enabled = true; 414 break; 415 default: 416 return -EINVAL; 417 } 418 419 return 0; 420 } 421 422 static int smu_early_init(void *handle) 423 { 424 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 425 struct smu_context *smu = &adev->smu; 426 427 smu->adev = adev; 428 smu->pm_enabled = !!amdgpu_dpm; 429 smu->is_apu = false; 430 mutex_init(&smu->mutex); 431 mutex_init(&smu->smu_baco.mutex); 432 smu->smu_baco.state = SMU_BACO_STATE_EXIT; 433 smu->smu_baco.platform_support = false; 434 435 return smu_set_funcs(adev); 436 } 437 438 static int smu_set_default_dpm_table(struct smu_context *smu) 439 { 440 struct smu_power_context *smu_power = &smu->smu_power; 441 struct smu_power_gate *power_gate = &smu_power->power_gate; 442 int vcn_gate, jpeg_gate; 443 int ret = 0; 444 445 if (!smu->ppt_funcs->set_default_dpm_table) 446 return 0; 447 448 mutex_lock(&power_gate->vcn_gate_lock); 449 mutex_lock(&power_gate->jpeg_gate_lock); 450 451 vcn_gate = atomic_read(&power_gate->vcn_gated); 452 jpeg_gate = atomic_read(&power_gate->jpeg_gated); 453 454 ret = smu_dpm_set_vcn_enable_locked(smu, true); 455 if (ret) 456 goto err0_out; 457 458 ret = smu_dpm_set_jpeg_enable_locked(smu, true); 459 if (ret) 460 goto err1_out; 461 462 ret = smu->ppt_funcs->set_default_dpm_table(smu); 463 if (ret) 464 dev_err(smu->adev->dev, 465 "Failed to setup default dpm clock tables!\n"); 466 467 smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate); 468 err1_out: 469 smu_dpm_set_vcn_enable_locked(smu, !vcn_gate); 470 err0_out: 471 mutex_unlock(&power_gate->jpeg_gate_lock); 472 mutex_unlock(&power_gate->vcn_gate_lock); 473 474 return ret; 475 } 476 477 static int smu_late_init(void *handle) 
478 { 479 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 480 struct smu_context *smu = &adev->smu; 481 int ret = 0; 482 483 smu_set_fine_grain_gfx_freq_parameters(smu); 484 485 if (!smu->pm_enabled) 486 return 0; 487 488 ret = smu_post_init(smu); 489 if (ret) { 490 dev_err(adev->dev, "Failed to post smu init!\n"); 491 return ret; 492 } 493 494 if (adev->asic_type == CHIP_VANGOGH) 495 return 0; 496 497 ret = smu_set_default_od_settings(smu); 498 if (ret) { 499 dev_err(adev->dev, "Failed to setup default OD settings!\n"); 500 return ret; 501 } 502 503 ret = smu_populate_umd_state_clk(smu); 504 if (ret) { 505 dev_err(adev->dev, "Failed to populate UMD state clocks!\n"); 506 return ret; 507 } 508 509 ret = smu_get_asic_power_limits(smu); 510 if (ret) { 511 dev_err(adev->dev, "Failed to get asic power limits!\n"); 512 return ret; 513 } 514 515 smu_get_unique_id(smu); 516 517 smu_get_fan_parameters(smu); 518 519 smu_handle_task(&adev->smu, 520 smu->smu_dpm.dpm_level, 521 AMD_PP_TASK_COMPLETE_INIT, 522 false); 523 524 return 0; 525 } 526 527 static int smu_init_fb_allocations(struct smu_context *smu) 528 { 529 struct amdgpu_device *adev = smu->adev; 530 struct smu_table_context *smu_table = &smu->smu_table; 531 struct smu_table *tables = smu_table->tables; 532 struct smu_table *driver_table = &(smu_table->driver_table); 533 uint32_t max_table_size = 0; 534 int ret, i; 535 536 /* VRAM allocation for tool table */ 537 if (tables[SMU_TABLE_PMSTATUSLOG].size) { 538 ret = amdgpu_bo_create_kernel(adev, 539 tables[SMU_TABLE_PMSTATUSLOG].size, 540 tables[SMU_TABLE_PMSTATUSLOG].align, 541 tables[SMU_TABLE_PMSTATUSLOG].domain, 542 &tables[SMU_TABLE_PMSTATUSLOG].bo, 543 &tables[SMU_TABLE_PMSTATUSLOG].mc_address, 544 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr); 545 if (ret) { 546 dev_err(adev->dev, "VRAM allocation for tool table failed!\n"); 547 return ret; 548 } 549 } 550 551 /* VRAM allocation for driver table */ 552 for (i = 0; i < SMU_TABLE_COUNT; i++) { 553 if (tables[i].size == 0) 554 continue; 555 556 if (i == SMU_TABLE_PMSTATUSLOG) 557 continue; 558 559 if (max_table_size < tables[i].size) 560 max_table_size = tables[i].size; 561 } 562 563 driver_table->size = max_table_size; 564 driver_table->align = PAGE_SIZE; 565 driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM; 566 567 ret = amdgpu_bo_create_kernel(adev, 568 driver_table->size, 569 driver_table->align, 570 driver_table->domain, 571 &driver_table->bo, 572 &driver_table->mc_address, 573 &driver_table->cpu_addr); 574 if (ret) { 575 dev_err(adev->dev, "VRAM allocation for driver table failed!\n"); 576 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address) 577 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo, 578 &tables[SMU_TABLE_PMSTATUSLOG].mc_address, 579 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr); 580 } 581 582 return ret; 583 } 584 585 static int smu_fini_fb_allocations(struct smu_context *smu) 586 { 587 struct smu_table_context *smu_table = &smu->smu_table; 588 struct smu_table *tables = smu_table->tables; 589 struct smu_table *driver_table = &(smu_table->driver_table); 590 591 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address) 592 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo, 593 &tables[SMU_TABLE_PMSTATUSLOG].mc_address, 594 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr); 595 596 amdgpu_bo_free_kernel(&driver_table->bo, 597 &driver_table->mc_address, 598 &driver_table->cpu_addr); 599 600 return 0; 601 } 602 603 /** 604 * smu_alloc_memory_pool - allocate memory pool in the system memory 605 * 606 * @smu: amdgpu_device pointer 607 * 608 * 
This memory pool will be used for SMC use and msg SetSystemVirtualDramAddr 609 * and DramLogSetDramAddr can notify it changed. 610 * 611 * Returns 0 on success, error on failure. 612 */ 613 static int smu_alloc_memory_pool(struct smu_context *smu) 614 { 615 struct amdgpu_device *adev = smu->adev; 616 struct smu_table_context *smu_table = &smu->smu_table; 617 struct smu_table *memory_pool = &smu_table->memory_pool; 618 uint64_t pool_size = smu->pool_size; 619 int ret = 0; 620 621 if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO) 622 return ret; 623 624 memory_pool->size = pool_size; 625 memory_pool->align = PAGE_SIZE; 626 memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT; 627 628 switch (pool_size) { 629 case SMU_MEMORY_POOL_SIZE_256_MB: 630 case SMU_MEMORY_POOL_SIZE_512_MB: 631 case SMU_MEMORY_POOL_SIZE_1_GB: 632 case SMU_MEMORY_POOL_SIZE_2_GB: 633 ret = amdgpu_bo_create_kernel(adev, 634 memory_pool->size, 635 memory_pool->align, 636 memory_pool->domain, 637 &memory_pool->bo, 638 &memory_pool->mc_address, 639 &memory_pool->cpu_addr); 640 if (ret) 641 dev_err(adev->dev, "VRAM allocation for dramlog failed!\n"); 642 break; 643 default: 644 break; 645 } 646 647 return ret; 648 } 649 650 static int smu_free_memory_pool(struct smu_context *smu) 651 { 652 struct smu_table_context *smu_table = &smu->smu_table; 653 struct smu_table *memory_pool = &smu_table->memory_pool; 654 655 if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO) 656 return 0; 657 658 amdgpu_bo_free_kernel(&memory_pool->bo, 659 &memory_pool->mc_address, 660 &memory_pool->cpu_addr); 661 662 memset(memory_pool, 0, sizeof(struct smu_table)); 663 664 return 0; 665 } 666 667 static int smu_alloc_dummy_read_table(struct smu_context *smu) 668 { 669 struct smu_table_context *smu_table = &smu->smu_table; 670 struct smu_table *dummy_read_1_table = 671 &smu_table->dummy_read_1_table; 672 struct amdgpu_device *adev = smu->adev; 673 int ret = 0; 674 675 dummy_read_1_table->size = 0x40000; 676 dummy_read_1_table->align = PAGE_SIZE; 677 dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM; 678 679 ret = amdgpu_bo_create_kernel(adev, 680 dummy_read_1_table->size, 681 dummy_read_1_table->align, 682 dummy_read_1_table->domain, 683 &dummy_read_1_table->bo, 684 &dummy_read_1_table->mc_address, 685 &dummy_read_1_table->cpu_addr); 686 if (ret) 687 dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n"); 688 689 return ret; 690 } 691 692 static void smu_free_dummy_read_table(struct smu_context *smu) 693 { 694 struct smu_table_context *smu_table = &smu->smu_table; 695 struct smu_table *dummy_read_1_table = 696 &smu_table->dummy_read_1_table; 697 698 699 amdgpu_bo_free_kernel(&dummy_read_1_table->bo, 700 &dummy_read_1_table->mc_address, 701 &dummy_read_1_table->cpu_addr); 702 703 memset(dummy_read_1_table, 0, sizeof(struct smu_table)); 704 } 705 706 static int smu_smc_table_sw_init(struct smu_context *smu) 707 { 708 int ret; 709 710 /** 711 * Create smu_table structure, and init smc tables such as 712 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, and etc. 713 */ 714 ret = smu_init_smc_tables(smu); 715 if (ret) { 716 dev_err(smu->adev->dev, "Failed to init smc tables!\n"); 717 return ret; 718 } 719 720 /** 721 * Create smu_power_context structure, and allocate smu_dpm_context and 722 * context size to fill the smu_power_context data. 723 */ 724 ret = smu_init_power(smu); 725 if (ret) { 726 dev_err(smu->adev->dev, "Failed to init smu_init_power!\n"); 727 return ret; 728 } 729 730 /* 731 * allocate vram bos to store smc table contents. 
732 */ 733 ret = smu_init_fb_allocations(smu); 734 if (ret) 735 return ret; 736 737 ret = smu_alloc_memory_pool(smu); 738 if (ret) 739 return ret; 740 741 ret = smu_alloc_dummy_read_table(smu); 742 if (ret) 743 return ret; 744 745 ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c); 746 if (ret) 747 return ret; 748 749 return 0; 750 } 751 752 static int smu_smc_table_sw_fini(struct smu_context *smu) 753 { 754 int ret; 755 756 smu_i2c_fini(smu, &smu->adev->pm.smu_i2c); 757 758 smu_free_dummy_read_table(smu); 759 760 ret = smu_free_memory_pool(smu); 761 if (ret) 762 return ret; 763 764 ret = smu_fini_fb_allocations(smu); 765 if (ret) 766 return ret; 767 768 ret = smu_fini_power(smu); 769 if (ret) { 770 dev_err(smu->adev->dev, "Failed to init smu_fini_power!\n"); 771 return ret; 772 } 773 774 ret = smu_fini_smc_tables(smu); 775 if (ret) { 776 dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n"); 777 return ret; 778 } 779 780 return 0; 781 } 782 783 static void smu_throttling_logging_work_fn(struct work_struct *work) 784 { 785 struct smu_context *smu = container_of(work, struct smu_context, 786 throttling_logging_work); 787 788 smu_log_thermal_throttling(smu); 789 } 790 791 static void smu_interrupt_work_fn(struct work_struct *work) 792 { 793 struct smu_context *smu = container_of(work, struct smu_context, 794 interrupt_work); 795 796 mutex_lock(&smu->mutex); 797 798 if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work) 799 smu->ppt_funcs->interrupt_work(smu); 800 801 mutex_unlock(&smu->mutex); 802 } 803 804 static int smu_sw_init(void *handle) 805 { 806 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 807 struct smu_context *smu = &adev->smu; 808 int ret; 809 810 smu->pool_size = adev->pm.smu_prv_buffer_size; 811 smu->smu_feature.feature_num = SMU_FEATURE_MAX; 812 mutex_init(&smu->smu_feature.mutex); 813 bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX); 814 bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX); 815 bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX); 816 817 mutex_init(&smu->sensor_lock); 818 mutex_init(&smu->metrics_lock); 819 mutex_init(&smu->message_lock); 820 821 INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn); 822 INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn); 823 atomic64_set(&smu->throttle_int_counter, 0); 824 smu->watermarks_bitmap = 0; 825 smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 826 smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 827 828 atomic_set(&smu->smu_power.power_gate.vcn_gated, 1); 829 atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); 830 mutex_init(&smu->smu_power.power_gate.vcn_gate_lock); 831 mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock); 832 833 smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; 834 smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; 835 smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; 836 smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; 837 smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3; 838 smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; 839 smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; 840 smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; 841 842 smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 843 smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; 844 smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING; 845 smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO; 846 
smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR; 847 smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE; 848 smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM; 849 smu->display_config = &adev->pm.pm_display_cfg; 850 851 smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; 852 smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; 853 854 ret = smu_init_microcode(smu); 855 if (ret) { 856 dev_err(adev->dev, "Failed to load smu firmware!\n"); 857 return ret; 858 } 859 860 ret = smu_smc_table_sw_init(smu); 861 if (ret) { 862 dev_err(adev->dev, "Failed to sw init smc table!\n"); 863 return ret; 864 } 865 866 ret = smu_register_irq_handler(smu); 867 if (ret) { 868 dev_err(adev->dev, "Failed to register smc irq handler!\n"); 869 return ret; 870 } 871 872 return 0; 873 } 874 875 static int smu_sw_fini(void *handle) 876 { 877 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 878 struct smu_context *smu = &adev->smu; 879 int ret; 880 881 ret = smu_smc_table_sw_fini(smu); 882 if (ret) { 883 dev_err(adev->dev, "Failed to sw fini smc table!\n"); 884 return ret; 885 } 886 887 smu_fini_microcode(smu); 888 889 return 0; 890 } 891 892 static int smu_get_thermal_temperature_range(struct smu_context *smu) 893 { 894 struct amdgpu_device *adev = smu->adev; 895 struct smu_temperature_range *range = 896 &smu->thermal_range; 897 int ret = 0; 898 899 if (!smu->ppt_funcs->get_thermal_temperature_range) 900 return 0; 901 902 ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range); 903 if (ret) 904 return ret; 905 906 adev->pm.dpm.thermal.min_temp = range->min; 907 adev->pm.dpm.thermal.max_temp = range->max; 908 adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max; 909 adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min; 910 adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max; 911 adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max; 912 adev->pm.dpm.thermal.min_mem_temp = range->mem_min; 913 adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max; 914 adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max; 915 916 return ret; 917 } 918 919 static int smu_smc_hw_setup(struct smu_context *smu) 920 { 921 struct amdgpu_device *adev = smu->adev; 922 uint32_t pcie_gen = 0, pcie_width = 0; 923 int ret = 0; 924 925 if (adev->in_suspend && smu_is_dpm_running(smu)) { 926 dev_info(adev->dev, "dpm has been enabled\n"); 927 /* this is needed specifically */ 928 if ((adev->asic_type >= CHIP_SIENNA_CICHLID) && 929 (adev->asic_type <= CHIP_DIMGREY_CAVEFISH)) 930 ret = smu_system_features_control(smu, true); 931 return ret; 932 } 933 934 ret = smu_init_display_count(smu, 0); 935 if (ret) { 936 dev_info(adev->dev, "Failed to pre-set display count as 0!\n"); 937 return ret; 938 } 939 940 ret = smu_set_driver_table_location(smu); 941 if (ret) { 942 dev_err(adev->dev, "Failed to SetDriverDramAddr!\n"); 943 return ret; 944 } 945 946 /* 947 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools. 948 */ 949 ret = smu_set_tool_table_location(smu); 950 if (ret) { 951 dev_err(adev->dev, "Failed to SetToolsDramAddr!\n"); 952 return ret; 953 } 954 955 /* 956 * Use msg SetSystemVirtualDramAddr and DramLogSetDramAddr can notify 957 * pool location. 
958 */ 959 ret = smu_notify_memory_pool_location(smu); 960 if (ret) { 961 dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n"); 962 return ret; 963 } 964 965 /* smu_dump_pptable(smu); */ 966 /* 967 * Copy pptable bo in the vram to smc with SMU MSGs such as 968 * SetDriverDramAddr and TransferTableDram2Smu. 969 */ 970 ret = smu_write_pptable(smu); 971 if (ret) { 972 dev_err(adev->dev, "Failed to transfer pptable to SMC!\n"); 973 return ret; 974 } 975 976 /* issue Run*Btc msg */ 977 ret = smu_run_btc(smu); 978 if (ret) 979 return ret; 980 981 ret = smu_feature_set_allowed_mask(smu); 982 if (ret) { 983 dev_err(adev->dev, "Failed to set driver allowed features mask!\n"); 984 return ret; 985 } 986 987 ret = smu_system_features_control(smu, true); 988 if (ret) { 989 dev_err(adev->dev, "Failed to enable requested dpm features!\n"); 990 return ret; 991 } 992 993 if (!smu_is_dpm_running(smu)) 994 dev_info(adev->dev, "dpm has been disabled\n"); 995 996 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) 997 pcie_gen = 3; 998 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) 999 pcie_gen = 2; 1000 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) 1001 pcie_gen = 1; 1002 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1) 1003 pcie_gen = 0; 1004 1005 /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1 1006 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4 1007 * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 1008 */ 1009 if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) 1010 pcie_width = 6; 1011 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) 1012 pcie_width = 5; 1013 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) 1014 pcie_width = 4; 1015 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) 1016 pcie_width = 3; 1017 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) 1018 pcie_width = 2; 1019 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) 1020 pcie_width = 1; 1021 ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width); 1022 if (ret) { 1023 dev_err(adev->dev, "Attempt to override pcie params failed!\n"); 1024 return ret; 1025 } 1026 1027 ret = smu_get_thermal_temperature_range(smu); 1028 if (ret) { 1029 dev_err(adev->dev, "Failed to get thermal temperature ranges!\n"); 1030 return ret; 1031 } 1032 1033 ret = smu_enable_thermal_alert(smu); 1034 if (ret) { 1035 dev_err(adev->dev, "Failed to enable thermal alert!\n"); 1036 return ret; 1037 } 1038 1039 /* 1040 * Set initialized values (get from vbios) to dpm tables context such as 1041 * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each 1042 * type of clks. 1043 */ 1044 ret = smu_set_default_dpm_table(smu); 1045 if (ret) { 1046 dev_err(adev->dev, "Failed to setup default dpm clock tables!\n"); 1047 return ret; 1048 } 1049 1050 ret = smu_notify_display_change(smu); 1051 if (ret) 1052 return ret; 1053 1054 /* 1055 * Set min deep sleep dce fclk with bootup value from vbios via 1056 * SetMinDeepSleepDcefclk MSG. 
1057 */ 1058 ret = smu_set_min_dcef_deep_sleep(smu, 1059 smu->smu_table.boot_values.dcefclk / 100); 1060 if (ret) 1061 return ret; 1062 1063 return ret; 1064 } 1065 1066 static int smu_start_smc_engine(struct smu_context *smu) 1067 { 1068 struct amdgpu_device *adev = smu->adev; 1069 int ret = 0; 1070 1071 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 1072 if (adev->asic_type < CHIP_NAVI10) { 1073 if (smu->ppt_funcs->load_microcode) { 1074 ret = smu->ppt_funcs->load_microcode(smu); 1075 if (ret) 1076 return ret; 1077 } 1078 } 1079 } 1080 1081 if (smu->ppt_funcs->check_fw_status) { 1082 ret = smu->ppt_funcs->check_fw_status(smu); 1083 if (ret) { 1084 dev_err(adev->dev, "SMC is not ready\n"); 1085 return ret; 1086 } 1087 } 1088 1089 /* 1090 * Send msg GetDriverIfVersion to check if the return value is equal 1091 * with DRIVER_IF_VERSION of smc header. 1092 */ 1093 ret = smu_check_fw_version(smu); 1094 if (ret) 1095 return ret; 1096 1097 return ret; 1098 } 1099 1100 static int smu_hw_init(void *handle) 1101 { 1102 int ret; 1103 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1104 struct smu_context *smu = &adev->smu; 1105 1106 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) { 1107 smu->pm_enabled = false; 1108 return 0; 1109 } 1110 1111 ret = smu_start_smc_engine(smu); 1112 if (ret) { 1113 dev_err(adev->dev, "SMC engine is not correctly up!\n"); 1114 return ret; 1115 } 1116 1117 if (smu->is_apu) { 1118 smu_powergate_sdma(&adev->smu, false); 1119 smu_dpm_set_vcn_enable(smu, true); 1120 smu_dpm_set_jpeg_enable(smu, true); 1121 smu_set_gfx_cgpg(&adev->smu, true); 1122 } 1123 1124 if (!smu->pm_enabled) 1125 return 0; 1126 1127 /* get boot_values from vbios to set revision, gfxclk, and etc. */ 1128 ret = smu_get_vbios_bootup_values(smu); 1129 if (ret) { 1130 dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n"); 1131 return ret; 1132 } 1133 1134 ret = smu_setup_pptable(smu); 1135 if (ret) { 1136 dev_err(adev->dev, "Failed to setup pptable!\n"); 1137 return ret; 1138 } 1139 1140 ret = smu_get_driver_allowed_feature_mask(smu); 1141 if (ret) 1142 return ret; 1143 1144 ret = smu_smc_hw_setup(smu); 1145 if (ret) { 1146 dev_err(adev->dev, "Failed to setup smc hw!\n"); 1147 return ret; 1148 } 1149 1150 /* 1151 * Move maximum sustainable clock retrieving here considering 1152 * 1. It is not needed on resume(from S3). 1153 * 2. DAL settings come between .hw_init and .late_init of SMU. 1154 * And DAL needs to know the maximum sustainable clocks. Thus 1155 * it cannot be put in .late_init(). 1156 */ 1157 ret = smu_init_max_sustainable_clocks(smu); 1158 if (ret) { 1159 dev_err(adev->dev, "Failed to init max sustainable clocks!\n"); 1160 return ret; 1161 } 1162 1163 adev->pm.dpm_enabled = true; 1164 1165 dev_info(adev->dev, "SMU is initialized successfully!\n"); 1166 1167 return 0; 1168 } 1169 1170 static int smu_disable_dpms(struct smu_context *smu) 1171 { 1172 struct amdgpu_device *adev = smu->adev; 1173 int ret = 0; 1174 bool use_baco = !smu->is_apu && 1175 ((amdgpu_in_reset(adev) && 1176 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) || 1177 ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev))); 1178 1179 /* 1180 * For custom pptable uploading, skip the DPM features 1181 * disable process on Navi1x ASICs. 1182 * - As the gfx related features are under control of 1183 * RLC on those ASICs. RLC reinitialization will be 1184 * needed to reenable them. That will cost much more 1185 * efforts. 
1186 * 1187 * - SMU firmware can handle the DPM reenablement 1188 * properly. 1189 */ 1190 if (smu->uploading_custom_pp_table && 1191 (adev->asic_type >= CHIP_NAVI10) && 1192 (adev->asic_type <= CHIP_DIMGREY_CAVEFISH)) 1193 return 0; 1194 1195 /* 1196 * For Sienna_Cichlid, PMFW will handle the features disablement properly 1197 * on BACO in. Driver involvement is unnecessary. 1198 */ 1199 if ((adev->asic_type == CHIP_SIENNA_CICHLID) && 1200 use_baco) 1201 return 0; 1202 1203 /* 1204 * For gpu reset, runpm and hibernation through BACO, 1205 * BACO feature has to be kept enabled. 1206 */ 1207 if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) { 1208 ret = smu_disable_all_features_with_exception(smu, 1209 SMU_FEATURE_BACO_BIT); 1210 if (ret) 1211 dev_err(adev->dev, "Failed to disable smu features except BACO.\n"); 1212 } else { 1213 ret = smu_system_features_control(smu, false); 1214 if (ret) 1215 dev_err(adev->dev, "Failed to disable smu features.\n"); 1216 } 1217 1218 if (adev->asic_type >= CHIP_NAVI10 && 1219 adev->gfx.rlc.funcs->stop) 1220 adev->gfx.rlc.funcs->stop(adev); 1221 1222 return ret; 1223 } 1224 1225 static int smu_smc_hw_cleanup(struct smu_context *smu) 1226 { 1227 struct amdgpu_device *adev = smu->adev; 1228 int ret = 0; 1229 1230 cancel_work_sync(&smu->throttling_logging_work); 1231 cancel_work_sync(&smu->interrupt_work); 1232 1233 ret = smu_disable_thermal_alert(smu); 1234 if (ret) { 1235 dev_err(adev->dev, "Fail to disable thermal alert!\n"); 1236 return ret; 1237 } 1238 1239 ret = smu_disable_dpms(smu); 1240 if (ret) { 1241 dev_err(adev->dev, "Fail to disable dpm features!\n"); 1242 return ret; 1243 } 1244 1245 return 0; 1246 } 1247 1248 static int smu_hw_fini(void *handle) 1249 { 1250 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1251 struct smu_context *smu = &adev->smu; 1252 1253 if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) 1254 return 0; 1255 1256 if (smu->is_apu) { 1257 smu_powergate_sdma(&adev->smu, true); 1258 smu_dpm_set_vcn_enable(smu, false); 1259 smu_dpm_set_jpeg_enable(smu, false); 1260 } 1261 1262 if (!smu->pm_enabled) 1263 return 0; 1264 1265 adev->pm.dpm_enabled = false; 1266 1267 return smu_smc_hw_cleanup(smu); 1268 } 1269 1270 int smu_reset(struct smu_context *smu) 1271 { 1272 struct amdgpu_device *adev = smu->adev; 1273 int ret; 1274 1275 amdgpu_gfx_off_ctrl(smu->adev, false); 1276 1277 ret = smu_hw_fini(adev); 1278 if (ret) 1279 return ret; 1280 1281 ret = smu_hw_init(adev); 1282 if (ret) 1283 return ret; 1284 1285 ret = smu_late_init(adev); 1286 if (ret) 1287 return ret; 1288 1289 amdgpu_gfx_off_ctrl(smu->adev, true); 1290 1291 return 0; 1292 } 1293 1294 static int smu_suspend(void *handle) 1295 { 1296 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1297 struct smu_context *smu = &adev->smu; 1298 int ret; 1299 1300 if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) 1301 return 0; 1302 1303 if (!smu->pm_enabled) 1304 return 0; 1305 1306 adev->pm.dpm_enabled = false; 1307 1308 ret = smu_smc_hw_cleanup(smu); 1309 if (ret) 1310 return ret; 1311 1312 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED); 1313 1314 if (smu->is_apu) 1315 smu_set_gfx_cgpg(&adev->smu, false); 1316 1317 return 0; 1318 } 1319 1320 static int smu_resume(void *handle) 1321 { 1322 int ret; 1323 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1324 struct smu_context *smu = &adev->smu; 1325 1326 if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) 1327 return 0; 1328 1329 if (!smu->pm_enabled) 1330 
return 0; 1331 1332 dev_info(adev->dev, "SMU is resuming...\n"); 1333 1334 ret = smu_start_smc_engine(smu); 1335 if (ret) { 1336 dev_err(adev->dev, "SMC engine is not correctly up!\n"); 1337 return ret; 1338 } 1339 1340 ret = smu_smc_hw_setup(smu); 1341 if (ret) { 1342 dev_err(adev->dev, "Failed to setup smc hw!\n"); 1343 return ret; 1344 } 1345 1346 if (smu->is_apu) 1347 smu_set_gfx_cgpg(&adev->smu, true); 1348 1349 smu->disable_uclk_switch = 0; 1350 1351 adev->pm.dpm_enabled = true; 1352 1353 dev_info(adev->dev, "SMU is resumed successfully!\n"); 1354 1355 return 0; 1356 } 1357 1358 int smu_display_configuration_change(struct smu_context *smu, 1359 const struct amd_pp_display_configuration *display_config) 1360 { 1361 int index = 0; 1362 int num_of_active_display = 0; 1363 1364 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1365 return -EOPNOTSUPP; 1366 1367 if (!display_config) 1368 return -EINVAL; 1369 1370 mutex_lock(&smu->mutex); 1371 1372 smu_set_min_dcef_deep_sleep(smu, 1373 display_config->min_dcef_deep_sleep_set_clk / 100); 1374 1375 for (index = 0; index < display_config->num_path_including_non_display; index++) { 1376 if (display_config->displays[index].controller_id != 0) 1377 num_of_active_display++; 1378 } 1379 1380 mutex_unlock(&smu->mutex); 1381 1382 return 0; 1383 } 1384 1385 static int smu_set_clockgating_state(void *handle, 1386 enum amd_clockgating_state state) 1387 { 1388 return 0; 1389 } 1390 1391 static int smu_set_powergating_state(void *handle, 1392 enum amd_powergating_state state) 1393 { 1394 return 0; 1395 } 1396 1397 static int smu_enable_umd_pstate(void *handle, 1398 enum amd_dpm_forced_level *level) 1399 { 1400 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 1401 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 1402 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 1403 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 1404 1405 struct smu_context *smu = (struct smu_context*)(handle); 1406 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1407 1408 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 1409 return -EINVAL; 1410 1411 if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) { 1412 /* enter umd pstate, save current level, disable gfx cg*/ 1413 if (*level & profile_mode_mask) { 1414 smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level; 1415 smu_dpm_ctx->enable_umd_pstate = true; 1416 smu_gpo_control(smu, false); 1417 amdgpu_device_ip_set_powergating_state(smu->adev, 1418 AMD_IP_BLOCK_TYPE_GFX, 1419 AMD_PG_STATE_UNGATE); 1420 amdgpu_device_ip_set_clockgating_state(smu->adev, 1421 AMD_IP_BLOCK_TYPE_GFX, 1422 AMD_CG_STATE_UNGATE); 1423 smu_gfx_ulv_control(smu, false); 1424 smu_deep_sleep_control(smu, false); 1425 amdgpu_asic_update_umd_stable_pstate(smu->adev, true); 1426 } 1427 } else { 1428 /* exit umd pstate, restore level, enable gfx cg*/ 1429 if (!(*level & profile_mode_mask)) { 1430 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) 1431 *level = smu_dpm_ctx->saved_dpm_level; 1432 smu_dpm_ctx->enable_umd_pstate = false; 1433 amdgpu_asic_update_umd_stable_pstate(smu->adev, false); 1434 smu_deep_sleep_control(smu, true); 1435 smu_gfx_ulv_control(smu, true); 1436 amdgpu_device_ip_set_clockgating_state(smu->adev, 1437 AMD_IP_BLOCK_TYPE_GFX, 1438 AMD_CG_STATE_GATE); 1439 amdgpu_device_ip_set_powergating_state(smu->adev, 1440 AMD_IP_BLOCK_TYPE_GFX, 1441 AMD_PG_STATE_GATE); 1442 smu_gpo_control(smu, true); 1443 } 1444 } 1445 1446 return 0; 1447 } 1448 1449 static int smu_adjust_power_state_dynamic(struct smu_context *smu, 1450 enum amd_dpm_forced_level level, 1451 
bool skip_display_settings) 1452 { 1453 int ret = 0; 1454 int index = 0; 1455 long workload; 1456 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1457 1458 if (!skip_display_settings) { 1459 ret = smu_display_config_changed(smu); 1460 if (ret) { 1461 dev_err(smu->adev->dev, "Failed to change display config!"); 1462 return ret; 1463 } 1464 } 1465 1466 ret = smu_apply_clocks_adjust_rules(smu); 1467 if (ret) { 1468 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!"); 1469 return ret; 1470 } 1471 1472 if (!skip_display_settings) { 1473 ret = smu_notify_smc_display_config(smu); 1474 if (ret) { 1475 dev_err(smu->adev->dev, "Failed to notify smc display config!"); 1476 return ret; 1477 } 1478 } 1479 1480 if (smu_dpm_ctx->dpm_level != level) { 1481 ret = smu_asic_set_performance_level(smu, level); 1482 if (ret) { 1483 dev_err(smu->adev->dev, "Failed to set performance level!"); 1484 return ret; 1485 } 1486 1487 /* update the saved copy */ 1488 smu_dpm_ctx->dpm_level = level; 1489 } 1490 1491 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { 1492 index = fls(smu->workload_mask); 1493 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; 1494 workload = smu->workload_setting[index]; 1495 1496 if (smu->power_profile_mode != workload) 1497 smu_set_power_profile_mode(smu, &workload, 0, false); 1498 } 1499 1500 return ret; 1501 } 1502 1503 int smu_handle_task(struct smu_context *smu, 1504 enum amd_dpm_forced_level level, 1505 enum amd_pp_task task_id, 1506 bool lock_needed) 1507 { 1508 int ret = 0; 1509 1510 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1511 return -EOPNOTSUPP; 1512 1513 if (lock_needed) 1514 mutex_lock(&smu->mutex); 1515 1516 switch (task_id) { 1517 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: 1518 ret = smu_pre_display_config_changed(smu); 1519 if (ret) 1520 goto out; 1521 ret = smu_adjust_power_state_dynamic(smu, level, false); 1522 break; 1523 case AMD_PP_TASK_COMPLETE_INIT: 1524 case AMD_PP_TASK_READJUST_POWER_STATE: 1525 ret = smu_adjust_power_state_dynamic(smu, level, true); 1526 break; 1527 default: 1528 break; 1529 } 1530 1531 out: 1532 if (lock_needed) 1533 mutex_unlock(&smu->mutex); 1534 1535 return ret; 1536 } 1537 1538 int smu_switch_power_profile(struct smu_context *smu, 1539 enum PP_SMC_POWER_PROFILE type, 1540 bool en) 1541 { 1542 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1543 long workload; 1544 uint32_t index; 1545 1546 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1547 return -EOPNOTSUPP; 1548 1549 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM)) 1550 return -EINVAL; 1551 1552 mutex_lock(&smu->mutex); 1553 1554 if (!en) { 1555 smu->workload_mask &= ~(1 << smu->workload_prority[type]); 1556 index = fls(smu->workload_mask); 1557 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; 1558 workload = smu->workload_setting[index]; 1559 } else { 1560 smu->workload_mask |= (1 << smu->workload_prority[type]); 1561 index = fls(smu->workload_mask); 1562 index = index <= WORKLOAD_POLICY_MAX ? 
index - 1 : 0; 1563 workload = smu->workload_setting[index]; 1564 } 1565 1566 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) 1567 smu_set_power_profile_mode(smu, &workload, 0, false); 1568 1569 mutex_unlock(&smu->mutex); 1570 1571 return 0; 1572 } 1573 1574 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu) 1575 { 1576 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1577 enum amd_dpm_forced_level level; 1578 1579 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1580 return -EOPNOTSUPP; 1581 1582 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 1583 return -EINVAL; 1584 1585 mutex_lock(&(smu->mutex)); 1586 level = smu_dpm_ctx->dpm_level; 1587 mutex_unlock(&(smu->mutex)); 1588 1589 return level; 1590 } 1591 1592 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) 1593 { 1594 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1595 int ret = 0; 1596 1597 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1598 return -EOPNOTSUPP; 1599 1600 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 1601 return -EINVAL; 1602 1603 mutex_lock(&smu->mutex); 1604 1605 ret = smu_enable_umd_pstate(smu, &level); 1606 if (ret) { 1607 mutex_unlock(&smu->mutex); 1608 return ret; 1609 } 1610 1611 ret = smu_handle_task(smu, level, 1612 AMD_PP_TASK_READJUST_POWER_STATE, 1613 false); 1614 1615 mutex_unlock(&smu->mutex); 1616 1617 return ret; 1618 } 1619 1620 int smu_set_display_count(struct smu_context *smu, uint32_t count) 1621 { 1622 int ret = 0; 1623 1624 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1625 return -EOPNOTSUPP; 1626 1627 mutex_lock(&smu->mutex); 1628 ret = smu_init_display_count(smu, count); 1629 mutex_unlock(&smu->mutex); 1630 1631 return ret; 1632 } 1633 1634 int smu_force_clk_levels(struct smu_context *smu, 1635 enum smu_clk_type clk_type, 1636 uint32_t mask) 1637 { 1638 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1639 int ret = 0; 1640 1641 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1642 return -EOPNOTSUPP; 1643 1644 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { 1645 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n"); 1646 return -EINVAL; 1647 } 1648 1649 mutex_lock(&smu->mutex); 1650 1651 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) 1652 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask); 1653 1654 mutex_unlock(&smu->mutex); 1655 1656 return ret; 1657 } 1658 1659 /* 1660 * On system suspending or resetting, the dpm_enabled 1661 * flag will be cleared. So that those SMU services which 1662 * are not supported will be gated. 1663 * However, the mp1 state setting should still be granted 1664 * even if the dpm_enabled cleared. 
1665 */ 1666 int smu_set_mp1_state(struct smu_context *smu, 1667 enum pp_mp1_state mp1_state) 1668 { 1669 uint16_t msg; 1670 int ret; 1671 1672 if (!smu->pm_enabled) 1673 return -EOPNOTSUPP; 1674 1675 mutex_lock(&smu->mutex); 1676 1677 switch (mp1_state) { 1678 case PP_MP1_STATE_SHUTDOWN: 1679 msg = SMU_MSG_PrepareMp1ForShutdown; 1680 break; 1681 case PP_MP1_STATE_UNLOAD: 1682 msg = SMU_MSG_PrepareMp1ForUnload; 1683 break; 1684 case PP_MP1_STATE_RESET: 1685 msg = SMU_MSG_PrepareMp1ForReset; 1686 break; 1687 case PP_MP1_STATE_NONE: 1688 default: 1689 mutex_unlock(&smu->mutex); 1690 return 0; 1691 } 1692 1693 ret = smu_send_smc_msg(smu, msg, NULL); 1694 /* some asics may not support those messages */ 1695 if (ret == -EINVAL) 1696 ret = 0; 1697 if (ret) 1698 dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n"); 1699 1700 mutex_unlock(&smu->mutex); 1701 1702 return ret; 1703 } 1704 1705 int smu_set_df_cstate(struct smu_context *smu, 1706 enum pp_df_cstate state) 1707 { 1708 int ret = 0; 1709 1710 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1711 return -EOPNOTSUPP; 1712 1713 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate) 1714 return 0; 1715 1716 mutex_lock(&smu->mutex); 1717 1718 ret = smu->ppt_funcs->set_df_cstate(smu, state); 1719 if (ret) 1720 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n"); 1721 1722 mutex_unlock(&smu->mutex); 1723 1724 return ret; 1725 } 1726 1727 int smu_allow_xgmi_power_down(struct smu_context *smu, bool en) 1728 { 1729 int ret = 0; 1730 1731 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1732 return -EOPNOTSUPP; 1733 1734 if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down) 1735 return 0; 1736 1737 mutex_lock(&smu->mutex); 1738 1739 ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en); 1740 if (ret) 1741 dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n"); 1742 1743 mutex_unlock(&smu->mutex); 1744 1745 return ret; 1746 } 1747 1748 int smu_write_watermarks_table(struct smu_context *smu) 1749 { 1750 int ret = 0; 1751 1752 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1753 return -EOPNOTSUPP; 1754 1755 mutex_lock(&smu->mutex); 1756 1757 ret = smu_set_watermarks_table(smu, NULL); 1758 1759 mutex_unlock(&smu->mutex); 1760 1761 return ret; 1762 } 1763 1764 int smu_set_watermarks_for_clock_ranges(struct smu_context *smu, 1765 struct pp_smu_wm_range_sets *clock_ranges) 1766 { 1767 int ret = 0; 1768 1769 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1770 return -EOPNOTSUPP; 1771 1772 if (smu->disable_watermark) 1773 return 0; 1774 1775 mutex_lock(&smu->mutex); 1776 1777 ret = smu_set_watermarks_table(smu, clock_ranges); 1778 1779 mutex_unlock(&smu->mutex); 1780 1781 return ret; 1782 } 1783 1784 int smu_set_ac_dc(struct smu_context *smu) 1785 { 1786 int ret = 0; 1787 1788 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1789 return -EOPNOTSUPP; 1790 1791 /* controlled by firmware */ 1792 if (smu->dc_controlled_by_gpio) 1793 return 0; 1794 1795 mutex_lock(&smu->mutex); 1796 ret = smu_set_power_source(smu, 1797 smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC : 1798 SMU_POWER_SOURCE_DC); 1799 if (ret) 1800 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n", 1801 smu->adev->pm.ac_power ? 
"AC" : "DC"); 1802 mutex_unlock(&smu->mutex); 1803 1804 return ret; 1805 } 1806 1807 const struct amd_ip_funcs smu_ip_funcs = { 1808 .name = "smu", 1809 .early_init = smu_early_init, 1810 .late_init = smu_late_init, 1811 .sw_init = smu_sw_init, 1812 .sw_fini = smu_sw_fini, 1813 .hw_init = smu_hw_init, 1814 .hw_fini = smu_hw_fini, 1815 .suspend = smu_suspend, 1816 .resume = smu_resume, 1817 .is_idle = NULL, 1818 .check_soft_reset = NULL, 1819 .wait_for_idle = NULL, 1820 .soft_reset = NULL, 1821 .set_clockgating_state = smu_set_clockgating_state, 1822 .set_powergating_state = smu_set_powergating_state, 1823 .enable_umd_pstate = smu_enable_umd_pstate, 1824 }; 1825 1826 const struct amdgpu_ip_block_version smu_v11_0_ip_block = 1827 { 1828 .type = AMD_IP_BLOCK_TYPE_SMC, 1829 .major = 11, 1830 .minor = 0, 1831 .rev = 0, 1832 .funcs = &smu_ip_funcs, 1833 }; 1834 1835 const struct amdgpu_ip_block_version smu_v12_0_ip_block = 1836 { 1837 .type = AMD_IP_BLOCK_TYPE_SMC, 1838 .major = 12, 1839 .minor = 0, 1840 .rev = 0, 1841 .funcs = &smu_ip_funcs, 1842 }; 1843 1844 int smu_load_microcode(struct smu_context *smu) 1845 { 1846 int ret = 0; 1847 1848 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1849 return -EOPNOTSUPP; 1850 1851 mutex_lock(&smu->mutex); 1852 1853 if (smu->ppt_funcs->load_microcode) 1854 ret = smu->ppt_funcs->load_microcode(smu); 1855 1856 mutex_unlock(&smu->mutex); 1857 1858 return ret; 1859 } 1860 1861 int smu_check_fw_status(struct smu_context *smu) 1862 { 1863 int ret = 0; 1864 1865 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1866 return -EOPNOTSUPP; 1867 1868 mutex_lock(&smu->mutex); 1869 1870 if (smu->ppt_funcs->check_fw_status) 1871 ret = smu->ppt_funcs->check_fw_status(smu); 1872 1873 mutex_unlock(&smu->mutex); 1874 1875 return ret; 1876 } 1877 1878 int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) 1879 { 1880 int ret = 0; 1881 1882 mutex_lock(&smu->mutex); 1883 1884 if (smu->ppt_funcs->set_gfx_cgpg) 1885 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled); 1886 1887 mutex_unlock(&smu->mutex); 1888 1889 return ret; 1890 } 1891 1892 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed) 1893 { 1894 int ret = 0; 1895 1896 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1897 return -EOPNOTSUPP; 1898 1899 mutex_lock(&smu->mutex); 1900 1901 if (smu->ppt_funcs->set_fan_speed_rpm) 1902 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed); 1903 1904 mutex_unlock(&smu->mutex); 1905 1906 return ret; 1907 } 1908 1909 int smu_get_power_limit(struct smu_context *smu, 1910 uint32_t *limit, 1911 bool max_setting) 1912 { 1913 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1914 return -EOPNOTSUPP; 1915 1916 mutex_lock(&smu->mutex); 1917 1918 *limit = (max_setting ? 
smu->max_power_limit : smu->current_power_limit);

	mutex_unlock(&smu->mutex);

	return 0;
}

int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (limit > smu->max_power_limit) {
		dev_err(smu->adev->dev,
			"New power limit (%d) is over the max allowed %d\n",
			limit, smu->max_power_limit);
		ret = -EINVAL;
		goto out;
	}

	/* a zero limit requests restoring the current default limit */
	if (!limit)
		limit = smu->current_power_limit;

	if (smu->ppt_funcs->set_power_limit)
		ret = smu->ppt_funcs->set_power_limit(smu, limit);

out:
	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->print_clk_levels)
		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_od_edit_dpm_table(struct smu_context *smu,
			  enum PP_OD_DPM_TABLE_COMMAND type,
			  long *input, uint32_t size)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->od_edit_dpm_table) {
		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
		if (!ret && (type == PP_OD_COMMIT_DPM_TABLE))
			ret = smu_handle_task(smu,
					      smu->smu_dpm.dpm_level,
					      AMD_PP_TASK_READJUST_POWER_STATE,
					      false);
	}

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_read_sensor(struct smu_context *smu,
		    enum amd_pp_sensors sensor,
		    void *data, uint32_t *size)
{
	struct smu_umd_pstate_table *pstate_table =
				&smu->pstate_table;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!data || !size)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->read_sensor)
		if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
			goto unlock;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
		*size = 8;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ?
			0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*(uint32_t *)data = 0;
		*size = 4;
		break;
	default:
		*size = 0;
		ret = -EOPNOTSUPP;
		break;
	}

unlock:
	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_power_profile_mode)
		ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_power_profile_mode(struct smu_context *smu,
			       long *param,
			       uint32_t param_size,
			       bool lock_needed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_power_profile_mode)
		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_fan_control_mode(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_control_mode)
		ret = smu->ppt_funcs->get_fan_control_mode(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_control_mode(struct smu_context *smu, int value)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_control_mode)
		ret = smu->ppt_funcs->set_fan_control_mode(smu, value);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
{
	int ret = 0;
	uint32_t percent;
	uint32_t current_rpm;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_rpm) {
		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, &current_rpm);
		if (!ret) {
			percent = current_rpm * 100 / smu->fan_max_rpm;
			*speed = percent > 100 ?
int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
{
	int ret = 0;
	uint32_t percent;
	uint32_t current_rpm;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_rpm) {
		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, &current_rpm);
		if (!ret) {
			percent = current_rpm * 100 / smu->fan_max_rpm;
			*speed = percent > 100 ? 100 : percent;
		}
	}

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
	int ret = 0;
	uint32_t rpm;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_speed_rpm) {
		if (speed > 100)
			speed = 100;
		rpm = speed * smu->fan_max_rpm / 100;
		ret = smu->ppt_funcs->set_fan_speed_rpm(smu, rpm);
	}

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_rpm)
		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_set_min_dcef_deep_sleep(smu, clk);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type_with_latency(struct smu_context *smu,
				       enum smu_clk_type clk_type,
				       struct pp_clock_levels_with_latency *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_latency)
		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_display_clock_voltage_request(struct smu_context *smu,
				      struct pp_display_clock_request *clock_req)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_clock_voltage_request)
		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
{
	int ret = -EINVAL;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_disable_memory_clock_switch)
		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

	mutex_unlock(&smu->mutex);

	return ret;
}
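/**
 * smu_set_xgmi_pstate - request a new performance state for the XGMI links
 *
 * @smu: smu_context pointer
 * @pstate: the XGMI pstate to switch to
 *
 * A locked wrapper around the ASIC specific ->set_xgmi_pstate callback;
 * an error is logged if the switch fails.
 */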
int smu_set_xgmi_pstate(struct smu_context *smu,
			uint32_t pstate)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_xgmi_pstate)
		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");

	return ret;
}

int smu_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_azalia_d3_pme)
		ret = smu->ppt_funcs->set_azalia_d3_pme(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

/*
 * On system suspend or reset, the dpm_enabled flag is cleared, so that
 * the SMU services which are no longer usable get gated.
 *
 * However, the BACO/mode1 reset requests should still be granted as they
 * remain supported and necessary across those transitions.
 */
bool smu_baco_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
		ret = smu->ppt_funcs->baco_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
{
	if (!smu->ppt_funcs->baco_get_state)
		return -EINVAL;

	mutex_lock(&smu->mutex);
	*state = smu->ppt_funcs->baco_get_state(smu);
	mutex_unlock(&smu->mutex);

	return 0;
}

int smu_baco_enter(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_enter)
		ret = smu->ppt_funcs->baco_enter(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to enter BACO state!\n");

	return ret;
}

int smu_baco_exit(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_exit)
		ret = smu->ppt_funcs->baco_exit(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to exit BACO state!\n");

	return ret;
}

bool smu_mode1_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
		ret = smu->ppt_funcs->mode1_reset_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->mode1_reset)
		ret = smu->ppt_funcs->mode1_reset(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_mode2_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->mode2_reset)
		ret = smu->ppt_funcs->mode2_reset(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Mode2 reset failed!\n");

	return ret;
}

int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					 struct pp_smu_nv_clock_table *max_clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}
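/**
 * smu_get_uclk_dpm_states - retrieve the supported memory (UCLK) DPM levels
 *
 * @smu: smu_context pointer
 * @clock_values_in_khz: buffer receiving the per-level clock values, in kHz
 * @num_states: receives the number of levels written
 *
 * A thin locked wrapper around the ASIC specific ->get_uclk_dpm_states
 * callback; -EOPNOTSUPP is returned when DPM is not available.
 */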
int smu_get_uclk_dpm_states(struct smu_context *smu,
			    unsigned int *clock_values_in_khz,
			    unsigned int *num_states)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_uclk_dpm_states)
		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

	mutex_unlock(&smu->mutex);

	return ret;
}

enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_current_power_state)
		pm_state = smu->ppt_funcs->get_current_power_state(smu);

	mutex_unlock(&smu->mutex);

	return pm_state;
}

int smu_get_dpm_clock_table(struct smu_context *smu,
			    struct dpm_clocks *clock_table)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_dpm_clock_table)
		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

	mutex_unlock(&smu->mutex);

	return ret;
}

ssize_t smu_sys_get_gpu_metrics(struct smu_context *smu,
				void **table)
{
	ssize_t size;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_gpu_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	size = smu->ppt_funcs->get_gpu_metrics(smu, table);

	mutex_unlock(&smu->mutex);

	return size;
}

int smu_enable_mgpu_fan_boost(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->enable_mgpu_fan_boost)
		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_gfx_state_change_set(struct smu_context *smu, uint32_t state)
{
	int ret = 0;

	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->gfx_state_change_set)
		ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
	mutex_unlock(&smu->mutex);

	return ret;
}