/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "amd_pcie.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
{
	size_t size = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	size = smu_get_pp_feature_mask(smu, buf);

	mutex_unlock(&smu->mutex);

	return size;
}

int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_set_pp_feature_mask(smu, new_mask);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev) && smu->ppt_funcs->get_gfx_off_status)
		*value = smu_get_gfx_off_status(smu);
	else
		ret = -EINVAL;

	return ret;
}

int smu_set_soft_freq_range(struct smu_context *smu,
			    enum smu_clk_type clk_type,
			    uint32_t min,
			    uint32_t max)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_soft_freq_limited_range)
		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
								  clk_type,
								  min,
								  max);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu,
			   enum smu_clk_type clk_type,
			   uint32_t *min,
			   uint32_t *max)
{
	int ret = 0;

	if (!min && !max)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_dpm_ultimate_freq)
		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
							    clk_type,
							    min,
							    max);

	mutex_unlock(&smu->mutex);

	return ret;
}
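/*
 * Caller must hold power_gate->vcn_gate_lock. The vcn_gated atomic
 * tracks the current gating state so that redundant requests are
 * skipped without another trip to the SMU.
 */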
static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
					 bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_vcn_enable)
		return 0;

	if (atomic_read(&power_gate->vcn_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vcn_gated, !enable);

	return ret;
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	mutex_lock(&power_gate->vcn_gate_lock);

	ret = smu_dpm_set_vcn_enable_locked(smu, enable);

	mutex_unlock(&power_gate->vcn_gate_lock);

	return ret;
}

/* Caller must hold power_gate->jpeg_gate_lock. */
static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
					  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
		return 0;

	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->jpeg_gated, !enable);

	return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	mutex_lock(&power_gate->jpeg_gate_lock);

	ret = smu_dpm_set_jpeg_enable_locked(smu, enable);

	mutex_unlock(&power_gate->jpeg_gate_lock);

	return ret;
}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @smu: smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate: to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection because:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
 *    and the caller guarantees the call is free of race conditions.
 * 2. Or it is called on a user request to set
 *    power_dpm_force_performance_level. In that case, smu->mutex
 *    protection is already enforced by the parent API
 *    smu_force_performance_level in the call path.
 */
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
			   bool gate)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	switch (block_type) {
	/*
	 * Some legacy code in amdgpu_vcn.c and vcn_v2*.c still uses
	 * AMD_IP_BLOCK_TYPE_UVD for VCN, so both are handled here.
	 */
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		ret = smu_dpm_set_vcn_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
				gate ? "enable" : "disable");
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
				gate ? "gate" : "ungate");
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported block type!\n");
		return -EINVAL;
	}

	return ret;
}

int smu_get_power_num_states(struct smu_context *smu,
			     struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported; report a single default state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}

bool is_support_sw_smu(struct amdgpu_device *adev)
{
	/* SW SMU is used on Arcturus and all newer ASICs */
	if (adev->asic_type >= CHIP_ARCTURUS)
		return true;

	return false;
}

int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t powerplay_table_size;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	powerplay_table_size = smu_table->power_play_table_size;

	mutex_unlock(&smu->mutex);

	return powerplay_table_size;
}
/*
 * Upload a user-provided pptable: the table is cached as
 * hardcode_pptable and an SMU reset is issued so that it takes effect.
 */
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (header->usStructureSize != size) {
		dev_err(smu->adev->dev, "pp table size not matched!\n");
		return -EIO;
	}

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
		ret = -ENOMEM;
		goto failed;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * A special hw_fini action (for Navi1x, skipping the DPMs
	 * disablement) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

	mutex_lock(&feature->mutex);
	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	mutex_lock(&feature->mutex);
	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);
	mutex_unlock(&feature->mutex);

	return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		navi10_set_ppt_funcs(smu);
		break;
	case CHIP_ARCTURUS:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		sienna_cichlid_set_ppt_funcs(smu);
		break;
	case CHIP_RENOIR:
		renoir_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	mutex_init(&smu->mutex);

	return smu_set_funcs(adev);
}
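/*
 * Retrieving the default DPM tables requires VCN/JPEG to be ungated.
 * The previous gating state is saved beforehand and restored once the
 * tables have been set up.
 */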
static int smu_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int vcn_gate, jpeg_gate;
	int ret = 0;

	if (!smu->ppt_funcs->set_default_dpm_table)
		return 0;

	mutex_lock(&power_gate->vcn_gate_lock);
	mutex_lock(&power_gate->jpeg_gate_lock);

	vcn_gate = atomic_read(&power_gate->vcn_gated);
	jpeg_gate = atomic_read(&power_gate->jpeg_gated);

	ret = smu_dpm_set_vcn_enable_locked(smu, true);
	if (ret)
		goto err0_out;

	ret = smu_dpm_set_jpeg_enable_locked(smu, true);
	if (ret)
		goto err1_out;

	ret = smu->ppt_funcs->set_default_dpm_table(smu);
	if (ret)
		dev_err(smu->adev->dev,
			"Failed to setup default dpm clock tables!\n");

	smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate);
err1_out:
	smu_dpm_set_vcn_enable_locked(smu, !vcn_gate);
err0_out:
	mutex_unlock(&power_gate->jpeg_gate_lock);
	mutex_unlock(&power_gate->vcn_gate_lock);

	return ret;
}

static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!smu->pm_enabled)
		return 0;

	ret = smu_set_default_od_settings(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default OD settings!\n");
		return ret;
	}

	/*
	 * Set the initial values retrieved from the VBIOS into the DPM
	 * tables context (gfxclk, memclk, dcefclk, etc.) and enable the
	 * DPM feature for each clock type.
	 */
	ret = smu_set_default_dpm_table(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
		return ret;
	}

	ret = smu_populate_umd_state_clk(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
		return ret;
	}

	ret = smu_get_asic_power_limits(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get asic power limits!\n");
		return ret;
	}

	smu_get_unique_id(smu);

	smu_get_fan_parameters(smu);

	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT,
			false);

	return 0;
}
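/*
 * A single driver-table BO, sized to the largest SMC table, is shared
 * for all driver<->SMU table transfers. The tool (PMSTATUSLOG) table
 * keeps its own dedicated allocation.
 */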
static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;
	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (!tables)
		return 0;

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is allocated for SMC use. Its location is passed
 * to the firmware via the SetSystemVirtualDramAddr and
 * DramLogSetDramAddr messages.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		if (ret)
			dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}
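/*
 * Software-side SMC setup: create the SMC tables and power context,
 * back them with VRAM BOs, allocate the memory pool and init i2c.
 */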
static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	/*
	 * Create smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
		return ret;
	}

	/*
	 * Create the smu_power_context structure and allocate the
	 * smu_dpm_context of the proper size to fill its data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu power!\n");
		return ret;
	}

	/*
	 * Allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c);
	if (ret)
		return ret;

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	smu_i2c_fini(smu, &smu->adev->pm.smu_i2c);

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_fini_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smu power!\n");
		return ret;
	}

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smc tables!\n");
		return ret;
	}

	return 0;
}

static void smu_throttling_logging_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       throttling_logging_work);

	smu_log_thermal_throttling(smu);
}
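/*
 * sw_init sets up locks, feature bitmaps, the workload
 * priority/setting tables and the default profile, then loads the SMU
 * microcode and registers the SMC interrupt handler.
 */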
static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	mutex_init(&smu->smu_baco.mutex);
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;

	mutex_init(&smu->sensor_lock);
	mutex_init(&smu->metrics_lock);
	mutex_init(&smu->message_lock);

	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
	atomic64_set(&smu->throttle_int_counter, 0);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
	mutex_init(&smu->smu_power.power_gate.vcn_gate_lock);
	mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock);

	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	ret = smu_init_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw init smc table!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to register smc irq handler!\n");
		return ret;
	}

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw fini smc table!\n");
		return ret;
	}

	smu_fini_microcode(smu);

	return 0;
}

static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_temperature_range *range =
				&smu->thermal_range;
	int ret = 0;

	if (!smu->ppt_funcs->get_thermal_temperature_range)
		return 0;

	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
	if (ret)
		return ret;

	adev->pm.dpm.thermal.min_temp = range->min;
	adev->pm.dpm.thermal.max_temp = range->max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

	return ret;
}
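/*
 * Hardware-side SMC setup: program the driver/tool/pool DRAM
 * addresses, upload the pptable, enable the allowed DPM features and
 * apply PCIe, thermal and power-source settings.
 */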
static int smu_smc_hw_setup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0;
	int ret;

	if (adev->in_suspend && smu_is_dpm_running(smu)) {
		dev_info(adev->dev, "dpm has been enabled\n");
		return 0;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret) {
		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
		return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
		return ret;
	}

	/*
	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
		return ret;
	}

	/*
	 * Notify the firmware of the memory pool location via the
	 * SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
		return ret;
	}

	/* smu_dump_pptable(smu); */
	/*
	 * Copy pptable bo in the vram to smc with SMU MSGs such as
	 * SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
		return ret;
	}

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	ret = smu_feature_set_allowed_mask(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
		return ret;
	}

	ret = smu_system_features_control(smu, true);
	if (ret) {
		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
		return ret;
	}

	if (!smu_is_dpm_running(smu))
		dev_info(adev->dev, "dpm has been disabled\n");

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;
	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
	if (ret) {
		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
		return ret;
	}

	ret = smu_get_thermal_temperature_range(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
		return ret;
	}

	ret = smu_enable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to enable thermal alert!\n");
		return ret;
	}

	ret = smu_disable_umc_cdr_12gbps_workaround(smu);
	if (ret) {
		dev_err(adev->dev, "Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
		return ret;
	}

	/*
	 * For Navi1X, manually switch it to AC mode as PMFW
	 * may boot it with DC mode.
	 */
	ret = smu_set_power_source(smu,
				   adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
				   SMU_POWER_SOURCE_DC);
	if (ret) {
		dev_err(adev->dev, "Failed to switch to %s mode!\n", adev->pm.ac_power ? "AC" : "DC");
		return ret;
	}

	ret = smu_notify_display_change(smu);
	if (ret)
		return ret;

	/*
	 * Set min deep sleep dce fclk with bootup value from vbios via
	 * SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu,
					  smu->smu_table.boot_values.dcefclk / 100);
	if (ret)
		return ret;

	return ret;
}
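/*
 * When firmware is not loaded via PSP (pre-Navi10 parts only), the
 * driver loads the SMU microcode itself before checking firmware
 * status and the driver interface version.
 */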
static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (adev->asic_type < CHIP_NAVI10) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			dev_err(adev->dev, "SMC is not ready\n");
			return ret;
		}
	}

	/*
	 * Send msg GetDriverIfVersion to check if the return value is equal
	 * with DRIVER_IF_VERSION of smc header.
	 */
	ret = smu_check_fw_version(smu);
	if (ret)
		return ret;

	return ret;
}

static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
		smu->pm_enabled = false;
		return 0;
	}

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, false);
		smu_dpm_set_vcn_enable(smu, true);
		smu_dpm_set_jpeg_enable(smu, true);
		smu_set_gfx_cgpg(&adev->smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	/* get boot_values from vbios to set revision, gfxclk, and etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
		return ret;
	}

	ret = smu_setup_pptable(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable!\n");
		return ret;
	}

	ret = smu_get_driver_allowed_feature_mask(smu);
	if (ret)
		return ret;

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	/*
	 * Move maximum sustainable clock retrieving here considering
	 * 1. It is not needed on resume (from S3).
	 * 2. DAL settings come between .hw_init and .late_init of SMU.
	 *    And DAL needs to know the maximum sustainable clocks. Thus
	 *    it cannot be put in .late_init().
	 */
	ret = smu_init_max_sustainable_clocks(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
		return ret;
	}

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is initialized successfully!\n");

	return 0;
}
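/*
 * Tear-down path for the DPM features. BACO-based reset, runtime PM
 * and hibernation need the BACO feature kept enabled, so only the
 * remaining features are disabled in those cases.
 */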
static int smu_disable_dpms(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
	bool use_baco = !smu->is_apu &&
		((amdgpu_in_reset(adev) &&
		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
		 ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));

	/*
	 * For custom pptable uploading, skip the DPM features
	 * disable process on Navi1x ASICs.
	 *   - As the gfx related features are under control of
	 *     RLC on those ASICs. RLC reinitialization would be
	 *     needed to reenable them, which costs much more effort.
	 *
	 *   - SMU firmware can handle the DPM reenablement
	 *     properly.
	 */
	if (smu->uploading_custom_pp_table &&
	    (adev->asic_type >= CHIP_NAVI10) &&
	    (adev->asic_type <= CHIP_NAVI12))
		return 0;

	/*
	 * For Sienna_Cichlid, PMFW will handle the features disablement
	 * properly on BACO in. Driver involvement is unnecessary.
	 */
	if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
	     use_baco)
		return 0;

	/*
	 * For gpu reset, runpm and hibernation through BACO,
	 * BACO feature has to be kept enabled.
	 */
	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
		ret = smu_disable_all_features_with_exception(smu,
							      SMU_FEATURE_BACO_BIT);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
	} else {
		ret = smu_system_features_control(smu, false);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features.\n");
	}

	if (adev->asic_type >= CHIP_NAVI10 &&
	    adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);

	return ret;
}

static int smu_smc_hw_cleanup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	cancel_work_sync(&smu->throttling_logging_work);

	ret = smu_disable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to disable thermal alert!\n");
		return ret;
	}

	ret = smu_disable_dpms(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to disable dpm features!\n");
		return ret;
	}

	return 0;
}

static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, true);
		smu_dpm_set_vcn_enable(smu, false);
		smu_dpm_set_jpeg_enable(smu, false);
	}

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	return 0;
}

int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	ret = smu_late_init(adev);

	return ret;
}

static int smu_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, false);

	return 0;
}
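/*
 * Resume restarts the SMC engine and replays the hardware setup;
 * gfx CGPG is re-enabled on APUs afterwards.
 */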
static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	dev_info(adev->dev, "SMU is resuming...\n");

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, true);

	smu->disable_uclk_switch = 0;

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is resumed successfully!\n");

	return 0;
}

int smu_display_configuration_change(struct smu_context *smu,
				     const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!display_config)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_set_min_dcef_deep_sleep(smu,
				    display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	smu_set_active_display_count(smu, num_of_active_display);

	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
			   display_config->cpu_cc6_disable,
			   display_config->cpu_pstate_disable,
			   display_config->nb_pstate_switch_disable);

	mutex_unlock(&smu->mutex);

	return 0;
}

static int smu_get_clock_info(struct smu_context *smu,
			      struct smu_clock_info *clk_info,
			      enum smu_perf_level_designation designation)
{
	int ret;
	struct smu_performance_level level = {0};

	if (!clk_info)
		return -EINVAL;

	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	ret = smu_get_perf_level(smu, designation, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	return 0;
}
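/*
 * Clock info is taken from the power-containment level when it is
 * supported, otherwise from the activity level.
 */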
int smu_get_current_clocks(struct smu_context *smu,
			   struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = {0};
	struct smu_clock_info hw_clocks;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	smu_get_dal_power_level(smu, &simple_clocks);

	if (smu->support_power_containment)
		ret = smu_get_clock_info(smu, &hw_clocks,
					 PERF_LEVEL_POWER_CONTAINMENT);
	else
		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

	if (ret) {
		dev_err(smu->adev->dev, "Error in smu_get_clock_info\n");
		goto failed;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context *)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
			smu_gfx_ulv_control(smu, false);
			smu_deep_sleep_control(smu, false);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			smu_deep_sleep_control(smu, true);
			smu_gfx_ulv_control(smu, true);
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_GATE);
		}
	}

	return 0;
}
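/*
 * Outside of manual DPM mode, the active power profile is the
 * highest-priority bit set in workload_mask (found via fls()).
 */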
static int smu_adjust_power_state_dynamic(struct smu_context *smu,
					  enum amd_dpm_forced_level level,
					  bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to change display config!");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_display_config(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to notify smc display config!");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to set performance level!");
			return ret;
		}

		/* update the saved copy */
		smu_dpm_ctx->dpm_level = level;
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_set_power_profile_mode(smu, &workload, 0, false);
	}

	return ret;
}

int smu_handle_task(struct smu_context *smu,
		    enum amd_dpm_forced_level level,
		    enum amd_pp_task task_id,
		    bool lock_needed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			goto out;
		ret = smu_set_cpu_power_state(smu);
		if (ret)
			goto out;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

out:
	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
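/*
 * Each profile owns one bit of workload_mask, indexed by its entry in
 * workload_prority[]; enabling/disabling a profile sets/clears that
 * bit and the highest set bit selects the profile to apply.
 */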
int smu_switch_power_profile(struct smu_context *smu,
			     enum PP_SMC_POWER_PROFILE type,
			     bool en)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	long workload;
	uint32_t index;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (!en) {
		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	} else {
		smu->workload_mask |= (1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		smu_set_power_profile_mode(smu, &workload, 0, false);

	mutex_unlock(&smu->mutex);

	return 0;
}

enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	enum amd_dpm_forced_level level;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&(smu->mutex));
	level = smu_dpm_ctx->dpm_level;
	mutex_unlock(&(smu->mutex));

	return level;
}

int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	ret = smu_enable_umd_pstate(smu, &level);
	if (ret) {
		mutex_unlock(&smu->mutex);
		return ret;
	}

	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE,
			      false);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);
	ret = smu_init_display_count(smu, count);
	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_force_clk_levels(struct smu_context *smu,
			 enum smu_clk_type clk_type,
			 uint32_t mask)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);

	mutex_unlock(&smu->mutex);

	return ret;
}
/*
 * On system suspend or reset, the dpm_enabled flag is cleared so that
 * unsupported SMU services are gated. However, setting the mp1 state
 * should still be permitted even with dpm_enabled cleared.
 */
int smu_set_mp1_state(struct smu_context *smu,
		      enum pp_mp1_state mp1_state)
{
	uint16_t msg;
	int ret;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		mutex_unlock(&smu->mutex);
		return 0;
	}

	ret = smu_send_smc_msg(smu, msg, NULL);
	/* some asics may not support those messages */
	if (ret == -EINVAL)
		ret = 0;
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_df_cstate(struct smu_context *smu,
		      enum pp_df_cstate state)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu->ppt_funcs->set_df_cstate(smu, state);
	if (ret)
		dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
	if (ret)
		dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_write_watermarks_table(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_set_watermarks_table(smu, NULL);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
		struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (!smu->disable_watermark &&
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_set_watermarks_table(smu, clock_ranges);

		if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
			smu->watermarks_bitmap |= WATERMARKS_EXIST;
			smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
		}
	}

	mutex_unlock(&smu->mutex);

	return ret;
}

/*
 * No driver action is needed when the AC/DC transition is handled by
 * the firmware through a GPIO pin (dc_controlled_by_gpio).
 */
int smu_set_ac_dc(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	/* controlled by firmware */
	if (smu->dc_controlled_by_gpio)
		return 0;

	mutex_lock(&smu->mutex);
	ret = smu_set_power_source(smu,
				   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
				   SMU_POWER_SOURCE_DC);
	if (ret)
		dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
			smu->adev->pm.ac_power ? "AC" : "DC");
	mutex_unlock(&smu->mutex);

	return ret;
}

const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v12_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

int smu_load_microcode(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->load_microcode)
		ret = smu->ppt_funcs->load_microcode(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_check_fw_status(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->check_fw_status)
		ret = smu->ppt_funcs->check_fw_status(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_gfx_cgpg)
		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_speed_rpm)
		ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_power_limit(struct smu_context *smu,
			uint32_t *limit,
			bool max_setting)
{
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	*limit = (max_setting ? smu->max_power_limit : smu->current_power_limit);

	mutex_unlock(&smu->mutex);

	return 0;
}

int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (limit > smu->max_power_limit) {
		dev_err(smu->adev->dev,
			"New power limit (%d) is over the max allowed %d\n",
			limit, smu->max_power_limit);
		ret = -EINVAL;
		goto out;
	}

	if (!limit)
		limit = smu->current_power_limit;

	if (smu->ppt_funcs->set_power_limit)
		ret = smu->ppt_funcs->set_power_limit(smu, limit);

out:
	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->print_clk_levels)
		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_od_percentage)
		ret = smu->ppt_funcs->get_od_percentage(smu, type);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_od_percentage)
		ret = smu->ppt_funcs->set_od_percentage(smu, type, value);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_od_edit_dpm_table(struct smu_context *smu,
			  enum PP_OD_DPM_TABLE_COMMAND type,
			  long *input, uint32_t size)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->od_edit_dpm_table) {
		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
		if (!ret && (type == PP_OD_COMMIT_DPM_TABLE))
			ret = smu_handle_task(smu,
					      smu->smu_dpm.dpm_level,
					      AMD_PP_TASK_READJUST_POWER_STATE,
					      false);
	}

	mutex_unlock(&smu->mutex);

	return ret;
}
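/*
 * The ASIC-specific read_sensor callback is tried first; sensors it
 * does not implement fall back to the generic handling below.
 */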
int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->print_clk_levels)
		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_od_percentage)
		ret = smu->ppt_funcs->get_od_percentage(smu, type);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_od_percentage)
		ret = smu->ppt_funcs->set_od_percentage(smu, type, value);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_od_edit_dpm_table(struct smu_context *smu,
			  enum PP_OD_DPM_TABLE_COMMAND type,
			  long *input, uint32_t size)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->od_edit_dpm_table) {
		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
		if (!ret && (type == PP_OD_COMMIT_DPM_TABLE))
			ret = smu_handle_task(smu,
					      smu->smu_dpm.dpm_level,
					      AMD_PP_TASK_READJUST_POWER_STATE,
					      false);
	}

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_read_sensor(struct smu_context *smu,
		    enum amd_pp_sensors sensor,
		    void *data, uint32_t *size)
{
	struct smu_umd_pstate_table *pstate_table =
				&smu->pstate_table;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!data || !size)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->read_sensor)
		if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
			goto unlock;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
		*size = 8;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*(uint32_t *)data = 0;
		*size = 4;
		break;
	default:
		*size = 0;
		ret = -EOPNOTSUPP;
		break;
	}

unlock:
	mutex_unlock(&smu->mutex);

	return ret;
}
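
/*
 * Usage note for smu_read_sensor() above: it fills a caller-supplied
 * buffer and reports the number of bytes written via *size. Most sensors
 * are 4 bytes, but AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK is an
 * 8-byte (two-dword) mask, so callers must size their buffer for the
 * largest sensor they query. Illustrative snippet:
 *
 *	uint64_t mask;
 *	uint32_t size = sizeof(mask);
 *
 *	if (!smu_read_sensor(smu, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK,
 *			     &mask, &size))
 *		;	// size is now 8, mask holds the enabled-feature bits
 */
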
int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_power_profile_mode)
		ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_power_profile_mode(struct smu_context *smu,
			       long *param,
			       uint32_t param_size,
			       bool lock_needed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_power_profile_mode)
		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_fan_control_mode(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_control_mode)
		ret = smu->ppt_funcs->get_fan_control_mode(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_control_mode(struct smu_context *smu, int value)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_control_mode)
		ret = smu->ppt_funcs->set_fan_control_mode(smu, value);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
{
	int ret = 0;
	uint32_t percent;
	uint32_t current_rpm;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_rpm) {
		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, &current_rpm);
		if (!ret) {
			percent = current_rpm * 100 / smu->fan_max_rpm;
			*speed = percent > 100 ? 100 : percent;
		}
	}

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
	int ret = 0;
	uint32_t rpm;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_speed_rpm) {
		if (speed > 100)
			speed = 100;
		rpm = speed * smu->fan_max_rpm / 100;
		ret = smu->ppt_funcs->set_fan_speed_rpm(smu, rpm);
	}

	mutex_unlock(&smu->mutex);

	return ret;
}
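
/*
 * Worked example for the two percent-based helpers above (illustrative
 * numbers): with smu->fan_max_rpm == 6000, a request of 40% maps to
 * 40 * 6000 / 100 == 2400 RPM, and a reading of 2500 RPM reports back as
 * 2500 * 100 / 6000 == 41% (integer division truncates). Requests above
 * 100% are clamped to 100%, and readings above fan_max_rpm are capped at
 * a reported 100%.
 */
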
int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_rpm)
		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_set_min_dcef_deep_sleep(smu, clk);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->set_active_display_count)
		ret = smu->ppt_funcs->set_active_display_count(smu, count);

	return ret;
}

int smu_get_clock_by_type(struct smu_context *smu,
			  enum amd_pp_clock_type type,
			  struct amd_pp_clocks *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type)
		ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_max_high_clocks(struct smu_context *smu,
			    struct amd_pp_simple_clock_info *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_max_high_clocks)
		ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type_with_latency(struct smu_context *smu,
				       enum smu_clk_type clk_type,
				       struct pp_clock_levels_with_latency *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_latency)
		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
				       enum amd_pp_clock_type type,
				       struct pp_clock_levels_with_voltage *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_voltage)
		ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_display_clock_voltage_request(struct smu_context *smu,
				      struct pp_display_clock_request *clock_req)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_clock_voltage_request)
		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);

	mutex_unlock(&smu->mutex);

	return ret;
}
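
/*
 * Illustrative sketch (not part of this file): display code drives
 * smu_display_clock_voltage_request() through a pp_display_clock_request,
 * asking for a minimum hard clock. The clock type and frequency below are
 * made-up example values:
 *
 *	struct pp_display_clock_request req = {
 *		.clock_type = amd_pp_dcef_clock,
 *		.clock_freq_in_khz = 600 * 1000,
 *	};
 *
 *	smu_display_clock_voltage_request(smu, &req);
 */
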
int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
{
	int ret = -EINVAL;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_disable_memory_clock_switch)
		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_notify_smu_enable_pwe(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->notify_smu_enable_pwe)
		ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_xgmi_pstate(struct smu_context *smu,
			uint32_t pstate)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_xgmi_pstate)
		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");

	return ret;
}

int smu_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_azalia_d3_pme)
		ret = smu->ppt_funcs->set_azalia_d3_pme(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

/*
 * On system suspend or reset, the dpm_enabled flag is cleared so that
 * SMU services which are no longer available get gated off.
 *
 * However, the baco/mode1 reset requests should still be honored as
 * they remain supported and necessary in those states.
 */
bool smu_baco_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
		ret = smu->ppt_funcs->baco_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
{
	if (!smu->ppt_funcs->baco_get_state)
		return -EINVAL;

	mutex_lock(&smu->mutex);
	*state = smu->ppt_funcs->baco_get_state(smu);
	mutex_unlock(&smu->mutex);

	return 0;
}

int smu_baco_enter(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_enter)
		ret = smu->ppt_funcs->baco_enter(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to enter BACO state!\n");

	return ret;
}

int smu_baco_exit(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_exit)
		ret = smu->ppt_funcs->baco_exit(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to exit BACO state!\n");

	return ret;
}

bool smu_mode1_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
		ret = smu->ppt_funcs->mode1_reset_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->mode1_reset)
		ret = smu->ppt_funcs->mode1_reset(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_mode2_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->mode2_reset)
		ret = smu->ppt_funcs->mode2_reset(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Mode2 reset failed!\n");

	return ret;
}
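
/*
 * Illustrative sketch (not part of this file): a GPU reset path is
 * expected to probe the capability helpers above before picking a reset
 * method. The function below is hypothetical glue, not driver code:
 *
 *	static int example_reset(struct smu_context *smu)
 *	{
 *		if (smu_mode1_reset_is_support(smu))
 *			return smu_mode1_reset(smu);
 *		if (smu_baco_is_support(smu)) {
 *			int ret = smu_baco_enter(smu);
 *
 *			return ret ? ret : smu_baco_exit(smu);
 *		}
 *		return smu_mode2_reset(smu);
 *	}
 */
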
int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					 struct pp_smu_nv_clock_table *max_clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_uclk_dpm_states(struct smu_context *smu,
			    unsigned int *clock_values_in_khz,
			    unsigned int *num_states)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_uclk_dpm_states)
		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

	mutex_unlock(&smu->mutex);

	return ret;
}

enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_current_power_state)
		pm_state = smu->ppt_funcs->get_current_power_state(smu);

	mutex_unlock(&smu->mutex);

	return pm_state;
}

int smu_get_dpm_clock_table(struct smu_context *smu,
			    struct dpm_clocks *clock_table)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_dpm_clock_table)
		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

	mutex_unlock(&smu->mutex);

	return ret;
}

ssize_t smu_sys_get_gpu_metrics(struct smu_context *smu,
				void **table)
{
	ssize_t size;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_gpu_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	size = smu->ppt_funcs->get_gpu_metrics(smu, table);

	mutex_unlock(&smu->mutex);

	return size;
}

int smu_enable_mgpu_fan_boost(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->enable_mgpu_fan_boost)
		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
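
/*
 * Illustrative sketch (not part of this file): smu_sys_get_gpu_metrics()
 * above hands back a pointer to the driver-owned metrics table and
 * returns its size in bytes, so a sysfs read handler only needs to copy
 * it out. Roughly, with error handling elided and buf hypothetical:
 *
 *	void *gpu_metrics;
 *	ssize_t size = smu_sys_get_gpu_metrics(smu, &gpu_metrics);
 *
 *	if (size > 0)
 *		memcpy(buf, gpu_metrics, size);
 */
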