/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L4

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_cmn.h"
#include "soc15_common.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

/*
 * Although these registers are defined in each ASIC's specific header
 * file, they share the same definitions and values, which makes common
 * APIs for issuing SMC messages to all ASICs possible.
 */
#define mmMP1_SMN_C2PMSG_66			0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX		0

#define mmMP1_SMN_C2PMSG_82			0x0292
#define mmMP1_SMN_C2PMSG_82_BASE_IDX		0

#define mmMP1_SMN_C2PMSG_90			0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX		0

#define MP1_C2PMSG_90__CONTENT_MASK		0xFFFFFFFFL

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type)	#type
static const char * const __smu_message_names[] = {
	SMU_MESSAGE_TYPES
};

static const char *smu_get_message_name(struct smu_context *smu,
					enum smu_message_type type)
{
	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";

	return __smu_message_names[type];
}

static void smu_cmn_read_arg(struct smu_context *smu,
			     uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}

/* Redefine the SMU error codes here.
 *
 * Note that these definitions are redundant and should be removed
 * once the SMU exports a unified header file containing these macros,
 * which we can then simply include and use. At the moment, these error
 * codes are unfortunately defined per ASIC by the SMU, yet this is a
 * single driver for all ASICs.
 */
#define SMU_RESP_NONE		0
#define SMU_RESP_OK		1
#define SMU_RESP_CMD_FAIL	0xFF
#define SMU_RESP_CMD_UNKNOWN	0xFE
#define SMU_RESP_CMD_BAD_PREREQ	0xFD
#define SMU_RESP_BUSY_OTHER	0xFC
#define SMU_RESP_DEBUG_END	0xFB

/**
 * __smu_cmn_poll_stat -- poll for a status from the SMU
 * @smu: a pointer to SMU context
 *
 * Returns the status of the SMU, which can be one of the following:
 *    0, the SMU is busy with your command;
 *    1, execution status: success, execution result: success;
 * 0xFF, execution status: success, execution result: failure;
 * 0xFE, unknown command;
 * 0xFD, valid command, but bad (command) prerequisites;
 * 0xFC, the command was rejected as the SMU is busy;
 * 0xFB, "SMC_Result_DebugDataDumpEnd".
 *
 * Ideally these values would come from a single header file maintained
 * by the SMU FW team, so that we're impervious to firmware changes. At
 * the moment those values are defined in various per-ASIC header files,
 * yet this is a single ASIC-agnostic interface. Such a change can follow
 * in a subsequent patch.
 */
static u32 __smu_cmn_poll_stat(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int timeout = adev->usec_timeout * 20;
	u32 reg;

	for ( ; timeout > 0; timeout--) {
		reg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
		if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			break;

		udelay(1);
	}

	return reg;
}

static void __smu_cmn_reg_print_error(struct smu_context *smu,
				      u32 reg_c2pmsg_90,
				      int msg_index,
				      u32 param,
				      enum smu_message_type msg)
{
	struct amdgpu_device *adev = smu->adev;
	const char *message = smu_get_message_name(smu, msg);

	switch (reg_c2pmsg_90) {
	case SMU_RESP_NONE: {
		u32 msg_idx = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66);
		u32 prm = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);

		dev_err_ratelimited(adev->dev,
				    "SMU: I'm not done with your command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
				    msg_idx, prm);
	}
		break;
	case SMU_RESP_OK:
		/* The SMU executed the command. It completed with a
		 * successful result.
		 */
		break;
	case SMU_RESP_CMD_FAIL:
		/* The SMU executed the command. It completed with an
		 * unsuccessful result.
		 */
		break;
	case SMU_RESP_CMD_UNKNOWN:
		dev_err_ratelimited(adev->dev,
				    "SMU: unknown command: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		dev_err_ratelimited(adev->dev,
				    "SMU: valid command, bad prerequisites: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_BUSY_OTHER:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_DEBUG_END:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm debugging!");
		break;
	default:
		dev_err_ratelimited(adev->dev,
				    "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
				    reg_c2pmsg_90, msg_index, param, message);
		break;
	}
}

static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90)
{
	int res;

	switch (reg_c2pmsg_90) {
	case SMU_RESP_NONE:
		/* The SMU is busy--still executing your command.
		 */
		res = -ETIME;
		break;
	case SMU_RESP_OK:
		res = 0;
		break;
	case SMU_RESP_CMD_FAIL:
		/* Command completed successfully, but the command
		 * status was failure.
		 */
		res = -EIO;
		break;
	case SMU_RESP_CMD_UNKNOWN:
		/* Unknown command--ignored by the SMU.
		 */
		res = -EOPNOTSUPP;
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		/* Valid command--bad prerequisites.
		 */
		res = -EINVAL;
		break;
	case SMU_RESP_BUSY_OTHER:
		/* The SMU is busy with other commands. The client
		 * should retry in 10 us.
		 */
		res = -EBUSY;
		break;
	default:
		/* Unknown or debug response from the SMU.
		 */
		res = -EREMOTEIO;
		break;
	}

	return res;
}

static void __smu_cmn_send_msg(struct smu_context *smu,
			       u16 msg,
			       u32 param)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
}

/**
 * smu_cmn_send_msg_without_waiting -- send the message; don't wait for status
 * @smu: pointer to an SMU context
 * @msg_index: message index
 * @param: message parameter to send to the SMU
 *
 * Send a message to the SMU with the parameter passed. Do not wait
 * for status/result of the message, thus the "without_waiting".
 *
 * Return 0 on success, -errno on error if we weren't able to _send_
 * the message for some reason. See __smu_cmn_reg2errno() for details
 * of the -errno.
 */
int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
				     uint16_t msg_index,
				     uint32_t param)
{
	u32 reg;
	int res;

	if (smu->adev->no_hw_access)
		return 0;

	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (reg == SMU_RESP_NONE ||
	    reg == SMU_RESP_BUSY_OTHER ||
	    res == -EREMOTEIO)
		goto Out;
	__smu_cmn_send_msg(smu, msg_index, param);
	res = 0;
Out:
	if (unlikely(smu->smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    res && (res != -ETIME)) {
		amdgpu_device_halt(smu->adev);
		WARN_ON(1);
	}

	return res;
}

/**
 * smu_cmn_wait_for_response -- wait for response from the SMU
 * @smu: pointer to an SMU context
 *
 * Wait for status from the SMU.
 *
 * Return 0 on success, -errno on error, indicating the execution
 * status and result of the message being waited for. See
 * __smu_cmn_reg2errno() for details of the -errno.
 */
int smu_cmn_wait_for_response(struct smu_context *smu)
{
	u32 reg;
	int res;

	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);

	if (unlikely(smu->smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    res && (res != -ETIME)) {
		amdgpu_device_halt(smu->adev);
		WARN_ON(1);
	}

	return res;
}
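
/*
 * Illustrative pairing of the two helpers above (a minimal sketch, not
 * lifted from any particular caller; "index" and "param" are placeholder
 * values the caller is assumed to have resolved already, e.g. via
 * smu_cmn_to_asic_specific_index()):
 *
 *	ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, param);
 *	if (!ret)
 *		ret = smu_cmn_wait_for_response(smu);
 */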

/**
 * smu_cmn_send_smc_msg_with_param -- send a message with parameter
 * @smu: pointer to an SMU context
 * @msg: message to send
 * @param: parameter to send to the SMU
 * @read_arg: pointer to u32 to return a value from the SMU back
 *            to the caller
 *
 * Send the message @msg with parameter @param to the SMU, wait for
 * completion of the command, and return back a value from the SMU in
 * @read_arg pointer.
 *
 * Return 0 on success, -errno on error, if we weren't able to send
 * the message or if the message completed with some kind of
 * error. See __smu_cmn_reg2errno() for details of the -errno.
 *
 * If we weren't able to send the message to the SMU, we also print
 * the error to the standard log.
 *
 * Command completion status is printed only if the -errno is
 * -EREMOTEIO, indicating that the SMU returned back an
 * undefined/unknown/unspecified result. All other cases are
 * well-defined, not printed, but instead given back to the client to
 * decide what further to do.
 *
 * The value returned in @read_arg is read back regardless, to give
 * more information to the client; on error it would most likely be
 * @param, but we can't assume that. This also eliminates more
 * conditionals.
 */
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
				    enum smu_message_type msg,
				    uint32_t param,
				    uint32_t *read_arg)
{
	int res, index;
	u32 reg;

	if (smu->adev->no_hw_access)
		return 0;

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       msg);
	if (index < 0)
		return index == -EACCES ? 0 : index;

	mutex_lock(&smu->message_lock);
	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (reg == SMU_RESP_NONE ||
	    reg == SMU_RESP_BUSY_OTHER ||
	    res == -EREMOTEIO) {
		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
		goto Out;
	}
	__smu_cmn_send_msg(smu, (uint16_t) index, param);
	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (res != 0)
		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
	if (read_arg)
		smu_cmn_read_arg(smu, read_arg);
Out:
	if (unlikely(smu->smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
		amdgpu_device_halt(smu->adev);
		WARN_ON(1);
	}

	mutex_unlock(&smu->message_lock);
	return res;
}

int smu_cmn_send_smc_msg(struct smu_context *smu,
			 enum smu_message_type msg,
			 uint32_t *read_arg)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       msg,
					       0,
					       read_arg);
}

int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index)
{
	struct cmn2asic_msg_mapping msg_mapping;
	struct cmn2asic_mapping mapping;

	switch (type) {
	case CMN2ASIC_MAPPING_MSG:
		if (index >= SMU_MSG_MAX_COUNT ||
		    !smu->message_map)
			return -EINVAL;

		msg_mapping = smu->message_map[index];
		if (!msg_mapping.valid_mapping)
			return -EINVAL;

		if (amdgpu_sriov_vf(smu->adev) &&
		    !msg_mapping.valid_in_vf)
			return -EACCES;

		return msg_mapping.map_to;

	case CMN2ASIC_MAPPING_CLK:
		if (index >= SMU_CLK_COUNT ||
		    !smu->clock_map)
			return -EINVAL;

		mapping = smu->clock_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_FEATURE:
		if (index >= SMU_FEATURE_COUNT ||
		    !smu->feature_map)
			return -EINVAL;

		mapping = smu->feature_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_TABLE:
		if (index >= SMU_TABLE_COUNT ||
		    !smu->table_map)
			return -EINVAL;

		mapping = smu->table_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_PWR:
		if (index >= SMU_POWER_SOURCE_COUNT ||
		    !smu->pwr_src_map)
			return -EINVAL;

		mapping = smu->pwr_src_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_WORKLOAD:
		if (index > PP_SMC_POWER_PROFILE_CUSTOM ||
		    !smu->workload_map)
			return -EINVAL;

		mapping = smu->workload_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	default:
		return -EINVAL;
	}
}

int smu_cmn_feature_is_supported(struct smu_context *smu,
				 enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_cmn_feature_is_enabled(struct smu_context *smu,
			       enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	struct amdgpu_device *adev = smu->adev;
	int feature_id;
	int ret = 0;

	if (smu->is_apu && adev->family < AMDGPU_FAMILY_VGH)
		return 1;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);

	return ret;
}

bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
				enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_cmn_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}

int smu_cmn_get_enabled_mask(struct smu_context *smu,
			     uint32_t *feature_mask,
			     uint32_t num)
{
	uint32_t feature_mask_high = 0, feature_mask_low = 0;
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	if (!feature_mask || num < 2)
		return -EINVAL;

	if (bitmap_empty(feature->enabled, feature->feature_num)) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
		if (ret)
			return ret;

		feature_mask[0] = feature_mask_low;
		feature_mask[1] = feature_mask_high;
	} else {
		bitmap_copy((unsigned long *)feature_mask, feature->enabled,
			    feature->feature_num);
	}

	return ret;
}

int smu_cmn_get_enabled_32_bits_mask(struct smu_context *smu,
				     uint32_t *feature_mask,
				     uint32_t num)
{
	uint32_t feature_mask_en_low = 0;
	uint32_t feature_mask_en_high = 0;
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	if (!feature_mask || num < 2)
		return -EINVAL;

	if (bitmap_empty(feature->enabled, feature->feature_num)) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 0,
						      &feature_mask_en_low);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_GetEnabledSmuFeatures, 1,
						      &feature_mask_en_high);
		if (ret)
			return ret;

		feature_mask[0] = feature_mask_en_low;
		feature_mask[1] = feature_mask_en_high;
	} else {
		bitmap_copy((unsigned long *)feature_mask, feature->enabled,
			    feature->feature_num);
	}

	return ret;
}

uint64_t smu_cmn_get_indep_throttler_status(
					const unsigned long dep_status,
					const uint8_t *throttler_map)
{
	uint64_t indep_status = 0;
	uint8_t dep_bit = 0;

	for_each_set_bit(dep_bit, &dep_status, 32)
		indep_status |= 1ULL << throttler_map[dep_bit];

	return indep_status;
}

int smu_cmn_feature_update_enable_state(struct smu_context *smu,
					uint64_t feature_mask,
					bool enabled)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	if (enabled) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
	}

	mutex_lock(&feature->mutex);
	if (enabled)
		bitmap_or(feature->enabled, feature->enabled,
			  (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	else
		bitmap_andnot(feature->enabled, feature->enabled,
			      (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_cmn_feature_set_enabled(struct smu_context *smu,
				enum smu_feature_mask mask,
				bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return -EINVAL;

	WARN_ON(feature_id > feature->feature_num);

	return smu_cmn_feature_update_enable_state(smu,
						   1ULL << feature_id,
						   enable);
}

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea)	#fea
static const char *__smu_feature_names[] = {
	SMU_FEATURE_MASKS
};

static const char *smu_get_feature_name(struct smu_context *smu,
					enum smu_feature_mask feature)
{
	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";
	return __smu_feature_names[feature];
}

size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
				   char *buf)
{
	uint32_t feature_mask[2] = { 0 };
	int feature_index = 0;
	uint32_t count = 0;
	int8_t sort_feature[SMU_FEATURE_COUNT];
	size_t size = 0;
	int ret = 0, i;

	if (!smu->is_apu) {
		ret = smu_cmn_get_enabled_mask(smu,
					       feature_mask,
					       2);
		if (ret)
			return 0;
	} else {
		ret = smu_cmn_get_enabled_32_bits_mask(smu,
						       feature_mask,
						       2);
		if (ret)
			return 0;
	}

	size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
			     feature_mask[1], feature_mask[0]);

	memset(sort_feature, -1, sizeof(sort_feature));

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_FEATURE,
							       i);
		if (feature_index < 0)
			continue;

		sort_feature[feature_index] = i;
	}

	size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n",
			      "No", "Feature", "Bit", "State");

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		if (sort_feature[i] < 0)
			continue;

		size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
				      count++,
				      smu_get_feature_name(smu, sort_feature[i]),
				      i,
				      !!smu_cmn_feature_is_enabled(smu, sort_feature[i]) ?
				      "enabled" : "disabled");
	}

	return size;
}

int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
				uint64_t new_mask)
{
	int ret = 0;
	uint32_t feature_mask[2] = { 0 };
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;
	uint64_t feature_enables = 0;

	ret = smu_cmn_get_enabled_mask(smu,
				       feature_mask,
				       2);
	if (ret)
		return ret;

	feature_enables = ((uint64_t)feature_mask[1] << 32 |
			   (uint64_t)feature_mask[0]);

	feature_2_enabled = ~feature_enables & new_mask;
	feature_2_disabled = feature_enables & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_enabled,
							  true);
		if (ret)
			return ret;
	}
	if (feature_2_disabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_disabled,
							  false);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * smu_cmn_disable_all_features_with_exception - disable all dpm features
 *                                               except the one specified by
 *                                               @mask
 *
 * @smu:               smu_context pointer
 * @no_hw_disablement: whether real dpm disablement should be performed
 *                     true: update the cache (about dpm enablement state) only
 *                     false: real dpm disablement plus cache update
 * @mask:              the dpm feature which should not be disabled
 *                     SMU_FEATURE_COUNT: no exception, all dpm features
 *                     to disable
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
						bool no_hw_disablement,
						enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint64_t features_to_disable = U64_MAX;
	int skipped_feature_id;

	if (mask != SMU_FEATURE_COUNT) {
		skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
								    CMN2ASIC_MAPPING_FEATURE,
								    mask);
		if (skipped_feature_id < 0)
			return -EINVAL;

		features_to_disable &= ~(1ULL << skipped_feature_id);
	}

	if (no_hw_disablement) {
		mutex_lock(&feature->mutex);
		bitmap_andnot(feature->enabled, feature->enabled,
			      (unsigned long *)(&features_to_disable), SMU_FEATURE_MAX);
		mutex_unlock(&feature->mutex);

		return 0;
	} else {
		return smu_cmn_feature_update_enable_state(smu,
							   features_to_disable,
							   0);
	}
}

int smu_cmn_get_smc_version(struct smu_context *smu,
			    uint32_t *if_version,
			    uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (smu->smc_fw_if_version && smu->smc_fw_version) {
		if (if_version)
			*if_version = smu->smc_fw_if_version;

		if (smu_version)
			*smu_version = smu->smc_fw_version;

		return 0;
	}

	if (if_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
		if (ret)
			return ret;

		smu->smc_fw_if_version = *if_version;
	}

	if (smu_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
		if (ret)
			return ret;

		smu->smc_fw_version = *smu_version;
	}

	return ret;
}
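
/*
 * Illustrative use of smu_cmn_get_smc_version() (a minimal sketch, not
 * taken from a specific caller; "smu" and "ret" are assumed to be in scope):
 *
 *	uint32_t if_version, smu_version;
 *
 *	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
 *	if (ret)
 *		return ret;
 */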

int smu_cmn_update_table(struct smu_context *smu,
			 enum smu_table_id table_index,
			 int argument,
			 void *table_data,
			 bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = &smu_table->driver_table;
	int table_id = smu_cmn_to_asic_specific_index(smu,
						      CMN2ASIC_MAPPING_TABLE,
						      table_index);
	uint32_t table_size;
	int ret = 0;

	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table_size = smu_table->tables[table_index].size;

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush hdp cache: to guarantee the content seen by
		 * the GPU is consistent with that seen by the CPU.
		 */
		amdgpu_asic_flush_hdp(adev, NULL);
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
					      SMU_MSG_TransferTableDram2Smu :
					      SMU_MSG_TransferTableSmu2Dram,
					      table_id | ((argument & 0xFFFF) << 16),
					      NULL);
	if (ret)
		return ret;

	if (!drv2smu) {
		amdgpu_asic_invalidate_hdp(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}

	return 0;
}

int smu_cmn_write_watermarks_table(struct smu_context *smu)
{
	void *watermarks_table = smu->smu_table.watermarks_table;

	if (!watermarks_table)
		return -EINVAL;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_WATERMARKS,
				    0,
				    watermarks_table,
				    true);
}

int smu_cmn_write_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.driver_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_PPTABLE,
				    0,
				    pptable,
				    true);
}

int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
				     void *metrics_table,
				     bool bypass_cache)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t table_size =
		smu_table->tables[SMU_TABLE_SMU_METRICS].size;
	int ret = 0;

	if (bypass_cache ||
	    !smu_table->metrics_time ||
	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_SMU_METRICS,
					   0,
					   smu_table->metrics_table,
					   false);
		if (ret) {
			dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
			return ret;
		}
		smu_table->metrics_time = jiffies;
	}

	if (metrics_table)
		memcpy(metrics_table, smu_table->metrics_table, table_size);

	return 0;
}

int smu_cmn_get_metrics_table(struct smu_context *smu,
			      void *metrics_table,
			      bool bypass_cache)
{
	int ret = 0;

	mutex_lock(&smu->metrics_lock);
	ret = smu_cmn_get_metrics_table_locked(smu,
					       metrics_table,
					       bypass_cache);
	mutex_unlock(&smu->metrics_lock);

	return ret;
}

void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
{
	struct metrics_table_header *header = (struct metrics_table_header *)table;
	uint16_t structure_size;

#define METRICS_VERSION(a, b)	((a << 16) | b)

	switch (METRICS_VERSION(frev, crev)) {
	case METRICS_VERSION(1, 0):
		structure_size = sizeof(struct gpu_metrics_v1_0);
		break;
	case METRICS_VERSION(1, 1):
		structure_size = sizeof(struct gpu_metrics_v1_1);
		break;
	case METRICS_VERSION(1, 2):
		structure_size = sizeof(struct gpu_metrics_v1_2);
		break;
	case METRICS_VERSION(1, 3):
		structure_size = sizeof(struct gpu_metrics_v1_3);
		break;
	case METRICS_VERSION(2, 0):
		structure_size = sizeof(struct gpu_metrics_v2_0);
		break;
	case METRICS_VERSION(2, 1):
		structure_size = sizeof(struct gpu_metrics_v2_1);
		break;
	case METRICS_VERSION(2, 2):
		structure_size = sizeof(struct gpu_metrics_v2_2);
		break;
	default:
		return;
	}

#undef METRICS_VERSION

	memset(header, 0xFF, structure_size);

	header->format_revision = frev;
	header->content_revision = crev;
	header->structure_size = structure_size;
}

int smu_cmn_set_mp1_state(struct smu_context *smu,
			  enum pp_mp1_state mp1_state)
{
	enum smu_message_type msg;
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		return 0;
	}

	ret = smu_cmn_send_smc_msg(smu, msg, NULL);
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

	return ret;
}

bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;
	bool snd_driver_loaded;

	/*
	 * If the ASIC comes with no audio function, we always assume
	 * it is "enabled".
	 */
	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
					adev->pdev->bus->number, 1);
	if (!p)
		return true;

	snd_driver_loaded = pci_is_enabled(p) ? true : false;

	pci_dev_put(p);

	return snd_driver_loaded;
}
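
/*
 * Illustrative use of the helper above (a minimal sketch, not taken from a
 * specific caller): ASIC-specific code can gate audio-related power actions
 * on whether the audio function is still bound to a driver, e.g.
 *
 *	if (smu_cmn_is_audio_func_enabled(adev))
 *		return 0;
 */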