/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L4

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_cmn.h"
#include "soc15_common.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

#define MP1_C2PMSG_90__CONTENT_MASK	0xFFFFFFFFL

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type)	#type
static const char * const __smu_message_names[] = {
	SMU_MESSAGE_TYPES
};

#define smu_cmn_call_asic_func(intf, smu, args...)                             \
	((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ?                          \
			     (smu)->ppt_funcs->intf(smu, ##args) :             \
			     -ENOTSUPP) :                                      \
			    -EINVAL)

static const char *smu_get_message_name(struct smu_context *smu,
					enum smu_message_type type)
{
	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";

	return __smu_message_names[type];
}

static void smu_cmn_read_arg(struct smu_context *smu,
			     uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	*arg = RREG32(smu->param_reg);
}

/* Redefine the SMU error codes here.
 *
 * Note that these definitions are redundant and should be removed
 * when the SMU has exported a unified header file containing these
 * macros, which header file we can just include and use the SMU's
 * macros. At the moment, these error codes are defined by the SMU
 * per ASIC, unfortunately, yet we're one driver for all ASICs.
 */
#define SMU_RESP_NONE           0
#define SMU_RESP_OK             1
#define SMU_RESP_CMD_FAIL       0xFF
#define SMU_RESP_CMD_UNKNOWN    0xFE
#define SMU_RESP_CMD_BAD_PREREQ 0xFD
#define SMU_RESP_BUSY_OTHER     0xFC
#define SMU_RESP_DEBUG_END      0xFB

/**
 * __smu_cmn_poll_stat -- poll for a status from the SMU
 * @smu: a pointer to SMU context
 *
 * Returns the status of the SMU, which could be,
 *    0, the SMU is busy with your command;
 *    1, execution status: success, execution result: success;
 * 0xFF, execution status: success, execution result: failure;
 * 0xFE, unknown command;
 * 0xFD, valid command, but bad (command) prerequisites;
 * 0xFC, the command was rejected as the SMU is busy;
 * 0xFB, "SMC_Result_DebugDataDumpEnd".
 *
 * The values here are not defined by macros, because I'd rather we
 * include a single header file which defines them, which is
 * maintained by the SMU FW team, so that we're impervious to firmware
 * changes. At the moment those values are defined in various header
 * files, one for each ASIC, yet here we're a single ASIC-agnostic
 * interface. Such a change can be followed up by a subsequent patch.
 */
static u32 __smu_cmn_poll_stat(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int timeout = adev->usec_timeout * 20;
	u32 reg;

	for ( ; timeout > 0; timeout--) {
		reg = RREG32(smu->resp_reg);
		if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			break;

		udelay(1);
	}

	return reg;
}

static void __smu_cmn_reg_print_error(struct smu_context *smu,
				      u32 reg_c2pmsg_90,
				      int msg_index,
				      u32 param,
				      enum smu_message_type msg)
{
	struct amdgpu_device *adev = smu->adev;
	const char *message = smu_get_message_name(smu, msg);
	u32 msg_idx, prm;

	switch (reg_c2pmsg_90) {
	case SMU_RESP_NONE: {
		msg_idx = RREG32(smu->msg_reg);
		prm     = RREG32(smu->param_reg);
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
				    msg_idx, prm);
	}
		break;
	case SMU_RESP_OK:
		/* The SMU executed the command. It completed with a
		 * successful result.
		 */
		break;
	case SMU_RESP_CMD_FAIL:
		/* The SMU executed the command. It completed with an
		 * unsuccessful result.
		 */
		break;
	case SMU_RESP_CMD_UNKNOWN:
		dev_err_ratelimited(adev->dev,
				    "SMU: unknown command: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		dev_err_ratelimited(adev->dev,
				    "SMU: valid command, bad prerequisites: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_BUSY_OTHER:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_DEBUG_END:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm debugging!");
		break;
	default:
		dev_err_ratelimited(adev->dev,
				    "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
				    reg_c2pmsg_90, msg_index, param, message);
		break;
	}
}

static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90)
{
	int res;

	switch (reg_c2pmsg_90) {
	case SMU_RESP_NONE:
		/* The SMU is busy--still executing your command.
		 */
		res = -ETIME;
		break;
	case SMU_RESP_OK:
		res = 0;
		break;
	case SMU_RESP_CMD_FAIL:
		/* Command completed successfully, but the command
		 * status was failure.
		 */
		res = -EIO;
		break;
	case SMU_RESP_CMD_UNKNOWN:
		/* Unknown command--ignored by the SMU.
		 */
		res = -EOPNOTSUPP;
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		/* Valid command--bad prerequisites.
		 */
		res = -EINVAL;
		break;
	case SMU_RESP_BUSY_OTHER:
		/* The SMU is busy with other commands. The client
		 * should retry in 10 us.
		 */
		res = -EBUSY;
		break;
	default:
		/* Unknown or debug response from the SMU.
		 */
		res = -EREMOTEIO;
		break;
	}

	return res;
}

static void __smu_cmn_send_msg(struct smu_context *smu,
			       u16 msg,
			       u32 param)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32(smu->resp_reg, 0);
	WREG32(smu->param_reg, param);
	WREG32(smu->msg_reg, msg);
}

static int __smu_cmn_send_debug_msg(struct smu_context *smu,
				    u32 msg,
				    u32 param)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32(smu->debug_param_reg, param);
	WREG32(smu->debug_msg_reg, msg);
	WREG32(smu->debug_resp_reg, 0);

	return 0;
}

/**
 * smu_cmn_send_msg_without_waiting -- send the message; don't wait for status
 * @smu: pointer to an SMU context
 * @msg_index: message index
 * @param: message parameter to send to the SMU
 *
 * Send a message to the SMU with the parameter passed. Do not wait
 * for status/result of the message, thus the "without_waiting".
 *
 * Return 0 on success, -errno on error if we weren't able to _send_
 * the message for some reason. See __smu_cmn_reg2errno() for details
 * of the -errno.
 */
int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
				     uint16_t msg_index,
				     uint32_t param)
{
	struct amdgpu_device *adev = smu->adev;
	u32 reg;
	int res;

	if (adev->no_hw_access)
		return 0;

	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (reg == SMU_RESP_NONE ||
	    res == -EREMOTEIO)
		goto Out;
	__smu_cmn_send_msg(smu, msg_index, param);
	res = 0;
Out:
	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    res && (res != -ETIME)) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	return res;
}

/**
 * smu_cmn_wait_for_response -- wait for response from the SMU
 * @smu: pointer to an SMU context
 *
 * Wait for status from the SMU.
 *
 * Return 0 on success, -errno on error, indicating the execution
 * status and result of the message being waited for. See
 * __smu_cmn_reg2errno() for details of the -errno.
 */
int smu_cmn_wait_for_response(struct smu_context *smu)
{
	u32 reg;
	int res;

	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);

	if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    res && (res != -ETIME)) {
		amdgpu_device_halt(smu->adev);
		WARN_ON(1);
	}

	return res;
}
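/*
 * Illustrative sketch only (not a verbatim call site from any ppt
 * implementation): the two halves above are meant to be used together
 * when the caller wants to overlap other work with SMU execution. A
 * caller would typically translate the generic message, fire it off,
 * do something else, and only then collect the status:
 *
 *	int index, ret;
 *
 *	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
 *					       SMU_MSG_Mode1Reset);
 *	if (index < 0)
 *		return index;
 *	ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, 0);
 *	if (ret)
 *		return ret;
 *	... overlap other work here while the SMU processes the message ...
 *	ret = smu_cmn_wait_for_response(smu);
 *
 * SMU_MSG_Mode1Reset is only a stand-in here; whether a given message
 * is valid depends on the ASIC's message_map.
 */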
/**
 * smu_cmn_send_smc_msg_with_param -- send a message with parameter
 * @smu: pointer to an SMU context
 * @msg: message to send
 * @param: parameter to send to the SMU
 * @read_arg: pointer to u32 to return a value from the SMU back
 *            to the caller
 *
 * Send the message @msg with parameter @param to the SMU, wait for
 * completion of the command, and return back a value from the SMU in
 * @read_arg pointer.
 *
 * Return 0 on success, -errno when a problem is encountered while
 * sending the message or receiving the reply. If there is a PCI bus
 * recovery or the destination is a virtual GPU which does not allow
 * this message type, the message is simply dropped and success is also
 * returned. See __smu_cmn_reg2errno() for details of the -errno.
 *
 * If we weren't able to send the message to the SMU, we also print
 * the error to the standard log.
 *
 * Command completion status is printed only if the -errno is
 * -EREMOTEIO, indicating that the SMU returned back an
 * undefined/unknown/unspecified result. All other cases are
 * well-defined, not printed, but instead given back to the client to
 * decide what further to do.
 *
 * The output value @read_arg is read back regardless, to give back
 * more information to the client, which on error would most likely be
 * @param, but we can't assume that. This also eliminates more
 * conditionals.
 */
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
				    enum smu_message_type msg,
				    uint32_t param,
				    uint32_t *read_arg)
{
	struct amdgpu_device *adev = smu->adev;
	int res, index;
	u32 reg;

	if (adev->no_hw_access)
		return 0;

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       msg);
	if (index < 0)
		return index == -EACCES ? 0 : index;

	mutex_lock(&smu->message_lock);
	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (reg == SMU_RESP_NONE ||
	    res == -EREMOTEIO) {
		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
		goto Out;
	}
	__smu_cmn_send_msg(smu, (uint16_t) index, param);
	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (res != 0)
		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
	if (read_arg)
		smu_cmn_read_arg(smu, read_arg);
Out:
	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	mutex_unlock(&smu->message_lock);
	return res;
}

int smu_cmn_send_smc_msg(struct smu_context *smu,
			 enum smu_message_type msg,
			 uint32_t *read_arg)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       msg,
					       0,
					       read_arg);
}
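/*
 * Illustrative sketch, not a verbatim call site: a typical synchronous
 * exchange sends one generic message with a parameter and reads the
 * SMU's reply argument back. Which messages exist and what their
 * parameters mean is defined by each ASIC's message_map and firmware
 * interface header; SMU_MSG_GetSmuVersion is used here only because it
 * also appears in smu_cmn_get_smc_version() below.
 *
 *	uint32_t smu_version;
 *	int ret;
 *
 *	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetSmuVersion,
 *					      0, &smu_version);
 *	if (ret)
 *		return ret;
 *
 * smu_cmn_send_smc_msg() is the same call with @param fixed to 0, so
 * the above is equivalent to
 * smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, &smu_version).
 */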
int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
			       uint32_t msg)
{
	return __smu_cmn_send_debug_msg(smu, msg, 0);
}

int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
					  uint32_t msg, uint32_t param)
{
	return __smu_cmn_send_debug_msg(smu, msg, param);
}

int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index)
{
	struct cmn2asic_msg_mapping msg_mapping;
	struct cmn2asic_mapping mapping;

	switch (type) {
	case CMN2ASIC_MAPPING_MSG:
		if (index >= SMU_MSG_MAX_COUNT ||
		    !smu->message_map)
			return -EINVAL;

		msg_mapping = smu->message_map[index];
		if (!msg_mapping.valid_mapping)
			return -EINVAL;

		if (amdgpu_sriov_vf(smu->adev) &&
		    !msg_mapping.valid_in_vf)
			return -EACCES;

		return msg_mapping.map_to;

	case CMN2ASIC_MAPPING_CLK:
		if (index >= SMU_CLK_COUNT ||
		    !smu->clock_map)
			return -EINVAL;

		mapping = smu->clock_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_FEATURE:
		if (index >= SMU_FEATURE_COUNT ||
		    !smu->feature_map)
			return -EINVAL;

		mapping = smu->feature_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_TABLE:
		if (index >= SMU_TABLE_COUNT ||
		    !smu->table_map)
			return -EINVAL;

		mapping = smu->table_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_PWR:
		if (index >= SMU_POWER_SOURCE_COUNT ||
		    !smu->pwr_src_map)
			return -EINVAL;

		mapping = smu->pwr_src_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_WORKLOAD:
		if (index > PP_SMC_POWER_PROFILE_WINDOW3D ||
		    !smu->workload_map)
			return -EINVAL;

		mapping = smu->workload_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	default:
		return -EINVAL;
	}
}
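/*
 * For reference, a hedged sketch of how an ASIC ppt file typically
 * feeds this translator: the per-ASIC tables behind smu->message_map,
 * smu->feature_map, etc. are arrays indexed by the generic enums and
 * are usually built with small local helper macros. The macro shape
 * and the PPSMC names below follow the pattern used by the swsmu ppt
 * files but are only illustrative here; the authoritative definitions
 * live in smu_cmn.h and the per-ASIC ppt sources.
 *
 *	#define MSG_MAP(msg, index, valid_in_vf) \
 *		[SMU_MSG_##msg] = {1, (index), (valid_in_vf)}
 *
 *	static struct cmn2asic_msg_mapping example_message_map[SMU_MSG_MAX_COUNT] = {
 *		MSG_MAP(TestMessage,   PPSMC_MSG_TestMessage,   1),
 *		MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
 *	};
 *
 * With smu->message_map pointing at such a table,
 * smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
 * SMU_MSG_GetSmuVersion) returns the ASIC's PPSMC index, or -EACCES
 * when the message is not valid under SR-IOV.
 */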
int smu_cmn_feature_is_supported(struct smu_context *smu,
				 enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	return test_bit(feature_id, feature->supported);
}

static int __smu_get_enabled_features(struct smu_context *smu,
				      uint64_t *enabled_features)
{
	return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features);
}

int smu_cmn_feature_is_enabled(struct smu_context *smu,
			       enum smu_feature_mask mask)
{
	struct amdgpu_device *adev = smu->adev;
	uint64_t enabled_features;
	int feature_id;

	if (__smu_get_enabled_features(smu, &enabled_features)) {
		dev_err(adev->dev, "Failed to retrieve enabled ppfeatures!\n");
		return 0;
	}

	/*
	 * Renoir and Cyan Skillfish are assumed to have all features
	 * enabled. Since they also have no feature_map available, this
	 * check avoids the unwanted feature_map lookup below.
	 */
	if (enabled_features == ULLONG_MAX)
		return 1;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	return test_bit(feature_id, (unsigned long *)&enabled_features);
}

bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
				enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		feature_id = SMU_FEATURE_DPM_VCLK_BIT;
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		feature_id = SMU_FEATURE_DPM_DCLK_BIT;
		break;
	case SMU_FCLK:
		feature_id = SMU_FEATURE_DPM_FCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_cmn_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}

int smu_cmn_get_enabled_mask(struct smu_context *smu,
			     uint64_t *feature_mask)
{
	uint32_t *feature_mask_high;
	uint32_t *feature_mask_low;
	int ret = 0, index = 0;

	if (!feature_mask)
		return -EINVAL;

	feature_mask_low = &((uint32_t *)feature_mask)[0];
	feature_mask_high = &((uint32_t *)feature_mask)[1];

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       SMU_MSG_GetEnabledSmuFeatures);
	if (index > 0) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_GetEnabledSmuFeatures,
						      0,
						      feature_mask_low);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_GetEnabledSmuFeatures,
						      1,
						      feature_mask_high);
	} else {
		ret = smu_cmn_send_smc_msg(smu,
					   SMU_MSG_GetEnabledSmuFeaturesHigh,
					   feature_mask_high);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg(smu,
					   SMU_MSG_GetEnabledSmuFeaturesLow,
					   feature_mask_low);
	}

	return ret;
}

uint64_t smu_cmn_get_indep_throttler_status(
					const unsigned long dep_status,
					const uint8_t *throttler_map)
{
	uint64_t indep_status = 0;
	uint8_t dep_bit = 0;

	for_each_set_bit(dep_bit, &dep_status, 32)
		indep_status |= 1ULL << throttler_map[dep_bit];

	return indep_status;
}
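/*
 * A hedged illustration of the mapping above (the real tables live in
 * the per-ASIC ppt files and their entries differ per firmware): the
 * SMU reports throttling reasons in an ASIC-dependent bit layout, and
 * @throttler_map translates each dependent bit position into a bit of
 * the ASIC-independent SMU_THROTTLER_* namespace exposed through the
 * gpu_metrics tables.
 *
 *	static const uint8_t example_throttler_map[] = {
 *		[0] = SMU_THROTTLER_TEMP_EDGE_BIT,
 *		[1] = SMU_THROTTLER_TEMP_HOTSPOT_BIT,
 *		[2] = SMU_THROTTLER_PPT0_BIT,
 *	};
 *
 *	throttler_status = smu_cmn_get_indep_throttler_status(dep_status,
 *						example_throttler_map);
 *
 * The dependent bit positions 0..2 are made up for illustration; each
 * ASIC's firmware header defines the real dependent bit numbers.
 */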
int smu_cmn_feature_update_enable_state(struct smu_context *smu,
					uint64_t feature_mask,
					bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
	}

	return ret;
}

int smu_cmn_feature_set_enabled(struct smu_context *smu,
				enum smu_feature_mask mask,
				bool enable)
{
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return -EINVAL;

	return smu_cmn_feature_update_enable_state(smu,
						   1ULL << feature_id,
						   enable);
}

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea)	#fea
static const char *__smu_feature_names[] = {
	SMU_FEATURE_MASKS
};

static const char *smu_get_feature_name(struct smu_context *smu,
					enum smu_feature_mask feature)
{
	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";
	return __smu_feature_names[feature];
}

size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
				   char *buf)
{
	int8_t sort_feature[max(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
	uint64_t feature_mask;
	int i, feature_index;
	uint32_t count = 0;
	size_t size = 0;

	if (__smu_get_enabled_features(smu, &feature_mask))
		return 0;

	size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
			     upper_32_bits(feature_mask), lower_32_bits(feature_mask));

	memset(sort_feature, -1, sizeof(sort_feature));

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_FEATURE,
							       i);
		if (feature_index < 0)
			continue;

		sort_feature[feature_index] = i;
	}

	size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n",
			      "No", "Feature", "Bit", "State");

	for (feature_index = 0; feature_index < SMU_FEATURE_MAX; feature_index++) {
		if (sort_feature[feature_index] < 0)
			continue;

		size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
				      count++,
				      smu_get_feature_name(smu, sort_feature[feature_index]),
				      feature_index,
				      !!test_bit(feature_index, (unsigned long *)&feature_mask) ?
				      "enabled" : "disabled");
	}

	return size;
}

int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
				uint64_t new_mask)
{
	int ret = 0;
	uint64_t feature_mask;
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;

	ret = __smu_get_enabled_features(smu, &feature_mask);
	if (ret)
		return ret;

	feature_2_enabled = ~feature_mask & new_mask;
	feature_2_disabled = feature_mask & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_enabled,
							  true);
		if (ret)
			return ret;
	}
	if (feature_2_disabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_disabled,
							  false);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * smu_cmn_disable_all_features_with_exception - disable all dpm features
 *                                               except the one specified by
 *                                               @mask
 * @smu:  smu_context pointer
 * @mask: the dpm feature which should not be disabled
 *        SMU_FEATURE_COUNT: no exception, disable all dpm features
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
						enum smu_feature_mask mask)
{
	uint64_t features_to_disable = U64_MAX;
	int skipped_feature_id;

	if (mask != SMU_FEATURE_COUNT) {
		skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
								    CMN2ASIC_MAPPING_FEATURE,
								    mask);
		if (skipped_feature_id < 0)
			return -EINVAL;

		features_to_disable &= ~(1ULL << skipped_feature_id);
	}

	return smu_cmn_feature_update_enable_state(smu,
						   features_to_disable,
						   0);
}
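/*
 * Hedged usage sketch (the real callers sit in the per-ASIC ppt files
 * and in the suspend/hw-fini paths, which may add extra conditions):
 * keeping only the BACO feature alive while turning everything else
 * off, versus disabling every feature, would look roughly like this.
 *
 *	ret = smu_cmn_disable_all_features_with_exception(smu,
 *						SMU_FEATURE_BACO_BIT);
 *
 *	ret = smu_cmn_disable_all_features_with_exception(smu,
 *						SMU_FEATURE_COUNT);
 *
 * SMU_FEATURE_BACO_BIT is only an example; any enum smu_feature_mask
 * value known to the ASIC's feature_map is valid here.
 */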
int smu_cmn_get_smc_version(struct smu_context *smu,
			    uint32_t *if_version,
			    uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (smu->smc_fw_if_version && smu->smc_fw_version) {
		if (if_version)
			*if_version = smu->smc_fw_if_version;

		if (smu_version)
			*smu_version = smu->smc_fw_version;

		return 0;
	}

	if (if_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
		if (ret)
			return ret;

		smu->smc_fw_if_version = *if_version;
	}

	if (smu_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
		if (ret)
			return ret;

		smu->smc_fw_version = *smu_version;
	}

	return ret;
}

int smu_cmn_update_table(struct smu_context *smu,
			 enum smu_table_id table_index,
			 int argument,
			 void *table_data,
			 bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = &smu_table->driver_table;
	int table_id = smu_cmn_to_asic_specific_index(smu,
						      CMN2ASIC_MAPPING_TABLE,
						      table_index);
	uint32_t table_size;
	int ret = 0;

	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table_size = smu_table->tables[table_index].size;

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush the HDP cache so that the content the GPU sees
		 * is consistent with what the CPU just wrote.
		 */
		amdgpu_asic_flush_hdp(adev, NULL);
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
					      SMU_MSG_TransferTableDram2Smu :
					      SMU_MSG_TransferTableSmu2Dram,
					      table_id | ((argument & 0xFFFF) << 16),
					      NULL);
	if (ret)
		return ret;

	if (!drv2smu) {
		amdgpu_asic_invalidate_hdp(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}

	return 0;
}

int smu_cmn_write_watermarks_table(struct smu_context *smu)
{
	void *watermarks_table = smu->smu_table.watermarks_table;

	if (!watermarks_table)
		return -EINVAL;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_WATERMARKS,
				    0,
				    watermarks_table,
				    true);
}

int smu_cmn_write_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.driver_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_PPTABLE,
				    0,
				    pptable,
				    true);
}

int smu_cmn_get_metrics_table(struct smu_context *smu,
			      void *metrics_table,
			      bool bypass_cache)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t table_size =
		smu_table->tables[SMU_TABLE_SMU_METRICS].size;
	int ret = 0;

	if (bypass_cache ||
	    !smu_table->metrics_time ||
	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_SMU_METRICS,
					   0,
					   smu_table->metrics_table,
					   false);
		if (ret) {
			dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
			return ret;
		}
		smu_table->metrics_time = jiffies;
	}

	if (metrics_table)
		memcpy(metrics_table, smu_table->metrics_table, table_size);

	return 0;
}

int smu_cmn_get_combo_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.combo_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_COMBO_PPTABLE,
				    0,
				    pptable,
				    false);
}

void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
{
	struct metrics_table_header *header = (struct metrics_table_header *)table;
	uint16_t structure_size;

#define METRICS_VERSION(a, b)	((a << 16) | b)

	switch (METRICS_VERSION(frev, crev)) {
	case METRICS_VERSION(1, 0):
		structure_size = sizeof(struct gpu_metrics_v1_0);
		break;
	case METRICS_VERSION(1, 1):
		structure_size = sizeof(struct gpu_metrics_v1_1);
		break;
	case METRICS_VERSION(1, 2):
		structure_size = sizeof(struct gpu_metrics_v1_2);
		break;
	case METRICS_VERSION(1, 3):
		structure_size = sizeof(struct gpu_metrics_v1_3);
		break;
	case METRICS_VERSION(2, 0):
		structure_size = sizeof(struct gpu_metrics_v2_0);
		break;
	case METRICS_VERSION(2, 1):
		structure_size = sizeof(struct gpu_metrics_v2_1);
		break;
	case METRICS_VERSION(2, 2):
		structure_size = sizeof(struct gpu_metrics_v2_2);
		break;
	case METRICS_VERSION(2, 3):
		structure_size = sizeof(struct gpu_metrics_v2_3);
		break;
	default:
		return;
	}

#undef METRICS_VERSION

	memset(header, 0xFF, structure_size);

	header->format_revision = frev;
	header->content_revision = crev;
	header->structure_size = structure_size;
}
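/*
 * Hedged sketch of the intended use, following the pattern of the ppt
 * files' get_gpu_metrics callbacks (local variable and field names are
 * illustrative): the caller picks the gpu_metrics_vX_Y structure it
 * will fill, initializes the header for that exact revision, and then
 * overwrites the 0xFF filler with real values.
 *
 *	struct gpu_metrics_v1_3 *gpu_metrics =
 *		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 *
 *	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 *	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 *	...
 *
 * Fields that are never written keep the 0xFF pattern from the memset
 * above, which readers can treat as "not available" for that metric.
 */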
int smu_cmn_set_mp1_state(struct smu_context *smu,
			  enum pp_mp1_state mp1_state)
{
	enum smu_message_type msg;
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		return 0;
	}

	ret = smu_cmn_send_smc_msg(smu, msg, NULL);
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

	return ret;
}

bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;
	bool snd_driver_loaded;

	/*
	 * If the ASIC comes with no audio function, we always assume
	 * it is "enabled".
	 */
	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
					adev->pdev->bus->number, 1);
	if (!p)
		return true;

	snd_driver_loaded = pci_is_enabled(p) ? true : false;

	pci_dev_put(p);

	return snd_driver_loaded;
}