1 /* 2 * Copyright 2020 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 */ 22 23 #define SWSMU_CODE_LAYER_L4 24 25 #include "amdgpu.h" 26 #include "amdgpu_smu.h" 27 #include "smu_cmn.h" 28 #include "soc15_common.h" 29 30 /* 31 * DO NOT use these for err/warn/info/debug messages. 32 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 33 * They are more MGPU friendly. 34 */ 35 #undef pr_err 36 #undef pr_warn 37 #undef pr_info 38 #undef pr_debug 39 40 /* 41 * Although these are defined in each ASIC's specific header file. 42 * They share the same definitions and values. That makes common 43 * APIs for SMC messages issuing for all ASICs possible. 44 */ 45 #define mmMP1_SMN_C2PMSG_66 0x0282 46 #define mmMP1_SMN_C2PMSG_66_BASE_IDX 0 47 48 #define mmMP1_SMN_C2PMSG_82 0x0292 49 #define mmMP1_SMN_C2PMSG_82_BASE_IDX 0 50 51 #define mmMP1_SMN_C2PMSG_90 0x029a 52 #define mmMP1_SMN_C2PMSG_90_BASE_IDX 0 53 54 #define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL 55 56 #undef __SMU_DUMMY_MAP 57 #define __SMU_DUMMY_MAP(type) #type 58 static const char * const __smu_message_names[] = { 59 SMU_MESSAGE_TYPES 60 }; 61 62 static const char *smu_get_message_name(struct smu_context *smu, 63 enum smu_message_type type) 64 { 65 if (type < 0 || type >= SMU_MSG_MAX_COUNT) 66 return "unknown smu message"; 67 68 return __smu_message_names[type]; 69 } 70 71 static void smu_cmn_read_arg(struct smu_context *smu, 72 uint32_t *arg) 73 { 74 struct amdgpu_device *adev = smu->adev; 75 76 *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); 77 } 78 79 /* Redefine the SMU error codes here. 80 * 81 * Note that these definitions are redundant and should be removed 82 * when the SMU has exported a unified header file containing these 83 * macros, which header file we can just include and use the SMU's 84 * macros. At the moment, these error codes are defined by the SMU 85 * per-ASIC unfortunately, yet we're a one driver for all ASICs. 86 */ 87 #define SMU_RESP_NONE 0 88 #define SMU_RESP_OK 1 89 #define SMU_RESP_CMD_FAIL 0xFF 90 #define SMU_RESP_CMD_UNKNOWN 0xFE 91 #define SMU_RESP_CMD_BAD_PREREQ 0xFD 92 #define SMU_RESP_BUSY_OTHER 0xFC 93 #define SMU_RESP_DEBUG_END 0xFB 94 95 /** 96 * __smu_cmn_poll_stat -- poll for a status from the SMU 97 * smu: a pointer to SMU context 98 * 99 * Returns the status of the SMU, which could be, 100 * 0, the SMU is busy with your previous command; 101 * 1, execution status: success, execution result: success; 102 * 0xFF, execution status: success, execution result: failure; 103 * 0xFE, unknown command; 104 * 0xFD, valid command, but bad (command) prerequisites; 105 * 0xFC, the command was rejected as the SMU is busy; 106 * 0xFB, "SMC_Result_DebugDataDumpEnd". 107 * 108 * The values here are not defined by macros, because I'd rather we 109 * include a single header file which defines them, which is 110 * maintained by the SMU FW team, so that we're impervious to firmware 111 * changes. At the moment those values are defined in various header 112 * files, one for each ASIC, yet here we're a single ASIC-agnostic 113 * interface. Such a change can be followed-up by a subsequent patch. 114 */ 115 static u32 __smu_cmn_poll_stat(struct smu_context *smu) 116 { 117 struct amdgpu_device *adev = smu->adev; 118 int timeout = adev->usec_timeout * 20; 119 u32 reg; 120 121 for ( ; timeout > 0; timeout--) { 122 reg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90); 123 if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0) 124 break; 125 126 udelay(1); 127 } 128 129 return reg; 130 } 131 132 static void __smu_cmn_reg_print_error(struct smu_context *smu, 133 u32 reg_c2pmsg_90, 134 int msg_index, 135 u32 param, 136 enum smu_message_type msg) 137 { 138 struct amdgpu_device *adev = smu->adev; 139 const char *message = smu_get_message_name(smu, msg); 140 141 switch (reg_c2pmsg_90) { 142 case SMU_RESP_NONE: { 143 u32 msg_idx = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66); 144 u32 prm = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); 145 dev_err_ratelimited(adev->dev, 146 "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X", 147 msg_idx, prm); 148 } 149 break; 150 case SMU_RESP_OK: 151 /* The SMU executed the command. It completed with a 152 * successful result. 153 */ 154 break; 155 case SMU_RESP_CMD_FAIL: 156 /* The SMU executed the command. It completed with an 157 * unsuccessful result. 158 */ 159 break; 160 case SMU_RESP_CMD_UNKNOWN: 161 dev_err_ratelimited(adev->dev, 162 "SMU: unknown command: index:%d param:0x%08X message:%s", 163 msg_index, param, message); 164 break; 165 case SMU_RESP_CMD_BAD_PREREQ: 166 dev_err_ratelimited(adev->dev, 167 "SMU: valid command, bad prerequisites: index:%d param:0x%08X message:%s", 168 msg_index, param, message); 169 break; 170 case SMU_RESP_BUSY_OTHER: 171 dev_err_ratelimited(adev->dev, 172 "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s", 173 msg_index, param, message); 174 break; 175 case SMU_RESP_DEBUG_END: 176 dev_err_ratelimited(adev->dev, 177 "SMU: I'm debugging!"); 178 break; 179 default: 180 dev_err_ratelimited(adev->dev, 181 "SMU: response:0x%08X for index:%d param:0x%08X message:%s?", 182 reg_c2pmsg_90, msg_index, param, message); 183 break; 184 } 185 } 186 187 static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90) 188 { 189 int res; 190 191 switch (reg_c2pmsg_90) { 192 case SMU_RESP_NONE: 193 /* The SMU is busy--still executing your command. 194 */ 195 res = -ETIME; 196 break; 197 case SMU_RESP_OK: 198 res = 0; 199 break; 200 case SMU_RESP_CMD_FAIL: 201 /* Command completed successfully, but the command 202 * status was failure. 203 */ 204 res = -EIO; 205 break; 206 case SMU_RESP_CMD_UNKNOWN: 207 /* Unknown command--ignored by the SMU. 208 */ 209 res = -EOPNOTSUPP; 210 break; 211 case SMU_RESP_CMD_BAD_PREREQ: 212 /* Valid command--bad prerequisites. 213 */ 214 res = -EINVAL; 215 break; 216 case SMU_RESP_BUSY_OTHER: 217 /* The SMU is busy with other commands. The client 218 * should retry in 10 us. 219 */ 220 res = -EBUSY; 221 break; 222 default: 223 /* Unknown or debug response from the SMU. 224 */ 225 res = -EREMOTEIO; 226 break; 227 } 228 229 return res; 230 } 231 232 static void __smu_cmn_send_msg(struct smu_context *smu, 233 u16 msg, 234 u32 param) 235 { 236 struct amdgpu_device *adev = smu->adev; 237 238 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); 239 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param); 240 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg); 241 } 242 243 /** 244 * smu_cmn_send_msg_without_waiting -- send the message; don't wait for status 245 * @smu: pointer to an SMU context 246 * @msg_index: message index 247 * @param: message parameter to send to the SMU 248 * 249 * Send a message to the SMU with the parameter passed. Do not wait 250 * for status/result of the message, thus the "without_waiting". 251 * 252 * Return 0 on success, -errno on error if we weren't able to _send_ 253 * the message for some reason. See __smu_cmn_reg2errno() for details 254 * of the -errno. 255 */ 256 int smu_cmn_send_msg_without_waiting(struct smu_context *smu, 257 uint16_t msg_index, 258 uint32_t param) 259 { 260 u32 reg; 261 int res; 262 263 if (smu->adev->no_hw_access) 264 return 0; 265 266 reg = __smu_cmn_poll_stat(smu); 267 res = __smu_cmn_reg2errno(smu, reg); 268 if (reg == SMU_RESP_NONE || 269 reg == SMU_RESP_BUSY_OTHER || 270 res == -EREMOTEIO) 271 goto Out; 272 __smu_cmn_send_msg(smu, msg_index, param); 273 res = 0; 274 Out: 275 return res; 276 } 277 278 /** 279 * smu_cmn_wait_for_response -- wait for response from the SMU 280 * @smu: pointer to an SMU context 281 * 282 * Wait for status from the SMU. 283 * 284 * Return 0 on success, -errno on error, indicating the execution 285 * status and result of the message being waited for. See 286 * __smu_cmn_reg2errno() for details of the -errno. 287 */ 288 int smu_cmn_wait_for_response(struct smu_context *smu) 289 { 290 u32 reg; 291 292 reg = __smu_cmn_poll_stat(smu); 293 return __smu_cmn_reg2errno(smu, reg); 294 } 295 296 /** 297 * smu_cmn_send_smc_msg_with_param -- send a message with parameter 298 * @smu: pointer to an SMU context 299 * @msg: message to send 300 * @param: parameter to send to the SMU 301 * @read_arg: pointer to u32 to return a value from the SMU back 302 * to the caller 303 * 304 * Send the message @msg with parameter @param to the SMU, wait for 305 * completion of the command, and return back a value from the SMU in 306 * @read_arg pointer. 307 * 308 * Return 0 on success, -errno on error, if we weren't able to send 309 * the message or if the message completed with some kind of 310 * error. See __smu_cmn_reg2errno() for details of the -errno. 311 * 312 * If we weren't able to send the message to the SMU, we also print 313 * the error to the standard log. 314 * 315 * Command completion status is printed only if the -errno is 316 * -EREMOTEIO, indicating that the SMU returned back an 317 * undefined/unknown/unspecified result. All other cases are 318 * well-defined, not printed, but instead given back to the client to 319 * decide what further to do. 320 * 321 * The return value, @read_arg is read back regardless, to give back 322 * more information to the client, which on error would most likely be 323 * @param, but we can't assume that. This also eliminates more 324 * conditionals. 325 */ 326 int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, 327 enum smu_message_type msg, 328 uint32_t param, 329 uint32_t *read_arg) 330 { 331 int res, index; 332 u32 reg; 333 334 if (smu->adev->no_hw_access) 335 return 0; 336 337 index = smu_cmn_to_asic_specific_index(smu, 338 CMN2ASIC_MAPPING_MSG, 339 msg); 340 if (index < 0) 341 return index == -EACCES ? 0 : index; 342 343 mutex_lock(&smu->message_lock); 344 reg = __smu_cmn_poll_stat(smu); 345 res = __smu_cmn_reg2errno(smu, reg); 346 if (reg == SMU_RESP_NONE || 347 reg == SMU_RESP_BUSY_OTHER || 348 res == -EREMOTEIO) { 349 __smu_cmn_reg_print_error(smu, reg, index, param, msg); 350 goto Out; 351 } 352 __smu_cmn_send_msg(smu, (uint16_t) index, param); 353 reg = __smu_cmn_poll_stat(smu); 354 res = __smu_cmn_reg2errno(smu, reg); 355 if (res == -EREMOTEIO) 356 __smu_cmn_reg_print_error(smu, reg, index, param, msg); 357 if (read_arg) 358 smu_cmn_read_arg(smu, read_arg); 359 Out: 360 mutex_unlock(&smu->message_lock); 361 return res; 362 } 363 364 int smu_cmn_send_smc_msg(struct smu_context *smu, 365 enum smu_message_type msg, 366 uint32_t *read_arg) 367 { 368 return smu_cmn_send_smc_msg_with_param(smu, 369 msg, 370 0, 371 read_arg); 372 } 373 374 int smu_cmn_to_asic_specific_index(struct smu_context *smu, 375 enum smu_cmn2asic_mapping_type type, 376 uint32_t index) 377 { 378 struct cmn2asic_msg_mapping msg_mapping; 379 struct cmn2asic_mapping mapping; 380 381 switch (type) { 382 case CMN2ASIC_MAPPING_MSG: 383 if (index >= SMU_MSG_MAX_COUNT || 384 !smu->message_map) 385 return -EINVAL; 386 387 msg_mapping = smu->message_map[index]; 388 if (!msg_mapping.valid_mapping) 389 return -EINVAL; 390 391 if (amdgpu_sriov_vf(smu->adev) && 392 !msg_mapping.valid_in_vf) 393 return -EACCES; 394 395 return msg_mapping.map_to; 396 397 case CMN2ASIC_MAPPING_CLK: 398 if (index >= SMU_CLK_COUNT || 399 !smu->clock_map) 400 return -EINVAL; 401 402 mapping = smu->clock_map[index]; 403 if (!mapping.valid_mapping) 404 return -EINVAL; 405 406 return mapping.map_to; 407 408 case CMN2ASIC_MAPPING_FEATURE: 409 if (index >= SMU_FEATURE_COUNT || 410 !smu->feature_map) 411 return -EINVAL; 412 413 mapping = smu->feature_map[index]; 414 if (!mapping.valid_mapping) 415 return -EINVAL; 416 417 return mapping.map_to; 418 419 case CMN2ASIC_MAPPING_TABLE: 420 if (index >= SMU_TABLE_COUNT || 421 !smu->table_map) 422 return -EINVAL; 423 424 mapping = smu->table_map[index]; 425 if (!mapping.valid_mapping) 426 return -EINVAL; 427 428 return mapping.map_to; 429 430 case CMN2ASIC_MAPPING_PWR: 431 if (index >= SMU_POWER_SOURCE_COUNT || 432 !smu->pwr_src_map) 433 return -EINVAL; 434 435 mapping = smu->pwr_src_map[index]; 436 if (!mapping.valid_mapping) 437 return -EINVAL; 438 439 return mapping.map_to; 440 441 case CMN2ASIC_MAPPING_WORKLOAD: 442 if (index > PP_SMC_POWER_PROFILE_CUSTOM || 443 !smu->workload_map) 444 return -EINVAL; 445 446 mapping = smu->workload_map[index]; 447 if (!mapping.valid_mapping) 448 return -EINVAL; 449 450 return mapping.map_to; 451 452 default: 453 return -EINVAL; 454 } 455 } 456 457 int smu_cmn_feature_is_supported(struct smu_context *smu, 458 enum smu_feature_mask mask) 459 { 460 struct smu_feature *feature = &smu->smu_feature; 461 int feature_id; 462 int ret = 0; 463 464 feature_id = smu_cmn_to_asic_specific_index(smu, 465 CMN2ASIC_MAPPING_FEATURE, 466 mask); 467 if (feature_id < 0) 468 return 0; 469 470 WARN_ON(feature_id > feature->feature_num); 471 472 mutex_lock(&feature->mutex); 473 ret = test_bit(feature_id, feature->supported); 474 mutex_unlock(&feature->mutex); 475 476 return ret; 477 } 478 479 int smu_cmn_feature_is_enabled(struct smu_context *smu, 480 enum smu_feature_mask mask) 481 { 482 struct smu_feature *feature = &smu->smu_feature; 483 struct amdgpu_device *adev = smu->adev; 484 int feature_id; 485 int ret = 0; 486 487 if (smu->is_apu && adev->family < AMDGPU_FAMILY_VGH) 488 return 1; 489 490 feature_id = smu_cmn_to_asic_specific_index(smu, 491 CMN2ASIC_MAPPING_FEATURE, 492 mask); 493 if (feature_id < 0) 494 return 0; 495 496 WARN_ON(feature_id > feature->feature_num); 497 498 mutex_lock(&feature->mutex); 499 ret = test_bit(feature_id, feature->enabled); 500 mutex_unlock(&feature->mutex); 501 502 return ret; 503 } 504 505 bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu, 506 enum smu_clk_type clk_type) 507 { 508 enum smu_feature_mask feature_id = 0; 509 510 switch (clk_type) { 511 case SMU_MCLK: 512 case SMU_UCLK: 513 feature_id = SMU_FEATURE_DPM_UCLK_BIT; 514 break; 515 case SMU_GFXCLK: 516 case SMU_SCLK: 517 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT; 518 break; 519 case SMU_SOCCLK: 520 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT; 521 break; 522 default: 523 return true; 524 } 525 526 if (!smu_cmn_feature_is_enabled(smu, feature_id)) 527 return false; 528 529 return true; 530 } 531 532 int smu_cmn_get_enabled_mask(struct smu_context *smu, 533 uint32_t *feature_mask, 534 uint32_t num) 535 { 536 uint32_t feature_mask_high = 0, feature_mask_low = 0; 537 struct smu_feature *feature = &smu->smu_feature; 538 int ret = 0; 539 540 if (!feature_mask || num < 2) 541 return -EINVAL; 542 543 if (bitmap_empty(feature->enabled, feature->feature_num)) { 544 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high); 545 if (ret) 546 return ret; 547 548 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low); 549 if (ret) 550 return ret; 551 552 feature_mask[0] = feature_mask_low; 553 feature_mask[1] = feature_mask_high; 554 } else { 555 bitmap_copy((unsigned long *)feature_mask, feature->enabled, 556 feature->feature_num); 557 } 558 559 return ret; 560 } 561 562 int smu_cmn_get_enabled_32_bits_mask(struct smu_context *smu, 563 uint32_t *feature_mask, 564 uint32_t num) 565 { 566 uint32_t feature_mask_en_low = 0; 567 uint32_t feature_mask_en_high = 0; 568 struct smu_feature *feature = &smu->smu_feature; 569 int ret = 0; 570 571 if (!feature_mask || num < 2) 572 return -EINVAL; 573 574 if (bitmap_empty(feature->enabled, feature->feature_num)) { 575 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 0, 576 &feature_mask_en_low); 577 578 if (ret) 579 return ret; 580 581 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 1, 582 &feature_mask_en_high); 583 584 if (ret) 585 return ret; 586 587 feature_mask[0] = feature_mask_en_low; 588 feature_mask[1] = feature_mask_en_high; 589 590 } else { 591 bitmap_copy((unsigned long *)feature_mask, feature->enabled, 592 feature->feature_num); 593 } 594 595 return ret; 596 597 } 598 599 uint64_t smu_cmn_get_indep_throttler_status( 600 const unsigned long dep_status, 601 const uint8_t *throttler_map) 602 { 603 uint64_t indep_status = 0; 604 uint8_t dep_bit = 0; 605 606 for_each_set_bit(dep_bit, &dep_status, 32) 607 indep_status |= 1ULL << throttler_map[dep_bit]; 608 609 return indep_status; 610 } 611 612 int smu_cmn_feature_update_enable_state(struct smu_context *smu, 613 uint64_t feature_mask, 614 bool enabled) 615 { 616 struct smu_feature *feature = &smu->smu_feature; 617 int ret = 0; 618 619 if (enabled) { 620 ret = smu_cmn_send_smc_msg_with_param(smu, 621 SMU_MSG_EnableSmuFeaturesLow, 622 lower_32_bits(feature_mask), 623 NULL); 624 if (ret) 625 return ret; 626 ret = smu_cmn_send_smc_msg_with_param(smu, 627 SMU_MSG_EnableSmuFeaturesHigh, 628 upper_32_bits(feature_mask), 629 NULL); 630 if (ret) 631 return ret; 632 } else { 633 ret = smu_cmn_send_smc_msg_with_param(smu, 634 SMU_MSG_DisableSmuFeaturesLow, 635 lower_32_bits(feature_mask), 636 NULL); 637 if (ret) 638 return ret; 639 ret = smu_cmn_send_smc_msg_with_param(smu, 640 SMU_MSG_DisableSmuFeaturesHigh, 641 upper_32_bits(feature_mask), 642 NULL); 643 if (ret) 644 return ret; 645 } 646 647 mutex_lock(&feature->mutex); 648 if (enabled) 649 bitmap_or(feature->enabled, feature->enabled, 650 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX); 651 else 652 bitmap_andnot(feature->enabled, feature->enabled, 653 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX); 654 mutex_unlock(&feature->mutex); 655 656 return ret; 657 } 658 659 int smu_cmn_feature_set_enabled(struct smu_context *smu, 660 enum smu_feature_mask mask, 661 bool enable) 662 { 663 struct smu_feature *feature = &smu->smu_feature; 664 int feature_id; 665 666 feature_id = smu_cmn_to_asic_specific_index(smu, 667 CMN2ASIC_MAPPING_FEATURE, 668 mask); 669 if (feature_id < 0) 670 return -EINVAL; 671 672 WARN_ON(feature_id > feature->feature_num); 673 674 return smu_cmn_feature_update_enable_state(smu, 675 1ULL << feature_id, 676 enable); 677 } 678 679 #undef __SMU_DUMMY_MAP 680 #define __SMU_DUMMY_MAP(fea) #fea 681 static const char* __smu_feature_names[] = { 682 SMU_FEATURE_MASKS 683 }; 684 685 static const char *smu_get_feature_name(struct smu_context *smu, 686 enum smu_feature_mask feature) 687 { 688 if (feature < 0 || feature >= SMU_FEATURE_COUNT) 689 return "unknown smu feature"; 690 return __smu_feature_names[feature]; 691 } 692 693 size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu, 694 char *buf) 695 { 696 uint32_t feature_mask[2] = { 0 }; 697 int feature_index = 0; 698 uint32_t count = 0; 699 int8_t sort_feature[SMU_FEATURE_COUNT]; 700 size_t size = 0; 701 int ret = 0, i; 702 703 if (!smu->is_apu) { 704 ret = smu_cmn_get_enabled_mask(smu, 705 feature_mask, 706 2); 707 if (ret) 708 return 0; 709 } else { 710 ret = smu_cmn_get_enabled_32_bits_mask(smu, 711 feature_mask, 712 2); 713 if (ret) 714 return 0; 715 } 716 717 size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n", 718 feature_mask[1], feature_mask[0]); 719 720 memset(sort_feature, -1, sizeof(sort_feature)); 721 722 for (i = 0; i < SMU_FEATURE_COUNT; i++) { 723 feature_index = smu_cmn_to_asic_specific_index(smu, 724 CMN2ASIC_MAPPING_FEATURE, 725 i); 726 if (feature_index < 0) 727 continue; 728 729 sort_feature[feature_index] = i; 730 } 731 732 size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n", 733 "No", "Feature", "Bit", "State"); 734 735 for (i = 0; i < SMU_FEATURE_COUNT; i++) { 736 if (sort_feature[i] < 0) 737 continue; 738 739 size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n", 740 count++, 741 smu_get_feature_name(smu, sort_feature[i]), 742 i, 743 !!smu_cmn_feature_is_enabled(smu, sort_feature[i]) ? 744 "enabled" : "disabled"); 745 } 746 747 return size; 748 } 749 750 int smu_cmn_set_pp_feature_mask(struct smu_context *smu, 751 uint64_t new_mask) 752 { 753 int ret = 0; 754 uint32_t feature_mask[2] = { 0 }; 755 uint64_t feature_2_enabled = 0; 756 uint64_t feature_2_disabled = 0; 757 uint64_t feature_enables = 0; 758 759 ret = smu_cmn_get_enabled_mask(smu, 760 feature_mask, 761 2); 762 if (ret) 763 return ret; 764 765 feature_enables = ((uint64_t)feature_mask[1] << 32 | 766 (uint64_t)feature_mask[0]); 767 768 feature_2_enabled = ~feature_enables & new_mask; 769 feature_2_disabled = feature_enables & ~new_mask; 770 771 if (feature_2_enabled) { 772 ret = smu_cmn_feature_update_enable_state(smu, 773 feature_2_enabled, 774 true); 775 if (ret) 776 return ret; 777 } 778 if (feature_2_disabled) { 779 ret = smu_cmn_feature_update_enable_state(smu, 780 feature_2_disabled, 781 false); 782 if (ret) 783 return ret; 784 } 785 786 return ret; 787 } 788 789 /** 790 * smu_cmn_disable_all_features_with_exception - disable all dpm features 791 * except this specified by 792 * @mask 793 * 794 * @smu: smu_context pointer 795 * @no_hw_disablement: whether real dpm disablement should be performed 796 * true: update the cache(about dpm enablement state) only 797 * false: real dpm disablement plus cache update 798 * @mask: the dpm feature which should not be disabled 799 * SMU_FEATURE_COUNT: no exception, all dpm features 800 * to disable 801 * 802 * Returns: 803 * 0 on success or a negative error code on failure. 804 */ 805 int smu_cmn_disable_all_features_with_exception(struct smu_context *smu, 806 bool no_hw_disablement, 807 enum smu_feature_mask mask) 808 { 809 struct smu_feature *feature = &smu->smu_feature; 810 uint64_t features_to_disable = U64_MAX; 811 int skipped_feature_id; 812 813 if (mask != SMU_FEATURE_COUNT) { 814 skipped_feature_id = smu_cmn_to_asic_specific_index(smu, 815 CMN2ASIC_MAPPING_FEATURE, 816 mask); 817 if (skipped_feature_id < 0) 818 return -EINVAL; 819 820 features_to_disable &= ~(1ULL << skipped_feature_id); 821 } 822 823 if (no_hw_disablement) { 824 mutex_lock(&feature->mutex); 825 bitmap_andnot(feature->enabled, feature->enabled, 826 (unsigned long *)(&features_to_disable), SMU_FEATURE_MAX); 827 mutex_unlock(&feature->mutex); 828 829 return 0; 830 } else { 831 return smu_cmn_feature_update_enable_state(smu, 832 features_to_disable, 833 0); 834 } 835 } 836 837 int smu_cmn_get_smc_version(struct smu_context *smu, 838 uint32_t *if_version, 839 uint32_t *smu_version) 840 { 841 int ret = 0; 842 843 if (!if_version && !smu_version) 844 return -EINVAL; 845 846 if (smu->smc_fw_if_version && smu->smc_fw_version) 847 { 848 if (if_version) 849 *if_version = smu->smc_fw_if_version; 850 851 if (smu_version) 852 *smu_version = smu->smc_fw_version; 853 854 return 0; 855 } 856 857 if (if_version) { 858 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version); 859 if (ret) 860 return ret; 861 862 smu->smc_fw_if_version = *if_version; 863 } 864 865 if (smu_version) { 866 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version); 867 if (ret) 868 return ret; 869 870 smu->smc_fw_version = *smu_version; 871 } 872 873 return ret; 874 } 875 876 int smu_cmn_update_table(struct smu_context *smu, 877 enum smu_table_id table_index, 878 int argument, 879 void *table_data, 880 bool drv2smu) 881 { 882 struct smu_table_context *smu_table = &smu->smu_table; 883 struct amdgpu_device *adev = smu->adev; 884 struct smu_table *table = &smu_table->driver_table; 885 int table_id = smu_cmn_to_asic_specific_index(smu, 886 CMN2ASIC_MAPPING_TABLE, 887 table_index); 888 uint32_t table_size; 889 int ret = 0; 890 if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0) 891 return -EINVAL; 892 893 table_size = smu_table->tables[table_index].size; 894 895 if (drv2smu) { 896 memcpy(table->cpu_addr, table_data, table_size); 897 /* 898 * Flush hdp cache: to guard the content seen by 899 * GPU is consitent with CPU. 900 */ 901 amdgpu_asic_flush_hdp(adev, NULL); 902 } 903 904 ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ? 905 SMU_MSG_TransferTableDram2Smu : 906 SMU_MSG_TransferTableSmu2Dram, 907 table_id | ((argument & 0xFFFF) << 16), 908 NULL); 909 if (ret) 910 return ret; 911 912 if (!drv2smu) { 913 amdgpu_asic_invalidate_hdp(adev, NULL); 914 memcpy(table_data, table->cpu_addr, table_size); 915 } 916 917 return 0; 918 } 919 920 int smu_cmn_write_watermarks_table(struct smu_context *smu) 921 { 922 void *watermarks_table = smu->smu_table.watermarks_table; 923 924 if (!watermarks_table) 925 return -EINVAL; 926 927 return smu_cmn_update_table(smu, 928 SMU_TABLE_WATERMARKS, 929 0, 930 watermarks_table, 931 true); 932 } 933 934 int smu_cmn_write_pptable(struct smu_context *smu) 935 { 936 void *pptable = smu->smu_table.driver_pptable; 937 938 return smu_cmn_update_table(smu, 939 SMU_TABLE_PPTABLE, 940 0, 941 pptable, 942 true); 943 } 944 945 int smu_cmn_get_metrics_table_locked(struct smu_context *smu, 946 void *metrics_table, 947 bool bypass_cache) 948 { 949 struct smu_table_context *smu_table= &smu->smu_table; 950 uint32_t table_size = 951 smu_table->tables[SMU_TABLE_SMU_METRICS].size; 952 int ret = 0; 953 954 if (bypass_cache || 955 !smu_table->metrics_time || 956 time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) { 957 ret = smu_cmn_update_table(smu, 958 SMU_TABLE_SMU_METRICS, 959 0, 960 smu_table->metrics_table, 961 false); 962 if (ret) { 963 dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n"); 964 return ret; 965 } 966 smu_table->metrics_time = jiffies; 967 } 968 969 if (metrics_table) 970 memcpy(metrics_table, smu_table->metrics_table, table_size); 971 972 return 0; 973 } 974 975 int smu_cmn_get_metrics_table(struct smu_context *smu, 976 void *metrics_table, 977 bool bypass_cache) 978 { 979 int ret = 0; 980 981 mutex_lock(&smu->metrics_lock); 982 ret = smu_cmn_get_metrics_table_locked(smu, 983 metrics_table, 984 bypass_cache); 985 mutex_unlock(&smu->metrics_lock); 986 987 return ret; 988 } 989 990 void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev) 991 { 992 struct metrics_table_header *header = (struct metrics_table_header *)table; 993 uint16_t structure_size; 994 995 #define METRICS_VERSION(a, b) ((a << 16) | b ) 996 997 switch (METRICS_VERSION(frev, crev)) { 998 case METRICS_VERSION(1, 0): 999 structure_size = sizeof(struct gpu_metrics_v1_0); 1000 break; 1001 case METRICS_VERSION(1, 1): 1002 structure_size = sizeof(struct gpu_metrics_v1_1); 1003 break; 1004 case METRICS_VERSION(1, 2): 1005 structure_size = sizeof(struct gpu_metrics_v1_2); 1006 break; 1007 case METRICS_VERSION(1, 3): 1008 structure_size = sizeof(struct gpu_metrics_v1_3); 1009 break; 1010 case METRICS_VERSION(2, 0): 1011 structure_size = sizeof(struct gpu_metrics_v2_0); 1012 break; 1013 case METRICS_VERSION(2, 1): 1014 structure_size = sizeof(struct gpu_metrics_v2_1); 1015 break; 1016 case METRICS_VERSION(2, 2): 1017 structure_size = sizeof(struct gpu_metrics_v2_2); 1018 break; 1019 default: 1020 return; 1021 } 1022 1023 #undef METRICS_VERSION 1024 1025 memset(header, 0xFF, structure_size); 1026 1027 header->format_revision = frev; 1028 header->content_revision = crev; 1029 header->structure_size = structure_size; 1030 1031 } 1032 1033 int smu_cmn_set_mp1_state(struct smu_context *smu, 1034 enum pp_mp1_state mp1_state) 1035 { 1036 enum smu_message_type msg; 1037 int ret; 1038 1039 switch (mp1_state) { 1040 case PP_MP1_STATE_SHUTDOWN: 1041 msg = SMU_MSG_PrepareMp1ForShutdown; 1042 break; 1043 case PP_MP1_STATE_UNLOAD: 1044 msg = SMU_MSG_PrepareMp1ForUnload; 1045 break; 1046 case PP_MP1_STATE_RESET: 1047 msg = SMU_MSG_PrepareMp1ForReset; 1048 break; 1049 case PP_MP1_STATE_NONE: 1050 default: 1051 return 0; 1052 } 1053 1054 ret = smu_cmn_send_smc_msg(smu, msg, NULL); 1055 if (ret) 1056 dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n"); 1057 1058 return ret; 1059 } 1060 1061 bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev) 1062 { 1063 struct pci_dev *p = NULL; 1064 bool snd_driver_loaded; 1065 1066 /* 1067 * If the ASIC comes with no audio function, we always assume 1068 * it is "enabled". 1069 */ 1070 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), 1071 adev->pdev->bus->number, 1); 1072 if (!p) 1073 return true; 1074 1075 snd_driver_loaded = pci_is_enabled(p) ? true : false; 1076 1077 pci_dev_put(p); 1078 1079 return snd_driver_loaded; 1080 } 1081