/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L4

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_cmn.h"
#include "soc15_common.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

/*
 * Although these are defined in each ASIC's specific header file,
 * they share the same definitions and values. That makes common
 * APIs for issuing SMC messages possible for all ASICs.
 */
#define mmMP1_SMN_C2PMSG_66			0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX		0

#define mmMP1_SMN_C2PMSG_82			0x0292
#define mmMP1_SMN_C2PMSG_82_BASE_IDX		0

#define mmMP1_SMN_C2PMSG_90			0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX		0

#define MP1_C2PMSG_90__CONTENT_MASK		0xFFFFFFFFL

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type)	#type
static const char * const __smu_message_names[] = {
	SMU_MESSAGE_TYPES
};

static const char *smu_get_message_name(struct smu_context *smu,
					enum smu_message_type type)
{
	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";

	return __smu_message_names[type];
}

static void smu_cmn_read_arg(struct smu_context *smu,
			     uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}

/* Redefine the SMU error codes here.
 *
 * Note that these definitions are redundant and should be removed
 * when the SMU has exported a unified header file containing these
 * macros, which header file we can just include and use the SMU's
 * macros. At the moment, these error codes are defined by the SMU
 * per ASIC, unfortunately, yet we're one driver for all ASICs.
 */
#define SMU_RESP_NONE		0
#define SMU_RESP_OK		1
#define SMU_RESP_CMD_FAIL	0xFF
#define SMU_RESP_CMD_UNKNOWN	0xFE
#define SMU_RESP_CMD_BAD_PREREQ	0xFD
#define SMU_RESP_BUSY_OTHER	0xFC
#define SMU_RESP_DEBUG_END	0xFB

/**
 * __smu_cmn_poll_stat -- poll for a status from the SMU
 * @smu: a pointer to SMU context
 *
 * Returns the status of the SMU, which could be,
 *    0, the SMU is busy with your command;
 *    1, execution status: success, execution result: success;
 * 0xFF, execution status: success, execution result: failure;
 * 0xFE, unknown command;
 * 0xFD, valid command, but bad (command) prerequisites;
 * 0xFC, the command was rejected as the SMU is busy;
 * 0xFB, "SMC_Result_DebugDataDumpEnd".
 *
 * The values here are not defined by macros, because I'd rather we
 * include a single header file which defines them, which is
 * maintained by the SMU FW team, so that we're impervious to firmware
 * changes. At the moment those values are defined in various header
 * files, one for each ASIC, yet here we're a single ASIC-agnostic
 * interface. Such a change can be followed-up by a subsequent patch.
 */
static u32 __smu_cmn_poll_stat(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int timeout = adev->usec_timeout * 20;
	u32 reg;

	for ( ; timeout > 0; timeout--) {
		reg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
		if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			break;

		udelay(1);
	}

	return reg;
}

static void __smu_cmn_reg_print_error(struct smu_context *smu,
				      u32 reg_c2pmsg_90,
				      int msg_index,
				      u32 param,
				      enum smu_message_type msg)
{
	struct amdgpu_device *adev = smu->adev;
	const char *message = smu_get_message_name(smu, msg);

	switch (reg_c2pmsg_90) {
	case SMU_RESP_NONE: {
		u32 msg_idx = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66);
		u32 prm     = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
				    msg_idx, prm);
	}
		break;
	case SMU_RESP_OK:
		/* The SMU executed the command. It completed with a
		 * successful result.
		 */
		break;
	case SMU_RESP_CMD_FAIL:
		/* The SMU executed the command. It completed with an
		 * unsuccessful result.
		 */
		break;
	case SMU_RESP_CMD_UNKNOWN:
		dev_err_ratelimited(adev->dev,
				    "SMU: unknown command: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		dev_err_ratelimited(adev->dev,
				    "SMU: valid command, bad prerequisites: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_BUSY_OTHER:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_DEBUG_END:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm debugging!");
		break;
	default:
		dev_err_ratelimited(adev->dev,
				    "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
				    reg_c2pmsg_90, msg_index, param, message);
		break;
	}
}

static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90)
{
	int res;

	switch (reg_c2pmsg_90) {
	case SMU_RESP_NONE:
		/* The SMU is busy--still executing your command.
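		 * Note that smu_cmn_send_msg_without_waiting() and
		 * smu_cmn_wait_for_response() deliberately do not halt on the
		 * resulting -ETIME, even with SMU_DEBUG_HALT_ON_ERROR set.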
		 */
		res = -ETIME;
		break;
	case SMU_RESP_OK:
		res = 0;
		break;
	case SMU_RESP_CMD_FAIL:
		/* Command completed successfully, but the command
		 * status was failure.
		 */
		res = -EIO;
		break;
	case SMU_RESP_CMD_UNKNOWN:
		/* Unknown command--ignored by the SMU.
		 */
		res = -EOPNOTSUPP;
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		/* Valid command--bad prerequisites.
		 */
		res = -EINVAL;
		break;
	case SMU_RESP_BUSY_OTHER:
		/* The SMU is busy with other commands. The client
		 * should retry in 10 us.
		 */
		res = -EBUSY;
		break;
	default:
		/* Unknown or debug response from the SMU.
		 */
		res = -EREMOTEIO;
		break;
	}

	return res;
}

static void __smu_cmn_send_msg(struct smu_context *smu,
			       u16 msg,
			       u32 param)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
}

/**
 * smu_cmn_send_msg_without_waiting -- send the message; don't wait for status
 * @smu: pointer to an SMU context
 * @msg_index: message index
 * @param: message parameter to send to the SMU
 *
 * Send a message to the SMU with the parameter passed. Do not wait
 * for status/result of the message, thus the "without_waiting".
 *
 * Return 0 on success, -errno on error if we weren't able to _send_
 * the message for some reason. See __smu_cmn_reg2errno() for details
 * of the -errno.
 */
int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
				     uint16_t msg_index,
				     uint32_t param)
{
	struct amdgpu_device *adev = smu->adev;
	u32 reg;
	int res;

	if (adev->no_hw_access)
		return 0;

	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (reg == SMU_RESP_NONE ||
	    reg == SMU_RESP_BUSY_OTHER ||
	    res == -EREMOTEIO)
		goto Out;
	__smu_cmn_send_msg(smu, msg_index, param);
	res = 0;
Out:
	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    res && (res != -ETIME)) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	return res;
}

/**
 * smu_cmn_wait_for_response -- wait for response from the SMU
 * @smu: pointer to an SMU context
 *
 * Wait for status from the SMU.
 *
 * Return 0 on success, -errno on error, indicating the execution
 * status and result of the message being waited for. See
 * __smu_cmn_reg2errno() for details of the -errno.
 */
int smu_cmn_wait_for_response(struct smu_context *smu)
{
	u32 reg;
	int res;

	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);

	if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    res && (res != -ETIME)) {
		amdgpu_device_halt(smu->adev);
		WARN_ON(1);
	}

	return res;
}

/**
 * smu_cmn_send_smc_msg_with_param -- send a message with parameter
 * @smu: pointer to an SMU context
 * @msg: message to send
 * @param: parameter to send to the SMU
 * @read_arg: pointer to u32 to return a value from the SMU back
 *            to the caller
 *
 * Send the message @msg with parameter @param to the SMU, wait for
 * completion of the command, and return back a value from the SMU in
 * @read_arg pointer.
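 *
 * The send/poll sequence is serialized by taking @smu->message_lock
 * internally, so this must not be called with that lock already held.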
 *
 * Return 0 on success, -errno on error, if we weren't able to send
 * the message or if the message completed with some kind of
 * error. See __smu_cmn_reg2errno() for details of the -errno.
 *
 * If we weren't able to send the message to the SMU, we also print
 * the error to the standard log.
 *
 * Command completion status is printed only if the -errno is
 * -EREMOTEIO, indicating that the SMU returned back an
 * undefined/unknown/unspecified result. All other cases are
 * well-defined, not printed, but instead given back to the client to
 * decide what further to do.
 *
 * The return value, @read_arg is read back regardless, to give back
 * more information to the client, which on error would most likely be
 * @param, but we can't assume that. This also eliminates more
 * conditionals.
 */
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
				    enum smu_message_type msg,
				    uint32_t param,
				    uint32_t *read_arg)
{
	struct amdgpu_device *adev = smu->adev;
	int res, index;
	u32 reg;

	if (adev->no_hw_access)
		return 0;

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       msg);
	if (index < 0)
		return index == -EACCES ? 0 : index;

	mutex_lock(&smu->message_lock);
	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (reg == SMU_RESP_NONE ||
	    reg == SMU_RESP_BUSY_OTHER ||
	    res == -EREMOTEIO) {
		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
		goto Out;
	}
	__smu_cmn_send_msg(smu, (uint16_t) index, param);
	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (res != 0)
		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
	if (read_arg)
		smu_cmn_read_arg(smu, read_arg);
Out:
	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	mutex_unlock(&smu->message_lock);
	return res;
}

int smu_cmn_send_smc_msg(struct smu_context *smu,
			 enum smu_message_type msg,
			 uint32_t *read_arg)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       msg,
					       0,
					       read_arg);
}

int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index)
{
	struct cmn2asic_msg_mapping msg_mapping;
	struct cmn2asic_mapping mapping;

	switch (type) {
	case CMN2ASIC_MAPPING_MSG:
		if (index >= SMU_MSG_MAX_COUNT ||
		    !smu->message_map)
			return -EINVAL;

		msg_mapping = smu->message_map[index];
		if (!msg_mapping.valid_mapping)
			return -EINVAL;

		if (amdgpu_sriov_vf(smu->adev) &&
		    !msg_mapping.valid_in_vf)
			return -EACCES;

		return msg_mapping.map_to;

	case CMN2ASIC_MAPPING_CLK:
		if (index >= SMU_CLK_COUNT ||
		    !smu->clock_map)
			return -EINVAL;

		mapping = smu->clock_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_FEATURE:
		if (index >= SMU_FEATURE_COUNT ||
		    !smu->feature_map)
			return -EINVAL;

		mapping = smu->feature_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_TABLE:
		if (index >= SMU_TABLE_COUNT ||
		    !smu->table_map)
			return -EINVAL;

		mapping = smu->table_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_PWR:
		if (index >= SMU_POWER_SOURCE_COUNT ||
		    !smu->pwr_src_map)
			return -EINVAL;

		mapping = smu->pwr_src_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_WORKLOAD:
		if (index > PP_SMC_POWER_PROFILE_CUSTOM ||
		    !smu->workload_map)
			return -EINVAL;

		mapping = smu->workload_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	default:
		return -EINVAL;
	}
}

int smu_cmn_feature_is_supported(struct smu_context *smu,
				 enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_cmn_feature_is_enabled(struct smu_context *smu,
			       enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	struct amdgpu_device *adev = smu->adev;
	int feature_id;
	int ret = 0;

	if (smu->is_apu && adev->family < AMDGPU_FAMILY_VGH)
		return 1;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);

	return ret;
}

bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
				enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_cmn_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}

int smu_cmn_get_enabled_mask(struct smu_context *smu,
			     uint32_t *feature_mask,
			     uint32_t num)
{
	uint32_t feature_mask_high = 0, feature_mask_low = 0;
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	if (!feature_mask || num < 2)
		return -EINVAL;

	if (bitmap_empty(feature->enabled, feature->feature_num)) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
		if (ret)
			return ret;

		feature_mask[0] = feature_mask_low;
		feature_mask[1] = feature_mask_high;
	} else {
		bitmap_copy((unsigned long *)feature_mask, feature->enabled,
			    feature->feature_num);
	}

	return ret;
}

int smu_cmn_get_enabled_32_bits_mask(struct smu_context *smu,
				     uint32_t *feature_mask,
				     uint32_t num)
{
	uint32_t feature_mask_en_low = 0;
	uint32_t feature_mask_en_high = 0;
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	if (!feature_mask || num < 2)
		return -EINVAL;

	if (bitmap_empty(feature->enabled, feature->feature_num)) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 0,
						      &feature_mask_en_low);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 1,
						      &feature_mask_en_high);
		if (ret)
			return ret;

		feature_mask[0] = feature_mask_en_low;
		feature_mask[1] = feature_mask_en_high;
	} else {
		bitmap_copy((unsigned long *)feature_mask, feature->enabled,
			    feature->feature_num);
	}

	return ret;
}

uint64_t smu_cmn_get_indep_throttler_status(
					const unsigned long dep_status,
					const uint8_t *throttler_map)
{
	uint64_t indep_status = 0;
	uint8_t dep_bit = 0;

	for_each_set_bit(dep_bit, &dep_status, 32)
		indep_status |= 1ULL << throttler_map[dep_bit];

	return indep_status;
}

int smu_cmn_feature_update_enable_state(struct smu_context *smu,
					uint64_t feature_mask,
					bool enabled)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	if (enabled) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
	}

	mutex_lock(&feature->mutex);
	if (enabled)
		bitmap_or(feature->enabled, feature->enabled,
			  (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	else
		bitmap_andnot(feature->enabled, feature->enabled,
			      (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_cmn_feature_set_enabled(struct smu_context *smu,
				enum smu_feature_mask mask,
				bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return -EINVAL;

	WARN_ON(feature_id > feature->feature_num);

	return smu_cmn_feature_update_enable_state(smu,
						   1ULL << feature_id,
						   enable);
}

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea)	#fea
static const char *__smu_feature_names[] = {
	SMU_FEATURE_MASKS
};

static const char *smu_get_feature_name(struct smu_context *smu,
					enum smu_feature_mask feature)
{
	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";
	return __smu_feature_names[feature];
}

size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
				   char *buf)
{
	uint32_t feature_mask[2] = { 0 };
	int feature_index = 0;
	uint32_t count = 0;
	int8_t sort_feature[SMU_FEATURE_COUNT];
	size_t size = 0;
	int ret = 0, i;

	if (!smu->is_apu) {
		ret = smu_cmn_get_enabled_mask(smu,
					       feature_mask,
					       2);
		if (ret)
			return 0;
	} else {
		ret = smu_cmn_get_enabled_32_bits_mask(smu,
						       feature_mask,
						       2);
		if (ret)
			return 0;
	}

	size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
			     feature_mask[1], feature_mask[0]);

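	/*
	 * Build a reverse map from the ASIC-specific feature bit position to
	 * the generic SMU_FEATURE_* index, so the listing below can walk the
	 * features in hardware bit order.
	 */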
	memset(sort_feature, -1, sizeof(sort_feature));

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_FEATURE,
							       i);
		if (feature_index < 0)
			continue;

		sort_feature[feature_index] = i;
	}

	size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n",
			      "No", "Feature", "Bit", "State");

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		if (sort_feature[i] < 0)
			continue;

		size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
				      count++,
				      smu_get_feature_name(smu, sort_feature[i]),
				      i,
				      !!smu_cmn_feature_is_enabled(smu, sort_feature[i]) ?
				      "enabled" : "disabled");
	}

	return size;
}

int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
				uint64_t new_mask)
{
	int ret = 0;
	uint32_t feature_mask[2] = { 0 };
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;
	uint64_t feature_enables = 0;

	ret = smu_cmn_get_enabled_mask(smu,
				       feature_mask,
				       2);
	if (ret)
		return ret;

	feature_enables = ((uint64_t)feature_mask[1] << 32 |
			   (uint64_t)feature_mask[0]);

	feature_2_enabled = ~feature_enables & new_mask;
	feature_2_disabled = feature_enables & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_enabled,
							  true);
		if (ret)
			return ret;
	}
	if (feature_2_disabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_disabled,
							  false);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * smu_cmn_disable_all_features_with_exception - disable all dpm features
 *                                               except the one specified by
 *                                               @mask
 *
 * @smu:               smu_context pointer
 * @no_hw_disablement: whether real dpm disablement should be performed
 *                     true: update the cache (about dpm enablement state) only
 *                     false: real dpm disablement plus cache update
 * @mask:              the dpm feature which should not be disabled
 *                     SMU_FEATURE_COUNT: no exception, all dpm features
 *                     to disable
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
						bool no_hw_disablement,
						enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint64_t features_to_disable = U64_MAX;
	int skipped_feature_id;

	if (mask != SMU_FEATURE_COUNT) {
		skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
								    CMN2ASIC_MAPPING_FEATURE,
								    mask);
		if (skipped_feature_id < 0)
			return -EINVAL;

		features_to_disable &= ~(1ULL << skipped_feature_id);
	}

	if (no_hw_disablement) {
		mutex_lock(&feature->mutex);
		bitmap_andnot(feature->enabled, feature->enabled,
			      (unsigned long *)(&features_to_disable), SMU_FEATURE_MAX);
		mutex_unlock(&feature->mutex);

		return 0;
	} else {
		return smu_cmn_feature_update_enable_state(smu,
							   features_to_disable,
							   0);
	}
}

int smu_cmn_get_smc_version(struct smu_context *smu,
			    uint32_t *if_version,
			    uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (smu->smc_fw_if_version && smu->smc_fw_version) {
		if (if_version)
			*if_version = smu->smc_fw_if_version;

		if (smu_version)
			*smu_version = smu->smc_fw_version;

		return 0;
	}

	if (if_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
		if (ret)
			return ret;

		smu->smc_fw_if_version = *if_version;
	}

	if (smu_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
		if (ret)
			return ret;

		smu->smc_fw_version = *smu_version;
	}

	return ret;
}

int smu_cmn_update_table(struct smu_context *smu,
			 enum smu_table_id table_index,
			 int argument,
			 void *table_data,
			 bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = &smu_table->driver_table;
	int table_id = smu_cmn_to_asic_specific_index(smu,
						      CMN2ASIC_MAPPING_TABLE,
						      table_index);
	uint32_t table_size;
	int ret = 0;

	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table_size = smu_table->tables[table_index].size;

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush the HDP cache so that the content seen by the
		 * GPU is consistent with what the CPU wrote.
		 */
		amdgpu_asic_flush_hdp(adev, NULL);
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
					      SMU_MSG_TransferTableDram2Smu :
					      SMU_MSG_TransferTableSmu2Dram,
					      table_id | ((argument & 0xFFFF) << 16),
					      NULL);
	if (ret)
		return ret;

	if (!drv2smu) {
		amdgpu_asic_invalidate_hdp(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}

	return 0;
}

int smu_cmn_write_watermarks_table(struct smu_context *smu)
{
	void *watermarks_table = smu->smu_table.watermarks_table;

	if (!watermarks_table)
		return -EINVAL;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_WATERMARKS,
				    0,
				    watermarks_table,
				    true);
}

int smu_cmn_write_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.driver_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_PPTABLE,
				    0,
				    pptable,
				    true);
}

int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
				     void *metrics_table,
				     bool bypass_cache)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t table_size =
		smu_table->tables[SMU_TABLE_SMU_METRICS].size;
	int ret = 0;

	if (bypass_cache ||
	    !smu_table->metrics_time ||
	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_SMU_METRICS,
					   0,
					   smu_table->metrics_table,
					   false);
		if (ret) {
			dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
			return ret;
		}
		smu_table->metrics_time = jiffies;
	}

	if (metrics_table)
		memcpy(metrics_table, smu_table->metrics_table, table_size);

	return 0;
}

int smu_cmn_get_metrics_table(struct smu_context *smu,
			      void *metrics_table,
			      bool bypass_cache)
{
	int ret = 0;

	mutex_lock(&smu->metrics_lock);
	ret = smu_cmn_get_metrics_table_locked(smu,
					       metrics_table,
					       bypass_cache);
	mutex_unlock(&smu->metrics_lock);

	return ret;
}

void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
{
	struct metrics_table_header *header = (struct metrics_table_header *)table;
	uint16_t structure_size;

#define METRICS_VERSION(a, b)	((a << 16) | b)

	switch (METRICS_VERSION(frev, crev)) {
	case METRICS_VERSION(1, 0):
		structure_size = sizeof(struct gpu_metrics_v1_0);
		break;
	case METRICS_VERSION(1, 1):
		structure_size = sizeof(struct gpu_metrics_v1_1);
		break;
	case METRICS_VERSION(1, 2):
		structure_size = sizeof(struct gpu_metrics_v1_2);
		break;
	case METRICS_VERSION(1, 3):
		structure_size = sizeof(struct gpu_metrics_v1_3);
		break;
	case METRICS_VERSION(2, 0):
		structure_size = sizeof(struct gpu_metrics_v2_0);
		break;
	case METRICS_VERSION(2, 1):
		structure_size = sizeof(struct gpu_metrics_v2_1);
		break;
	case METRICS_VERSION(2, 2):
		structure_size = sizeof(struct gpu_metrics_v2_2);
		break;
	default:
		return;
	}

#undef METRICS_VERSION

	memset(header, 0xFF, structure_size);

	header->format_revision = frev;
	header->content_revision = crev;
	header->structure_size = structure_size;
}

int smu_cmn_set_mp1_state(struct smu_context *smu,
			  enum pp_mp1_state mp1_state)
{
	enum smu_message_type msg;
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		return 0;
	}

	ret = smu_cmn_send_smc_msg(smu, msg, NULL);
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

	return ret;
}

bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;
	bool snd_driver_loaded;

	/*
	 * If the ASIC comes with no audio function, we always assume
	 * it is "enabled".
	 */
	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
			adev->pdev->bus->number, 1);
	if (!p)
		return true;

	snd_driver_loaded = pci_is_enabled(p) ? true : false;

	pci_dev_put(p);

	return snd_driver_loaded;
}